xref: /openbmc/linux/net/core/skbuff.c (revision de799101)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  *	Routines having to do with the 'struct sk_buff' memory handlers.
41da177e4SLinus Torvalds  *
5113aa838SAlan Cox  *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
61da177e4SLinus Torvalds  *			Florian La Roche <rzsfl@rz.uni-sb.de>
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  *	Fixes:
91da177e4SLinus Torvalds  *		Alan Cox	:	Fixed the worst of the load
101da177e4SLinus Torvalds  *					balancer bugs.
111da177e4SLinus Torvalds  *		Dave Platt	:	Interrupt stacking fix.
121da177e4SLinus Torvalds  *	Richard Kooijman	:	Timestamp fixes.
131da177e4SLinus Torvalds  *		Alan Cox	:	Changed buffer format.
141da177e4SLinus Torvalds  *		Alan Cox	:	destructor hook for AF_UNIX etc.
151da177e4SLinus Torvalds  *		Linus Torvalds	:	Better skb_clone.
161da177e4SLinus Torvalds  *		Alan Cox	:	Added skb_copy.
171da177e4SLinus Torvalds  *		Alan Cox	:	Added all the changed routines Linus
181da177e4SLinus Torvalds  *					only put in the headers
191da177e4SLinus Torvalds  *		Ray VanTassle	:	Fixed --skb->lock in free
201da177e4SLinus Torvalds  *		Alan Cox	:	skb_copy copy arp field
211da177e4SLinus Torvalds  *		Andi Kleen	:	slabified it.
221da177e4SLinus Torvalds  *		Robert Olsson	:	Removed skb_head_pool
231da177e4SLinus Torvalds  *
241da177e4SLinus Torvalds  *	NOTE:
251da177e4SLinus Torvalds  *		The __skb_ routines should be called with interrupts
261da177e4SLinus Torvalds  *	disabled, or you better be *real* sure that the operation is atomic
271da177e4SLinus Torvalds  *	with respect to whatever list is being frobbed (e.g. via lock_sock()
281da177e4SLinus Torvalds  *	or via disabling bottom half handlers, etc).
291da177e4SLinus Torvalds  */
301da177e4SLinus Torvalds 
311da177e4SLinus Torvalds /*
321da177e4SLinus Torvalds  *	The functions in this file will not compile correctly with gcc 2.4.x
331da177e4SLinus Torvalds  */
341da177e4SLinus Torvalds 
35e005d193SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36e005d193SJoe Perches 
371da177e4SLinus Torvalds #include <linux/module.h>
381da177e4SLinus Torvalds #include <linux/types.h>
391da177e4SLinus Torvalds #include <linux/kernel.h>
401da177e4SLinus Torvalds #include <linux/mm.h>
411da177e4SLinus Torvalds #include <linux/interrupt.h>
421da177e4SLinus Torvalds #include <linux/in.h>
431da177e4SLinus Torvalds #include <linux/inet.h>
441da177e4SLinus Torvalds #include <linux/slab.h>
45de960aa9SFlorian Westphal #include <linux/tcp.h>
46de960aa9SFlorian Westphal #include <linux/udp.h>
4790017accSMarcelo Ricardo Leitner #include <linux/sctp.h>
481da177e4SLinus Torvalds #include <linux/netdevice.h>
491da177e4SLinus Torvalds #ifdef CONFIG_NET_CLS_ACT
501da177e4SLinus Torvalds #include <net/pkt_sched.h>
511da177e4SLinus Torvalds #endif
521da177e4SLinus Torvalds #include <linux/string.h>
531da177e4SLinus Torvalds #include <linux/skbuff.h>
549c55e01cSJens Axboe #include <linux/splice.h>
551da177e4SLinus Torvalds #include <linux/cache.h>
561da177e4SLinus Torvalds #include <linux/rtnetlink.h>
571da177e4SLinus Torvalds #include <linux/init.h>
58716ea3a7SDavid Howells #include <linux/scatterlist.h>
59ac45f602SPatrick Ohly #include <linux/errqueue.h>
60268bb0ceSLinus Torvalds #include <linux/prefetch.h>
610d5501c1SVlad Yasevich #include <linux/if_vlan.h>
622a2ea508SJohn Hurley #include <linux/mpls.h>
63183f47fcSSebastian Andrzej Siewior #include <linux/kcov.h>
641da177e4SLinus Torvalds 
651da177e4SLinus Torvalds #include <net/protocol.h>
661da177e4SLinus Torvalds #include <net/dst.h>
671da177e4SLinus Torvalds #include <net/sock.h>
681da177e4SLinus Torvalds #include <net/checksum.h>
69ed1f50c3SPaul Durrant #include <net/ip6_checksum.h>
701da177e4SLinus Torvalds #include <net/xfrm.h>
718822e270SJohn Hurley #include <net/mpls.h>
723ee17bc7SMat Martineau #include <net/mptcp.h>
7378476d31SJeremy Kerr #include <net/mctp.h>
746a5bcd84SIlias Apalodimas #include <net/page_pool.h>
751da177e4SLinus Torvalds 
767c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
77ad8d75ffSSteven Rostedt #include <trace/events/skb.h>
7851c56b00SEric Dumazet #include <linux/highmem.h>
79b245be1fSWillem de Bruijn #include <linux/capability.h>
80b245be1fSWillem de Bruijn #include <linux/user_namespace.h>
812544af03SMatteo Croce #include <linux/indirect_call_wrapper.h>
82a1f8e7f7SAl Viro 
837b7ed885SBart Van Assche #include "datagram.h"
847f678defSVasily Averin #include "sock_destructor.h"
857b7ed885SBart Van Assche 
8608009a76SAlexey Dobriyan struct kmem_cache *skbuff_head_cache __ro_after_init;
8708009a76SAlexey Dobriyan static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
88df5042f4SFlorian Westphal #ifdef CONFIG_SKB_EXTENSIONS
89df5042f4SFlorian Westphal static struct kmem_cache *skbuff_ext_cache __ro_after_init;
90df5042f4SFlorian Westphal #endif
915f74f82eSHans Westgaard Ry int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
925f74f82eSHans Westgaard Ry EXPORT_SYMBOL(sysctl_max_skb_frags);
931da177e4SLinus Torvalds 
941da177e4SLinus Torvalds /**
95f05de73bSJean Sacren  *	skb_panic - private function for out-of-line support
961da177e4SLinus Torvalds  *	@skb:	buffer
971da177e4SLinus Torvalds  *	@sz:	size
98f05de73bSJean Sacren  *	@addr:	address
9999d5851eSJames Hogan  *	@msg:	skb_over_panic or skb_under_panic
1001da177e4SLinus Torvalds  *
101f05de73bSJean Sacren  *	Out-of-line support for skb_put() and skb_push().
102f05de73bSJean Sacren  *	Called via the wrapper skb_over_panic() or skb_under_panic().
103f05de73bSJean Sacren  *	Keep out of line to prevent kernel bloat.
104f05de73bSJean Sacren  *	__builtin_return_address is not used because it is not always reliable.
1051da177e4SLinus Torvalds  */
106f05de73bSJean Sacren static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
10799d5851eSJames Hogan 		      const char msg[])
1081da177e4SLinus Torvalds {
10941a46913SJesper Dangaard Brouer 	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
11099d5851eSJames Hogan 		 msg, addr, skb->len, sz, skb->head, skb->data,
1114305b541SArnaldo Carvalho de Melo 		 (unsigned long)skb->tail, (unsigned long)skb->end,
11226095455SPatrick McHardy 		 skb->dev ? skb->dev->name : "<NULL>");
1131da177e4SLinus Torvalds 	BUG();
1141da177e4SLinus Torvalds }
1151da177e4SLinus Torvalds 
116f05de73bSJean Sacren static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
1171da177e4SLinus Torvalds {
118f05de73bSJean Sacren 	skb_panic(skb, sz, addr, __func__);
1191da177e4SLinus Torvalds }
1201da177e4SLinus Torvalds 
121f05de73bSJean Sacren static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
122f05de73bSJean Sacren {
123f05de73bSJean Sacren 	skb_panic(skb, sz, addr, __func__);
124f05de73bSJean Sacren }
125c93bdd0eSMel Gorman 
12650fad4b5SAlexander Lobakin #define NAPI_SKB_CACHE_SIZE	64
127f450d539SAlexander Lobakin #define NAPI_SKB_CACHE_BULK	16
128f450d539SAlexander Lobakin #define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)
12950fad4b5SAlexander Lobakin 
13050fad4b5SAlexander Lobakin struct napi_alloc_cache {
13150fad4b5SAlexander Lobakin 	struct page_frag_cache page;
13250fad4b5SAlexander Lobakin 	unsigned int skb_count;
13350fad4b5SAlexander Lobakin 	void *skb_cache[NAPI_SKB_CACHE_SIZE];
13450fad4b5SAlexander Lobakin };
13550fad4b5SAlexander Lobakin 
13650fad4b5SAlexander Lobakin static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
13750fad4b5SAlexander Lobakin static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
13850fad4b5SAlexander Lobakin 
13932e3573fSYajun Deng void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
14050fad4b5SAlexander Lobakin {
14150fad4b5SAlexander Lobakin 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
14250fad4b5SAlexander Lobakin 
14350fad4b5SAlexander Lobakin 	fragsz = SKB_DATA_ALIGN(fragsz);
14450fad4b5SAlexander Lobakin 
14532e3573fSYajun Deng 	return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
14650fad4b5SAlexander Lobakin }
14750fad4b5SAlexander Lobakin EXPORT_SYMBOL(__napi_alloc_frag_align);
14850fad4b5SAlexander Lobakin 
14950fad4b5SAlexander Lobakin void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
15050fad4b5SAlexander Lobakin {
15150fad4b5SAlexander Lobakin 	void *data;
15250fad4b5SAlexander Lobakin 
15350fad4b5SAlexander Lobakin 	fragsz = SKB_DATA_ALIGN(fragsz);
154afa79d08SChangbin Du 	if (in_hardirq() || irqs_disabled()) {
15532e3573fSYajun Deng 		struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
15632e3573fSYajun Deng 
15750fad4b5SAlexander Lobakin 		data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
15850fad4b5SAlexander Lobakin 	} else {
15932e3573fSYajun Deng 		struct napi_alloc_cache *nc;
16032e3573fSYajun Deng 
16150fad4b5SAlexander Lobakin 		local_bh_disable();
16232e3573fSYajun Deng 		nc = this_cpu_ptr(&napi_alloc_cache);
16332e3573fSYajun Deng 		data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
16450fad4b5SAlexander Lobakin 		local_bh_enable();
16550fad4b5SAlexander Lobakin 	}
16650fad4b5SAlexander Lobakin 	return data;
16750fad4b5SAlexander Lobakin }
16850fad4b5SAlexander Lobakin EXPORT_SYMBOL(__netdev_alloc_frag_align);
16950fad4b5SAlexander Lobakin 
170f450d539SAlexander Lobakin static struct sk_buff *napi_skb_cache_get(void)
171f450d539SAlexander Lobakin {
172f450d539SAlexander Lobakin 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
173f450d539SAlexander Lobakin 	struct sk_buff *skb;
174f450d539SAlexander Lobakin 
175f450d539SAlexander Lobakin 	if (unlikely(!nc->skb_count))
176f450d539SAlexander Lobakin 		nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
177f450d539SAlexander Lobakin 						      GFP_ATOMIC,
178f450d539SAlexander Lobakin 						      NAPI_SKB_CACHE_BULK,
179f450d539SAlexander Lobakin 						      nc->skb_cache);
180f450d539SAlexander Lobakin 	if (unlikely(!nc->skb_count))
181f450d539SAlexander Lobakin 		return NULL;
182f450d539SAlexander Lobakin 
183f450d539SAlexander Lobakin 	skb = nc->skb_cache[--nc->skb_count];
184f450d539SAlexander Lobakin 	kasan_unpoison_object_data(skbuff_head_cache, skb);
185f450d539SAlexander Lobakin 
186f450d539SAlexander Lobakin 	return skb;
187f450d539SAlexander Lobakin }
188f450d539SAlexander Lobakin 
189ba0509b6SJesper Dangaard Brouer /* Caller must provide SKB that is memset cleared */
190483126b3SAlexander Lobakin static void __build_skb_around(struct sk_buff *skb, void *data,
191483126b3SAlexander Lobakin 			       unsigned int frag_size)
192ba0509b6SJesper Dangaard Brouer {
193ba0509b6SJesper Dangaard Brouer 	struct skb_shared_info *shinfo;
194ba0509b6SJesper Dangaard Brouer 	unsigned int size = frag_size ? : ksize(data);
195ba0509b6SJesper Dangaard Brouer 
196ba0509b6SJesper Dangaard Brouer 	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
197ba0509b6SJesper Dangaard Brouer 
198ba0509b6SJesper Dangaard Brouer 	/* Assumes caller memset cleared SKB */
199ba0509b6SJesper Dangaard Brouer 	skb->truesize = SKB_TRUESIZE(size);
200ba0509b6SJesper Dangaard Brouer 	refcount_set(&skb->users, 1);
201ba0509b6SJesper Dangaard Brouer 	skb->head = data;
202ba0509b6SJesper Dangaard Brouer 	skb->data = data;
203ba0509b6SJesper Dangaard Brouer 	skb_reset_tail_pointer(skb);
204763087daSEric Dumazet 	skb_set_end_offset(skb, size);
205ba0509b6SJesper Dangaard Brouer 	skb->mac_header = (typeof(skb->mac_header))~0U;
206ba0509b6SJesper Dangaard Brouer 	skb->transport_header = (typeof(skb->transport_header))~0U;
207ba0509b6SJesper Dangaard Brouer 
208ba0509b6SJesper Dangaard Brouer 	/* make sure we initialize shinfo sequentially */
209ba0509b6SJesper Dangaard Brouer 	shinfo = skb_shinfo(skb);
210ba0509b6SJesper Dangaard Brouer 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
211ba0509b6SJesper Dangaard Brouer 	atomic_set(&shinfo->dataref, 1);
212ba0509b6SJesper Dangaard Brouer 
2136370cc3bSAleksandr Nogikh 	skb_set_kcov_handle(skb, kcov_common_handle());
214ba0509b6SJesper Dangaard Brouer }
215ba0509b6SJesper Dangaard Brouer 
2161da177e4SLinus Torvalds /**
2172ea2f62cSEric Dumazet  * __build_skb - build a network buffer
218b2b5ce9dSEric Dumazet  * @data: data buffer provided by caller
2192ea2f62cSEric Dumazet  * @frag_size: size of data, or 0 if head was kmalloced
220b2b5ce9dSEric Dumazet  *
221b2b5ce9dSEric Dumazet  * Allocate a new &sk_buff. Caller provides space holding head and
222deceb4c0SFlorian Fainelli  * skb_shared_info. @data must have been allocated by kmalloc() only if
2232ea2f62cSEric Dumazet  * @frag_size is 0, otherwise data should come from the page allocator
2242ea2f62cSEric Dumazet  *  or vmalloc()
225b2b5ce9dSEric Dumazet  * The return is the new skb buffer.
226b2b5ce9dSEric Dumazet  * On a failure the return is %NULL, and @data is not freed.
227b2b5ce9dSEric Dumazet  * Notes:
228b2b5ce9dSEric Dumazet  *  Before IO, the driver allocates only the data buffer, where the NIC puts
229b2b5ce9dSEric Dumazet  *  the incoming frame. The driver should add room at head (NET_SKB_PAD) and
230b2b5ce9dSEric Dumazet  *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
231b2b5ce9dSEric Dumazet  *  After IO, the driver calls build_skb() to allocate an sk_buff and populate
232b2b5ce9dSEric Dumazet  *  it before giving the packet to the stack.
233b2b5ce9dSEric Dumazet  *  RX rings only contain data buffers, not full skbs.
234b2b5ce9dSEric Dumazet  */
2352ea2f62cSEric Dumazet struct sk_buff *__build_skb(void *data, unsigned int frag_size)
236b2b5ce9dSEric Dumazet {
237b2b5ce9dSEric Dumazet 	struct sk_buff *skb;
238b2b5ce9dSEric Dumazet 
239b2b5ce9dSEric Dumazet 	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
240ba0509b6SJesper Dangaard Brouer 	if (unlikely(!skb))
241b2b5ce9dSEric Dumazet 		return NULL;
242b2b5ce9dSEric Dumazet 
243b2b5ce9dSEric Dumazet 	memset(skb, 0, offsetof(struct sk_buff, tail));
244483126b3SAlexander Lobakin 	__build_skb_around(skb, data, frag_size);
245b2b5ce9dSEric Dumazet 
246483126b3SAlexander Lobakin 	return skb;
247b2b5ce9dSEric Dumazet }
2482ea2f62cSEric Dumazet 
2492ea2f62cSEric Dumazet /* build_skb() is a wrapper over __build_skb() that additionally takes care
2502ea2f62cSEric Dumazet  * of skb->head_frag and skb->pfmemalloc.
2512ea2f62cSEric Dumazet  * This means that if @frag_size is not zero, then @data must be backed
2522ea2f62cSEric Dumazet  * by a page fragment, not kmalloc() or vmalloc().
2532ea2f62cSEric Dumazet  */
2542ea2f62cSEric Dumazet struct sk_buff *build_skb(void *data, unsigned int frag_size)
2552ea2f62cSEric Dumazet {
2562ea2f62cSEric Dumazet 	struct sk_buff *skb = __build_skb(data, frag_size);
2572ea2f62cSEric Dumazet 
2582ea2f62cSEric Dumazet 	if (skb && frag_size) {
2592ea2f62cSEric Dumazet 		skb->head_frag = 1;
2602f064f34SMichal Hocko 		if (page_is_pfmemalloc(virt_to_head_page(data)))
2612ea2f62cSEric Dumazet 			skb->pfmemalloc = 1;
2622ea2f62cSEric Dumazet 	}
2632ea2f62cSEric Dumazet 	return skb;
2642ea2f62cSEric Dumazet }
265b2b5ce9dSEric Dumazet EXPORT_SYMBOL(build_skb);
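
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file; the buffer "buf" and length "len" are hypothetical): a driver that
 * followed the sizing notes above, and whose NIC wrote the frame at
 * buf + NET_SKB_PAD, could wrap the buffer roughly like this:
 *
 *	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + len) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	struct sk_buff *skb = build_skb(buf, truesize);
 *
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, len);
 *	}
 *
 * skb_reserve() consumes the reserved headroom and skb_put() covers the
 * received frame.
 */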
266b2b5ce9dSEric Dumazet 
267ba0509b6SJesper Dangaard Brouer /**
268ba0509b6SJesper Dangaard Brouer  * build_skb_around - build a network buffer around provided skb
269ba0509b6SJesper Dangaard Brouer  * @skb: sk_buff provide by caller, must be memset cleared
270ba0509b6SJesper Dangaard Brouer  * @data: data buffer provided by caller
271ba0509b6SJesper Dangaard Brouer  * @frag_size: size of data, or 0 if head was kmalloced
272ba0509b6SJesper Dangaard Brouer  */
273ba0509b6SJesper Dangaard Brouer struct sk_buff *build_skb_around(struct sk_buff *skb,
274ba0509b6SJesper Dangaard Brouer 				 void *data, unsigned int frag_size)
275ba0509b6SJesper Dangaard Brouer {
276ba0509b6SJesper Dangaard Brouer 	if (unlikely(!skb))
277ba0509b6SJesper Dangaard Brouer 		return NULL;
278ba0509b6SJesper Dangaard Brouer 
279483126b3SAlexander Lobakin 	__build_skb_around(skb, data, frag_size);
280ba0509b6SJesper Dangaard Brouer 
281483126b3SAlexander Lobakin 	if (frag_size) {
282ba0509b6SJesper Dangaard Brouer 		skb->head_frag = 1;
283ba0509b6SJesper Dangaard Brouer 		if (page_is_pfmemalloc(virt_to_head_page(data)))
284ba0509b6SJesper Dangaard Brouer 			skb->pfmemalloc = 1;
285ba0509b6SJesper Dangaard Brouer 	}
286ba0509b6SJesper Dangaard Brouer 	return skb;
287ba0509b6SJesper Dangaard Brouer }
288ba0509b6SJesper Dangaard Brouer EXPORT_SYMBOL(build_skb_around);
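
/*
 * Illustrative sketch (editorial addition): callers that already own a
 * zeroed sk_buff, for example one taken from a bulk allocation out of
 * skbuff_head_cache, can attach a data buffer to it. "skb", "data" and
 * "frag_size" are hypothetical here:
 *
 *	memset(skb, 0, offsetof(struct sk_buff, tail));
 *	skb = build_skb_around(skb, data, frag_size);
 */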
289ba0509b6SJesper Dangaard Brouer 
290f450d539SAlexander Lobakin /**
291f450d539SAlexander Lobakin  * __napi_build_skb - build a network buffer
292f450d539SAlexander Lobakin  * @data: data buffer provided by caller
293f450d539SAlexander Lobakin  * @frag_size: size of data, or 0 if head was kmalloced
294f450d539SAlexander Lobakin  *
295f450d539SAlexander Lobakin  * Version of __build_skb() that uses NAPI percpu caches to obtain
296f450d539SAlexander Lobakin  * the skbuff_head instead of an in-place allocation.
297f450d539SAlexander Lobakin  *
298f450d539SAlexander Lobakin  * Returns a new &sk_buff on success, %NULL on allocation failure.
299f450d539SAlexander Lobakin  */
300f450d539SAlexander Lobakin static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
301f450d539SAlexander Lobakin {
302f450d539SAlexander Lobakin 	struct sk_buff *skb;
303f450d539SAlexander Lobakin 
304f450d539SAlexander Lobakin 	skb = napi_skb_cache_get();
305f450d539SAlexander Lobakin 	if (unlikely(!skb))
306f450d539SAlexander Lobakin 		return NULL;
307f450d539SAlexander Lobakin 
308f450d539SAlexander Lobakin 	memset(skb, 0, offsetof(struct sk_buff, tail));
309f450d539SAlexander Lobakin 	__build_skb_around(skb, data, frag_size);
310f450d539SAlexander Lobakin 
311f450d539SAlexander Lobakin 	return skb;
312f450d539SAlexander Lobakin }
313f450d539SAlexander Lobakin 
314f450d539SAlexander Lobakin /**
315f450d539SAlexander Lobakin  * napi_build_skb - build a network buffer
316f450d539SAlexander Lobakin  * @data: data buffer provided by caller
317f450d539SAlexander Lobakin  * @frag_size: size of data, or 0 if head was kmalloced
318f450d539SAlexander Lobakin  *
319f450d539SAlexander Lobakin  * Version of __napi_build_skb() that takes care of skb->head_frag
320f450d539SAlexander Lobakin  * and skb->pfmemalloc when the data is a page or page fragment.
321f450d539SAlexander Lobakin  *
322f450d539SAlexander Lobakin  * Returns a new &sk_buff on success, %NULL on allocation failure.
323f450d539SAlexander Lobakin  */
324f450d539SAlexander Lobakin struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
325f450d539SAlexander Lobakin {
326f450d539SAlexander Lobakin 	struct sk_buff *skb = __napi_build_skb(data, frag_size);
327f450d539SAlexander Lobakin 
328f450d539SAlexander Lobakin 	if (likely(skb) && frag_size) {
329f450d539SAlexander Lobakin 		skb->head_frag = 1;
330f450d539SAlexander Lobakin 		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
331f450d539SAlexander Lobakin 	}
332f450d539SAlexander Lobakin 
333f450d539SAlexander Lobakin 	return skb;
334f450d539SAlexander Lobakin }
335f450d539SAlexander Lobakin EXPORT_SYMBOL(napi_build_skb);
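
/*
 * Illustrative sketch (editorial addition; "data", "truesize", "headroom"
 * and "len" are hypothetical): a NAPI poll handler that carved "data" out
 * of a page fragment could build the skb from the per-CPU cache like this:
 *
 *	skb = napi_build_skb(data, truesize);
 *	if (unlikely(!skb))
 *		break;
 *	skb_reserve(skb, headroom);
 *	skb_put(skb, len);
 *
 * This must run in softirq (NAPI) context, since napi_skb_cache_get()
 * touches the per-CPU napi_alloc_cache without further protection.
 */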
336f450d539SAlexander Lobakin 
3375381b23dSAlexander Lobakin /*
3385381b23dSAlexander Lobakin  * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
3395381b23dSAlexander Lobakin  * the caller if emergency pfmemalloc reserves are being used. If they are and
3405381b23dSAlexander Lobakin  * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
3415381b23dSAlexander Lobakin  * may be used. Otherwise, the packet data may be discarded until enough
3425381b23dSAlexander Lobakin  * memory is free.
3435381b23dSAlexander Lobakin  */
344ef28095fSAlexander Lobakin static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
345ef28095fSAlexander Lobakin 			     bool *pfmemalloc)
3465381b23dSAlexander Lobakin {
3475381b23dSAlexander Lobakin 	void *obj;
3485381b23dSAlexander Lobakin 	bool ret_pfmemalloc = false;
3495381b23dSAlexander Lobakin 
3505381b23dSAlexander Lobakin 	/*
3515381b23dSAlexander Lobakin 	 * Try a regular allocation, when that fails and we're not entitled
3525381b23dSAlexander Lobakin 	 * to the reserves, fail.
3535381b23dSAlexander Lobakin 	 */
3545381b23dSAlexander Lobakin 	obj = kmalloc_node_track_caller(size,
3555381b23dSAlexander Lobakin 					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
3565381b23dSAlexander Lobakin 					node);
3575381b23dSAlexander Lobakin 	if (obj || !(gfp_pfmemalloc_allowed(flags)))
3585381b23dSAlexander Lobakin 		goto out;
3595381b23dSAlexander Lobakin 
3605381b23dSAlexander Lobakin 	/* Try again but now we are using pfmemalloc reserves */
3615381b23dSAlexander Lobakin 	ret_pfmemalloc = true;
3625381b23dSAlexander Lobakin 	obj = kmalloc_node_track_caller(size, flags, node);
3635381b23dSAlexander Lobakin 
3645381b23dSAlexander Lobakin out:
3655381b23dSAlexander Lobakin 	if (pfmemalloc)
3665381b23dSAlexander Lobakin 		*pfmemalloc = ret_pfmemalloc;
3675381b23dSAlexander Lobakin 
3685381b23dSAlexander Lobakin 	return obj;
3695381b23dSAlexander Lobakin }
3705381b23dSAlexander Lobakin 
3715381b23dSAlexander Lobakin /* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
3725381b23dSAlexander Lobakin  *	'private' fields and also do memory statistics to find all the
3735381b23dSAlexander Lobakin  *	[BEEP] leaks.
3745381b23dSAlexander Lobakin  *
3755381b23dSAlexander Lobakin  */
3765381b23dSAlexander Lobakin 
3775381b23dSAlexander Lobakin /**
3785381b23dSAlexander Lobakin  *	__alloc_skb	-	allocate a network buffer
3795381b23dSAlexander Lobakin  *	@size: size to allocate
3805381b23dSAlexander Lobakin  *	@gfp_mask: allocation mask
3815381b23dSAlexander Lobakin  *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
3825381b23dSAlexander Lobakin  *		instead of head cache and allocate a cloned (child) skb.
3835381b23dSAlexander Lobakin  *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
3845381b23dSAlexander Lobakin  *		allocations in case the data is required for writeback
3855381b23dSAlexander Lobakin  *	@node: numa node to allocate memory on
3865381b23dSAlexander Lobakin  *
3875381b23dSAlexander Lobakin  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
3885381b23dSAlexander Lobakin  *	tail room of at least size bytes. The object has a reference count
3895381b23dSAlexander Lobakin  *	of one. The return is the buffer. On a failure the return is %NULL.
3905381b23dSAlexander Lobakin  *
3915381b23dSAlexander Lobakin  *	Buffers may only be allocated from interrupts using a @gfp_mask of
3925381b23dSAlexander Lobakin  *	%GFP_ATOMIC.
3935381b23dSAlexander Lobakin  */
3945381b23dSAlexander Lobakin struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
3955381b23dSAlexander Lobakin 			    int flags, int node)
3965381b23dSAlexander Lobakin {
3975381b23dSAlexander Lobakin 	struct kmem_cache *cache;
3985381b23dSAlexander Lobakin 	struct sk_buff *skb;
399a5df6333SLi RongQing 	unsigned int osize;
4005381b23dSAlexander Lobakin 	bool pfmemalloc;
401a5df6333SLi RongQing 	u8 *data;
4025381b23dSAlexander Lobakin 
4035381b23dSAlexander Lobakin 	cache = (flags & SKB_ALLOC_FCLONE)
4045381b23dSAlexander Lobakin 		? skbuff_fclone_cache : skbuff_head_cache;
4055381b23dSAlexander Lobakin 
4065381b23dSAlexander Lobakin 	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
4075381b23dSAlexander Lobakin 		gfp_mask |= __GFP_MEMALLOC;
4085381b23dSAlexander Lobakin 
4095381b23dSAlexander Lobakin 	/* Get the HEAD */
410d13612b5SAlexander Lobakin 	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
411d13612b5SAlexander Lobakin 	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
412d13612b5SAlexander Lobakin 		skb = napi_skb_cache_get();
413d13612b5SAlexander Lobakin 	else
414d13612b5SAlexander Lobakin 		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
415df1ae022SAlexander Lobakin 	if (unlikely(!skb))
416df1ae022SAlexander Lobakin 		return NULL;
4175381b23dSAlexander Lobakin 	prefetchw(skb);
4185381b23dSAlexander Lobakin 
4195381b23dSAlexander Lobakin 	/* We do our best to align skb_shared_info on a separate cache
4205381b23dSAlexander Lobakin 	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
4215381b23dSAlexander Lobakin 	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
4225381b23dSAlexander Lobakin 	 * Both skb->head and skb_shared_info are cache line aligned.
4235381b23dSAlexander Lobakin 	 */
4245381b23dSAlexander Lobakin 	size = SKB_DATA_ALIGN(size);
4255381b23dSAlexander Lobakin 	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4265381b23dSAlexander Lobakin 	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
427df1ae022SAlexander Lobakin 	if (unlikely(!data))
4285381b23dSAlexander Lobakin 		goto nodata;
4295381b23dSAlexander Lobakin 	/* kmalloc(size) might give us more room than requested.
4305381b23dSAlexander Lobakin 	 * Put skb_shared_info exactly at the end of allocated zone,
4315381b23dSAlexander Lobakin 	 * to allow max possible filling before reallocation.
4325381b23dSAlexander Lobakin 	 */
433a5df6333SLi RongQing 	osize = ksize(data);
434a5df6333SLi RongQing 	size = SKB_WITH_OVERHEAD(osize);
4355381b23dSAlexander Lobakin 	prefetchw(data + size);
4365381b23dSAlexander Lobakin 
4375381b23dSAlexander Lobakin 	/*
4385381b23dSAlexander Lobakin 	 * Only clear those fields we need to clear, not those that we will
4395381b23dSAlexander Lobakin 	 * actually initialise below. Hence, don't put any more fields after
4405381b23dSAlexander Lobakin 	 * the tail pointer in struct sk_buff!
4415381b23dSAlexander Lobakin 	 */
4425381b23dSAlexander Lobakin 	memset(skb, 0, offsetof(struct sk_buff, tail));
443a5df6333SLi RongQing 	__build_skb_around(skb, data, osize);
4445381b23dSAlexander Lobakin 	skb->pfmemalloc = pfmemalloc;
4455381b23dSAlexander Lobakin 
4465381b23dSAlexander Lobakin 	if (flags & SKB_ALLOC_FCLONE) {
4475381b23dSAlexander Lobakin 		struct sk_buff_fclones *fclones;
4485381b23dSAlexander Lobakin 
4495381b23dSAlexander Lobakin 		fclones = container_of(skb, struct sk_buff_fclones, skb1);
4505381b23dSAlexander Lobakin 
4515381b23dSAlexander Lobakin 		skb->fclone = SKB_FCLONE_ORIG;
4525381b23dSAlexander Lobakin 		refcount_set(&fclones->fclone_ref, 1);
4535381b23dSAlexander Lobakin 
4545381b23dSAlexander Lobakin 		fclones->skb2.fclone = SKB_FCLONE_CLONE;
4555381b23dSAlexander Lobakin 	}
4565381b23dSAlexander Lobakin 
4575381b23dSAlexander Lobakin 	return skb;
458df1ae022SAlexander Lobakin 
4595381b23dSAlexander Lobakin nodata:
4605381b23dSAlexander Lobakin 	kmem_cache_free(cache, skb);
461df1ae022SAlexander Lobakin 	return NULL;
4625381b23dSAlexander Lobakin }
4635381b23dSAlexander Lobakin EXPORT_SYMBOL(__alloc_skb);
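
/*
 * Illustrative sketch (editorial addition; "hlen", "dlen" and "payload" are
 * hypothetical): most callers use the alloc_skb() wrapper from
 * <linux/skbuff.h> rather than calling __alloc_skb() directly, then reserve
 * headroom and fill the tail room:
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_ATOMIC);
 *
 *	if (skb) {
 *		skb_reserve(skb, hlen);
 *		skb_put_data(skb, payload, dlen);
 *	}
 */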
4645381b23dSAlexander Lobakin 
4657ba7aeabSSebastian Andrzej Siewior /**
466fd11a83dSAlexander Duyck  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
467fd11a83dSAlexander Duyck  *	@dev: network device to receive on
468d7499160SMasanari Iida  *	@len: length to allocate
469fd11a83dSAlexander Duyck  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
470fd11a83dSAlexander Duyck  *
471fd11a83dSAlexander Duyck  *	Allocate a new &sk_buff and assign it a usage count of one. The
472fd11a83dSAlexander Duyck  *	buffer has NET_SKB_PAD headroom built in. Users should allocate
473fd11a83dSAlexander Duyck  *	the headroom they think they need without accounting for the
474fd11a83dSAlexander Duyck  *	built in space. The built in space is used for optimisations.
475fd11a83dSAlexander Duyck  *
476fd11a83dSAlexander Duyck  *	%NULL is returned if there is no free memory.
477fd11a83dSAlexander Duyck  */
4789451980aSAlexander Duyck struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
4799451980aSAlexander Duyck 				   gfp_t gfp_mask)
480fd11a83dSAlexander Duyck {
481b63ae8caSAlexander Duyck 	struct page_frag_cache *nc;
482fd11a83dSAlexander Duyck 	struct sk_buff *skb;
4839451980aSAlexander Duyck 	bool pfmemalloc;
4849451980aSAlexander Duyck 	void *data;
485fd11a83dSAlexander Duyck 
4869451980aSAlexander Duyck 	len += NET_SKB_PAD;
487fd11a83dSAlexander Duyck 
48866c55602SAlexander Lobakin 	/* If requested length is either too small or too big,
48966c55602SAlexander Lobakin 	 * we use kmalloc() for skb->head allocation.
49066c55602SAlexander Lobakin 	 */
49166c55602SAlexander Lobakin 	if (len <= SKB_WITH_OVERHEAD(1024) ||
49266c55602SAlexander Lobakin 	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
493d0164adcSMel Gorman 	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
494a080e7bdSAlexander Duyck 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
495a080e7bdSAlexander Duyck 		if (!skb)
496a080e7bdSAlexander Duyck 			goto skb_fail;
497a080e7bdSAlexander Duyck 		goto skb_success;
498a080e7bdSAlexander Duyck 	}
4999451980aSAlexander Duyck 
5009451980aSAlexander Duyck 	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5019451980aSAlexander Duyck 	len = SKB_DATA_ALIGN(len);
5029451980aSAlexander Duyck 
5039451980aSAlexander Duyck 	if (sk_memalloc_socks())
5049451980aSAlexander Duyck 		gfp_mask |= __GFP_MEMALLOC;
5059451980aSAlexander Duyck 
506afa79d08SChangbin Du 	if (in_hardirq() || irqs_disabled()) {
5079451980aSAlexander Duyck 		nc = this_cpu_ptr(&netdev_alloc_cache);
5088c2dd3e4SAlexander Duyck 		data = page_frag_alloc(nc, len, gfp_mask);
5099451980aSAlexander Duyck 		pfmemalloc = nc->pfmemalloc;
51092dcabd7SSebastian Andrzej Siewior 	} else {
51192dcabd7SSebastian Andrzej Siewior 		local_bh_disable();
51292dcabd7SSebastian Andrzej Siewior 		nc = this_cpu_ptr(&napi_alloc_cache.page);
51392dcabd7SSebastian Andrzej Siewior 		data = page_frag_alloc(nc, len, gfp_mask);
51492dcabd7SSebastian Andrzej Siewior 		pfmemalloc = nc->pfmemalloc;
51592dcabd7SSebastian Andrzej Siewior 		local_bh_enable();
51692dcabd7SSebastian Andrzej Siewior 	}
5179451980aSAlexander Duyck 
5189451980aSAlexander Duyck 	if (unlikely(!data))
5199451980aSAlexander Duyck 		return NULL;
5209451980aSAlexander Duyck 
5219451980aSAlexander Duyck 	skb = __build_skb(data, len);
5229451980aSAlexander Duyck 	if (unlikely(!skb)) {
523181edb2bSAlexander Duyck 		skb_free_frag(data);
5249451980aSAlexander Duyck 		return NULL;
5259451980aSAlexander Duyck 	}
5269451980aSAlexander Duyck 
5279451980aSAlexander Duyck 	if (pfmemalloc)
5289451980aSAlexander Duyck 		skb->pfmemalloc = 1;
5299451980aSAlexander Duyck 	skb->head_frag = 1;
5309451980aSAlexander Duyck 
531a080e7bdSAlexander Duyck skb_success:
5328af27456SChristoph Hellwig 	skb_reserve(skb, NET_SKB_PAD);
5337b2e497aSChristoph Hellwig 	skb->dev = dev;
534fd11a83dSAlexander Duyck 
535a080e7bdSAlexander Duyck skb_fail:
5368af27456SChristoph Hellwig 	return skb;
5378af27456SChristoph Hellwig }
538b4ac530fSDavid S. Miller EXPORT_SYMBOL(__netdev_alloc_skb);
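
/*
 * Illustrative sketch (editorial addition; "rx_buf" and "pkt_len" are
 * hypothetical): drivers normally use the netdev_alloc_skb() wrapper, which
 * supplies GFP_ATOMIC, and only reserve headroom beyond the built-in
 * NET_SKB_PAD:
 *
 *	skb = netdev_alloc_skb(dev, pkt_len);
 *	if (!skb)
 *		return;
 *	skb_put_data(skb, rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */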
5391da177e4SLinus Torvalds 
540fd11a83dSAlexander Duyck /**
541fd11a83dSAlexander Duyck  *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
542fd11a83dSAlexander Duyck  *	@napi: napi instance this buffer was allocated for
543d7499160SMasanari Iida  *	@len: length to allocate
544fd11a83dSAlexander Duyck  *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
545fd11a83dSAlexander Duyck  *
546fd11a83dSAlexander Duyck  *	Allocate a new sk_buff for use in NAPI receive.  This function will
547fd11a83dSAlexander Duyck  *	attempt to allocate the head from a special reserved region used
548fd11a83dSAlexander Duyck  *	only for NAPI Rx allocation.  By doing this we can save several
549fd11a83dSAlexander Duyck  *	CPU cycles by avoiding having to disable and re-enable IRQs.
550fd11a83dSAlexander Duyck  *
551fd11a83dSAlexander Duyck  *	%NULL is returned if there is no free memory.
552fd11a83dSAlexander Duyck  */
5539451980aSAlexander Duyck struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
5549451980aSAlexander Duyck 				 gfp_t gfp_mask)
555fd11a83dSAlexander Duyck {
5563226b158SEric Dumazet 	struct napi_alloc_cache *nc;
557fd11a83dSAlexander Duyck 	struct sk_buff *skb;
5589451980aSAlexander Duyck 	void *data;
559fd11a83dSAlexander Duyck 
5609451980aSAlexander Duyck 	len += NET_SKB_PAD + NET_IP_ALIGN;
561fd11a83dSAlexander Duyck 
5623226b158SEric Dumazet 	/* If requested length is either too small or too big,
5633226b158SEric Dumazet 	 * we use kmalloc() for skb->head allocation.
5643226b158SEric Dumazet 	 */
5653226b158SEric Dumazet 	if (len <= SKB_WITH_OVERHEAD(1024) ||
5663226b158SEric Dumazet 	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
567d0164adcSMel Gorman 	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
568cfb8ec65SAlexander Lobakin 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
569cfb8ec65SAlexander Lobakin 				  NUMA_NO_NODE);
570a080e7bdSAlexander Duyck 		if (!skb)
571a080e7bdSAlexander Duyck 			goto skb_fail;
572a080e7bdSAlexander Duyck 		goto skb_success;
573a080e7bdSAlexander Duyck 	}
5749451980aSAlexander Duyck 
5753226b158SEric Dumazet 	nc = this_cpu_ptr(&napi_alloc_cache);
5769451980aSAlexander Duyck 	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5779451980aSAlexander Duyck 	len = SKB_DATA_ALIGN(len);
5789451980aSAlexander Duyck 
5799451980aSAlexander Duyck 	if (sk_memalloc_socks())
5809451980aSAlexander Duyck 		gfp_mask |= __GFP_MEMALLOC;
5819451980aSAlexander Duyck 
5828c2dd3e4SAlexander Duyck 	data = page_frag_alloc(&nc->page, len, gfp_mask);
5839451980aSAlexander Duyck 	if (unlikely(!data))
5849451980aSAlexander Duyck 		return NULL;
5859451980aSAlexander Duyck 
586cfb8ec65SAlexander Lobakin 	skb = __napi_build_skb(data, len);
5879451980aSAlexander Duyck 	if (unlikely(!skb)) {
588181edb2bSAlexander Duyck 		skb_free_frag(data);
5899451980aSAlexander Duyck 		return NULL;
5909451980aSAlexander Duyck 	}
5919451980aSAlexander Duyck 
592795bb1c0SJesper Dangaard Brouer 	if (nc->page.pfmemalloc)
5939451980aSAlexander Duyck 		skb->pfmemalloc = 1;
5949451980aSAlexander Duyck 	skb->head_frag = 1;
5959451980aSAlexander Duyck 
596a080e7bdSAlexander Duyck skb_success:
597fd11a83dSAlexander Duyck 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
598fd11a83dSAlexander Duyck 	skb->dev = napi->dev;
599fd11a83dSAlexander Duyck 
600a080e7bdSAlexander Duyck skb_fail:
601fd11a83dSAlexander Duyck 	return skb;
602fd11a83dSAlexander Duyck }
603fd11a83dSAlexander Duyck EXPORT_SYMBOL(__napi_alloc_skb);
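
/*
 * Illustrative sketch (editorial addition; "napi", "rx_buf", "pkt_len" and
 * "netdev" are hypothetical): inside a NAPI poll callback the
 * napi_alloc_skb() wrapper (which supplies GFP_ATOMIC) is the usual entry
 * point, and the skb is then fed to GRO:
 *
 *	skb = napi_alloc_skb(napi, pkt_len);
 *	if (unlikely(!skb))
 *		break;
 *	skb_put_data(skb, rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	napi_gro_receive(napi, skb);
 */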
604fd11a83dSAlexander Duyck 
605654bed16SPeter Zijlstra void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
60650269e19SEric Dumazet 		     int size, unsigned int truesize)
607654bed16SPeter Zijlstra {
608654bed16SPeter Zijlstra 	skb_fill_page_desc(skb, i, page, off, size);
609654bed16SPeter Zijlstra 	skb->len += size;
610654bed16SPeter Zijlstra 	skb->data_len += size;
61150269e19SEric Dumazet 	skb->truesize += truesize;
612654bed16SPeter Zijlstra }
613654bed16SPeter Zijlstra EXPORT_SYMBOL(skb_add_rx_frag);
614654bed16SPeter Zijlstra 
615f8e617e1SJason Wang void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
616f8e617e1SJason Wang 			  unsigned int truesize)
617f8e617e1SJason Wang {
618f8e617e1SJason Wang 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
619f8e617e1SJason Wang 
620f8e617e1SJason Wang 	skb_frag_size_add(frag, size);
621f8e617e1SJason Wang 	skb->len += size;
622f8e617e1SJason Wang 	skb->data_len += size;
623f8e617e1SJason Wang 	skb->truesize += truesize;
624f8e617e1SJason Wang }
625f8e617e1SJason Wang EXPORT_SYMBOL(skb_coalesce_rx_frag);
626f8e617e1SJason Wang 
62727b437c8SHerbert Xu static void skb_drop_list(struct sk_buff **listp)
6281da177e4SLinus Torvalds {
629bd8a7036SEric Dumazet 	kfree_skb_list(*listp);
63027b437c8SHerbert Xu 	*listp = NULL;
6311da177e4SLinus Torvalds }
6321da177e4SLinus Torvalds 
63327b437c8SHerbert Xu static inline void skb_drop_fraglist(struct sk_buff *skb)
63427b437c8SHerbert Xu {
63527b437c8SHerbert Xu 	skb_drop_list(&skb_shinfo(skb)->frag_list);
63627b437c8SHerbert Xu }
63727b437c8SHerbert Xu 
6381da177e4SLinus Torvalds static void skb_clone_fraglist(struct sk_buff *skb)
6391da177e4SLinus Torvalds {
6401da177e4SLinus Torvalds 	struct sk_buff *list;
6411da177e4SLinus Torvalds 
642fbb398a8SDavid S. Miller 	skb_walk_frags(skb, list)
6431da177e4SLinus Torvalds 		skb_get(list);
6441da177e4SLinus Torvalds }
6451da177e4SLinus Torvalds 
646d3836f21SEric Dumazet static void skb_free_head(struct sk_buff *skb)
647d3836f21SEric Dumazet {
648181edb2bSAlexander Duyck 	unsigned char *head = skb->head;
649181edb2bSAlexander Duyck 
6506a5bcd84SIlias Apalodimas 	if (skb->head_frag) {
6516a5bcd84SIlias Apalodimas 		if (skb_pp_recycle(skb, head))
6526a5bcd84SIlias Apalodimas 			return;
653181edb2bSAlexander Duyck 		skb_free_frag(head);
6546a5bcd84SIlias Apalodimas 	} else {
655181edb2bSAlexander Duyck 		kfree(head);
656d3836f21SEric Dumazet 	}
6576a5bcd84SIlias Apalodimas }
658d3836f21SEric Dumazet 
6595bba1712SAdrian Bunk static void skb_release_data(struct sk_buff *skb)
6601da177e4SLinus Torvalds {
661ff04a771SEric Dumazet 	struct skb_shared_info *shinfo = skb_shinfo(skb);
6621da177e4SLinus Torvalds 	int i;
663ff04a771SEric Dumazet 
664ff04a771SEric Dumazet 	if (skb->cloned &&
665ff04a771SEric Dumazet 	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
666ff04a771SEric Dumazet 			      &shinfo->dataref))
6672cc3aeb5SIlias Apalodimas 		goto exit;
668ff04a771SEric Dumazet 
66970c43167SJonathan Lemon 	skb_zcopy_clear(skb, true);
67070c43167SJonathan Lemon 
671ff04a771SEric Dumazet 	for (i = 0; i < shinfo->nr_frags; i++)
6726a5bcd84SIlias Apalodimas 		__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);
6731da177e4SLinus Torvalds 
674ff04a771SEric Dumazet 	if (shinfo->frag_list)
675ff04a771SEric Dumazet 		kfree_skb_list(shinfo->frag_list);
6761da177e4SLinus Torvalds 
677d3836f21SEric Dumazet 	skb_free_head(skb);
6782cc3aeb5SIlias Apalodimas exit:
6792cc3aeb5SIlias Apalodimas 	/* When we clone an SKB we copy the recycling bit. The pp_recycle
6802cc3aeb5SIlias Apalodimas 	 * bit is only set on the head though, so in order to avoid races
6812cc3aeb5SIlias Apalodimas 	 * while trying to recycle fragments on __skb_frag_unref() we need
6822cc3aeb5SIlias Apalodimas 	 * to make one SKB responsible for triggering the recycle path.
6832cc3aeb5SIlias Apalodimas 	 * So disable the recycling bit if an SKB is cloned and we have
68458e61e41STom Rix 	 * additional references to the fragmented part of the SKB.
6852cc3aeb5SIlias Apalodimas 	 * Eventually the last SKB will have the recycling bit set and its
6862cc3aeb5SIlias Apalodimas 	 * dataref set to 0, which will trigger the recycling.
6872cc3aeb5SIlias Apalodimas 	 */
6882cc3aeb5SIlias Apalodimas 	skb->pp_recycle = 0;
6891da177e4SLinus Torvalds }
6901da177e4SLinus Torvalds 
6911da177e4SLinus Torvalds /*
6921da177e4SLinus Torvalds  *	Free the skbuff memory without cleaning the state.
6931da177e4SLinus Torvalds  */
6942d4baff8SHerbert Xu static void kfree_skbmem(struct sk_buff *skb)
6951da177e4SLinus Torvalds {
696d0bf4a9eSEric Dumazet 	struct sk_buff_fclones *fclones;
697d179cd12SDavid S. Miller 
698d179cd12SDavid S. Miller 	switch (skb->fclone) {
699d179cd12SDavid S. Miller 	case SKB_FCLONE_UNAVAILABLE:
7001da177e4SLinus Torvalds 		kmem_cache_free(skbuff_head_cache, skb);
7016ffe75ebSEric Dumazet 		return;
702d179cd12SDavid S. Miller 
703d179cd12SDavid S. Miller 	case SKB_FCLONE_ORIG:
704d0bf4a9eSEric Dumazet 		fclones = container_of(skb, struct sk_buff_fclones, skb1);
7056ffe75ebSEric Dumazet 
7066ffe75ebSEric Dumazet 		/* We usually free the clone (TX completion) before original skb
7076ffe75ebSEric Dumazet 		 * This test would have no chance to be true for the clone,
7086ffe75ebSEric Dumazet 		 * while here, branch prediction will be good.
7096ffe75ebSEric Dumazet 		 */
7102638595aSReshetova, Elena 		if (refcount_read(&fclones->fclone_ref) == 1)
7116ffe75ebSEric Dumazet 			goto fastpath;
712d179cd12SDavid S. Miller 		break;
713d179cd12SDavid S. Miller 
7146ffe75ebSEric Dumazet 	default: /* SKB_FCLONE_CLONE */
715d0bf4a9eSEric Dumazet 		fclones = container_of(skb, struct sk_buff_fclones, skb2);
716d179cd12SDavid S. Miller 		break;
7173ff50b79SStephen Hemminger 	}
7182638595aSReshetova, Elena 	if (!refcount_dec_and_test(&fclones->fclone_ref))
7196ffe75ebSEric Dumazet 		return;
7206ffe75ebSEric Dumazet fastpath:
7216ffe75ebSEric Dumazet 	kmem_cache_free(skbuff_fclone_cache, fclones);
7221da177e4SLinus Torvalds }
7231da177e4SLinus Torvalds 
7240a463c78SPaolo Abeni void skb_release_head_state(struct sk_buff *skb)
7251da177e4SLinus Torvalds {
726adf30907SEric Dumazet 	skb_dst_drop(skb);
7271da177e4SLinus Torvalds 	if (skb->destructor) {
728afa79d08SChangbin Du 		WARN_ON(in_hardirq());
7291da177e4SLinus Torvalds 		skb->destructor(skb);
7301da177e4SLinus Torvalds 	}
731a3bf7ae9SIgor Maravić #if IS_ENABLED(CONFIG_NF_CONNTRACK)
732cb9c6836SFlorian Westphal 	nf_conntrack_put(skb_nfct(skb));
7332fc72c7bSKOVACS Krisztian #endif
734df5042f4SFlorian Westphal 	skb_ext_put(skb);
73504a4bb55SLennert Buytenhek }
73604a4bb55SLennert Buytenhek 
73704a4bb55SLennert Buytenhek /* Free everything but the sk_buff shell. */
73804a4bb55SLennert Buytenhek static void skb_release_all(struct sk_buff *skb)
73904a4bb55SLennert Buytenhek {
74004a4bb55SLennert Buytenhek 	skb_release_head_state(skb);
741a28b1b90SFlorian Westphal 	if (likely(skb->head))
7422d4baff8SHerbert Xu 		skb_release_data(skb);
7432d4baff8SHerbert Xu }
7441da177e4SLinus Torvalds 
7452d4baff8SHerbert Xu /**
7462d4baff8SHerbert Xu  *	__kfree_skb - private function
7472d4baff8SHerbert Xu  *	@skb: buffer
7482d4baff8SHerbert Xu  *
7492d4baff8SHerbert Xu  *	Free an sk_buff. Release anything attached to the buffer.
7502d4baff8SHerbert Xu  *	Clean the state. This is an internal helper function. Users should
7512d4baff8SHerbert Xu  *	always call kfree_skb().
7522d4baff8SHerbert Xu  */
7532d4baff8SHerbert Xu 
7542d4baff8SHerbert Xu void __kfree_skb(struct sk_buff *skb)
7552d4baff8SHerbert Xu {
7562d4baff8SHerbert Xu 	skb_release_all(skb);
7571da177e4SLinus Torvalds 	kfree_skbmem(skb);
7581da177e4SLinus Torvalds }
759b4ac530fSDavid S. Miller EXPORT_SYMBOL(__kfree_skb);
7601da177e4SLinus Torvalds 
7611da177e4SLinus Torvalds /**
762c504e5c2SMenglong Dong  *	kfree_skb_reason - free an sk_buff with special reason
763231d06aeSJörn Engel  *	@skb: buffer to free
764c504e5c2SMenglong Dong  *	@reason: reason why this skb is dropped
765231d06aeSJörn Engel  *
766231d06aeSJörn Engel  *	Drop a reference to the buffer and free it if the usage count has
767c504e5c2SMenglong Dong  *	hit zero. Meanwhile, pass the drop reason to the 'kfree_skb'
768c504e5c2SMenglong Dong  *	tracepoint.
769231d06aeSJörn Engel  */
770c504e5c2SMenglong Dong void kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
771231d06aeSJörn Engel {
7723889a803SPaolo Abeni 	if (!skb_unref(skb))
773231d06aeSJörn Engel 		return;
7743889a803SPaolo Abeni 
775c504e5c2SMenglong Dong 	trace_kfree_skb(skb, __builtin_return_address(0), reason);
776231d06aeSJörn Engel 	__kfree_skb(skb);
777231d06aeSJörn Engel }
778c504e5c2SMenglong Dong EXPORT_SYMBOL(kfree_skb_reason);
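
/*
 * Illustrative sketch (editorial addition): protocol code that knows why it
 * is discarding a packet can pass an enum skb_drop_reason value so the
 * kfree_skb tracepoint reports it, e.g.:
 *
 *	if (!sk) {
 *		kfree_skb_reason(skb, SKB_DROP_REASON_NO_SOCKET);
 *		return;
 *	}
 *
 * Plain kfree_skb() is equivalent to passing SKB_DROP_REASON_NOT_SPECIFIED.
 */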
779231d06aeSJörn Engel 
780bd8a7036SEric Dumazet void kfree_skb_list(struct sk_buff *segs)
781bd8a7036SEric Dumazet {
782bd8a7036SEric Dumazet 	while (segs) {
783bd8a7036SEric Dumazet 		struct sk_buff *next = segs->next;
784bd8a7036SEric Dumazet 
785bd8a7036SEric Dumazet 		kfree_skb(segs);
786bd8a7036SEric Dumazet 		segs = next;
787bd8a7036SEric Dumazet 	}
788bd8a7036SEric Dumazet }
789bd8a7036SEric Dumazet EXPORT_SYMBOL(kfree_skb_list);
790bd8a7036SEric Dumazet 
7916413139dSWillem de Bruijn /* Dump skb information and contents.
7926413139dSWillem de Bruijn  *
7936413139dSWillem de Bruijn  * Must only be called from net_ratelimit()-ed paths.
7946413139dSWillem de Bruijn  *
795302af7c6SVladimir Oltean  * Dumps whole packets if full_pkt, only headers otherwise.
7966413139dSWillem de Bruijn  */
7976413139dSWillem de Bruijn void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
7986413139dSWillem de Bruijn {
7996413139dSWillem de Bruijn 	struct skb_shared_info *sh = skb_shinfo(skb);
8006413139dSWillem de Bruijn 	struct net_device *dev = skb->dev;
8016413139dSWillem de Bruijn 	struct sock *sk = skb->sk;
8026413139dSWillem de Bruijn 	struct sk_buff *list_skb;
8036413139dSWillem de Bruijn 	bool has_mac, has_trans;
8046413139dSWillem de Bruijn 	int headroom, tailroom;
8056413139dSWillem de Bruijn 	int i, len, seg_len;
8066413139dSWillem de Bruijn 
8076413139dSWillem de Bruijn 	if (full_pkt)
8086413139dSWillem de Bruijn 		len = skb->len;
8096413139dSWillem de Bruijn 	else
8106413139dSWillem de Bruijn 		len = min_t(int, skb->len, MAX_HEADER + 128);
8116413139dSWillem de Bruijn 
8126413139dSWillem de Bruijn 	headroom = skb_headroom(skb);
8136413139dSWillem de Bruijn 	tailroom = skb_tailroom(skb);
8146413139dSWillem de Bruijn 
8156413139dSWillem de Bruijn 	has_mac = skb_mac_header_was_set(skb);
8166413139dSWillem de Bruijn 	has_trans = skb_transport_header_was_set(skb);
8176413139dSWillem de Bruijn 
8186413139dSWillem de Bruijn 	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
8196413139dSWillem de Bruijn 	       "mac=(%d,%d) net=(%d,%d) trans=%d\n"
8206413139dSWillem de Bruijn 	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
8216413139dSWillem de Bruijn 	       "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
8226413139dSWillem de Bruijn 	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
8236413139dSWillem de Bruijn 	       level, skb->len, headroom, skb_headlen(skb), tailroom,
8246413139dSWillem de Bruijn 	       has_mac ? skb->mac_header : -1,
8256413139dSWillem de Bruijn 	       has_mac ? skb_mac_header_len(skb) : -1,
8266413139dSWillem de Bruijn 	       skb->network_header,
8276413139dSWillem de Bruijn 	       has_trans ? skb_network_header_len(skb) : -1,
8286413139dSWillem de Bruijn 	       has_trans ? skb->transport_header : -1,
8296413139dSWillem de Bruijn 	       sh->tx_flags, sh->nr_frags,
8306413139dSWillem de Bruijn 	       sh->gso_size, sh->gso_type, sh->gso_segs,
8316413139dSWillem de Bruijn 	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
8326413139dSWillem de Bruijn 	       skb->csum_valid, skb->csum_level,
8336413139dSWillem de Bruijn 	       skb->hash, skb->sw_hash, skb->l4_hash,
8346413139dSWillem de Bruijn 	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);
8356413139dSWillem de Bruijn 
8366413139dSWillem de Bruijn 	if (dev)
8378a03ef67SGal Pressman 		printk("%sdev name=%s feat=%pNF\n",
8386413139dSWillem de Bruijn 		       level, dev->name, &dev->features);
8396413139dSWillem de Bruijn 	if (sk)
840db8051f3SQian Cai 		printk("%ssk family=%hu type=%u proto=%u\n",
8416413139dSWillem de Bruijn 		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);
8426413139dSWillem de Bruijn 
8436413139dSWillem de Bruijn 	if (full_pkt && headroom)
8446413139dSWillem de Bruijn 		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
8456413139dSWillem de Bruijn 			       16, 1, skb->head, headroom, false);
8466413139dSWillem de Bruijn 
8476413139dSWillem de Bruijn 	seg_len = min_t(int, skb_headlen(skb), len);
8486413139dSWillem de Bruijn 	if (seg_len)
8496413139dSWillem de Bruijn 		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET,
8506413139dSWillem de Bruijn 			       16, 1, skb->data, seg_len, false);
8516413139dSWillem de Bruijn 	len -= seg_len;
8526413139dSWillem de Bruijn 
8536413139dSWillem de Bruijn 	if (full_pkt && tailroom)
8546413139dSWillem de Bruijn 		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
8556413139dSWillem de Bruijn 			       16, 1, skb_tail_pointer(skb), tailroom, false);
8566413139dSWillem de Bruijn 
8576413139dSWillem de Bruijn 	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
8586413139dSWillem de Bruijn 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8596413139dSWillem de Bruijn 		u32 p_off, p_len, copied;
8606413139dSWillem de Bruijn 		struct page *p;
8616413139dSWillem de Bruijn 		u8 *vaddr;
8626413139dSWillem de Bruijn 
863b54c9d5bSJonathan Lemon 		skb_frag_foreach_page(frag, skb_frag_off(frag),
8646413139dSWillem de Bruijn 				      skb_frag_size(frag), p, p_off, p_len,
8656413139dSWillem de Bruijn 				      copied) {
8666413139dSWillem de Bruijn 			seg_len = min_t(int, p_len, len);
8676413139dSWillem de Bruijn 			vaddr = kmap_atomic(p);
8686413139dSWillem de Bruijn 			print_hex_dump(level, "skb frag:     ",
8696413139dSWillem de Bruijn 				       DUMP_PREFIX_OFFSET,
8706413139dSWillem de Bruijn 				       16, 1, vaddr + p_off, seg_len, false);
8716413139dSWillem de Bruijn 			kunmap_atomic(vaddr);
8726413139dSWillem de Bruijn 			len -= seg_len;
8736413139dSWillem de Bruijn 			if (!len)
8746413139dSWillem de Bruijn 				break;
8756413139dSWillem de Bruijn 		}
8766413139dSWillem de Bruijn 	}
8776413139dSWillem de Bruijn 
8786413139dSWillem de Bruijn 	if (full_pkt && skb_has_frag_list(skb)) {
8796413139dSWillem de Bruijn 		printk("skb fraglist:\n");
8806413139dSWillem de Bruijn 		skb_walk_frags(skb, list_skb)
8816413139dSWillem de Bruijn 			skb_dump(level, list_skb, true);
8826413139dSWillem de Bruijn 	}
8836413139dSWillem de Bruijn }
8846413139dSWillem de Bruijn EXPORT_SYMBOL(skb_dump);
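
/*
 * Illustrative sketch (editorial addition; hypothetical call site): because
 * skb_dump() is verbose, callers are expected to rate limit it as noted in
 * the comment above:
 *
 *	if (net_ratelimit())
 *		skb_dump(KERN_ERR, skb, false);
 */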
8856413139dSWillem de Bruijn 
886d1a203eaSStephen Hemminger /**
88725121173SMichael S. Tsirkin  *	skb_tx_error - report an sk_buff xmit error
88825121173SMichael S. Tsirkin  *	@skb: buffer that triggered an error
88925121173SMichael S. Tsirkin  *
89025121173SMichael S. Tsirkin  *	Report xmit error if a device callback is tracking this skb.
89125121173SMichael S. Tsirkin  *	skb must be freed afterwards.
89225121173SMichael S. Tsirkin  */
89325121173SMichael S. Tsirkin void skb_tx_error(struct sk_buff *skb)
89425121173SMichael S. Tsirkin {
8951f8b977aSWillem de Bruijn 	skb_zcopy_clear(skb, true);
89625121173SMichael S. Tsirkin }
89725121173SMichael S. Tsirkin EXPORT_SYMBOL(skb_tx_error);
89825121173SMichael S. Tsirkin 
899be769db2SHerbert Xu #ifdef CONFIG_TRACEPOINTS
90025121173SMichael S. Tsirkin /**
901ead2ceb0SNeil Horman  *	consume_skb - free an skbuff
902ead2ceb0SNeil Horman  *	@skb: buffer to free
903ead2ceb0SNeil Horman  *
904ead2ceb0SNeil Horman  *	Drop a ref to the buffer and free it if the usage count has hit zero.
905ead2ceb0SNeil Horman  *	Functions identically to kfree_skb(), but kfree_skb() assumes that the
906ead2ceb0SNeil Horman  *	frame is being dropped after a failure and notes that in a tracepoint.
907ead2ceb0SNeil Horman  */
908ead2ceb0SNeil Horman void consume_skb(struct sk_buff *skb)
909ead2ceb0SNeil Horman {
9103889a803SPaolo Abeni 	if (!skb_unref(skb))
911ead2ceb0SNeil Horman 		return;
9123889a803SPaolo Abeni 
91307dc22e7SKoki Sanagi 	trace_consume_skb(skb);
914ead2ceb0SNeil Horman 	__kfree_skb(skb);
915ead2ceb0SNeil Horman }
916ead2ceb0SNeil Horman EXPORT_SYMBOL(consume_skb);
917be769db2SHerbert Xu #endif
918ead2ceb0SNeil Horman 
9190a463c78SPaolo Abeni /**
920c1639be9SMauro Carvalho Chehab  *	__consume_stateless_skb - free an skbuff, assuming it is stateless
9210a463c78SPaolo Abeni  *	@skb: buffer to free
9220a463c78SPaolo Abeni  *
923ca2c1418SPaolo Abeni  *	Like consume_skb(), but this variant assumes that this is the last
924ca2c1418SPaolo Abeni  *	skb reference and all the head states have already been dropped.
9250a463c78SPaolo Abeni  */
926ca2c1418SPaolo Abeni void __consume_stateless_skb(struct sk_buff *skb)
9270a463c78SPaolo Abeni {
9280a463c78SPaolo Abeni 	trace_consume_skb(skb);
9290a463c78SPaolo Abeni 	skb_release_data(skb);
9300a463c78SPaolo Abeni 	kfree_skbmem(skb);
9310a463c78SPaolo Abeni }
9320a463c78SPaolo Abeni 
933f450d539SAlexander Lobakin static void napi_skb_cache_put(struct sk_buff *skb)
934795bb1c0SJesper Dangaard Brouer {
935795bb1c0SJesper Dangaard Brouer 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
936f450d539SAlexander Lobakin 	u32 i;
937795bb1c0SJesper Dangaard Brouer 
938f450d539SAlexander Lobakin 	kasan_poison_object_data(skbuff_head_cache, skb);
939795bb1c0SJesper Dangaard Brouer 	nc->skb_cache[nc->skb_count++] = skb;
940795bb1c0SJesper Dangaard Brouer 
941795bb1c0SJesper Dangaard Brouer 	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
942f450d539SAlexander Lobakin 		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
943f450d539SAlexander Lobakin 			kasan_unpoison_object_data(skbuff_head_cache,
944f450d539SAlexander Lobakin 						   nc->skb_cache[i]);
945f450d539SAlexander Lobakin 
946f450d539SAlexander Lobakin 		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_HALF,
947f450d539SAlexander Lobakin 				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
948f450d539SAlexander Lobakin 		nc->skb_count = NAPI_SKB_CACHE_HALF;
949795bb1c0SJesper Dangaard Brouer 	}
950795bb1c0SJesper Dangaard Brouer }
951f450d539SAlexander Lobakin 
95215fad714SJesper Dangaard Brouer void __kfree_skb_defer(struct sk_buff *skb)
95315fad714SJesper Dangaard Brouer {
9549243adfcSAlexander Lobakin 	skb_release_all(skb);
9559243adfcSAlexander Lobakin 	napi_skb_cache_put(skb);
9569243adfcSAlexander Lobakin }
9579243adfcSAlexander Lobakin 
9589243adfcSAlexander Lobakin void napi_skb_free_stolen_head(struct sk_buff *skb)
9599243adfcSAlexander Lobakin {
9609efb4b5bSPaolo Abeni 	if (unlikely(skb->slow_gro)) {
9618550ff8dSPaul Blakey 		nf_reset_ct(skb);
9629243adfcSAlexander Lobakin 		skb_dst_drop(skb);
9639243adfcSAlexander Lobakin 		skb_ext_put(skb);
9645e10da53SPaolo Abeni 		skb_orphan(skb);
9659efb4b5bSPaolo Abeni 		skb->slow_gro = 0;
9669efb4b5bSPaolo Abeni 	}
967f450d539SAlexander Lobakin 	napi_skb_cache_put(skb);
96815fad714SJesper Dangaard Brouer }
969795bb1c0SJesper Dangaard Brouer 
970795bb1c0SJesper Dangaard Brouer void napi_consume_skb(struct sk_buff *skb, int budget)
971795bb1c0SJesper Dangaard Brouer {
972885eb0a5SJesper Dangaard Brouer 	/* Zero budget indicates a non-NAPI context called us, like netpoll */
973795bb1c0SJesper Dangaard Brouer 	if (unlikely(!budget)) {
974885eb0a5SJesper Dangaard Brouer 		dev_consume_skb_any(skb);
975795bb1c0SJesper Dangaard Brouer 		return;
976795bb1c0SJesper Dangaard Brouer 	}
977795bb1c0SJesper Dangaard Brouer 
9786454eca8SYunsheng Lin 	lockdep_assert_in_softirq();
9796454eca8SYunsheng Lin 
9807608894eSPaolo Abeni 	if (!skb_unref(skb))
981795bb1c0SJesper Dangaard Brouer 		return;
9827608894eSPaolo Abeni 
983795bb1c0SJesper Dangaard Brouer 	/* if reaching here SKB is ready to free */
984795bb1c0SJesper Dangaard Brouer 	trace_consume_skb(skb);
985795bb1c0SJesper Dangaard Brouer 
986795bb1c0SJesper Dangaard Brouer 	/* if SKB is a clone, don't handle this case */
987abbdb5a7SEric Dumazet 	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
988795bb1c0SJesper Dangaard Brouer 		__kfree_skb(skb);
989795bb1c0SJesper Dangaard Brouer 		return;
990795bb1c0SJesper Dangaard Brouer 	}
991795bb1c0SJesper Dangaard Brouer 
9929243adfcSAlexander Lobakin 	skb_release_all(skb);
993f450d539SAlexander Lobakin 	napi_skb_cache_put(skb);
994795bb1c0SJesper Dangaard Brouer }
995795bb1c0SJesper Dangaard Brouer EXPORT_SYMBOL(napi_consume_skb);
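
/*
 * Illustrative sketch (editorial addition; the tx_ring_* helpers are
 * hypothetical): TX completion handlers running from NAPI poll pass their
 * remaining budget so freed skb heads can go back to the per-CPU cache;
 * a zero budget falls back to dev_consume_skb_any() as handled above:
 *
 *	while (tx_ring_has_completions(ring)) {
 *		struct sk_buff *skb = tx_ring_next_completed(ring);
 *
 *		napi_consume_skb(skb, budget);
 *	}
 */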
996795bb1c0SJesper Dangaard Brouer 
99703f61041SKees Cook /* Make sure a field is contained by headers group */
998b1937227SEric Dumazet #define CHECK_SKB_FIELD(field) \
99903f61041SKees Cook 	BUILD_BUG_ON(offsetof(struct sk_buff, field) !=		\
100003f61041SKees Cook 		     offsetof(struct sk_buff, headers.field));	\
1001b1937227SEric Dumazet 
1002dec18810SHerbert Xu static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
1003dec18810SHerbert Xu {
1004dec18810SHerbert Xu 	new->tstamp		= old->tstamp;
1005b1937227SEric Dumazet 	/* We do not copy old->sk */
1006dec18810SHerbert Xu 	new->dev		= old->dev;
1007b1937227SEric Dumazet 	memcpy(new->cb, old->cb, sizeof(old->cb));
10087fee226aSEric Dumazet 	skb_dst_copy(new, old);
1009df5042f4SFlorian Westphal 	__skb_ext_copy(new, old);
1010b1937227SEric Dumazet 	__nf_copy(new, old, false);
10116aa895b0SPatrick McHardy 
101203f61041SKees Cook 	/* Note : this field could be in the headers group.
1013b1937227SEric Dumazet 	 * It is not yet because we do not want to have a 16 bit hole
1014b1937227SEric Dumazet 	 */
1015b1937227SEric Dumazet 	new->queue_mapping = old->queue_mapping;
101606021292SEliezer Tamir 
101703f61041SKees Cook 	memcpy(&new->headers, &old->headers, sizeof(new->headers));
1018b1937227SEric Dumazet 	CHECK_SKB_FIELD(protocol);
1019b1937227SEric Dumazet 	CHECK_SKB_FIELD(csum);
1020b1937227SEric Dumazet 	CHECK_SKB_FIELD(hash);
1021b1937227SEric Dumazet 	CHECK_SKB_FIELD(priority);
1022b1937227SEric Dumazet 	CHECK_SKB_FIELD(skb_iif);
1023b1937227SEric Dumazet 	CHECK_SKB_FIELD(vlan_proto);
1024b1937227SEric Dumazet 	CHECK_SKB_FIELD(vlan_tci);
1025b1937227SEric Dumazet 	CHECK_SKB_FIELD(transport_header);
1026b1937227SEric Dumazet 	CHECK_SKB_FIELD(network_header);
1027b1937227SEric Dumazet 	CHECK_SKB_FIELD(mac_header);
1028b1937227SEric Dumazet 	CHECK_SKB_FIELD(inner_protocol);
1029b1937227SEric Dumazet 	CHECK_SKB_FIELD(inner_transport_header);
1030b1937227SEric Dumazet 	CHECK_SKB_FIELD(inner_network_header);
1031b1937227SEric Dumazet 	CHECK_SKB_FIELD(inner_mac_header);
1032b1937227SEric Dumazet 	CHECK_SKB_FIELD(mark);
1033b1937227SEric Dumazet #ifdef CONFIG_NETWORK_SECMARK
1034b1937227SEric Dumazet 	CHECK_SKB_FIELD(secmark);
1035b1937227SEric Dumazet #endif
1036e0d1095aSCong Wang #ifdef CONFIG_NET_RX_BUSY_POLL
1037b1937227SEric Dumazet 	CHECK_SKB_FIELD(napi_id);
103806021292SEliezer Tamir #endif
10392bd82484SEric Dumazet #ifdef CONFIG_XPS
10402bd82484SEric Dumazet 	CHECK_SKB_FIELD(sender_cpu);
10412bd82484SEric Dumazet #endif
1042b1937227SEric Dumazet #ifdef CONFIG_NET_SCHED
1043b1937227SEric Dumazet 	CHECK_SKB_FIELD(tc_index);
1044b1937227SEric Dumazet #endif
1045b1937227SEric Dumazet 
1046dec18810SHerbert Xu }
1047dec18810SHerbert Xu 
104882c49a35SHerbert Xu /*
104982c49a35SHerbert Xu  * You should not add any new code to this function.  Add it to
105082c49a35SHerbert Xu  * __copy_skb_header above instead.
105182c49a35SHerbert Xu  */
1052e0053ec0SHerbert Xu static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
10531da177e4SLinus Torvalds {
10541da177e4SLinus Torvalds #define C(x) n->x = skb->x
10551da177e4SLinus Torvalds 
10561da177e4SLinus Torvalds 	n->next = n->prev = NULL;
10571da177e4SLinus Torvalds 	n->sk = NULL;
1058dec18810SHerbert Xu 	__copy_skb_header(n, skb);
1059dec18810SHerbert Xu 
10601da177e4SLinus Torvalds 	C(len);
10611da177e4SLinus Torvalds 	C(data_len);
10623e6b3b2eSAlexey Dobriyan 	C(mac_len);
1063334a8132SPatrick McHardy 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
106402f1c89dSPaul Moore 	n->cloned = 1;
10651da177e4SLinus Torvalds 	n->nohdr = 0;
1066b13dda9fSEric Dumazet 	n->peeked = 0;
1067e78bfb07SStefano Brivio 	C(pfmemalloc);
10686a5bcd84SIlias Apalodimas 	C(pp_recycle);
10691da177e4SLinus Torvalds 	n->destructor = NULL;
10701da177e4SLinus Torvalds 	C(tail);
10711da177e4SLinus Torvalds 	C(end);
107202f1c89dSPaul Moore 	C(head);
1073d3836f21SEric Dumazet 	C(head_frag);
107402f1c89dSPaul Moore 	C(data);
107502f1c89dSPaul Moore 	C(truesize);
107663354797SReshetova, Elena 	refcount_set(&n->users, 1);
10771da177e4SLinus Torvalds 
10781da177e4SLinus Torvalds 	atomic_inc(&(skb_shinfo(skb)->dataref));
10791da177e4SLinus Torvalds 	skb->cloned = 1;
10801da177e4SLinus Torvalds 
10811da177e4SLinus Torvalds 	return n;
1082e0053ec0SHerbert Xu #undef C
1083e0053ec0SHerbert Xu }
1084e0053ec0SHerbert Xu 
1085e0053ec0SHerbert Xu /**
1086da29e4b4SJakub Kicinski  * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
1087da29e4b4SJakub Kicinski  * @first: first sk_buff of the msg
1088da29e4b4SJakub Kicinski  */
1089da29e4b4SJakub Kicinski struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
1090da29e4b4SJakub Kicinski {
1091da29e4b4SJakub Kicinski 	struct sk_buff *n;
1092da29e4b4SJakub Kicinski 
1093da29e4b4SJakub Kicinski 	n = alloc_skb(0, GFP_ATOMIC);
1094da29e4b4SJakub Kicinski 	if (!n)
1095da29e4b4SJakub Kicinski 		return NULL;
1096da29e4b4SJakub Kicinski 
1097da29e4b4SJakub Kicinski 	n->len = first->len;
1098da29e4b4SJakub Kicinski 	n->data_len = first->len;
1099da29e4b4SJakub Kicinski 	n->truesize = first->truesize;
1100da29e4b4SJakub Kicinski 
1101da29e4b4SJakub Kicinski 	skb_shinfo(n)->frag_list = first;
1102da29e4b4SJakub Kicinski 
1103da29e4b4SJakub Kicinski 	__copy_skb_header(n, first);
1104da29e4b4SJakub Kicinski 	n->destructor = NULL;
1105da29e4b4SJakub Kicinski 
1106da29e4b4SJakub Kicinski 	return n;
1107da29e4b4SJakub Kicinski }
1108da29e4b4SJakub Kicinski EXPORT_SYMBOL_GPL(alloc_skb_for_msg);
1109da29e4b4SJakub Kicinski 
1110da29e4b4SJakub Kicinski /**
1111e0053ec0SHerbert Xu  *	skb_morph	-	morph one skb into another
1112e0053ec0SHerbert Xu  *	@dst: the skb to receive the contents
1113e0053ec0SHerbert Xu  *	@src: the skb to supply the contents
1114e0053ec0SHerbert Xu  *
1115e0053ec0SHerbert Xu  *	This is identical to skb_clone except that the target skb is
1116e0053ec0SHerbert Xu  *	supplied by the user.
1117e0053ec0SHerbert Xu  *
1118e0053ec0SHerbert Xu  *	The target skb is returned upon exit.
1119e0053ec0SHerbert Xu  */
1120e0053ec0SHerbert Xu struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
1121e0053ec0SHerbert Xu {
11222d4baff8SHerbert Xu 	skb_release_all(dst);
1123e0053ec0SHerbert Xu 	return __skb_clone(dst, src);
1124e0053ec0SHerbert Xu }
1125e0053ec0SHerbert Xu EXPORT_SYMBOL_GPL(skb_morph);
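
/* Example (illustrative sketch, not part of the original source): skb_morph()
 * lets the caller reuse an sk_buff it already owns as the clone target.
 * Below, the hypothetical 'spare' buffer has its old state released and
 * becomes a clone of 'skb', sharing the same packet data:
 *
 *	struct sk_buff *clone = skb_morph(spare, skb);
 */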
1126e0053ec0SHerbert Xu 
11276f89dbceSSowmini Varadhan int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
1128a91dbff5SWillem de Bruijn {
1129a91dbff5SWillem de Bruijn 	unsigned long max_pg, num_pg, new_pg, old_pg;
1130a91dbff5SWillem de Bruijn 	struct user_struct *user;
1131a91dbff5SWillem de Bruijn 
1132a91dbff5SWillem de Bruijn 	if (capable(CAP_IPC_LOCK) || !size)
1133a91dbff5SWillem de Bruijn 		return 0;
1134a91dbff5SWillem de Bruijn 
1135a91dbff5SWillem de Bruijn 	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
1136a91dbff5SWillem de Bruijn 	max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1137a91dbff5SWillem de Bruijn 	user = mmp->user ? : current_user();
1138a91dbff5SWillem de Bruijn 
1139a91dbff5SWillem de Bruijn 	do {
1140a91dbff5SWillem de Bruijn 		old_pg = atomic_long_read(&user->locked_vm);
1141a91dbff5SWillem de Bruijn 		new_pg = old_pg + num_pg;
1142a91dbff5SWillem de Bruijn 		if (new_pg > max_pg)
1143a91dbff5SWillem de Bruijn 			return -ENOBUFS;
1144a91dbff5SWillem de Bruijn 	} while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
1145a91dbff5SWillem de Bruijn 		 old_pg);
1146a91dbff5SWillem de Bruijn 
1147a91dbff5SWillem de Bruijn 	if (!mmp->user) {
1148a91dbff5SWillem de Bruijn 		mmp->user = get_uid(user);
1149a91dbff5SWillem de Bruijn 		mmp->num_pg = num_pg;
1150a91dbff5SWillem de Bruijn 	} else {
1151a91dbff5SWillem de Bruijn 		mmp->num_pg += num_pg;
1152a91dbff5SWillem de Bruijn 	}
1153a91dbff5SWillem de Bruijn 
1154a91dbff5SWillem de Bruijn 	return 0;
1155a91dbff5SWillem de Bruijn }
11566f89dbceSSowmini Varadhan EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
1157a91dbff5SWillem de Bruijn 
11586f89dbceSSowmini Varadhan void mm_unaccount_pinned_pages(struct mmpin *mmp)
1159a91dbff5SWillem de Bruijn {
1160a91dbff5SWillem de Bruijn 	if (mmp->user) {
1161a91dbff5SWillem de Bruijn 		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
1162a91dbff5SWillem de Bruijn 		free_uid(mmp->user);
1163a91dbff5SWillem de Bruijn 	}
1164a91dbff5SWillem de Bruijn }
11656f89dbceSSowmini Varadhan EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
1166a91dbff5SWillem de Bruijn 
11678c793822SJonathan Lemon struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size)
116852267790SWillem de Bruijn {
116952267790SWillem de Bruijn 	struct ubuf_info *uarg;
117052267790SWillem de Bruijn 	struct sk_buff *skb;
117152267790SWillem de Bruijn 
117252267790SWillem de Bruijn 	WARN_ON_ONCE(!in_task());
117352267790SWillem de Bruijn 
117452267790SWillem de Bruijn 	skb = sock_omalloc(sk, 0, GFP_KERNEL);
117552267790SWillem de Bruijn 	if (!skb)
117652267790SWillem de Bruijn 		return NULL;
117752267790SWillem de Bruijn 
117852267790SWillem de Bruijn 	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
117952267790SWillem de Bruijn 	uarg = (void *)skb->cb;
1180a91dbff5SWillem de Bruijn 	uarg->mmp.user = NULL;
1181a91dbff5SWillem de Bruijn 
1182a91dbff5SWillem de Bruijn 	if (mm_account_pinned_pages(&uarg->mmp, size)) {
1183a91dbff5SWillem de Bruijn 		kfree_skb(skb);
1184a91dbff5SWillem de Bruijn 		return NULL;
1185a91dbff5SWillem de Bruijn 	}
118652267790SWillem de Bruijn 
11878c793822SJonathan Lemon 	uarg->callback = msg_zerocopy_callback;
11884ab6c99dSWillem de Bruijn 	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
11894ab6c99dSWillem de Bruijn 	uarg->len = 1;
11904ab6c99dSWillem de Bruijn 	uarg->bytelen = size;
119152267790SWillem de Bruijn 	uarg->zerocopy = 1;
119204c2d33eSJonathan Lemon 	uarg->flags = SKBFL_ZEROCOPY_FRAG;
1193c1d1b437SEric Dumazet 	refcount_set(&uarg->refcnt, 1);
119452267790SWillem de Bruijn 	sock_hold(sk);
119552267790SWillem de Bruijn 
119652267790SWillem de Bruijn 	return uarg;
119752267790SWillem de Bruijn }
11988c793822SJonathan Lemon EXPORT_SYMBOL_GPL(msg_zerocopy_alloc);
119952267790SWillem de Bruijn 
120052267790SWillem de Bruijn static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
120152267790SWillem de Bruijn {
120252267790SWillem de Bruijn 	return container_of((void *)uarg, struct sk_buff, cb);
120352267790SWillem de Bruijn }
120452267790SWillem de Bruijn 
12058c793822SJonathan Lemon struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
12064ab6c99dSWillem de Bruijn 				       struct ubuf_info *uarg)
12074ab6c99dSWillem de Bruijn {
12084ab6c99dSWillem de Bruijn 	if (uarg) {
12094ab6c99dSWillem de Bruijn 		const u32 byte_limit = 1 << 19;		/* limit to a few TSO packets */
12104ab6c99dSWillem de Bruijn 		u32 bytelen, next;
12114ab6c99dSWillem de Bruijn 
12124ab6c99dSWillem de Bruijn 		/* realloc only when socket is locked (TCP, UDP cork),
12134ab6c99dSWillem de Bruijn 		 * so uarg->len and sk_zckey access is serialized
12144ab6c99dSWillem de Bruijn 		 */
12154ab6c99dSWillem de Bruijn 		if (!sock_owned_by_user(sk)) {
12164ab6c99dSWillem de Bruijn 			WARN_ON_ONCE(1);
12174ab6c99dSWillem de Bruijn 			return NULL;
12184ab6c99dSWillem de Bruijn 		}
12194ab6c99dSWillem de Bruijn 
12204ab6c99dSWillem de Bruijn 		bytelen = uarg->bytelen + size;
12214ab6c99dSWillem de Bruijn 		if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
12224ab6c99dSWillem de Bruijn 			/* TCP can create new skb to attach new uarg */
12234ab6c99dSWillem de Bruijn 			if (sk->sk_type == SOCK_STREAM)
12244ab6c99dSWillem de Bruijn 				goto new_alloc;
12254ab6c99dSWillem de Bruijn 			return NULL;
12264ab6c99dSWillem de Bruijn 		}
12274ab6c99dSWillem de Bruijn 
12284ab6c99dSWillem de Bruijn 		next = (u32)atomic_read(&sk->sk_zckey);
12294ab6c99dSWillem de Bruijn 		if ((u32)(uarg->id + uarg->len) == next) {
1230a91dbff5SWillem de Bruijn 			if (mm_account_pinned_pages(&uarg->mmp, size))
1231a91dbff5SWillem de Bruijn 				return NULL;
12324ab6c99dSWillem de Bruijn 			uarg->len++;
12334ab6c99dSWillem de Bruijn 			uarg->bytelen = bytelen;
12344ab6c99dSWillem de Bruijn 			atomic_set(&sk->sk_zckey, ++next);
1235100f6d8eSWillem de Bruijn 
1236100f6d8eSWillem de Bruijn 			/* no extra ref when appending to datagram (MSG_MORE) */
1237100f6d8eSWillem de Bruijn 			if (sk->sk_type == SOCK_STREAM)
12388e044917SJonathan Lemon 				net_zcopy_get(uarg);
1239100f6d8eSWillem de Bruijn 
12404ab6c99dSWillem de Bruijn 			return uarg;
12414ab6c99dSWillem de Bruijn 		}
12424ab6c99dSWillem de Bruijn 	}
12434ab6c99dSWillem de Bruijn 
12444ab6c99dSWillem de Bruijn new_alloc:
12458c793822SJonathan Lemon 	return msg_zerocopy_alloc(sk, size);
12464ab6c99dSWillem de Bruijn }
12478c793822SJonathan Lemon EXPORT_SYMBOL_GPL(msg_zerocopy_realloc);
12484ab6c99dSWillem de Bruijn 
12494ab6c99dSWillem de Bruijn static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
12504ab6c99dSWillem de Bruijn {
12514ab6c99dSWillem de Bruijn 	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
12524ab6c99dSWillem de Bruijn 	u32 old_lo, old_hi;
12534ab6c99dSWillem de Bruijn 	u64 sum_len;
12544ab6c99dSWillem de Bruijn 
12554ab6c99dSWillem de Bruijn 	old_lo = serr->ee.ee_info;
12564ab6c99dSWillem de Bruijn 	old_hi = serr->ee.ee_data;
12574ab6c99dSWillem de Bruijn 	sum_len = old_hi - old_lo + 1ULL + len;
12584ab6c99dSWillem de Bruijn 
12594ab6c99dSWillem de Bruijn 	if (sum_len >= (1ULL << 32))
12604ab6c99dSWillem de Bruijn 		return false;
12614ab6c99dSWillem de Bruijn 
12624ab6c99dSWillem de Bruijn 	if (lo != old_hi + 1)
12634ab6c99dSWillem de Bruijn 		return false;
12644ab6c99dSWillem de Bruijn 
12654ab6c99dSWillem de Bruijn 	serr->ee.ee_data += len;
12664ab6c99dSWillem de Bruijn 	return true;
12674ab6c99dSWillem de Bruijn }
12684ab6c99dSWillem de Bruijn 
12698c793822SJonathan Lemon static void __msg_zerocopy_callback(struct ubuf_info *uarg)
127052267790SWillem de Bruijn {
12714ab6c99dSWillem de Bruijn 	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
127252267790SWillem de Bruijn 	struct sock_exterr_skb *serr;
127352267790SWillem de Bruijn 	struct sock *sk = skb->sk;
12744ab6c99dSWillem de Bruijn 	struct sk_buff_head *q;
12754ab6c99dSWillem de Bruijn 	unsigned long flags;
12763bdd5ee0SWillem de Bruijn 	bool is_zerocopy;
12774ab6c99dSWillem de Bruijn 	u32 lo, hi;
12784ab6c99dSWillem de Bruijn 	u16 len;
127952267790SWillem de Bruijn 
1280ccaffff1SWillem de Bruijn 	mm_unaccount_pinned_pages(&uarg->mmp);
1281ccaffff1SWillem de Bruijn 
12824ab6c99dSWillem de Bruijn 	/* if !len, there was only 1 call, and it was aborted
12834ab6c99dSWillem de Bruijn 	 * so do not queue a completion notification
12844ab6c99dSWillem de Bruijn 	 */
12854ab6c99dSWillem de Bruijn 	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
128652267790SWillem de Bruijn 		goto release;
128752267790SWillem de Bruijn 
12884ab6c99dSWillem de Bruijn 	len = uarg->len;
12894ab6c99dSWillem de Bruijn 	lo = uarg->id;
12904ab6c99dSWillem de Bruijn 	hi = uarg->id + len - 1;
12913bdd5ee0SWillem de Bruijn 	is_zerocopy = uarg->zerocopy;
12924ab6c99dSWillem de Bruijn 
129352267790SWillem de Bruijn 	serr = SKB_EXT_ERR(skb);
129452267790SWillem de Bruijn 	memset(serr, 0, sizeof(*serr));
129552267790SWillem de Bruijn 	serr->ee.ee_errno = 0;
129652267790SWillem de Bruijn 	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
12974ab6c99dSWillem de Bruijn 	serr->ee.ee_data = hi;
12984ab6c99dSWillem de Bruijn 	serr->ee.ee_info = lo;
12993bdd5ee0SWillem de Bruijn 	if (!is_zerocopy)
130052267790SWillem de Bruijn 		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
130152267790SWillem de Bruijn 
13024ab6c99dSWillem de Bruijn 	q = &sk->sk_error_queue;
13034ab6c99dSWillem de Bruijn 	spin_lock_irqsave(&q->lock, flags);
13044ab6c99dSWillem de Bruijn 	tail = skb_peek_tail(q);
13054ab6c99dSWillem de Bruijn 	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
13064ab6c99dSWillem de Bruijn 	    !skb_zerocopy_notify_extend(tail, lo, len)) {
13074ab6c99dSWillem de Bruijn 		__skb_queue_tail(q, skb);
130852267790SWillem de Bruijn 		skb = NULL;
13094ab6c99dSWillem de Bruijn 	}
13104ab6c99dSWillem de Bruijn 	spin_unlock_irqrestore(&q->lock, flags);
131152267790SWillem de Bruijn 
1312e3ae2365SAlexander Aring 	sk_error_report(sk);
131352267790SWillem de Bruijn 
131452267790SWillem de Bruijn release:
131552267790SWillem de Bruijn 	consume_skb(skb);
131652267790SWillem de Bruijn 	sock_put(sk);
131752267790SWillem de Bruijn }
131875518851SJonathan Lemon 
13198c793822SJonathan Lemon void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
132036177832SJonathan Lemon 			   bool success)
132175518851SJonathan Lemon {
132275518851SJonathan Lemon 	uarg->zerocopy = uarg->zerocopy & success;
132375518851SJonathan Lemon 
132475518851SJonathan Lemon 	if (refcount_dec_and_test(&uarg->refcnt))
13258c793822SJonathan Lemon 		__msg_zerocopy_callback(uarg);
132675518851SJonathan Lemon }
13278c793822SJonathan Lemon EXPORT_SYMBOL_GPL(msg_zerocopy_callback);
132852267790SWillem de Bruijn 
13298c793822SJonathan Lemon void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
133052267790SWillem de Bruijn {
133152267790SWillem de Bruijn 	struct sock *sk = skb_from_uarg(uarg)->sk;
133252267790SWillem de Bruijn 
133352267790SWillem de Bruijn 	atomic_dec(&sk->sk_zckey);
13344ab6c99dSWillem de Bruijn 	uarg->len--;
133552267790SWillem de Bruijn 
133652900d22SWillem de Bruijn 	if (have_uref)
13378c793822SJonathan Lemon 		msg_zerocopy_callback(NULL, uarg, true);
133852267790SWillem de Bruijn }
13398c793822SJonathan Lemon EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);
134052267790SWillem de Bruijn 
1341b5947e5dSWillem de Bruijn int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
1342b5947e5dSWillem de Bruijn {
1343b5947e5dSWillem de Bruijn 	return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
1344b5947e5dSWillem de Bruijn }
1345b5947e5dSWillem de Bruijn EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);
1346b5947e5dSWillem de Bruijn 
134752267790SWillem de Bruijn int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
134852267790SWillem de Bruijn 			     struct msghdr *msg, int len,
134952267790SWillem de Bruijn 			     struct ubuf_info *uarg)
135052267790SWillem de Bruijn {
13514ab6c99dSWillem de Bruijn 	struct ubuf_info *orig_uarg = skb_zcopy(skb);
135252267790SWillem de Bruijn 	struct iov_iter orig_iter = msg->msg_iter;
135352267790SWillem de Bruijn 	int err, orig_len = skb->len;
135452267790SWillem de Bruijn 
13554ab6c99dSWillem de Bruijn 	/* An skb can only point to one uarg. This edge case happens when
13564ab6c99dSWillem de Bruijn 	 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
13574ab6c99dSWillem de Bruijn 	 */
13584ab6c99dSWillem de Bruijn 	if (orig_uarg && uarg != orig_uarg)
13594ab6c99dSWillem de Bruijn 		return -EEXIST;
13604ab6c99dSWillem de Bruijn 
136152267790SWillem de Bruijn 	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
136252267790SWillem de Bruijn 	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
136354d43117SWillem de Bruijn 		struct sock *save_sk = skb->sk;
136454d43117SWillem de Bruijn 
136552267790SWillem de Bruijn 		/* Streams do not free skb on error. Reset to prev state. */
136652267790SWillem de Bruijn 		msg->msg_iter = orig_iter;
136754d43117SWillem de Bruijn 		skb->sk = sk;
136852267790SWillem de Bruijn 		___pskb_trim(skb, orig_len);
136954d43117SWillem de Bruijn 		skb->sk = save_sk;
137052267790SWillem de Bruijn 		return err;
137152267790SWillem de Bruijn 	}
137252267790SWillem de Bruijn 
137352900d22SWillem de Bruijn 	skb_zcopy_set(skb, uarg, NULL);
137452267790SWillem de Bruijn 	return skb->len - orig_len;
137552267790SWillem de Bruijn }
137652267790SWillem de Bruijn EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
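
/* Example (illustrative sketch, not part of the original source): a stream
 * protocol's sendmsg path typically obtains a ubuf_info via
 * msg_zerocopy_realloc() and then appends user pages to the current skb;
 * error handling below is abbreviated.
 *
 *	uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb));
 *	if (!uarg)
 *		goto fall_back_to_copy;
 *	copied = skb_zerocopy_iter_stream(sk, skb, msg, size, uarg);
 *	if (copied < 0)
 *		goto handle_error;	(-EEXIST, -EMSGSIZE or -EFAULT)
 */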
137752267790SWillem de Bruijn 
13781f8b977aSWillem de Bruijn static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
137952267790SWillem de Bruijn 			      gfp_t gfp_mask)
138052267790SWillem de Bruijn {
138152267790SWillem de Bruijn 	if (skb_zcopy(orig)) {
138252267790SWillem de Bruijn 		if (skb_zcopy(nskb)) {
138352267790SWillem de Bruijn 			/* callers passing !gfp_mask are verified to ensure !skb_zcopy(nskb) */
138452267790SWillem de Bruijn 			if (!gfp_mask) {
138552267790SWillem de Bruijn 				WARN_ON_ONCE(1);
138652267790SWillem de Bruijn 				return -ENOMEM;
138752267790SWillem de Bruijn 			}
138852267790SWillem de Bruijn 			if (skb_uarg(nskb) == skb_uarg(orig))
138952267790SWillem de Bruijn 				return 0;
139052267790SWillem de Bruijn 			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
139152267790SWillem de Bruijn 				return -EIO;
139252267790SWillem de Bruijn 		}
139352900d22SWillem de Bruijn 		skb_zcopy_set(nskb, skb_uarg(orig), NULL);
139452267790SWillem de Bruijn 	}
139552267790SWillem de Bruijn 	return 0;
139652267790SWillem de Bruijn }
139752267790SWillem de Bruijn 
13982c53040fSBen Hutchings /**
13992c53040fSBen Hutchings  *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
140048c83012SMichael S. Tsirkin  *	@skb: the skb to modify
140148c83012SMichael S. Tsirkin  *	@gfp_mask: allocation priority
140248c83012SMichael S. Tsirkin  *
140306b4feb3SJonathan Lemon  *	This must be called on skb with SKBFL_ZEROCOPY_ENABLE.
140448c83012SMichael S. Tsirkin  *	It will copy all frags into kernel and drop the reference
140548c83012SMichael S. Tsirkin  *	to userspace pages.
140648c83012SMichael S. Tsirkin  *
140748c83012SMichael S. Tsirkin  *	If this function is called from interrupt context, @gfp_mask must be
140848c83012SMichael S. Tsirkin  *	%GFP_ATOMIC.
140948c83012SMichael S. Tsirkin  *
141048c83012SMichael S. Tsirkin  *	Returns 0 on success or a negative error code on failure
141148c83012SMichael S. Tsirkin  *	to allocate kernel memory to copy to.
141248c83012SMichael S. Tsirkin  */
141348c83012SMichael S. Tsirkin int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1414a6686f2fSShirley Ma {
1415a6686f2fSShirley Ma 	int num_frags = skb_shinfo(skb)->nr_frags;
1416a6686f2fSShirley Ma 	struct page *page, *head = NULL;
14173ece7826SWillem de Bruijn 	int i, new_frags;
14183ece7826SWillem de Bruijn 	u32 d_off;
1419a6686f2fSShirley Ma 
14203ece7826SWillem de Bruijn 	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
14213ece7826SWillem de Bruijn 		return -EINVAL;
14223ece7826SWillem de Bruijn 
1423f72c4ac6SWillem de Bruijn 	if (!num_frags)
1424f72c4ac6SWillem de Bruijn 		goto release;
1425f72c4ac6SWillem de Bruijn 
14263ece7826SWillem de Bruijn 	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
14273ece7826SWillem de Bruijn 	for (i = 0; i < new_frags; i++) {
142802756ed4SKrishna Kumar 		page = alloc_page(gfp_mask);
1429a6686f2fSShirley Ma 		if (!page) {
1430a6686f2fSShirley Ma 			while (head) {
143140dadff2SSunghan Suh 				struct page *next = (struct page *)page_private(head);
1432a6686f2fSShirley Ma 				put_page(head);
1433a6686f2fSShirley Ma 				head = next;
1434a6686f2fSShirley Ma 			}
1435a6686f2fSShirley Ma 			return -ENOMEM;
1436a6686f2fSShirley Ma 		}
14373ece7826SWillem de Bruijn 		set_page_private(page, (unsigned long)head);
14383ece7826SWillem de Bruijn 		head = page;
14393ece7826SWillem de Bruijn 	}
14403ece7826SWillem de Bruijn 
14413ece7826SWillem de Bruijn 	page = head;
14423ece7826SWillem de Bruijn 	d_off = 0;
14433ece7826SWillem de Bruijn 	for (i = 0; i < num_frags; i++) {
14443ece7826SWillem de Bruijn 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
14453ece7826SWillem de Bruijn 		u32 p_off, p_len, copied;
14463ece7826SWillem de Bruijn 		struct page *p;
14473ece7826SWillem de Bruijn 		u8 *vaddr;
1448c613c209SWillem de Bruijn 
1449b54c9d5bSJonathan Lemon 		skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
1450c613c209SWillem de Bruijn 				      p, p_off, p_len, copied) {
14513ece7826SWillem de Bruijn 			u32 copy, done = 0;
1452c613c209SWillem de Bruijn 			vaddr = kmap_atomic(p);
14533ece7826SWillem de Bruijn 
14543ece7826SWillem de Bruijn 			while (done < p_len) {
14553ece7826SWillem de Bruijn 				if (d_off == PAGE_SIZE) {
14563ece7826SWillem de Bruijn 					d_off = 0;
14573ece7826SWillem de Bruijn 					page = (struct page *)page_private(page);
14583ece7826SWillem de Bruijn 				}
14593ece7826SWillem de Bruijn 				copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
14603ece7826SWillem de Bruijn 				memcpy(page_address(page) + d_off,
14613ece7826SWillem de Bruijn 				       vaddr + p_off + done, copy);
14623ece7826SWillem de Bruijn 				done += copy;
14633ece7826SWillem de Bruijn 				d_off += copy;
14643ece7826SWillem de Bruijn 			}
146551c56b00SEric Dumazet 			kunmap_atomic(vaddr);
1466c613c209SWillem de Bruijn 		}
1467a6686f2fSShirley Ma 	}
1468a6686f2fSShirley Ma 
1469a6686f2fSShirley Ma 	/* skb frags release userspace buffers */
147002756ed4SKrishna Kumar 	for (i = 0; i < num_frags; i++)
1471a8605c60SIan Campbell 		skb_frag_unref(skb, i);
1472a6686f2fSShirley Ma 
1473a6686f2fSShirley Ma 	/* skb frags point to kernel buffers */
14743ece7826SWillem de Bruijn 	for (i = 0; i < new_frags - 1; i++) {
14753ece7826SWillem de Bruijn 		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
147640dadff2SSunghan Suh 		head = (struct page *)page_private(head);
1477a6686f2fSShirley Ma 	}
14783ece7826SWillem de Bruijn 	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
14793ece7826SWillem de Bruijn 	skb_shinfo(skb)->nr_frags = new_frags;
148048c83012SMichael S. Tsirkin 
1481b90ddd56SWillem de Bruijn release:
14821f8b977aSWillem de Bruijn 	skb_zcopy_clear(skb, false);
1483a6686f2fSShirley Ma 	return 0;
1484a6686f2fSShirley Ma }
1485dcc0fb78SMichael S. Tsirkin EXPORT_SYMBOL_GPL(skb_copy_ubufs);
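
/* Example (illustrative sketch, not part of the original source): callers
 * rarely invoke skb_copy_ubufs() directly; paths that may hold on to the
 * data reach it through skb_orphan_frags()/skb_orphan_frags_rx(), which
 * only call it when the skb still references user pages.
 *
 *	if (skb_orphan_frags_rx(skb, GFP_ATOMIC))
 *		goto drop;
 */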
1486a6686f2fSShirley Ma 
1487e0053ec0SHerbert Xu /**
1488e0053ec0SHerbert Xu  *	skb_clone	-	duplicate an sk_buff
1489e0053ec0SHerbert Xu  *	@skb: buffer to clone
1490e0053ec0SHerbert Xu  *	@gfp_mask: allocation priority
1491e0053ec0SHerbert Xu  *
1492e0053ec0SHerbert Xu  *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
1493e0053ec0SHerbert Xu  *	copies share the same packet data but not structure. The new
1494e0053ec0SHerbert Xu  *	buffer has a reference count of 1. If the allocation fails the
1495e0053ec0SHerbert Xu  *	function returns %NULL otherwise the new buffer is returned.
1496e0053ec0SHerbert Xu  *
1497e0053ec0SHerbert Xu  *	If this function is called from interrupt context, @gfp_mask must be
1498e0053ec0SHerbert Xu  *	%GFP_ATOMIC.
1499e0053ec0SHerbert Xu  */
1500e0053ec0SHerbert Xu 
1501e0053ec0SHerbert Xu struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
1502e0053ec0SHerbert Xu {
1503d0bf4a9eSEric Dumazet 	struct sk_buff_fclones *fclones = container_of(skb,
1504d0bf4a9eSEric Dumazet 						       struct sk_buff_fclones,
1505d0bf4a9eSEric Dumazet 						       skb1);
15066ffe75ebSEric Dumazet 	struct sk_buff *n;
1507e0053ec0SHerbert Xu 
150870008aa5SMichael S. Tsirkin 	if (skb_orphan_frags(skb, gfp_mask))
1509a6686f2fSShirley Ma 		return NULL;
1510a6686f2fSShirley Ma 
1511e0053ec0SHerbert Xu 	if (skb->fclone == SKB_FCLONE_ORIG &&
15122638595aSReshetova, Elena 	    refcount_read(&fclones->fclone_ref) == 1) {
15136ffe75ebSEric Dumazet 		n = &fclones->skb2;
15142638595aSReshetova, Elena 		refcount_set(&fclones->fclone_ref, 2);
1515e0053ec0SHerbert Xu 	} else {
1516c93bdd0eSMel Gorman 		if (skb_pfmemalloc(skb))
1517c93bdd0eSMel Gorman 			gfp_mask |= __GFP_MEMALLOC;
1518c93bdd0eSMel Gorman 
1519e0053ec0SHerbert Xu 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
1520e0053ec0SHerbert Xu 		if (!n)
1521e0053ec0SHerbert Xu 			return NULL;
1522fe55f6d5SVegard Nossum 
1523e0053ec0SHerbert Xu 		n->fclone = SKB_FCLONE_UNAVAILABLE;
1524e0053ec0SHerbert Xu 	}
1525e0053ec0SHerbert Xu 
1526e0053ec0SHerbert Xu 	return __skb_clone(n, skb);
15271da177e4SLinus Torvalds }
1528b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_clone);
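
/* Example (illustrative sketch, not part of the original source): cloning is
 * the usual way to hand the same packet data to a second consumer without
 * copying the payload; only the struct sk_buff metadata is duplicated.
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (nskb)
 *		netif_rx(nskb);		(deliver the clone, keep using skb)
 */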
15291da177e4SLinus Torvalds 
1530b0768a86SToshiaki Makita void skb_headers_offset_update(struct sk_buff *skb, int off)
1531f5b17294SPravin B Shelar {
1532030737bcSEric Dumazet 	/* Only adjust this if it actually is csum_start rather than csum */
1533030737bcSEric Dumazet 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1534030737bcSEric Dumazet 		skb->csum_start += off;
1535f5b17294SPravin B Shelar 	/* {transport,network,mac}_header and tail are relative to skb->head */
1536f5b17294SPravin B Shelar 	skb->transport_header += off;
1537f5b17294SPravin B Shelar 	skb->network_header   += off;
1538f5b17294SPravin B Shelar 	if (skb_mac_header_was_set(skb))
1539f5b17294SPravin B Shelar 		skb->mac_header += off;
1540f5b17294SPravin B Shelar 	skb->inner_transport_header += off;
1541f5b17294SPravin B Shelar 	skb->inner_network_header += off;
1542aefbd2b3SPravin B Shelar 	skb->inner_mac_header += off;
1543f5b17294SPravin B Shelar }
1544b0768a86SToshiaki Makita EXPORT_SYMBOL(skb_headers_offset_update);
1545f5b17294SPravin B Shelar 
154608303c18SIlya Lesokhin void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
15471da177e4SLinus Torvalds {
1548dec18810SHerbert Xu 	__copy_skb_header(new, old);
1549dec18810SHerbert Xu 
15507967168cSHerbert Xu 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
15517967168cSHerbert Xu 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
15527967168cSHerbert Xu 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
15531da177e4SLinus Torvalds }
155408303c18SIlya Lesokhin EXPORT_SYMBOL(skb_copy_header);
15551da177e4SLinus Torvalds 
1556c93bdd0eSMel Gorman static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1557c93bdd0eSMel Gorman {
1558c93bdd0eSMel Gorman 	if (skb_pfmemalloc(skb))
1559c93bdd0eSMel Gorman 		return SKB_ALLOC_RX;
1560c93bdd0eSMel Gorman 	return 0;
1561c93bdd0eSMel Gorman }
1562c93bdd0eSMel Gorman 
15631da177e4SLinus Torvalds /**
15641da177e4SLinus Torvalds  *	skb_copy	-	create private copy of an sk_buff
15651da177e4SLinus Torvalds  *	@skb: buffer to copy
15661da177e4SLinus Torvalds  *	@gfp_mask: allocation priority
15671da177e4SLinus Torvalds  *
15681da177e4SLinus Torvalds  *	Make a copy of both an &sk_buff and its data. This is used when the
15691da177e4SLinus Torvalds  *	caller wishes to modify the data and needs a private copy of the
15701da177e4SLinus Torvalds  *	data to alter. Returns %NULL on failure or the pointer to the buffer
15711da177e4SLinus Torvalds  *	on success. The returned buffer has a reference count of 1.
15721da177e4SLinus Torvalds  *
15731da177e4SLinus Torvalds  *	As by-product this function converts non-linear &sk_buff to linear
15741da177e4SLinus Torvalds  *	one, so that &sk_buff becomes completely private and caller is allowed
15751da177e4SLinus Torvalds  *	to modify all the data of returned buffer. This means that this
15761da177e4SLinus Torvalds  *	function is not recommended for use in circumstances when only
15771da177e4SLinus Torvalds  *	header is going to be modified. Use pskb_copy() instead.
15781da177e4SLinus Torvalds  */
15791da177e4SLinus Torvalds 
1580dd0fc66fSAl Viro struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
15811da177e4SLinus Torvalds {
15826602cebbSEric Dumazet 	int headerlen = skb_headroom(skb);
1583ec47ea82SAlexander Duyck 	unsigned int size = skb_end_offset(skb) + skb->data_len;
1584c93bdd0eSMel Gorman 	struct sk_buff *n = __alloc_skb(size, gfp_mask,
1585c93bdd0eSMel Gorman 					skb_alloc_rx_flag(skb), NUMA_NO_NODE);
15866602cebbSEric Dumazet 
15871da177e4SLinus Torvalds 	if (!n)
15881da177e4SLinus Torvalds 		return NULL;
15891da177e4SLinus Torvalds 
15901da177e4SLinus Torvalds 	/* Set the data pointer */
15911da177e4SLinus Torvalds 	skb_reserve(n, headerlen);
15921da177e4SLinus Torvalds 	/* Set the tail pointer and length */
15931da177e4SLinus Torvalds 	skb_put(n, skb->len);
15941da177e4SLinus Torvalds 
15959f77fad3STim Hansen 	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
15961da177e4SLinus Torvalds 
159708303c18SIlya Lesokhin 	skb_copy_header(n, skb);
15981da177e4SLinus Torvalds 	return n;
15991da177e4SLinus Torvalds }
1600b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy);
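
/* Example (illustrative sketch, not part of the original source): skb_copy()
 * suits callers that must rewrite payload bytes of a buffer they do not
 * exclusively own; the result is linear and fully private.  The helper
 * below is hypothetical.
 *
 *	struct sk_buff *priv = skb_copy(skb, GFP_ATOMIC);
 *
 *	if (!priv)
 *		goto drop;
 *	mangle_payload(priv->data, priv->len);
 */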
16011da177e4SLinus Torvalds 
16021da177e4SLinus Torvalds /**
1603bad93e9dSOctavian Purdila  *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
16041da177e4SLinus Torvalds  *	@skb: buffer to copy
1605117632e6SEric Dumazet  *	@headroom: headroom of new skb
16061da177e4SLinus Torvalds  *	@gfp_mask: allocation priority
1607bad93e9dSOctavian Purdila  *	@fclone: if true allocate the copy of the skb from the fclone
1608bad93e9dSOctavian Purdila  *	cache instead of the head cache; it is recommended to set this
1609bad93e9dSOctavian Purdila  *	to true for the cases where the copy will likely be cloned
16101da177e4SLinus Torvalds  *
16111da177e4SLinus Torvalds  *	Make a copy of both an &sk_buff and part of its data, located
16121da177e4SLinus Torvalds  *	in header. Fragmented data remain shared. This is used when
16131da177e4SLinus Torvalds  *	the caller wishes to modify only header of &sk_buff and needs
16141da177e4SLinus Torvalds  *	private copy of the header to alter. Returns %NULL on failure
16151da177e4SLinus Torvalds  *	or the pointer to the buffer on success.
16161da177e4SLinus Torvalds  *	The returned buffer has a reference count of 1.
16171da177e4SLinus Torvalds  */
16181da177e4SLinus Torvalds 
1619bad93e9dSOctavian Purdila struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1620bad93e9dSOctavian Purdila 				   gfp_t gfp_mask, bool fclone)
16211da177e4SLinus Torvalds {
1622117632e6SEric Dumazet 	unsigned int size = skb_headlen(skb) + headroom;
1623bad93e9dSOctavian Purdila 	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
1624bad93e9dSOctavian Purdila 	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
16256602cebbSEric Dumazet 
16261da177e4SLinus Torvalds 	if (!n)
16271da177e4SLinus Torvalds 		goto out;
16281da177e4SLinus Torvalds 
16291da177e4SLinus Torvalds 	/* Set the data pointer */
1630117632e6SEric Dumazet 	skb_reserve(n, headroom);
16311da177e4SLinus Torvalds 	/* Set the tail pointer and length */
16321da177e4SLinus Torvalds 	skb_put(n, skb_headlen(skb));
16331da177e4SLinus Torvalds 	/* Copy the bytes */
1634d626f62bSArnaldo Carvalho de Melo 	skb_copy_from_linear_data(skb, n->data, n->len);
16351da177e4SLinus Torvalds 
163625f484a6SHerbert Xu 	n->truesize += skb->data_len;
16371da177e4SLinus Torvalds 	n->data_len  = skb->data_len;
16381da177e4SLinus Torvalds 	n->len	     = skb->len;
16391da177e4SLinus Torvalds 
16401da177e4SLinus Torvalds 	if (skb_shinfo(skb)->nr_frags) {
16411da177e4SLinus Torvalds 		int i;
16421da177e4SLinus Torvalds 
16431f8b977aSWillem de Bruijn 		if (skb_orphan_frags(skb, gfp_mask) ||
16441f8b977aSWillem de Bruijn 		    skb_zerocopy_clone(n, skb, gfp_mask)) {
16451511022cSDan Carpenter 			kfree_skb(n);
16461511022cSDan Carpenter 			n = NULL;
1647a6686f2fSShirley Ma 			goto out;
1648a6686f2fSShirley Ma 		}
16491da177e4SLinus Torvalds 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
16501da177e4SLinus Torvalds 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
1651ea2ab693SIan Campbell 			skb_frag_ref(skb, i);
16521da177e4SLinus Torvalds 		}
16531da177e4SLinus Torvalds 		skb_shinfo(n)->nr_frags = i;
16541da177e4SLinus Torvalds 	}
16551da177e4SLinus Torvalds 
165621dc3301SDavid S. Miller 	if (skb_has_frag_list(skb)) {
16571da177e4SLinus Torvalds 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
16581da177e4SLinus Torvalds 		skb_clone_fraglist(n);
16591da177e4SLinus Torvalds 	}
16601da177e4SLinus Torvalds 
166108303c18SIlya Lesokhin 	skb_copy_header(n, skb);
16621da177e4SLinus Torvalds out:
16631da177e4SLinus Torvalds 	return n;
16641da177e4SLinus Torvalds }
1665bad93e9dSOctavian Purdila EXPORT_SYMBOL(__pskb_copy_fclone);
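
/* Example (illustrative sketch, not part of the original source): most
 * callers use the pskb_copy() or pskb_copy_for_clone() wrappers, which copy
 * only the linear header while the paged fragments remain shared.
 *
 *	struct sk_buff *hdr_copy = pskb_copy(skb, GFP_ATOMIC);
 *
 *	if (hdr_copy)
 *		... modify headers of hdr_copy; fragment data stays shared ...
 */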
16661da177e4SLinus Torvalds 
16671da177e4SLinus Torvalds /**
16681da177e4SLinus Torvalds  *	pskb_expand_head - reallocate header of &sk_buff
16691da177e4SLinus Torvalds  *	@skb: buffer to reallocate
16701da177e4SLinus Torvalds  *	@nhead: room to add at head
16711da177e4SLinus Torvalds  *	@ntail: room to add at tail
16721da177e4SLinus Torvalds  *	@gfp_mask: allocation priority
16731da177e4SLinus Torvalds  *
1674bc32383cSMathias Krause  *	Expands (or creates identical copy, if @nhead and @ntail are zero)
1675bc32383cSMathias Krause  *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
16761da177e4SLinus Torvalds  *	reference count of 1. Returns zero on success or a negative error
16771da177e4SLinus Torvalds  *	code if expansion failed. In the latter case, &sk_buff is not changed.
16781da177e4SLinus Torvalds  *
16791da177e4SLinus Torvalds  *	All the pointers pointing into skb header may change and must be
16801da177e4SLinus Torvalds  *	reloaded after call to this function.
16811da177e4SLinus Torvalds  */
16821da177e4SLinus Torvalds 
168386a76cafSVictor Fusco int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1684dd0fc66fSAl Viro 		     gfp_t gfp_mask)
16851da177e4SLinus Torvalds {
1686158f323bSEric Dumazet 	int i, osize = skb_end_offset(skb);
1687158f323bSEric Dumazet 	int size = osize + nhead + ntail;
16881da177e4SLinus Torvalds 	long off;
1689158f323bSEric Dumazet 	u8 *data;
16901da177e4SLinus Torvalds 
16914edd87adSHerbert Xu 	BUG_ON(nhead < 0);
16924edd87adSHerbert Xu 
16939f77fad3STim Hansen 	BUG_ON(skb_shared(skb));
16941da177e4SLinus Torvalds 
16951da177e4SLinus Torvalds 	size = SKB_DATA_ALIGN(size);
16961da177e4SLinus Torvalds 
1697c93bdd0eSMel Gorman 	if (skb_pfmemalloc(skb))
1698c93bdd0eSMel Gorman 		gfp_mask |= __GFP_MEMALLOC;
1699c93bdd0eSMel Gorman 	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1700c93bdd0eSMel Gorman 			       gfp_mask, NUMA_NO_NODE, NULL);
17011da177e4SLinus Torvalds 	if (!data)
17021da177e4SLinus Torvalds 		goto nodata;
170387151b86SEric Dumazet 	size = SKB_WITH_OVERHEAD(ksize(data));
17041da177e4SLinus Torvalds 
17051da177e4SLinus Torvalds 	/* Copy only real data... and, alas, header. This should be
17066602cebbSEric Dumazet 	 * optimized for the cases when header is void.
17076602cebbSEric Dumazet 	 */
17086602cebbSEric Dumazet 	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
17096602cebbSEric Dumazet 
17106602cebbSEric Dumazet 	memcpy((struct skb_shared_info *)(data + size),
17116602cebbSEric Dumazet 	       skb_shinfo(skb),
1712fed66381SEric Dumazet 	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
17131da177e4SLinus Torvalds 
17143e24591aSAlexander Duyck 	/*
17153e24591aSAlexander Duyck 	 * if shinfo is shared we must drop the old head gracefully, but if it
17163e24591aSAlexander Duyck 	 * is not we can just drop the old head and let the existing refcount
17173e24591aSAlexander Duyck 	 * be since all we did is relocate the values
17183e24591aSAlexander Duyck 	 */
17193e24591aSAlexander Duyck 	if (skb_cloned(skb)) {
172070008aa5SMichael S. Tsirkin 		if (skb_orphan_frags(skb, gfp_mask))
1721a6686f2fSShirley Ma 			goto nofrags;
17221f8b977aSWillem de Bruijn 		if (skb_zcopy(skb))
1723c1d1b437SEric Dumazet 			refcount_inc(&skb_uarg(skb)->refcnt);
17241da177e4SLinus Torvalds 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1725ea2ab693SIan Campbell 			skb_frag_ref(skb, i);
17261da177e4SLinus Torvalds 
172721dc3301SDavid S. Miller 		if (skb_has_frag_list(skb))
17281da177e4SLinus Torvalds 			skb_clone_fraglist(skb);
17291da177e4SLinus Torvalds 
17301da177e4SLinus Torvalds 		skb_release_data(skb);
17313e24591aSAlexander Duyck 	} else {
17323e24591aSAlexander Duyck 		skb_free_head(skb);
17331fd63041SEric Dumazet 	}
17341da177e4SLinus Torvalds 	off = (data + nhead) - skb->head;
17351da177e4SLinus Torvalds 
17361da177e4SLinus Torvalds 	skb->head     = data;
1737d3836f21SEric Dumazet 	skb->head_frag = 0;
17381da177e4SLinus Torvalds 	skb->data    += off;
1739763087daSEric Dumazet 
1740763087daSEric Dumazet 	skb_set_end_offset(skb, size);
17414305b541SArnaldo Carvalho de Melo #ifdef NET_SKBUFF_DATA_USES_OFFSET
174256eb8882SPatrick McHardy 	off           = nhead;
174356eb8882SPatrick McHardy #endif
174427a884dcSArnaldo Carvalho de Melo 	skb->tail	      += off;
1745b41abb42SPeter Pan(潘卫平) 	skb_headers_offset_update(skb, nhead);
17461da177e4SLinus Torvalds 	skb->cloned   = 0;
1747334a8132SPatrick McHardy 	skb->hdr_len  = 0;
17481da177e4SLinus Torvalds 	skb->nohdr    = 0;
17491da177e4SLinus Torvalds 	atomic_set(&skb_shinfo(skb)->dataref, 1);
1750158f323bSEric Dumazet 
1751de8f3a83SDaniel Borkmann 	skb_metadata_clear(skb);
1752de8f3a83SDaniel Borkmann 
1753158f323bSEric Dumazet 	/* It is not generally safe to change skb->truesize.
1754158f323bSEric Dumazet 	 * For the moment, we really care of rx path, or
1755158f323bSEric Dumazet 	 * when skb is orphaned (not attached to a socket).
1756158f323bSEric Dumazet 	 */
1757158f323bSEric Dumazet 	if (!skb->sk || skb->destructor == sock_edemux)
1758158f323bSEric Dumazet 		skb->truesize += size - osize;
1759158f323bSEric Dumazet 
17601da177e4SLinus Torvalds 	return 0;
17611da177e4SLinus Torvalds 
1762a6686f2fSShirley Ma nofrags:
1763a6686f2fSShirley Ma 	kfree(data);
17641da177e4SLinus Torvalds nodata:
17651da177e4SLinus Torvalds 	return -ENOMEM;
17661da177e4SLinus Torvalds }
1767b4ac530fSDavid S. Miller EXPORT_SYMBOL(pskb_expand_head);
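
/* Example (illustrative sketch, not part of the original source): a caller
 * that needs to push an encapsulation header but may lack headroom usually
 * goes through the skb_cow_head() helper, which ends up here when the
 * buffer is cloned or short on space; NEW_HDR_LEN is hypothetical.
 *
 *	if (skb_cow_head(skb, NEW_HDR_LEN))
 *		goto drop;
 *	hdr = skb_push(skb, NEW_HDR_LEN);
 *
 * Remember that all pointers into the old header area must be re-derived
 * after a successful expansion.
 */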
17681da177e4SLinus Torvalds 
17691da177e4SLinus Torvalds /* Make private copy of skb with writable head and some headroom */
17701da177e4SLinus Torvalds 
17711da177e4SLinus Torvalds struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
17721da177e4SLinus Torvalds {
17731da177e4SLinus Torvalds 	struct sk_buff *skb2;
17741da177e4SLinus Torvalds 	int delta = headroom - skb_headroom(skb);
17751da177e4SLinus Torvalds 
17761da177e4SLinus Torvalds 	if (delta <= 0)
17771da177e4SLinus Torvalds 		skb2 = pskb_copy(skb, GFP_ATOMIC);
17781da177e4SLinus Torvalds 	else {
17791da177e4SLinus Torvalds 		skb2 = skb_clone(skb, GFP_ATOMIC);
17801da177e4SLinus Torvalds 		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
17811da177e4SLinus Torvalds 					     GFP_ATOMIC)) {
17821da177e4SLinus Torvalds 			kfree_skb(skb2);
17831da177e4SLinus Torvalds 			skb2 = NULL;
17841da177e4SLinus Torvalds 		}
17851da177e4SLinus Torvalds 	}
17861da177e4SLinus Torvalds 	return skb2;
17871da177e4SLinus Torvalds }
1788b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_realloc_headroom);
17891da177e4SLinus Torvalds 
17902b88cba5SEric Dumazet int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
17912b88cba5SEric Dumazet {
17922b88cba5SEric Dumazet 	unsigned int saved_end_offset, saved_truesize;
17932b88cba5SEric Dumazet 	struct skb_shared_info *shinfo;
17942b88cba5SEric Dumazet 	int res;
17952b88cba5SEric Dumazet 
17962b88cba5SEric Dumazet 	saved_end_offset = skb_end_offset(skb);
17972b88cba5SEric Dumazet 	saved_truesize = skb->truesize;
17982b88cba5SEric Dumazet 
17992b88cba5SEric Dumazet 	res = pskb_expand_head(skb, 0, 0, pri);
18002b88cba5SEric Dumazet 	if (res)
18012b88cba5SEric Dumazet 		return res;
18022b88cba5SEric Dumazet 
18032b88cba5SEric Dumazet 	skb->truesize = saved_truesize;
18042b88cba5SEric Dumazet 
18052b88cba5SEric Dumazet 	if (likely(skb_end_offset(skb) == saved_end_offset))
18062b88cba5SEric Dumazet 		return 0;
18072b88cba5SEric Dumazet 
18082b88cba5SEric Dumazet 	shinfo = skb_shinfo(skb);
18092b88cba5SEric Dumazet 
18102b88cba5SEric Dumazet 	/* We are about to change back skb->end,
18112b88cba5SEric Dumazet 	 * we need to move skb_shinfo() to its new location.
18122b88cba5SEric Dumazet 	 */
18132b88cba5SEric Dumazet 	memmove(skb->head + saved_end_offset,
18142b88cba5SEric Dumazet 		shinfo,
18152b88cba5SEric Dumazet 		offsetof(struct skb_shared_info, frags[shinfo->nr_frags]));
18162b88cba5SEric Dumazet 
18172b88cba5SEric Dumazet 	skb_set_end_offset(skb, saved_end_offset);
18182b88cba5SEric Dumazet 
18192b88cba5SEric Dumazet 	return 0;
18202b88cba5SEric Dumazet }
18212b88cba5SEric Dumazet 
18221da177e4SLinus Torvalds /**
1823f1260ff1SVasily Averin  *	skb_expand_head - reallocate header of &sk_buff
1824f1260ff1SVasily Averin  *	@skb: buffer to reallocate
1825f1260ff1SVasily Averin  *	@headroom: needed headroom
1826f1260ff1SVasily Averin  *
1827f1260ff1SVasily Averin  *	Unlike skb_realloc_headroom, this one does not allocate a new skb
1828f1260ff1SVasily Averin  *	if possible; it copies skb->sk to the new skb as needed
1829f1260ff1SVasily Averin  *	and frees the original skb in case of failure.
1830f1260ff1SVasily Averin  *
1831f1260ff1SVasily Averin  *	It expects an increased headroom and generates a warning otherwise.
1832f1260ff1SVasily Averin  */
1833f1260ff1SVasily Averin 
1834f1260ff1SVasily Averin struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom)
1835f1260ff1SVasily Averin {
1836f1260ff1SVasily Averin 	int delta = headroom - skb_headroom(skb);
18377f678defSVasily Averin 	int osize = skb_end_offset(skb);
18387f678defSVasily Averin 	struct sock *sk = skb->sk;
1839f1260ff1SVasily Averin 
1840f1260ff1SVasily Averin 	if (WARN_ONCE(delta <= 0,
1841f1260ff1SVasily Averin 		      "%s is expecting an increase in the headroom", __func__))
1842f1260ff1SVasily Averin 		return skb;
1843f1260ff1SVasily Averin 
18447f678defSVasily Averin 	delta = SKB_DATA_ALIGN(delta);
18457f678defSVasily Averin 	/* pskb_expand_head() might crash if skb is shared. */
18467f678defSVasily Averin 	if (skb_shared(skb) || !is_skb_wmem(skb)) {
1847f1260ff1SVasily Averin 		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1848f1260ff1SVasily Averin 
18497f678defSVasily Averin 		if (unlikely(!nskb))
18507f678defSVasily Averin 			goto fail;
18517f678defSVasily Averin 
18527f678defSVasily Averin 		if (sk)
18537f678defSVasily Averin 			skb_set_owner_w(nskb, sk);
1854f1260ff1SVasily Averin 		consume_skb(skb);
1855f1260ff1SVasily Averin 		skb = nskb;
1856f1260ff1SVasily Averin 	}
18577f678defSVasily Averin 	if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC))
18587f678defSVasily Averin 		goto fail;
18597f678defSVasily Averin 
18607f678defSVasily Averin 	if (sk && is_skb_wmem(skb)) {
18617f678defSVasily Averin 		delta = skb_end_offset(skb) - osize;
18627f678defSVasily Averin 		refcount_add(delta, &sk->sk_wmem_alloc);
18637f678defSVasily Averin 		skb->truesize += delta;
1864f1260ff1SVasily Averin 	}
1865f1260ff1SVasily Averin 	return skb;
18667f678defSVasily Averin 
18677f678defSVasily Averin fail:
18687f678defSVasily Averin 	kfree_skb(skb);
18697f678defSVasily Averin 	return NULL;
1870f1260ff1SVasily Averin }
1871f1260ff1SVasily Averin EXPORT_SYMBOL(skb_expand_head);
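
/* Example (illustrative sketch, not part of the original source): output
 * paths that must guarantee room for a link-layer header can use
 * skb_expand_head(); unlike skb_realloc_headroom(), the original skb is
 * consumed on failure, so the caller only checks the return value.
 *
 *	skb = skb_expand_head(skb, hh_len);	(hh_len is hypothetical here)
 *	if (!skb)
 *		return -ENOMEM;
 */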
1872f1260ff1SVasily Averin 
1873f1260ff1SVasily Averin /**
18741da177e4SLinus Torvalds  *	skb_copy_expand	-	copy and expand sk_buff
18751da177e4SLinus Torvalds  *	@skb: buffer to copy
18761da177e4SLinus Torvalds  *	@newheadroom: new free bytes at head
18771da177e4SLinus Torvalds  *	@newtailroom: new free bytes at tail
18781da177e4SLinus Torvalds  *	@gfp_mask: allocation priority
18791da177e4SLinus Torvalds  *
18801da177e4SLinus Torvalds  *	Make a copy of both an &sk_buff and its data and while doing so
18811da177e4SLinus Torvalds  *	allocate additional space.
18821da177e4SLinus Torvalds  *
18831da177e4SLinus Torvalds  *	This is used when the caller wishes to modify the data and needs a
18841da177e4SLinus Torvalds  *	private copy of the data to alter as well as more space for new fields.
18851da177e4SLinus Torvalds  *	Returns %NULL on failure or the pointer to the buffer
18861da177e4SLinus Torvalds  *	on success. The returned buffer has a reference count of 1.
18871da177e4SLinus Torvalds  *
18881da177e4SLinus Torvalds  *	You must pass %GFP_ATOMIC as the allocation priority if this function
18891da177e4SLinus Torvalds  *	is called from an interrupt.
18901da177e4SLinus Torvalds  */
18911da177e4SLinus Torvalds struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
189286a76cafSVictor Fusco 				int newheadroom, int newtailroom,
1893dd0fc66fSAl Viro 				gfp_t gfp_mask)
18941da177e4SLinus Torvalds {
18951da177e4SLinus Torvalds 	/*
18961da177e4SLinus Torvalds 	 *	Allocate the copy buffer
18971da177e4SLinus Torvalds 	 */
1898c93bdd0eSMel Gorman 	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1899c93bdd0eSMel Gorman 					gfp_mask, skb_alloc_rx_flag(skb),
1900c93bdd0eSMel Gorman 					NUMA_NO_NODE);
1901efd1e8d5SPatrick McHardy 	int oldheadroom = skb_headroom(skb);
19021da177e4SLinus Torvalds 	int head_copy_len, head_copy_off;
19031da177e4SLinus Torvalds 
19041da177e4SLinus Torvalds 	if (!n)
19051da177e4SLinus Torvalds 		return NULL;
19061da177e4SLinus Torvalds 
19071da177e4SLinus Torvalds 	skb_reserve(n, newheadroom);
19081da177e4SLinus Torvalds 
19091da177e4SLinus Torvalds 	/* Set the tail pointer and length */
19101da177e4SLinus Torvalds 	skb_put(n, skb->len);
19111da177e4SLinus Torvalds 
1912efd1e8d5SPatrick McHardy 	head_copy_len = oldheadroom;
19131da177e4SLinus Torvalds 	head_copy_off = 0;
19141da177e4SLinus Torvalds 	if (newheadroom <= head_copy_len)
19151da177e4SLinus Torvalds 		head_copy_len = newheadroom;
19161da177e4SLinus Torvalds 	else
19171da177e4SLinus Torvalds 		head_copy_off = newheadroom - head_copy_len;
19181da177e4SLinus Torvalds 
19191da177e4SLinus Torvalds 	/* Copy the linear header and data. */
19209f77fad3STim Hansen 	BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
19219f77fad3STim Hansen 			     skb->len + head_copy_len));
19221da177e4SLinus Torvalds 
192308303c18SIlya Lesokhin 	skb_copy_header(n, skb);
19241da177e4SLinus Torvalds 
1925030737bcSEric Dumazet 	skb_headers_offset_update(n, newheadroom - oldheadroom);
1926efd1e8d5SPatrick McHardy 
19271da177e4SLinus Torvalds 	return n;
19281da177e4SLinus Torvalds }
1929b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_expand);
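
/* Example (illustrative sketch, not part of the original source): make a
 * private copy while reserving extra space for headers to be pushed later;
 * needed_headroom is hypothetical.
 *
 *	struct sk_buff *n = skb_copy_expand(skb, needed_headroom, 0,
 *					    GFP_ATOMIC);
 *
 *	if (!n)
 *		goto drop;
 *	(push new headers into n; the original skb is untouched)
 */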
19301da177e4SLinus Torvalds 
19311da177e4SLinus Torvalds /**
1932cd0a137aSFlorian Fainelli  *	__skb_pad		-	zero pad the tail of an skb
19331da177e4SLinus Torvalds  *	@skb: buffer to pad
19341da177e4SLinus Torvalds  *	@pad: space to pad
1935cd0a137aSFlorian Fainelli  *	@free_on_error: free buffer on error
19361da177e4SLinus Torvalds  *
19371da177e4SLinus Torvalds  *	Ensure that a buffer is followed by a padding area that is zero
19381da177e4SLinus Torvalds  *	filled. Used by network drivers which may DMA or transfer data
19391da177e4SLinus Torvalds  *	beyond the buffer end onto the wire.
19401da177e4SLinus Torvalds  *
1941cd0a137aSFlorian Fainelli  *	May return error in out of memory cases. The skb is freed on error
1942cd0a137aSFlorian Fainelli  *	if @free_on_error is true.
19431da177e4SLinus Torvalds  */
19441da177e4SLinus Torvalds 
1945cd0a137aSFlorian Fainelli int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
19461da177e4SLinus Torvalds {
19475b057c6bSHerbert Xu 	int err;
19485b057c6bSHerbert Xu 	int ntail;
19491da177e4SLinus Torvalds 
19501da177e4SLinus Torvalds 	/* If the skbuff is non linear tailroom is always zero.. */
19515b057c6bSHerbert Xu 	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
19521da177e4SLinus Torvalds 		memset(skb->data+skb->len, 0, pad);
19535b057c6bSHerbert Xu 		return 0;
19541da177e4SLinus Torvalds 	}
19551da177e4SLinus Torvalds 
19564305b541SArnaldo Carvalho de Melo 	ntail = skb->data_len + pad - (skb->end - skb->tail);
19575b057c6bSHerbert Xu 	if (likely(skb_cloned(skb) || ntail > 0)) {
19585b057c6bSHerbert Xu 		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
19595b057c6bSHerbert Xu 		if (unlikely(err))
19605b057c6bSHerbert Xu 			goto free_skb;
19615b057c6bSHerbert Xu 	}
19625b057c6bSHerbert Xu 
19635b057c6bSHerbert Xu 	/* FIXME: The use of this function with non-linear skb's really needs
19645b057c6bSHerbert Xu 	 * to be audited.
19655b057c6bSHerbert Xu 	 */
19665b057c6bSHerbert Xu 	err = skb_linearize(skb);
19675b057c6bSHerbert Xu 	if (unlikely(err))
19685b057c6bSHerbert Xu 		goto free_skb;
19695b057c6bSHerbert Xu 
19705b057c6bSHerbert Xu 	memset(skb->data + skb->len, 0, pad);
19715b057c6bSHerbert Xu 	return 0;
19725b057c6bSHerbert Xu 
19735b057c6bSHerbert Xu free_skb:
1974cd0a137aSFlorian Fainelli 	if (free_on_error)
19751da177e4SLinus Torvalds 		kfree_skb(skb);
19765b057c6bSHerbert Xu 	return err;
19771da177e4SLinus Torvalds }
1978cd0a137aSFlorian Fainelli EXPORT_SYMBOL(__skb_pad);
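
/* Example (illustrative sketch, not part of the original source): drivers
 * rarely call __skb_pad() directly; the usual entry points are skb_padto()
 * and skb_put_padto(), e.g. to guarantee a minimum Ethernet frame size
 * before handing the buffer to hardware.
 *
 *	if (skb_put_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;	(skb has already been freed)
 */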
19791da177e4SLinus Torvalds 
19800dde3e16SIlpo Järvinen /**
19810c7ddf36SMathias Krause  *	pskb_put - add data to the tail of a potentially fragmented buffer
19820c7ddf36SMathias Krause  *	@skb: start of the buffer to use
19830c7ddf36SMathias Krause  *	@tail: tail fragment of the buffer to use
19840c7ddf36SMathias Krause  *	@len: amount of data to add
19850c7ddf36SMathias Krause  *
19860c7ddf36SMathias Krause  *	This function extends the used data area of the potentially
19870c7ddf36SMathias Krause  *	fragmented buffer. @tail must be the last fragment of @skb -- or
19880c7ddf36SMathias Krause  *	@skb itself. If this would exceed the total buffer size the kernel
19890c7ddf36SMathias Krause  *	will panic. A pointer to the first byte of the extra data is
19900c7ddf36SMathias Krause  *	returned.
19910c7ddf36SMathias Krause  */
19920c7ddf36SMathias Krause 
19934df864c1SJohannes Berg void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
19940c7ddf36SMathias Krause {
19950c7ddf36SMathias Krause 	if (tail != skb) {
19960c7ddf36SMathias Krause 		skb->data_len += len;
19970c7ddf36SMathias Krause 		skb->len += len;
19980c7ddf36SMathias Krause 	}
19990c7ddf36SMathias Krause 	return skb_put(tail, len);
20000c7ddf36SMathias Krause }
20010c7ddf36SMathias Krause EXPORT_SYMBOL_GPL(pskb_put);
20020c7ddf36SMathias Krause 
20030c7ddf36SMathias Krause /**
20040dde3e16SIlpo Järvinen  *	skb_put - add data to a buffer
20050dde3e16SIlpo Järvinen  *	@skb: buffer to use
20060dde3e16SIlpo Järvinen  *	@len: amount of data to add
20070dde3e16SIlpo Järvinen  *
20080dde3e16SIlpo Järvinen  *	This function extends the used data area of the buffer. If this would
20090dde3e16SIlpo Järvinen  *	exceed the total buffer size the kernel will panic. A pointer to the
20100dde3e16SIlpo Järvinen  *	first byte of the extra data is returned.
20110dde3e16SIlpo Järvinen  */
20124df864c1SJohannes Berg void *skb_put(struct sk_buff *skb, unsigned int len)
20130dde3e16SIlpo Järvinen {
20144df864c1SJohannes Berg 	void *tmp = skb_tail_pointer(skb);
20150dde3e16SIlpo Järvinen 	SKB_LINEAR_ASSERT(skb);
20160dde3e16SIlpo Järvinen 	skb->tail += len;
20170dde3e16SIlpo Järvinen 	skb->len  += len;
20180dde3e16SIlpo Järvinen 	if (unlikely(skb->tail > skb->end))
20190dde3e16SIlpo Järvinen 		skb_over_panic(skb, len, __builtin_return_address(0));
20200dde3e16SIlpo Järvinen 	return tmp;
20210dde3e16SIlpo Järvinen }
20220dde3e16SIlpo Järvinen EXPORT_SYMBOL(skb_put);
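
/* Example (illustrative sketch, not part of the original source): a typical
 * allocate-reserve-put sequence when building a packet from scratch; hlen,
 * dlen and payload are hypothetical.
 *
 *	skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);			(leave headroom for headers)
 *	skb_put_data(skb, payload, dlen);	(extend tail, copy dlen bytes)
 */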
20230dde3e16SIlpo Järvinen 
20246be8ac2fSIlpo Järvinen /**
2025c2aa270aSIlpo Järvinen  *	skb_push - add data to the start of a buffer
2026c2aa270aSIlpo Järvinen  *	@skb: buffer to use
2027c2aa270aSIlpo Järvinen  *	@len: amount of data to add
2028c2aa270aSIlpo Järvinen  *
2029c2aa270aSIlpo Järvinen  *	This function extends the used data area of the buffer at the buffer
2030c2aa270aSIlpo Järvinen  *	start. If this would exceed the total buffer headroom the kernel will
2031c2aa270aSIlpo Järvinen  *	panic. A pointer to the first byte of the extra data is returned.
2032c2aa270aSIlpo Järvinen  */
2033d58ff351SJohannes Berg void *skb_push(struct sk_buff *skb, unsigned int len)
2034c2aa270aSIlpo Järvinen {
2035c2aa270aSIlpo Järvinen 	skb->data -= len;
2036c2aa270aSIlpo Järvinen 	skb->len  += len;
2037c2aa270aSIlpo Järvinen 	if (unlikely(skb->data < skb->head))
2038c2aa270aSIlpo Järvinen 		skb_under_panic(skb, len, __builtin_return_address(0));
2039c2aa270aSIlpo Järvinen 	return skb->data;
2040c2aa270aSIlpo Järvinen }
2041c2aa270aSIlpo Järvinen EXPORT_SYMBOL(skb_push);
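
/* Example (illustrative sketch, not part of the original source): prepend an
 * Ethernet header into previously reserved headroom; dest_mac and src_mac
 * are hypothetical, and sufficient headroom is assumed (see skb_cow_head()
 * above).
 *
 *	struct ethhdr *eth = skb_push(skb, ETH_HLEN);
 *
 *	memcpy(eth->h_dest, dest_mac, ETH_ALEN);
 *	memcpy(eth->h_source, src_mac, ETH_ALEN);
 *	eth->h_proto = htons(ETH_P_IP);
 */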
2042c2aa270aSIlpo Järvinen 
2043c2aa270aSIlpo Järvinen /**
20446be8ac2fSIlpo Järvinen  *	skb_pull - remove data from the start of a buffer
20456be8ac2fSIlpo Järvinen  *	@skb: buffer to use
20466be8ac2fSIlpo Järvinen  *	@len: amount of data to remove
20476be8ac2fSIlpo Järvinen  *
20486be8ac2fSIlpo Järvinen  *	This function removes data from the start of a buffer, returning
20496be8ac2fSIlpo Järvinen  *	the memory to the headroom. A pointer to the next data in the buffer
20506be8ac2fSIlpo Järvinen  *	is returned. Once the data has been pulled future pushes will overwrite
20516be8ac2fSIlpo Järvinen  *	the old data.
20526be8ac2fSIlpo Järvinen  */
2053af72868bSJohannes Berg void *skb_pull(struct sk_buff *skb, unsigned int len)
20546be8ac2fSIlpo Järvinen {
205547d29646SDavid S. Miller 	return skb_pull_inline(skb, len);
20566be8ac2fSIlpo Järvinen }
20576be8ac2fSIlpo Järvinen EXPORT_SYMBOL(skb_pull);
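
/* Example (illustrative sketch, not part of the original source): strip an
 * outer header after parsing it, so that skb->data points at the inner
 * payload; struct foohdr is hypothetical.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct foohdr)))
 *		goto drop;
 *	fh = (struct foohdr *)skb->data;
 *	skb_pull(skb, sizeof(struct foohdr));
 */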
20586be8ac2fSIlpo Järvinen 
2059419ae74eSIlpo Järvinen /**
206013244cccSLuiz Augusto von Dentz  *	skb_pull_data - remove data from the start of a buffer returning its
206113244cccSLuiz Augusto von Dentz  *	original position.
206213244cccSLuiz Augusto von Dentz  *	@skb: buffer to use
206313244cccSLuiz Augusto von Dentz  *	@len: amount of data to remove
206413244cccSLuiz Augusto von Dentz  *
206513244cccSLuiz Augusto von Dentz  *	This function removes data from the start of a buffer, returning
206613244cccSLuiz Augusto von Dentz  *	the memory to the headroom. A pointer to the original data in the buffer
206713244cccSLuiz Augusto von Dentz  *	is returned after checking if there is enough data to pull. Once the
206813244cccSLuiz Augusto von Dentz  *	data has been pulled future pushes will overwrite the old data.
206913244cccSLuiz Augusto von Dentz  */
207013244cccSLuiz Augusto von Dentz void *skb_pull_data(struct sk_buff *skb, size_t len)
207113244cccSLuiz Augusto von Dentz {
207213244cccSLuiz Augusto von Dentz 	void *data = skb->data;
207313244cccSLuiz Augusto von Dentz 
207413244cccSLuiz Augusto von Dentz 	if (skb->len < len)
207513244cccSLuiz Augusto von Dentz 		return NULL;
207613244cccSLuiz Augusto von Dentz 
207713244cccSLuiz Augusto von Dentz 	skb_pull(skb, len);
207813244cccSLuiz Augusto von Dentz 
207913244cccSLuiz Augusto von Dentz 	return data;
208013244cccSLuiz Augusto von Dentz }
208113244cccSLuiz Augusto von Dentz EXPORT_SYMBOL(skb_pull_data);
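
/* Example (illustrative sketch, not part of the original source):
 * skb_pull_data() combines the length check and the pull, returning a
 * pointer to the bytes just consumed, which is convenient when walking a
 * sequence of fixed-size records; struct foo_rec is hypothetical.
 *
 *	struct foo_rec *rec = skb_pull_data(skb, sizeof(*rec));
 *
 *	if (!rec)
 *		goto malformed;
 */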
208213244cccSLuiz Augusto von Dentz 
208313244cccSLuiz Augusto von Dentz /**
2084419ae74eSIlpo Järvinen  *	skb_trim - remove end from a buffer
2085419ae74eSIlpo Järvinen  *	@skb: buffer to alter
2086419ae74eSIlpo Järvinen  *	@len: new length
2087419ae74eSIlpo Järvinen  *
2088419ae74eSIlpo Järvinen  *	Cut the length of a buffer down by removing data from the tail. If
2089419ae74eSIlpo Järvinen  *	the buffer is already under the length specified it is not modified.
2090419ae74eSIlpo Järvinen  *	The skb must be linear.
2091419ae74eSIlpo Järvinen  */
2092419ae74eSIlpo Järvinen void skb_trim(struct sk_buff *skb, unsigned int len)
2093419ae74eSIlpo Järvinen {
2094419ae74eSIlpo Järvinen 	if (skb->len > len)
2095419ae74eSIlpo Järvinen 		__skb_trim(skb, len);
2096419ae74eSIlpo Järvinen }
2097419ae74eSIlpo Järvinen EXPORT_SYMBOL(skb_trim);
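
/* Editorial note: an illustrative sketch, not part of this file. skb_trim()
 * requires a linear skb; when the skb may carry paged data, use pskb_trim()
 * (which lands in ___pskb_trim() below) and check for failure. The
 * example_strip_padding() helper and its error code are made up.
 */
static int example_strip_padding(struct sk_buff *skb, unsigned int pad)
{
	if (pad > skb->len)
		return -EINVAL;

	if (skb_is_nonlinear(skb))
		return pskb_trim(skb, skb->len - pad);	/* may reallocate */

	skb_trim(skb, skb->len - pad);			/* cannot fail */
	return 0;
}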
2098419ae74eSIlpo Järvinen 
20993cc0e873SHerbert Xu /* Trims skb to length len. It can change skb pointers.
21001da177e4SLinus Torvalds  */
21011da177e4SLinus Torvalds 
21023cc0e873SHerbert Xu int ___pskb_trim(struct sk_buff *skb, unsigned int len)
21031da177e4SLinus Torvalds {
210427b437c8SHerbert Xu 	struct sk_buff **fragp;
210527b437c8SHerbert Xu 	struct sk_buff *frag;
21061da177e4SLinus Torvalds 	int offset = skb_headlen(skb);
21071da177e4SLinus Torvalds 	int nfrags = skb_shinfo(skb)->nr_frags;
21081da177e4SLinus Torvalds 	int i;
210927b437c8SHerbert Xu 	int err;
211027b437c8SHerbert Xu 
211127b437c8SHerbert Xu 	if (skb_cloned(skb) &&
211227b437c8SHerbert Xu 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
211327b437c8SHerbert Xu 		return err;
21141da177e4SLinus Torvalds 
2115f4d26fb3SHerbert Xu 	i = 0;
2116f4d26fb3SHerbert Xu 	if (offset >= len)
2117f4d26fb3SHerbert Xu 		goto drop_pages;
2118f4d26fb3SHerbert Xu 
2119f4d26fb3SHerbert Xu 	for (; i < nfrags; i++) {
21209e903e08SEric Dumazet 		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
212127b437c8SHerbert Xu 
212227b437c8SHerbert Xu 		if (end < len) {
21231da177e4SLinus Torvalds 			offset = end;
212427b437c8SHerbert Xu 			continue;
21251da177e4SLinus Torvalds 		}
21261da177e4SLinus Torvalds 
21279e903e08SEric Dumazet 		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
212827b437c8SHerbert Xu 
2129f4d26fb3SHerbert Xu drop_pages:
213027b437c8SHerbert Xu 		skb_shinfo(skb)->nr_frags = i;
213127b437c8SHerbert Xu 
213227b437c8SHerbert Xu 		for (; i < nfrags; i++)
2133ea2ab693SIan Campbell 			skb_frag_unref(skb, i);
213427b437c8SHerbert Xu 
213521dc3301SDavid S. Miller 		if (skb_has_frag_list(skb))
213627b437c8SHerbert Xu 			skb_drop_fraglist(skb);
2137f4d26fb3SHerbert Xu 		goto done;
213827b437c8SHerbert Xu 	}
213927b437c8SHerbert Xu 
214027b437c8SHerbert Xu 	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
214127b437c8SHerbert Xu 	     fragp = &frag->next) {
214227b437c8SHerbert Xu 		int end = offset + frag->len;
214327b437c8SHerbert Xu 
214427b437c8SHerbert Xu 		if (skb_shared(frag)) {
214527b437c8SHerbert Xu 			struct sk_buff *nfrag;
214627b437c8SHerbert Xu 
214727b437c8SHerbert Xu 			nfrag = skb_clone(frag, GFP_ATOMIC);
214827b437c8SHerbert Xu 			if (unlikely(!nfrag))
214927b437c8SHerbert Xu 				return -ENOMEM;
215027b437c8SHerbert Xu 
215127b437c8SHerbert Xu 			nfrag->next = frag->next;
215285bb2a60SEric Dumazet 			consume_skb(frag);
215327b437c8SHerbert Xu 			frag = nfrag;
215427b437c8SHerbert Xu 			*fragp = frag;
215527b437c8SHerbert Xu 		}
215627b437c8SHerbert Xu 
215727b437c8SHerbert Xu 		if (end < len) {
215827b437c8SHerbert Xu 			offset = end;
215927b437c8SHerbert Xu 			continue;
216027b437c8SHerbert Xu 		}
216127b437c8SHerbert Xu 
216227b437c8SHerbert Xu 		if (end > len &&
216327b437c8SHerbert Xu 		    unlikely((err = pskb_trim(frag, len - offset))))
216427b437c8SHerbert Xu 			return err;
216527b437c8SHerbert Xu 
216627b437c8SHerbert Xu 		if (frag->next)
216727b437c8SHerbert Xu 			skb_drop_list(&frag->next);
216827b437c8SHerbert Xu 		break;
216927b437c8SHerbert Xu 	}
217027b437c8SHerbert Xu 
2171f4d26fb3SHerbert Xu done:
217227b437c8SHerbert Xu 	if (len > skb_headlen(skb)) {
21731da177e4SLinus Torvalds 		skb->data_len -= skb->len - len;
21741da177e4SLinus Torvalds 		skb->len       = len;
21751da177e4SLinus Torvalds 	} else {
21761da177e4SLinus Torvalds 		skb->len       = len;
21771da177e4SLinus Torvalds 		skb->data_len  = 0;
217827a884dcSArnaldo Carvalho de Melo 		skb_set_tail_pointer(skb, len);
21791da177e4SLinus Torvalds 	}
21801da177e4SLinus Torvalds 
2181c21b48ccSEric Dumazet 	if (!skb->sk || skb->destructor == sock_edemux)
2182c21b48ccSEric Dumazet 		skb_condense(skb);
21831da177e4SLinus Torvalds 	return 0;
21841da177e4SLinus Torvalds }
2185b4ac530fSDavid S. Miller EXPORT_SYMBOL(___pskb_trim);
21861da177e4SLinus Torvalds 
218788078d98SEric Dumazet /* Note: use pskb_trim_rcsum() instead of calling this directly
218888078d98SEric Dumazet  */
218988078d98SEric Dumazet int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
219088078d98SEric Dumazet {
219188078d98SEric Dumazet 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
219288078d98SEric Dumazet 		int delta = skb->len - len;
219388078d98SEric Dumazet 
2194d55bef50SDimitris Michailidis 		skb->csum = csum_block_sub(skb->csum,
2195d55bef50SDimitris Michailidis 					   skb_checksum(skb, len, delta, 0),
2196d55bef50SDimitris Michailidis 					   len);
219754970a2fSVasily Averin 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
219854970a2fSVasily Averin 		int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
219954970a2fSVasily Averin 		int offset = skb_checksum_start_offset(skb) + skb->csum_offset;
220054970a2fSVasily Averin 
220154970a2fSVasily Averin 		if (offset + sizeof(__sum16) > hdlen)
220254970a2fSVasily Averin 			return -EINVAL;
220388078d98SEric Dumazet 	}
220488078d98SEric Dumazet 	return __pskb_trim(skb, len);
220588078d98SEric Dumazet }
220688078d98SEric Dumazet EXPORT_SYMBOL(pskb_trim_rcsum_slow);
220788078d98SEric Dumazet 
22081da177e4SLinus Torvalds /**
22091da177e4SLinus Torvalds  *	__pskb_pull_tail - advance tail of skb header
22101da177e4SLinus Torvalds  *	@skb: buffer to reallocate
22111da177e4SLinus Torvalds  *	@delta: number of bytes to advance tail
22121da177e4SLinus Torvalds  *
22131da177e4SLinus Torvalds  *	The function only makes sense on a fragmented &sk_buff: it expands
22141da177e4SLinus Torvalds  *	the header, moving its tail forward and copying the necessary
22151da177e4SLinus Torvalds  *	data from the fragmented part.
22161da177e4SLinus Torvalds  *
22171da177e4SLinus Torvalds  *	&sk_buff MUST have reference count of 1.
22181da177e4SLinus Torvalds  *
22191da177e4SLinus Torvalds  *	Returns %NULL (and &sk_buff does not change) if pull failed
22201da177e4SLinus Torvalds  *	or value of new tail of skb in the case of success.
22211da177e4SLinus Torvalds  *
22221da177e4SLinus Torvalds  *	All the pointers pointing into skb header may change and must be
22231da177e4SLinus Torvalds  *	reloaded after call to this function.
22241da177e4SLinus Torvalds  */
22251da177e4SLinus Torvalds 
22261da177e4SLinus Torvalds /* Moves the tail of the skb head forward, copying data from the
22271da177e4SLinus Torvalds  * fragmented part when necessary.
22281da177e4SLinus Torvalds  * 1. It may fail due to memory allocation failure.
22291da177e4SLinus Torvalds  * 2. It may change skb pointers.
22301da177e4SLinus Torvalds  *
22311da177e4SLinus Torvalds  * It is pretty complicated. Luckily, it is called only in exceptional cases.
22321da177e4SLinus Torvalds  */
2233af72868bSJohannes Berg void *__pskb_pull_tail(struct sk_buff *skb, int delta)
22341da177e4SLinus Torvalds {
22351da177e4SLinus Torvalds 	/* If the skb does not have enough free space at the tail, get a new
22361da177e4SLinus Torvalds 	 * one plus 128 bytes for future expansions. If we have enough room
22371da177e4SLinus Torvalds 	 * at the tail, reallocate without expansion only if the skb is cloned.
22381da177e4SLinus Torvalds 	 */
22394305b541SArnaldo Carvalho de Melo 	int i, k, eat = (skb->tail + delta) - skb->end;
22401da177e4SLinus Torvalds 
22411da177e4SLinus Torvalds 	if (eat > 0 || skb_cloned(skb)) {
22421da177e4SLinus Torvalds 		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
22431da177e4SLinus Torvalds 				     GFP_ATOMIC))
22441da177e4SLinus Torvalds 			return NULL;
22451da177e4SLinus Torvalds 	}
22461da177e4SLinus Torvalds 
22479f77fad3STim Hansen 	BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
22489f77fad3STim Hansen 			     skb_tail_pointer(skb), delta));
22491da177e4SLinus Torvalds 
22501da177e4SLinus Torvalds 	/* Optimization: no fragments, no reason to preestimate the
22511da177e4SLinus Torvalds 	 * size of pulled pages. Superb.
22521da177e4SLinus Torvalds 	 */
225321dc3301SDavid S. Miller 	if (!skb_has_frag_list(skb))
22541da177e4SLinus Torvalds 		goto pull_pages;
22551da177e4SLinus Torvalds 
22561da177e4SLinus Torvalds 	/* Estimate size of pulled pages. */
22571da177e4SLinus Torvalds 	eat = delta;
22581da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
22599e903e08SEric Dumazet 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
22609e903e08SEric Dumazet 
22619e903e08SEric Dumazet 		if (size >= eat)
22621da177e4SLinus Torvalds 			goto pull_pages;
22639e903e08SEric Dumazet 		eat -= size;
22641da177e4SLinus Torvalds 	}
22651da177e4SLinus Torvalds 
22661da177e4SLinus Torvalds 	/* If we need to update the frag list, we are in trouble.
226709001b03SWenhua Shi 	 * Certainly, it is possible to add an offset to the skb data,
22681da177e4SLinus Torvalds 	 * but taking into account that pulling is expected to
22691da177e4SLinus Torvalds 	 * be a very rare operation, it is worth fighting against further
22701da177e4SLinus Torvalds 	 * bloating of the skb head and crucifying ourselves here instead.
22711da177e4SLinus Torvalds 	 * Pure masochism, indeed. 8)8)
22721da177e4SLinus Torvalds 	 */
22731da177e4SLinus Torvalds 	if (eat) {
22741da177e4SLinus Torvalds 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
22751da177e4SLinus Torvalds 		struct sk_buff *clone = NULL;
22761da177e4SLinus Torvalds 		struct sk_buff *insp = NULL;
22771da177e4SLinus Torvalds 
22781da177e4SLinus Torvalds 		do {
22791da177e4SLinus Torvalds 			if (list->len <= eat) {
22801da177e4SLinus Torvalds 				/* Eaten as whole. */
22811da177e4SLinus Torvalds 				eat -= list->len;
22821da177e4SLinus Torvalds 				list = list->next;
22831da177e4SLinus Torvalds 				insp = list;
22841da177e4SLinus Torvalds 			} else {
22851da177e4SLinus Torvalds 				/* Eaten partially. */
22861da177e4SLinus Torvalds 
22871da177e4SLinus Torvalds 				if (skb_shared(list)) {
22881da177e4SLinus Torvalds 					/* Sucks! We need to fork the list. :-( */
22891da177e4SLinus Torvalds 					clone = skb_clone(list, GFP_ATOMIC);
22901da177e4SLinus Torvalds 					if (!clone)
22911da177e4SLinus Torvalds 						return NULL;
22921da177e4SLinus Torvalds 					insp = list->next;
22931da177e4SLinus Torvalds 					list = clone;
22941da177e4SLinus Torvalds 				} else {
22951da177e4SLinus Torvalds 					/* This may be pulled without
22961da177e4SLinus Torvalds 					 * problems. */
22971da177e4SLinus Torvalds 					insp = list;
22981da177e4SLinus Torvalds 				}
22991da177e4SLinus Torvalds 				if (!pskb_pull(list, eat)) {
23001da177e4SLinus Torvalds 					kfree_skb(clone);
23011da177e4SLinus Torvalds 					return NULL;
23021da177e4SLinus Torvalds 				}
23031da177e4SLinus Torvalds 				break;
23041da177e4SLinus Torvalds 			}
23051da177e4SLinus Torvalds 		} while (eat);
23061da177e4SLinus Torvalds 
23071da177e4SLinus Torvalds 		/* Free pulled out fragments. */
23081da177e4SLinus Torvalds 		while ((list = skb_shinfo(skb)->frag_list) != insp) {
23091da177e4SLinus Torvalds 			skb_shinfo(skb)->frag_list = list->next;
2310ef527f96SEric Dumazet 			consume_skb(list);
23111da177e4SLinus Torvalds 		}
23121da177e4SLinus Torvalds 		/* And insert new clone at head. */
23131da177e4SLinus Torvalds 		if (clone) {
23141da177e4SLinus Torvalds 			clone->next = list;
23151da177e4SLinus Torvalds 			skb_shinfo(skb)->frag_list = clone;
23161da177e4SLinus Torvalds 		}
23171da177e4SLinus Torvalds 	}
23181da177e4SLinus Torvalds 	/* Success! Now we may commit changes to skb data. */
23191da177e4SLinus Torvalds 
23201da177e4SLinus Torvalds pull_pages:
23211da177e4SLinus Torvalds 	eat = delta;
23221da177e4SLinus Torvalds 	k = 0;
23231da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
23249e903e08SEric Dumazet 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
23259e903e08SEric Dumazet 
23269e903e08SEric Dumazet 		if (size <= eat) {
2327ea2ab693SIan Campbell 			skb_frag_unref(skb, i);
23289e903e08SEric Dumazet 			eat -= size;
23291da177e4SLinus Torvalds 		} else {
2330b54c9d5bSJonathan Lemon 			skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2331b54c9d5bSJonathan Lemon 
2332b54c9d5bSJonathan Lemon 			*frag = skb_shinfo(skb)->frags[i];
23331da177e4SLinus Torvalds 			if (eat) {
2334b54c9d5bSJonathan Lemon 				skb_frag_off_add(frag, eat);
2335b54c9d5bSJonathan Lemon 				skb_frag_size_sub(frag, eat);
23363ccc6c6fSlinzhang 				if (!i)
23373ccc6c6fSlinzhang 					goto end;
23381da177e4SLinus Torvalds 				eat = 0;
23391da177e4SLinus Torvalds 			}
23401da177e4SLinus Torvalds 			k++;
23411da177e4SLinus Torvalds 		}
23421da177e4SLinus Torvalds 	}
23431da177e4SLinus Torvalds 	skb_shinfo(skb)->nr_frags = k;
23441da177e4SLinus Torvalds 
23453ccc6c6fSlinzhang end:
23461da177e4SLinus Torvalds 	skb->tail     += delta;
23471da177e4SLinus Torvalds 	skb->data_len -= delta;
23481da177e4SLinus Torvalds 
23491f8b977aSWillem de Bruijn 	if (!skb->data_len)
23501f8b977aSWillem de Bruijn 		skb_zcopy_clear(skb, false);
23511f8b977aSWillem de Bruijn 
235227a884dcSArnaldo Carvalho de Melo 	return skb_tail_pointer(skb);
23531da177e4SLinus Torvalds }
2354b4ac530fSDavid S. Miller EXPORT_SYMBOL(__pskb_pull_tail);
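
/* Editorial note: an illustrative sketch, not part of this file. Callers
 * rarely use __pskb_pull_tail() directly; the usual front end is
 * pskb_may_pull(), which guarantees a header lies in the linear area before
 * it is dereferenced. struct ethhdr and ETH_HLEN come from
 * <linux/if_ether.h>; example_peek_eth() is made up.
 */
static struct ethhdr *example_peek_eth(struct sk_buff *skb)
{
	/* May call __pskb_pull_tail() and therefore move skb->data; any
	 * cached pointers into the header must be reloaded afterwards.
	 */
	if (!pskb_may_pull(skb, ETH_HLEN))
		return NULL;
	return (struct ethhdr *)skb->data;
}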
23551da177e4SLinus Torvalds 
235622019b17SEric Dumazet /**
235722019b17SEric Dumazet  *	skb_copy_bits - copy bits from skb to kernel buffer
235822019b17SEric Dumazet  *	@skb: source skb
235922019b17SEric Dumazet  *	@offset: offset in source
236022019b17SEric Dumazet  *	@to: destination buffer
236122019b17SEric Dumazet  *	@len: number of bytes to copy
236222019b17SEric Dumazet  *
236322019b17SEric Dumazet  *	Copy the specified number of bytes from the source skb to the
236422019b17SEric Dumazet  *	destination buffer.
236522019b17SEric Dumazet  *
236622019b17SEric Dumazet  *	CAUTION ! :
236722019b17SEric Dumazet  *		If its prototype is ever changed,
236822019b17SEric Dumazet  *		check arch/{*}/net/{*}.S files,
236922019b17SEric Dumazet  *		since it is called from BPF assembly code.
237022019b17SEric Dumazet  */
23711da177e4SLinus Torvalds int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
23721da177e4SLinus Torvalds {
23731a028e50SDavid S. Miller 	int start = skb_headlen(skb);
2374fbb398a8SDavid S. Miller 	struct sk_buff *frag_iter;
2375fbb398a8SDavid S. Miller 	int i, copy;
23761da177e4SLinus Torvalds 
23771da177e4SLinus Torvalds 	if (offset > (int)skb->len - len)
23781da177e4SLinus Torvalds 		goto fault;
23791da177e4SLinus Torvalds 
23801da177e4SLinus Torvalds 	/* Copy header. */
23811a028e50SDavid S. Miller 	if ((copy = start - offset) > 0) {
23821da177e4SLinus Torvalds 		if (copy > len)
23831da177e4SLinus Torvalds 			copy = len;
2384d626f62bSArnaldo Carvalho de Melo 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
23851da177e4SLinus Torvalds 		if ((len -= copy) == 0)
23861da177e4SLinus Torvalds 			return 0;
23871da177e4SLinus Torvalds 		offset += copy;
23881da177e4SLinus Torvalds 		to     += copy;
23891da177e4SLinus Torvalds 	}
23901da177e4SLinus Torvalds 
23911da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
23921a028e50SDavid S. Miller 		int end;
239351c56b00SEric Dumazet 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
23941da177e4SLinus Torvalds 
2395547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
23961a028e50SDavid S. Miller 
239751c56b00SEric Dumazet 		end = start + skb_frag_size(f);
23981da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
2399c613c209SWillem de Bruijn 			u32 p_off, p_len, copied;
2400c613c209SWillem de Bruijn 			struct page *p;
24011da177e4SLinus Torvalds 			u8 *vaddr;
24021da177e4SLinus Torvalds 
24031da177e4SLinus Torvalds 			if (copy > len)
24041da177e4SLinus Torvalds 				copy = len;
24051da177e4SLinus Torvalds 
2406c613c209SWillem de Bruijn 			skb_frag_foreach_page(f,
2407b54c9d5bSJonathan Lemon 					      skb_frag_off(f) + offset - start,
2408c613c209SWillem de Bruijn 					      copy, p, p_off, p_len, copied) {
2409c613c209SWillem de Bruijn 				vaddr = kmap_atomic(p);
2410c613c209SWillem de Bruijn 				memcpy(to + copied, vaddr + p_off, p_len);
241151c56b00SEric Dumazet 				kunmap_atomic(vaddr);
2412c613c209SWillem de Bruijn 			}
24131da177e4SLinus Torvalds 
24141da177e4SLinus Torvalds 			if ((len -= copy) == 0)
24151da177e4SLinus Torvalds 				return 0;
24161da177e4SLinus Torvalds 			offset += copy;
24171da177e4SLinus Torvalds 			to     += copy;
24181da177e4SLinus Torvalds 		}
24191a028e50SDavid S. Miller 		start = end;
24201da177e4SLinus Torvalds 	}
24211da177e4SLinus Torvalds 
2422fbb398a8SDavid S. Miller 	skb_walk_frags(skb, frag_iter) {
24231a028e50SDavid S. Miller 		int end;
24241da177e4SLinus Torvalds 
2425547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
24261a028e50SDavid S. Miller 
2427fbb398a8SDavid S. Miller 		end = start + frag_iter->len;
24281da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
24291da177e4SLinus Torvalds 			if (copy > len)
24301da177e4SLinus Torvalds 				copy = len;
2431fbb398a8SDavid S. Miller 			if (skb_copy_bits(frag_iter, offset - start, to, copy))
24321da177e4SLinus Torvalds 				goto fault;
24331da177e4SLinus Torvalds 			if ((len -= copy) == 0)
24341da177e4SLinus Torvalds 				return 0;
24351da177e4SLinus Torvalds 			offset += copy;
24361da177e4SLinus Torvalds 			to     += copy;
24371da177e4SLinus Torvalds 		}
24381a028e50SDavid S. Miller 		start = end;
24391da177e4SLinus Torvalds 	}
2440a6686f2fSShirley Ma 
24411da177e4SLinus Torvalds 	if (!len)
24421da177e4SLinus Torvalds 		return 0;
24431da177e4SLinus Torvalds 
24441da177e4SLinus Torvalds fault:
24451da177e4SLinus Torvalds 	return -EFAULT;
24461da177e4SLinus Torvalds }
2447b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_bits);
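
/* Editorial note: an illustrative sketch, not part of this file. Reading
 * bytes that may span the linear head, the page frags and the frag list goes
 * through skb_copy_bits(); skb_header_pointer() in <linux/skbuff.h> wraps the
 * same idea. example_read_bytes() is made up.
 */
static int example_read_bytes(const struct sk_buff *skb, int offset,
			      void *buf, int len)
{
	if (offset < 0 || len < 0 || offset + len > skb->len)
		return -EMSGSIZE;

	return skb_copy_bits(skb, offset, buf, len);	/* 0 or -EFAULT */
}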
24481da177e4SLinus Torvalds 
24499c55e01cSJens Axboe /*
24509c55e01cSJens Axboe  * Callback from splice_to_pipe(), if we need to release some pages
24519c55e01cSJens Axboe  * at the end of the spd in case we errored out while filling the pipe.
24529c55e01cSJens Axboe  */
24539c55e01cSJens Axboe static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
24549c55e01cSJens Axboe {
24558b9d3728SJarek Poplawski 	put_page(spd->pages[i]);
24568b9d3728SJarek Poplawski }
24579c55e01cSJens Axboe 
2458a108d5f3SDavid S. Miller static struct page *linear_to_page(struct page *page, unsigned int *len,
24594fb66994SJarek Poplawski 				   unsigned int *offset,
246018aafc62SEric Dumazet 				   struct sock *sk)
24618b9d3728SJarek Poplawski {
24625640f768SEric Dumazet 	struct page_frag *pfrag = sk_page_frag(sk);
24638b9d3728SJarek Poplawski 
24645640f768SEric Dumazet 	if (!sk_page_frag_refill(sk, pfrag))
24658b9d3728SJarek Poplawski 		return NULL;
24664fb66994SJarek Poplawski 
24675640f768SEric Dumazet 	*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
24684fb66994SJarek Poplawski 
24695640f768SEric Dumazet 	memcpy(page_address(pfrag->page) + pfrag->offset,
24705640f768SEric Dumazet 	       page_address(page) + *offset, *len);
24715640f768SEric Dumazet 	*offset = pfrag->offset;
24725640f768SEric Dumazet 	pfrag->offset += *len;
24734fb66994SJarek Poplawski 
24745640f768SEric Dumazet 	return pfrag->page;
24759c55e01cSJens Axboe }
24769c55e01cSJens Axboe 
247741c73a0dSEric Dumazet static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
247841c73a0dSEric Dumazet 			     struct page *page,
247941c73a0dSEric Dumazet 			     unsigned int offset)
248041c73a0dSEric Dumazet {
248141c73a0dSEric Dumazet 	return	spd->nr_pages &&
248241c73a0dSEric Dumazet 		spd->pages[spd->nr_pages - 1] == page &&
248341c73a0dSEric Dumazet 		(spd->partial[spd->nr_pages - 1].offset +
248441c73a0dSEric Dumazet 		 spd->partial[spd->nr_pages - 1].len == offset);
248541c73a0dSEric Dumazet }
248641c73a0dSEric Dumazet 
24879c55e01cSJens Axboe /*
24889c55e01cSJens Axboe  * Fill page/offset/length into spd, if it can hold more pages.
24899c55e01cSJens Axboe  */
2490a108d5f3SDavid S. Miller static bool spd_fill_page(struct splice_pipe_desc *spd,
249135f3d14dSJens Axboe 			  struct pipe_inode_info *pipe, struct page *page,
24924fb66994SJarek Poplawski 			  unsigned int *len, unsigned int offset,
249318aafc62SEric Dumazet 			  bool linear,
24947a67e56fSJarek Poplawski 			  struct sock *sk)
24959c55e01cSJens Axboe {
249641c73a0dSEric Dumazet 	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
2497a108d5f3SDavid S. Miller 		return true;
24989c55e01cSJens Axboe 
24998b9d3728SJarek Poplawski 	if (linear) {
250018aafc62SEric Dumazet 		page = linear_to_page(page, len, &offset, sk);
25018b9d3728SJarek Poplawski 		if (!page)
2502a108d5f3SDavid S. Miller 			return true;
250341c73a0dSEric Dumazet 	}
250441c73a0dSEric Dumazet 	if (spd_can_coalesce(spd, page, offset)) {
250541c73a0dSEric Dumazet 		spd->partial[spd->nr_pages - 1].len += *len;
2506a108d5f3SDavid S. Miller 		return false;
250741c73a0dSEric Dumazet 	}
25088b9d3728SJarek Poplawski 	get_page(page);
25099c55e01cSJens Axboe 	spd->pages[spd->nr_pages] = page;
25104fb66994SJarek Poplawski 	spd->partial[spd->nr_pages].len = *len;
25119c55e01cSJens Axboe 	spd->partial[spd->nr_pages].offset = offset;
25129c55e01cSJens Axboe 	spd->nr_pages++;
25138b9d3728SJarek Poplawski 
2514a108d5f3SDavid S. Miller 	return false;
25159c55e01cSJens Axboe }
25169c55e01cSJens Axboe 
2517a108d5f3SDavid S. Miller static bool __splice_segment(struct page *page, unsigned int poff,
25182870c43dSOctavian Purdila 			     unsigned int plen, unsigned int *off,
251918aafc62SEric Dumazet 			     unsigned int *len,
2520d7ccf7c0SEric Dumazet 			     struct splice_pipe_desc *spd, bool linear,
252135f3d14dSJens Axboe 			     struct sock *sk,
252235f3d14dSJens Axboe 			     struct pipe_inode_info *pipe)
25239c55e01cSJens Axboe {
25242870c43dSOctavian Purdila 	if (!*len)
2525a108d5f3SDavid S. Miller 		return true;
25269c55e01cSJens Axboe 
25272870c43dSOctavian Purdila 	/* skip this segment if already processed */
25282870c43dSOctavian Purdila 	if (*off >= plen) {
25292870c43dSOctavian Purdila 		*off -= plen;
2530a108d5f3SDavid S. Miller 		return false;
25312870c43dSOctavian Purdila 	}
25322870c43dSOctavian Purdila 
25332870c43dSOctavian Purdila 	/* ignore any bits we already processed */
25349ca1b22dSEric Dumazet 	poff += *off;
25359ca1b22dSEric Dumazet 	plen -= *off;
25362870c43dSOctavian Purdila 	*off = 0;
25372870c43dSOctavian Purdila 
253818aafc62SEric Dumazet 	do {
253918aafc62SEric Dumazet 		unsigned int flen = min(*len, plen);
25402870c43dSOctavian Purdila 
254118aafc62SEric Dumazet 		if (spd_fill_page(spd, pipe, page, &flen, poff,
254218aafc62SEric Dumazet 				  linear, sk))
2543a108d5f3SDavid S. Miller 			return true;
254418aafc62SEric Dumazet 		poff += flen;
254518aafc62SEric Dumazet 		plen -= flen;
25462870c43dSOctavian Purdila 		*len -= flen;
254718aafc62SEric Dumazet 	} while (*len && plen);
25482870c43dSOctavian Purdila 
2549a108d5f3SDavid S. Miller 	return false;
2550db43a282SOctavian Purdila }
25519c55e01cSJens Axboe 
25529c55e01cSJens Axboe /*
2553a108d5f3SDavid S. Miller  * Map linear and fragment data from the skb to spd. It reports true if the
25542870c43dSOctavian Purdila  * pipe is full or if we already spliced the requested length.
25559c55e01cSJens Axboe  */
2556a108d5f3SDavid S. Miller static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
255735f3d14dSJens Axboe 			      unsigned int *offset, unsigned int *len,
255835f3d14dSJens Axboe 			      struct splice_pipe_desc *spd, struct sock *sk)
25592870c43dSOctavian Purdila {
25602870c43dSOctavian Purdila 	int seg;
2561fa9835e5STom Herbert 	struct sk_buff *iter;
25629c55e01cSJens Axboe 
25631d0c0b32SEric Dumazet 	/* map the linear part:
25642996d31fSAlexander Duyck 	 * If skb->head_frag is set, this 'linear' part is backed by a
25652996d31fSAlexander Duyck 	 * fragment, and if the head is not shared with any clones then
25662996d31fSAlexander Duyck 	 * we can avoid a copy since we own the head portion of this page.
25679c55e01cSJens Axboe 	 */
25682870c43dSOctavian Purdila 	if (__splice_segment(virt_to_page(skb->data),
25692870c43dSOctavian Purdila 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
25702870c43dSOctavian Purdila 			     skb_headlen(skb),
257118aafc62SEric Dumazet 			     offset, len, spd,
25723a7c1ee4SAlexander Duyck 			     skb_head_is_locked(skb),
25731d0c0b32SEric Dumazet 			     sk, pipe))
2574a108d5f3SDavid S. Miller 		return true;
25759c55e01cSJens Axboe 
25769c55e01cSJens Axboe 	/*
25779c55e01cSJens Axboe 	 * then map the fragments
25789c55e01cSJens Axboe 	 */
25799c55e01cSJens Axboe 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
25809c55e01cSJens Axboe 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
25819c55e01cSJens Axboe 
2582ea2ab693SIan Campbell 		if (__splice_segment(skb_frag_page(f),
2583b54c9d5bSJonathan Lemon 				     skb_frag_off(f), skb_frag_size(f),
258418aafc62SEric Dumazet 				     offset, len, spd, false, sk, pipe))
2585a108d5f3SDavid S. Miller 			return true;
25869c55e01cSJens Axboe 	}
25879c55e01cSJens Axboe 
2588fa9835e5STom Herbert 	skb_walk_frags(skb, iter) {
2589fa9835e5STom Herbert 		if (*offset >= iter->len) {
2590fa9835e5STom Herbert 			*offset -= iter->len;
2591fa9835e5STom Herbert 			continue;
2592fa9835e5STom Herbert 		}
2593fa9835e5STom Herbert 		/* __skb_splice_bits() only fails if the output has no room
2594fa9835e5STom Herbert 		 * left, so no point in going over the frag_list for the error
2595fa9835e5STom Herbert 		 * case.
2596fa9835e5STom Herbert 		 */
2597fa9835e5STom Herbert 		if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
2598fa9835e5STom Herbert 			return true;
2599fa9835e5STom Herbert 	}
2600fa9835e5STom Herbert 
2601a108d5f3SDavid S. Miller 	return false;
26029c55e01cSJens Axboe }
26039c55e01cSJens Axboe 
26049c55e01cSJens Axboe /*
26059c55e01cSJens Axboe  * Map data from the skb to a pipe. Should handle the linear part,
2606fa9835e5STom Herbert  * the fragments, and the frag list.
26079c55e01cSJens Axboe  */
2608a60e3cc7SHannes Frederic Sowa int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
26099c55e01cSJens Axboe 		    struct pipe_inode_info *pipe, unsigned int tlen,
261025869262SAl Viro 		    unsigned int flags)
26119c55e01cSJens Axboe {
261241c73a0dSEric Dumazet 	struct partial_page partial[MAX_SKB_FRAGS];
261341c73a0dSEric Dumazet 	struct page *pages[MAX_SKB_FRAGS];
26149c55e01cSJens Axboe 	struct splice_pipe_desc spd = {
26159c55e01cSJens Axboe 		.pages = pages,
26169c55e01cSJens Axboe 		.partial = partial,
2617047fe360SEric Dumazet 		.nr_pages_max = MAX_SKB_FRAGS,
261828a625cbSMiklos Szeredi 		.ops = &nosteal_pipe_buf_ops,
26199c55e01cSJens Axboe 		.spd_release = sock_spd_release,
26209c55e01cSJens Axboe 	};
262135f3d14dSJens Axboe 	int ret = 0;
262235f3d14dSJens Axboe 
2623fa9835e5STom Herbert 	__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
26249c55e01cSJens Axboe 
2625a60e3cc7SHannes Frederic Sowa 	if (spd.nr_pages)
262625869262SAl Viro 		ret = splice_to_pipe(pipe, &spd);
26279c55e01cSJens Axboe 
262835f3d14dSJens Axboe 	return ret;
26299c55e01cSJens Axboe }
26302b514574SHannes Frederic Sowa EXPORT_SYMBOL_GPL(skb_splice_bits);
26319c55e01cSJens Axboe 
26320739cd28SCong Wang static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg,
26330739cd28SCong Wang 			    struct kvec *vec, size_t num, size_t size)
26340739cd28SCong Wang {
26350739cd28SCong Wang 	struct socket *sock = sk->sk_socket;
26360739cd28SCong Wang 
26370739cd28SCong Wang 	if (!sock)
26380739cd28SCong Wang 		return -EINVAL;
26390739cd28SCong Wang 	return kernel_sendmsg(sock, msg, vec, num, size);
26400739cd28SCong Wang }
26410739cd28SCong Wang 
26420739cd28SCong Wang static int sendpage_unlocked(struct sock *sk, struct page *page, int offset,
26430739cd28SCong Wang 			     size_t size, int flags)
26440739cd28SCong Wang {
26450739cd28SCong Wang 	struct socket *sock = sk->sk_socket;
26460739cd28SCong Wang 
26470739cd28SCong Wang 	if (!sock)
26480739cd28SCong Wang 		return -EINVAL;
26490739cd28SCong Wang 	return kernel_sendpage(sock, page, offset, size, flags);
26500739cd28SCong Wang }
26510739cd28SCong Wang 
26520739cd28SCong Wang typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg,
26530739cd28SCong Wang 			    struct kvec *vec, size_t num, size_t size);
26540739cd28SCong Wang typedef int (*sendpage_func)(struct sock *sk, struct page *page, int offset,
26550739cd28SCong Wang 			     size_t size, int flags);
26560739cd28SCong Wang static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
26570739cd28SCong Wang 			   int len, sendmsg_func sendmsg, sendpage_func sendpage)
265820bf50deSTom Herbert {
265920bf50deSTom Herbert 	unsigned int orig_len = len;
266020bf50deSTom Herbert 	struct sk_buff *head = skb;
266120bf50deSTom Herbert 	unsigned short fragidx;
266220bf50deSTom Herbert 	int slen, ret;
266320bf50deSTom Herbert 
266420bf50deSTom Herbert do_frag_list:
266520bf50deSTom Herbert 
266620bf50deSTom Herbert 	/* Deal with head data */
266720bf50deSTom Herbert 	while (offset < skb_headlen(skb) && len) {
266820bf50deSTom Herbert 		struct kvec kv;
266920bf50deSTom Herbert 		struct msghdr msg;
267020bf50deSTom Herbert 
267120bf50deSTom Herbert 		slen = min_t(int, len, skb_headlen(skb) - offset);
267220bf50deSTom Herbert 		kv.iov_base = skb->data + offset;
2673db5980d8SJohn Fastabend 		kv.iov_len = slen;
267420bf50deSTom Herbert 		memset(&msg, 0, sizeof(msg));
2675bd95e678SJohn Fastabend 		msg.msg_flags = MSG_DONTWAIT;
267620bf50deSTom Herbert 
26770739cd28SCong Wang 		ret = INDIRECT_CALL_2(sendmsg, kernel_sendmsg_locked,
26780739cd28SCong Wang 				      sendmsg_unlocked, sk, &msg, &kv, 1, slen);
267920bf50deSTom Herbert 		if (ret <= 0)
268020bf50deSTom Herbert 			goto error;
268120bf50deSTom Herbert 
268220bf50deSTom Herbert 		offset += ret;
268320bf50deSTom Herbert 		len -= ret;
268420bf50deSTom Herbert 	}
268520bf50deSTom Herbert 
268620bf50deSTom Herbert 	/* Was all of the data in the skb head? */
268720bf50deSTom Herbert 	if (!len)
268820bf50deSTom Herbert 		goto out;
268920bf50deSTom Herbert 
269020bf50deSTom Herbert 	/* Make offset relative to start of frags */
269120bf50deSTom Herbert 	offset -= skb_headlen(skb);
269220bf50deSTom Herbert 
269320bf50deSTom Herbert 	/* Find where we are in frag list */
269420bf50deSTom Herbert 	for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
269520bf50deSTom Herbert 		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
269620bf50deSTom Herbert 
2697d8e18a51SMatthew Wilcox (Oracle) 		if (offset < skb_frag_size(frag))
269820bf50deSTom Herbert 			break;
269920bf50deSTom Herbert 
2700d8e18a51SMatthew Wilcox (Oracle) 		offset -= skb_frag_size(frag);
270120bf50deSTom Herbert 	}
270220bf50deSTom Herbert 
270320bf50deSTom Herbert 	for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
270420bf50deSTom Herbert 		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
270520bf50deSTom Herbert 
2706d8e18a51SMatthew Wilcox (Oracle) 		slen = min_t(size_t, len, skb_frag_size(frag) - offset);
270720bf50deSTom Herbert 
270820bf50deSTom Herbert 		while (slen) {
27090739cd28SCong Wang 			ret = INDIRECT_CALL_2(sendpage, kernel_sendpage_locked,
27100739cd28SCong Wang 					      sendpage_unlocked, sk,
27110739cd28SCong Wang 					      skb_frag_page(frag),
2712b54c9d5bSJonathan Lemon 					      skb_frag_off(frag) + offset,
271320bf50deSTom Herbert 					      slen, MSG_DONTWAIT);
271420bf50deSTom Herbert 			if (ret <= 0)
271520bf50deSTom Herbert 				goto error;
271620bf50deSTom Herbert 
271720bf50deSTom Herbert 			len -= ret;
271820bf50deSTom Herbert 			offset += ret;
271920bf50deSTom Herbert 			slen -= ret;
272020bf50deSTom Herbert 		}
272120bf50deSTom Herbert 
272220bf50deSTom Herbert 		offset = 0;
272320bf50deSTom Herbert 	}
272420bf50deSTom Herbert 
272520bf50deSTom Herbert 	if (len) {
272620bf50deSTom Herbert 		/* Process any frag lists */
272720bf50deSTom Herbert 
272820bf50deSTom Herbert 		if (skb == head) {
272920bf50deSTom Herbert 			if (skb_has_frag_list(skb)) {
273020bf50deSTom Herbert 				skb = skb_shinfo(skb)->frag_list;
273120bf50deSTom Herbert 				goto do_frag_list;
273220bf50deSTom Herbert 			}
273320bf50deSTom Herbert 		} else if (skb->next) {
273420bf50deSTom Herbert 			skb = skb->next;
273520bf50deSTom Herbert 			goto do_frag_list;
273620bf50deSTom Herbert 		}
273720bf50deSTom Herbert 	}
273820bf50deSTom Herbert 
273920bf50deSTom Herbert out:
274020bf50deSTom Herbert 	return orig_len - len;
274120bf50deSTom Herbert 
274220bf50deSTom Herbert error:
274320bf50deSTom Herbert 	return orig_len == len ? ret : orig_len - len;
274420bf50deSTom Herbert }
27450739cd28SCong Wang 
27460739cd28SCong Wang /* Send skb data on a socket. Socket must be locked. */
27470739cd28SCong Wang int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
27480739cd28SCong Wang 			 int len)
27490739cd28SCong Wang {
27500739cd28SCong Wang 	return __skb_send_sock(sk, skb, offset, len, kernel_sendmsg_locked,
27510739cd28SCong Wang 			       kernel_sendpage_locked);
27520739cd28SCong Wang }
275320bf50deSTom Herbert EXPORT_SYMBOL_GPL(skb_send_sock_locked);
275420bf50deSTom Herbert 
27550739cd28SCong Wang /* Send skb data on a socket. Socket must be unlocked. */
27560739cd28SCong Wang int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
27570739cd28SCong Wang {
27580739cd28SCong Wang 	return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked,
27590739cd28SCong Wang 			       sendpage_unlocked);
27600739cd28SCong Wang }
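
/* Editorial note: an illustrative sketch, not part of this file. A
 * sockmap/kTLS-style caller typically loops over short writes; the socket
 * lock is assumed to be held for the _locked variant.
 * example_send_all_locked() and its error handling are simplified
 * assumptions.
 */
static int example_send_all_locked(struct sock *sk, struct sk_buff *skb)
{
	int offset = 0, remaining = skb->len;

	while (remaining) {
		int sent = skb_send_sock_locked(sk, skb, offset, remaining);

		if (sent <= 0)
			return sent ? sent : -EIO;
		offset    += sent;
		remaining -= sent;
	}
	return skb->len;
}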
27610739cd28SCong Wang 
2762357b40a1SHerbert Xu /**
2763357b40a1SHerbert Xu  *	skb_store_bits - store bits from kernel buffer to skb
2764357b40a1SHerbert Xu  *	@skb: destination buffer
2765357b40a1SHerbert Xu  *	@offset: offset in destination
2766357b40a1SHerbert Xu  *	@from: source buffer
2767357b40a1SHerbert Xu  *	@len: number of bytes to copy
2768357b40a1SHerbert Xu  *
2769357b40a1SHerbert Xu  *	Copy the specified number of bytes from the source buffer to the
2770357b40a1SHerbert Xu  *	destination skb.  This function handles all the messy bits of
2771357b40a1SHerbert Xu  *	traversing fragment lists and such.
2772357b40a1SHerbert Xu  */
2773357b40a1SHerbert Xu 
27740c6fcc8aSStephen Hemminger int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
2775357b40a1SHerbert Xu {
27761a028e50SDavid S. Miller 	int start = skb_headlen(skb);
2777fbb398a8SDavid S. Miller 	struct sk_buff *frag_iter;
2778fbb398a8SDavid S. Miller 	int i, copy;
2779357b40a1SHerbert Xu 
2780357b40a1SHerbert Xu 	if (offset > (int)skb->len - len)
2781357b40a1SHerbert Xu 		goto fault;
2782357b40a1SHerbert Xu 
27831a028e50SDavid S. Miller 	if ((copy = start - offset) > 0) {
2784357b40a1SHerbert Xu 		if (copy > len)
2785357b40a1SHerbert Xu 			copy = len;
278627d7ff46SArnaldo Carvalho de Melo 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
2787357b40a1SHerbert Xu 		if ((len -= copy) == 0)
2788357b40a1SHerbert Xu 			return 0;
2789357b40a1SHerbert Xu 		offset += copy;
2790357b40a1SHerbert Xu 		from += copy;
2791357b40a1SHerbert Xu 	}
2792357b40a1SHerbert Xu 
2793357b40a1SHerbert Xu 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2794357b40a1SHerbert Xu 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
27951a028e50SDavid S. Miller 		int end;
2796357b40a1SHerbert Xu 
2797547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
27981a028e50SDavid S. Miller 
27999e903e08SEric Dumazet 		end = start + skb_frag_size(frag);
2800357b40a1SHerbert Xu 		if ((copy = end - offset) > 0) {
2801c613c209SWillem de Bruijn 			u32 p_off, p_len, copied;
2802c613c209SWillem de Bruijn 			struct page *p;
2803357b40a1SHerbert Xu 			u8 *vaddr;
2804357b40a1SHerbert Xu 
2805357b40a1SHerbert Xu 			if (copy > len)
2806357b40a1SHerbert Xu 				copy = len;
2807357b40a1SHerbert Xu 
2808c613c209SWillem de Bruijn 			skb_frag_foreach_page(frag,
2809b54c9d5bSJonathan Lemon 					      skb_frag_off(frag) + offset - start,
2810c613c209SWillem de Bruijn 					      copy, p, p_off, p_len, copied) {
2811c613c209SWillem de Bruijn 				vaddr = kmap_atomic(p);
2812c613c209SWillem de Bruijn 				memcpy(vaddr + p_off, from + copied, p_len);
281351c56b00SEric Dumazet 				kunmap_atomic(vaddr);
2814c613c209SWillem de Bruijn 			}
2815357b40a1SHerbert Xu 
2816357b40a1SHerbert Xu 			if ((len -= copy) == 0)
2817357b40a1SHerbert Xu 				return 0;
2818357b40a1SHerbert Xu 			offset += copy;
2819357b40a1SHerbert Xu 			from += copy;
2820357b40a1SHerbert Xu 		}
28211a028e50SDavid S. Miller 		start = end;
2822357b40a1SHerbert Xu 	}
2823357b40a1SHerbert Xu 
2824fbb398a8SDavid S. Miller 	skb_walk_frags(skb, frag_iter) {
28251a028e50SDavid S. Miller 		int end;
2826357b40a1SHerbert Xu 
2827547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
28281a028e50SDavid S. Miller 
2829fbb398a8SDavid S. Miller 		end = start + frag_iter->len;
2830357b40a1SHerbert Xu 		if ((copy = end - offset) > 0) {
2831357b40a1SHerbert Xu 			if (copy > len)
2832357b40a1SHerbert Xu 				copy = len;
2833fbb398a8SDavid S. Miller 			if (skb_store_bits(frag_iter, offset - start,
28341a028e50SDavid S. Miller 					   from, copy))
2835357b40a1SHerbert Xu 				goto fault;
2836357b40a1SHerbert Xu 			if ((len -= copy) == 0)
2837357b40a1SHerbert Xu 				return 0;
2838357b40a1SHerbert Xu 			offset += copy;
2839357b40a1SHerbert Xu 			from += copy;
2840357b40a1SHerbert Xu 		}
28411a028e50SDavid S. Miller 		start = end;
2842357b40a1SHerbert Xu 	}
2843357b40a1SHerbert Xu 	if (!len)
2844357b40a1SHerbert Xu 		return 0;
2845357b40a1SHerbert Xu 
2846357b40a1SHerbert Xu fault:
2847357b40a1SHerbert Xu 	return -EFAULT;
2848357b40a1SHerbert Xu }
2849357b40a1SHerbert Xu EXPORT_SYMBOL(skb_store_bits);
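
/* Editorial note: an illustrative sketch, not part of this file. Before
 * rewriting packet bytes with skb_store_bits(), callers normally make the
 * affected region private and writable first, e.g. with
 * skb_ensure_writable() (declared in <linux/skbuff.h>). example_patch_byte()
 * is made up.
 */
static int example_patch_byte(struct sk_buff *skb, unsigned int offset, u8 val)
{
	int err = skb_ensure_writable(skb, offset + 1);

	if (err)
		return err;

	return skb_store_bits(skb, offset, &val, 1);
}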
2850357b40a1SHerbert Xu 
28511da177e4SLinus Torvalds /* Checksum skb data. */
28522817a336SDaniel Borkmann __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
28532817a336SDaniel Borkmann 		      __wsum csum, const struct skb_checksum_ops *ops)
28541da177e4SLinus Torvalds {
28551a028e50SDavid S. Miller 	int start = skb_headlen(skb);
28561a028e50SDavid S. Miller 	int i, copy = start - offset;
2857fbb398a8SDavid S. Miller 	struct sk_buff *frag_iter;
28581da177e4SLinus Torvalds 	int pos = 0;
28591da177e4SLinus Torvalds 
28601da177e4SLinus Torvalds 	/* Checksum header. */
28611da177e4SLinus Torvalds 	if (copy > 0) {
28621da177e4SLinus Torvalds 		if (copy > len)
28631da177e4SLinus Torvalds 			copy = len;
28642544af03SMatteo Croce 		csum = INDIRECT_CALL_1(ops->update, csum_partial_ext,
28652544af03SMatteo Croce 				       skb->data + offset, copy, csum);
28661da177e4SLinus Torvalds 		if ((len -= copy) == 0)
28671da177e4SLinus Torvalds 			return csum;
28681da177e4SLinus Torvalds 		offset += copy;
28691da177e4SLinus Torvalds 		pos	= copy;
28701da177e4SLinus Torvalds 	}
28711da177e4SLinus Torvalds 
28721da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
28731a028e50SDavid S. Miller 		int end;
287451c56b00SEric Dumazet 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
28751da177e4SLinus Torvalds 
2876547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
28771a028e50SDavid S. Miller 
287851c56b00SEric Dumazet 		end = start + skb_frag_size(frag);
28791da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
2880c613c209SWillem de Bruijn 			u32 p_off, p_len, copied;
2881c613c209SWillem de Bruijn 			struct page *p;
288244bb9363SAl Viro 			__wsum csum2;
28831da177e4SLinus Torvalds 			u8 *vaddr;
28841da177e4SLinus Torvalds 
28851da177e4SLinus Torvalds 			if (copy > len)
28861da177e4SLinus Torvalds 				copy = len;
2887c613c209SWillem de Bruijn 
2888c613c209SWillem de Bruijn 			skb_frag_foreach_page(frag,
2889b54c9d5bSJonathan Lemon 					      skb_frag_off(frag) + offset - start,
2890c613c209SWillem de Bruijn 					      copy, p, p_off, p_len, copied) {
2891c613c209SWillem de Bruijn 				vaddr = kmap_atomic(p);
28922544af03SMatteo Croce 				csum2 = INDIRECT_CALL_1(ops->update,
28932544af03SMatteo Croce 							csum_partial_ext,
28942544af03SMatteo Croce 							vaddr + p_off, p_len, 0);
289551c56b00SEric Dumazet 				kunmap_atomic(vaddr);
28962544af03SMatteo Croce 				csum = INDIRECT_CALL_1(ops->combine,
28972544af03SMatteo Croce 						       csum_block_add_ext, csum,
28982544af03SMatteo Croce 						       csum2, pos, p_len);
2899c613c209SWillem de Bruijn 				pos += p_len;
2900c613c209SWillem de Bruijn 			}
2901c613c209SWillem de Bruijn 
29021da177e4SLinus Torvalds 			if (!(len -= copy))
29031da177e4SLinus Torvalds 				return csum;
29041da177e4SLinus Torvalds 			offset += copy;
29051da177e4SLinus Torvalds 		}
29061a028e50SDavid S. Miller 		start = end;
29071da177e4SLinus Torvalds 	}
29081da177e4SLinus Torvalds 
2909fbb398a8SDavid S. Miller 	skb_walk_frags(skb, frag_iter) {
29101a028e50SDavid S. Miller 		int end;
29111da177e4SLinus Torvalds 
2912547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
29131a028e50SDavid S. Miller 
2914fbb398a8SDavid S. Miller 		end = start + frag_iter->len;
29151da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
29165f92a738SAl Viro 			__wsum csum2;
29171da177e4SLinus Torvalds 			if (copy > len)
29181da177e4SLinus Torvalds 				copy = len;
29192817a336SDaniel Borkmann 			csum2 = __skb_checksum(frag_iter, offset - start,
29202817a336SDaniel Borkmann 					       copy, 0, ops);
29212544af03SMatteo Croce 			csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext,
29222544af03SMatteo Croce 					       csum, csum2, pos, copy);
29231da177e4SLinus Torvalds 			if ((len -= copy) == 0)
29241da177e4SLinus Torvalds 				return csum;
29251da177e4SLinus Torvalds 			offset += copy;
29261da177e4SLinus Torvalds 			pos    += copy;
29271da177e4SLinus Torvalds 		}
29281a028e50SDavid S. Miller 		start = end;
29291da177e4SLinus Torvalds 	}
293009a62660SKris Katterjohn 	BUG_ON(len);
29311da177e4SLinus Torvalds 
29321da177e4SLinus Torvalds 	return csum;
29331da177e4SLinus Torvalds }
29342817a336SDaniel Borkmann EXPORT_SYMBOL(__skb_checksum);
29352817a336SDaniel Borkmann 
29362817a336SDaniel Borkmann __wsum skb_checksum(const struct sk_buff *skb, int offset,
29372817a336SDaniel Borkmann 		    int len, __wsum csum)
29382817a336SDaniel Borkmann {
29392817a336SDaniel Borkmann 	const struct skb_checksum_ops ops = {
2940cea80ea8SDaniel Borkmann 		.update  = csum_partial_ext,
29412817a336SDaniel Borkmann 		.combine = csum_block_add_ext,
29422817a336SDaniel Borkmann 	};
29432817a336SDaniel Borkmann 
29442817a336SDaniel Borkmann 	return __skb_checksum(skb, offset, len, csum, &ops);
29452817a336SDaniel Borkmann }
2946b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_checksum);
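
/* Editorial note: an illustrative sketch, not part of this file.
 * skb_checksum() yields a running 32-bit sum (__wsum); csum_fold() reduces
 * it to the final 16-bit ones'-complement checksum. The pseudo-header that
 * real L4 protocols fold in is deliberately omitted; example_payload_csum()
 * is made up and assumes 0 <= offset <= skb->len.
 */
static __sum16 example_payload_csum(const struct sk_buff *skb, int offset)
{
	__wsum sum = skb_checksum(skb, offset, skb->len - offset, 0);

	return csum_fold(sum);
}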
29471da177e4SLinus Torvalds 
29481da177e4SLinus Torvalds /* Both of above in one bottle. */
29491da177e4SLinus Torvalds 
295081d77662SAl Viro __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
29518d5930dfSAl Viro 				    u8 *to, int len)
29521da177e4SLinus Torvalds {
29531a028e50SDavid S. Miller 	int start = skb_headlen(skb);
29541a028e50SDavid S. Miller 	int i, copy = start - offset;
2955fbb398a8SDavid S. Miller 	struct sk_buff *frag_iter;
29561da177e4SLinus Torvalds 	int pos = 0;
29578d5930dfSAl Viro 	__wsum csum = 0;
29581da177e4SLinus Torvalds 
29591da177e4SLinus Torvalds 	/* Copy header. */
29601da177e4SLinus Torvalds 	if (copy > 0) {
29611da177e4SLinus Torvalds 		if (copy > len)
29621da177e4SLinus Torvalds 			copy = len;
29631da177e4SLinus Torvalds 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
2964cc44c17bSAl Viro 						 copy);
29651da177e4SLinus Torvalds 		if ((len -= copy) == 0)
29661da177e4SLinus Torvalds 			return csum;
29671da177e4SLinus Torvalds 		offset += copy;
29681da177e4SLinus Torvalds 		to     += copy;
29691da177e4SLinus Torvalds 		pos	= copy;
29701da177e4SLinus Torvalds 	}
29711da177e4SLinus Torvalds 
29721da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
29731a028e50SDavid S. Miller 		int end;
29741da177e4SLinus Torvalds 
2975547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
29761a028e50SDavid S. Miller 
29779e903e08SEric Dumazet 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
29781da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
2979c613c209SWillem de Bruijn 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2980c613c209SWillem de Bruijn 			u32 p_off, p_len, copied;
2981c613c209SWillem de Bruijn 			struct page *p;
29825084205fSAl Viro 			__wsum csum2;
29831da177e4SLinus Torvalds 			u8 *vaddr;
29841da177e4SLinus Torvalds 
29851da177e4SLinus Torvalds 			if (copy > len)
29861da177e4SLinus Torvalds 				copy = len;
2987c613c209SWillem de Bruijn 
2988c613c209SWillem de Bruijn 			skb_frag_foreach_page(frag,
2989b54c9d5bSJonathan Lemon 					      skb_frag_off(frag) + offset - start,
2990c613c209SWillem de Bruijn 					      copy, p, p_off, p_len, copied) {
2991c613c209SWillem de Bruijn 				vaddr = kmap_atomic(p);
2992c613c209SWillem de Bruijn 				csum2 = csum_partial_copy_nocheck(vaddr + p_off,
2993c613c209SWillem de Bruijn 								  to + copied,
2994cc44c17bSAl Viro 								  p_len);
299551c56b00SEric Dumazet 				kunmap_atomic(vaddr);
29961da177e4SLinus Torvalds 				csum = csum_block_add(csum, csum2, pos);
2997c613c209SWillem de Bruijn 				pos += p_len;
2998c613c209SWillem de Bruijn 			}
2999c613c209SWillem de Bruijn 
30001da177e4SLinus Torvalds 			if (!(len -= copy))
30011da177e4SLinus Torvalds 				return csum;
30021da177e4SLinus Torvalds 			offset += copy;
30031da177e4SLinus Torvalds 			to     += copy;
30041da177e4SLinus Torvalds 		}
30051a028e50SDavid S. Miller 		start = end;
30061da177e4SLinus Torvalds 	}
30071da177e4SLinus Torvalds 
3008fbb398a8SDavid S. Miller 	skb_walk_frags(skb, frag_iter) {
300981d77662SAl Viro 		__wsum csum2;
30101a028e50SDavid S. Miller 		int end;
30111da177e4SLinus Torvalds 
3012547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
30131a028e50SDavid S. Miller 
3014fbb398a8SDavid S. Miller 		end = start + frag_iter->len;
30151da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
30161da177e4SLinus Torvalds 			if (copy > len)
30171da177e4SLinus Torvalds 				copy = len;
3018fbb398a8SDavid S. Miller 			csum2 = skb_copy_and_csum_bits(frag_iter,
30191a028e50SDavid S. Miller 						       offset - start,
30208d5930dfSAl Viro 						       to, copy);
30211da177e4SLinus Torvalds 			csum = csum_block_add(csum, csum2, pos);
30221da177e4SLinus Torvalds 			if ((len -= copy) == 0)
30231da177e4SLinus Torvalds 				return csum;
30241da177e4SLinus Torvalds 			offset += copy;
30251da177e4SLinus Torvalds 			to     += copy;
30261da177e4SLinus Torvalds 			pos    += copy;
30271da177e4SLinus Torvalds 		}
30281a028e50SDavid S. Miller 		start = end;
30291da177e4SLinus Torvalds 	}
303009a62660SKris Katterjohn 	BUG_ON(len);
30311da177e4SLinus Torvalds 	return csum;
30321da177e4SLinus Torvalds }
3033b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_bits);
30341da177e4SLinus Torvalds 
303549f8e832SCong Wang __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
303649f8e832SCong Wang {
303749f8e832SCong Wang 	__sum16 sum;
303849f8e832SCong Wang 
303949f8e832SCong Wang 	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
304014641931SCong Wang 	/* See comments in __skb_checksum_complete(). */
304149f8e832SCong Wang 	if (likely(!sum)) {
304249f8e832SCong Wang 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
304349f8e832SCong Wang 		    !skb->csum_complete_sw)
30447fe50ac8SCong Wang 			netdev_rx_csum_fault(skb->dev, skb);
304549f8e832SCong Wang 	}
304649f8e832SCong Wang 	if (!skb_shared(skb))
304749f8e832SCong Wang 		skb->csum_valid = !sum;
304849f8e832SCong Wang 	return sum;
304949f8e832SCong Wang }
305049f8e832SCong Wang EXPORT_SYMBOL(__skb_checksum_complete_head);
305149f8e832SCong Wang 
305214641931SCong Wang /* This function assumes skb->csum already holds the pseudo header's checksum,
305314641931SCong Wang  * which has been changed from the hardware checksum, for example, by
305414641931SCong Wang  * __skb_checksum_validate_complete(). In addition, the original skb->csum must
305514641931SCong Wang  * have been validated unsuccessfully for the CHECKSUM_COMPLETE case.
305614641931SCong Wang  *
305714641931SCong Wang  * It returns non-zero if the recomputed checksum is still invalid, otherwise
305814641931SCong Wang  * zero. The new checksum is stored back into skb->csum unless the skb is
305914641931SCong Wang  * shared.
306014641931SCong Wang  */
306149f8e832SCong Wang __sum16 __skb_checksum_complete(struct sk_buff *skb)
306249f8e832SCong Wang {
306349f8e832SCong Wang 	__wsum csum;
306449f8e832SCong Wang 	__sum16 sum;
306549f8e832SCong Wang 
306649f8e832SCong Wang 	csum = skb_checksum(skb, 0, skb->len, 0);
306749f8e832SCong Wang 
306849f8e832SCong Wang 	sum = csum_fold(csum_add(skb->csum, csum));
306914641931SCong Wang 	/* This check is inverted, because we already know the hardware
307014641931SCong Wang 	 * checksum is invalid before calling this function. So, if the
307114641931SCong Wang 	 * re-computed checksum is valid instead, then we have a mismatch
307214641931SCong Wang 	 * between the original skb->csum and skb_checksum(). This means either
307314641931SCong Wang 	 * the original hardware checksum is incorrect or we screwed up skb->csum
307414641931SCong Wang 	 * when moving skb->data around.
307514641931SCong Wang 	 */
307649f8e832SCong Wang 	if (likely(!sum)) {
307749f8e832SCong Wang 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
307849f8e832SCong Wang 		    !skb->csum_complete_sw)
30797fe50ac8SCong Wang 			netdev_rx_csum_fault(skb->dev, skb);
308049f8e832SCong Wang 	}
308149f8e832SCong Wang 
308249f8e832SCong Wang 	if (!skb_shared(skb)) {
308349f8e832SCong Wang 		/* Save full packet checksum */
308449f8e832SCong Wang 		skb->csum = csum;
308549f8e832SCong Wang 		skb->ip_summed = CHECKSUM_COMPLETE;
308649f8e832SCong Wang 		skb->csum_complete_sw = 1;
308749f8e832SCong Wang 		skb->csum_valid = !sum;
308849f8e832SCong Wang 	}
308949f8e832SCong Wang 
309049f8e832SCong Wang 	return sum;
309149f8e832SCong Wang }
309249f8e832SCong Wang EXPORT_SYMBOL(__skb_checksum_complete);
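
/* Editorial note: an illustrative sketch, not part of this file. Receive
 * paths normally reach the helper above through skb_checksum_complete(), an
 * inline wrapper in <linux/skbuff.h>; a zero result means the checksum
 * verified. example_rx_csum_ok() is made up.
 */
static bool example_rx_csum_ok(struct sk_buff *skb)
{
	return skb_checksum_complete(skb) == 0;
}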
309349f8e832SCong Wang 
30949617813dSDavide Caratti static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
30959617813dSDavide Caratti {
30969617813dSDavide Caratti 	net_warn_ratelimited(
30979617813dSDavide Caratti 		"%s: attempt to compute crc32c without libcrc32c.ko\n",
30989617813dSDavide Caratti 		__func__);
30999617813dSDavide Caratti 	return 0;
31009617813dSDavide Caratti }
31019617813dSDavide Caratti 
31029617813dSDavide Caratti static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
31039617813dSDavide Caratti 				       int offset, int len)
31049617813dSDavide Caratti {
31059617813dSDavide Caratti 	net_warn_ratelimited(
31069617813dSDavide Caratti 		"%s: attempt to compute crc32c without libcrc32c.ko\n",
31079617813dSDavide Caratti 		__func__);
31089617813dSDavide Caratti 	return 0;
31099617813dSDavide Caratti }
31109617813dSDavide Caratti 
31119617813dSDavide Caratti static const struct skb_checksum_ops default_crc32c_ops = {
31129617813dSDavide Caratti 	.update  = warn_crc32c_csum_update,
31139617813dSDavide Caratti 	.combine = warn_crc32c_csum_combine,
31149617813dSDavide Caratti };
31159617813dSDavide Caratti 
31169617813dSDavide Caratti const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
31179617813dSDavide Caratti 	&default_crc32c_ops;
31189617813dSDavide Caratti EXPORT_SYMBOL(crc32c_csum_stub);
31199617813dSDavide Caratti 
3120af2806f8SThomas Graf /**
3121af2806f8SThomas Graf  *	skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
3122af2806f8SThomas Graf  *	@from: source buffer
3123af2806f8SThomas Graf  *
3124af2806f8SThomas Graf  *	Calculates the amount of linear headroom needed in the 'to' skb passed
3125af2806f8SThomas Graf  *	into skb_zerocopy().
3126af2806f8SThomas Graf  */
3127af2806f8SThomas Graf unsigned int
3128af2806f8SThomas Graf skb_zerocopy_headlen(const struct sk_buff *from)
3129af2806f8SThomas Graf {
3130af2806f8SThomas Graf 	unsigned int hlen = 0;
3131af2806f8SThomas Graf 
3132af2806f8SThomas Graf 	if (!from->head_frag ||
3133af2806f8SThomas Graf 	    skb_headlen(from) < L1_CACHE_BYTES ||
3134a17ad096SPravin B Shelar 	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) {
3135af2806f8SThomas Graf 		hlen = skb_headlen(from);
3136a17ad096SPravin B Shelar 		if (!hlen)
3137a17ad096SPravin B Shelar 			hlen = from->len;
3138a17ad096SPravin B Shelar 	}
3139af2806f8SThomas Graf 
3140af2806f8SThomas Graf 	if (skb_has_frag_list(from))
3141af2806f8SThomas Graf 		hlen = from->len;
3142af2806f8SThomas Graf 
3143af2806f8SThomas Graf 	return hlen;
3144af2806f8SThomas Graf }
3145af2806f8SThomas Graf EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
3146af2806f8SThomas Graf 
3147af2806f8SThomas Graf /**
3148af2806f8SThomas Graf  *	skb_zerocopy - Zero copy skb to skb
3149af2806f8SThomas Graf  *	@to: destination buffer
31507fceb4deSMasanari Iida  *	@from: source buffer
3151af2806f8SThomas Graf  *	@len: number of bytes to copy from source buffer
3152af2806f8SThomas Graf  *	@hlen: size of linear headroom in destination buffer
3153af2806f8SThomas Graf  *
3154af2806f8SThomas Graf  *	Copies up to `len` bytes from `from` to `to` by creating references
3155af2806f8SThomas Graf  *	to the frags in the source buffer.
3156af2806f8SThomas Graf  *
3157af2806f8SThomas Graf  *	The `hlen` as calculated by skb_zerocopy_headlen() specifies the
3158af2806f8SThomas Graf  *	headroom in the `to` buffer.
315936d5fe6aSZoltan Kiss  *
316036d5fe6aSZoltan Kiss  *	Return value:
316136d5fe6aSZoltan Kiss  *	0: everything is OK
316236d5fe6aSZoltan Kiss  *	-ENOMEM: couldn't orphan frags of @from due to lack of memory
316336d5fe6aSZoltan Kiss  *	-EFAULT: skb_copy_bits() found some problem with skb geometry
3164af2806f8SThomas Graf  */
316536d5fe6aSZoltan Kiss int
316636d5fe6aSZoltan Kiss skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
3167af2806f8SThomas Graf {
3168af2806f8SThomas Graf 	int i, j = 0;
3169af2806f8SThomas Graf 	int plen = 0; /* length of skb->head fragment */
317036d5fe6aSZoltan Kiss 	int ret;
3171af2806f8SThomas Graf 	struct page *page;
3172af2806f8SThomas Graf 	unsigned int offset;
3173af2806f8SThomas Graf 
3174af2806f8SThomas Graf 	BUG_ON(!from->head_frag && !hlen);
3175af2806f8SThomas Graf 
3176af2806f8SThomas Graf 	/* don't bother with small payloads */
317736d5fe6aSZoltan Kiss 	if (len <= skb_tailroom(to))
317836d5fe6aSZoltan Kiss 		return skb_copy_bits(from, 0, skb_put(to, len), len);
3179af2806f8SThomas Graf 
3180af2806f8SThomas Graf 	if (hlen) {
318136d5fe6aSZoltan Kiss 		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
318236d5fe6aSZoltan Kiss 		if (unlikely(ret))
318336d5fe6aSZoltan Kiss 			return ret;
3184af2806f8SThomas Graf 		len -= hlen;
3185af2806f8SThomas Graf 	} else {
3186af2806f8SThomas Graf 		plen = min_t(int, skb_headlen(from), len);
3187af2806f8SThomas Graf 		if (plen) {
3188af2806f8SThomas Graf 			page = virt_to_head_page(from->head);
3189af2806f8SThomas Graf 			offset = from->data - (unsigned char *)page_address(page);
3190af2806f8SThomas Graf 			__skb_fill_page_desc(to, 0, page, offset, plen);
3191af2806f8SThomas Graf 			get_page(page);
3192af2806f8SThomas Graf 			j = 1;
3193af2806f8SThomas Graf 			len -= plen;
3194af2806f8SThomas Graf 		}
3195af2806f8SThomas Graf 	}
3196af2806f8SThomas Graf 
3197af2806f8SThomas Graf 	to->truesize += len + plen;
3198af2806f8SThomas Graf 	to->len += len + plen;
3199af2806f8SThomas Graf 	to->data_len += len + plen;
3200af2806f8SThomas Graf 
320136d5fe6aSZoltan Kiss 	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
320236d5fe6aSZoltan Kiss 		skb_tx_error(from);
320336d5fe6aSZoltan Kiss 		return -ENOMEM;
320436d5fe6aSZoltan Kiss 	}
32051f8b977aSWillem de Bruijn 	skb_zerocopy_clone(to, from, GFP_ATOMIC);
320636d5fe6aSZoltan Kiss 
3207af2806f8SThomas Graf 	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
3208d8e18a51SMatthew Wilcox (Oracle) 		int size;
3209d8e18a51SMatthew Wilcox (Oracle) 
3210af2806f8SThomas Graf 		if (!len)
3211af2806f8SThomas Graf 			break;
3212af2806f8SThomas Graf 		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
3213d8e18a51SMatthew Wilcox (Oracle) 		size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]),
3214d8e18a51SMatthew Wilcox (Oracle) 					len);
3215d8e18a51SMatthew Wilcox (Oracle) 		skb_frag_size_set(&skb_shinfo(to)->frags[j], size);
3216d8e18a51SMatthew Wilcox (Oracle) 		len -= size;
3217af2806f8SThomas Graf 		skb_frag_ref(to, j);
3218af2806f8SThomas Graf 		j++;
3219af2806f8SThomas Graf 	}
3220af2806f8SThomas Graf 	skb_shinfo(to)->nr_frags = j;
322136d5fe6aSZoltan Kiss 
322236d5fe6aSZoltan Kiss 	return 0;
3223af2806f8SThomas Graf }
3224af2806f8SThomas Graf EXPORT_SYMBOL_GPL(skb_zerocopy);
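
/* Editorial note: an illustrative sketch, not part of this file, loosely
 * modelled on how openvswitch/nfnetlink callers use these helpers: size the
 * linear headroom with skb_zerocopy_headlen(), allocate the destination,
 * then let skb_zerocopy() take references on the source frags. Sizing and
 * error handling are simplified; example_share_skb() is made up.
 */
static struct sk_buff *example_share_skb(struct sk_buff *from)
{
	unsigned int hlen = skb_zerocopy_headlen(from);
	struct sk_buff *to = alloc_skb(hlen, GFP_ATOMIC);

	if (!to)
		return NULL;

	if (skb_zerocopy(to, from, from->len, hlen)) {
		kfree_skb(to);
		return NULL;
	}
	return to;
}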
3225af2806f8SThomas Graf 
32261da177e4SLinus Torvalds void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
32271da177e4SLinus Torvalds {
3228d3bc23e7SAl Viro 	__wsum csum;
32291da177e4SLinus Torvalds 	long csstart;
32301da177e4SLinus Torvalds 
323184fa7933SPatrick McHardy 	if (skb->ip_summed == CHECKSUM_PARTIAL)
323255508d60SMichał Mirosław 		csstart = skb_checksum_start_offset(skb);
32331da177e4SLinus Torvalds 	else
32341da177e4SLinus Torvalds 		csstart = skb_headlen(skb);
32351da177e4SLinus Torvalds 
323609a62660SKris Katterjohn 	BUG_ON(csstart > skb_headlen(skb));
32371da177e4SLinus Torvalds 
3238d626f62bSArnaldo Carvalho de Melo 	skb_copy_from_linear_data(skb, to, csstart);
32391da177e4SLinus Torvalds 
32401da177e4SLinus Torvalds 	csum = 0;
32411da177e4SLinus Torvalds 	if (csstart != skb->len)
32421da177e4SLinus Torvalds 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
32438d5930dfSAl Viro 					      skb->len - csstart);
32441da177e4SLinus Torvalds 
324584fa7933SPatrick McHardy 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
3246ff1dcadbSAl Viro 		long csstuff = csstart + skb->csum_offset;
32471da177e4SLinus Torvalds 
3248d3bc23e7SAl Viro 		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
32491da177e4SLinus Torvalds 	}
32501da177e4SLinus Torvalds }
3251b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_dev);
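/* Illustrative sketch: a driver that must copy each frame into a linear,
 * device-owned bounce buffer can use skb_copy_and_csum_dev() so that a
 * CHECKSUM_PARTIAL skb ends up with its checksum filled in as it is
 * copied. The surrounding buffer handling is an assumption, not real
 * driver code.
 */
static void example_copy_to_tx_buffer(struct sk_buff *skb, u8 *tx_buf)
{
	/* tx_buf must provide at least skb->len bytes */
	skb_copy_and_csum_dev(skb, tx_buf);
	/* hand tx_buf off to the hardware here */
}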
32521da177e4SLinus Torvalds 
32531da177e4SLinus Torvalds /**
32541da177e4SLinus Torvalds  *	skb_dequeue - remove from the head of the queue
32551da177e4SLinus Torvalds  *	@list: list to dequeue from
32561da177e4SLinus Torvalds  *
32571da177e4SLinus Torvalds  *	Remove the head of the list. The list lock is taken so the function
32581da177e4SLinus Torvalds  *	may be used safely with other locking list functions. The head item is
32591da177e4SLinus Torvalds  *	returned or %NULL if the list is empty.
32601da177e4SLinus Torvalds  */
32611da177e4SLinus Torvalds 
32621da177e4SLinus Torvalds struct sk_buff *skb_dequeue(struct sk_buff_head *list)
32631da177e4SLinus Torvalds {
32641da177e4SLinus Torvalds 	unsigned long flags;
32651da177e4SLinus Torvalds 	struct sk_buff *result;
32661da177e4SLinus Torvalds 
32671da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
32681da177e4SLinus Torvalds 	result = __skb_dequeue(list);
32691da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
32701da177e4SLinus Torvalds 	return result;
32711da177e4SLinus Torvalds }
3272b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue);
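/* Minimal sketch of the usual pairing: one context queues buffers with
 * skb_queue_tail() (defined below) and another drains them with
 * skb_dequeue(); skb_queue_purge() drops whatever is left on teardown.
 * The queue is assumed to have been set up with skb_queue_head_init();
 * all example_ names are hypothetical.
 */
static struct sk_buff_head example_rx_queue;

static void example_rx(struct sk_buff *skb)
{
	skb_queue_tail(&example_rx_queue, skb);	/* takes the list lock */
}

static void example_drain(void)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&example_rx_queue)) != NULL)
		consume_skb(skb);	/* or process the packet */
}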
32731da177e4SLinus Torvalds 
32741da177e4SLinus Torvalds /**
32751da177e4SLinus Torvalds  *	skb_dequeue_tail - remove from the tail of the queue
32761da177e4SLinus Torvalds  *	@list: list to dequeue from
32771da177e4SLinus Torvalds  *
32781da177e4SLinus Torvalds  *	Remove the tail of the list. The list lock is taken so the function
32791da177e4SLinus Torvalds  *	may be used safely with other locking list functions. The tail item is
32801da177e4SLinus Torvalds  *	returned or %NULL if the list is empty.
32811da177e4SLinus Torvalds  */
32821da177e4SLinus Torvalds struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
32831da177e4SLinus Torvalds {
32841da177e4SLinus Torvalds 	unsigned long flags;
32851da177e4SLinus Torvalds 	struct sk_buff *result;
32861da177e4SLinus Torvalds 
32871da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
32881da177e4SLinus Torvalds 	result = __skb_dequeue_tail(list);
32891da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
32901da177e4SLinus Torvalds 	return result;
32911da177e4SLinus Torvalds }
3292b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue_tail);
32931da177e4SLinus Torvalds 
32941da177e4SLinus Torvalds /**
32951da177e4SLinus Torvalds  *	skb_queue_purge - empty a list
32961da177e4SLinus Torvalds  *	@list: list to empty
32971da177e4SLinus Torvalds  *
32981da177e4SLinus Torvalds  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
32991da177e4SLinus Torvalds  *	the list and one reference dropped. This function takes the list
33001da177e4SLinus Torvalds  *	lock and is atomic with respect to other list locking functions.
33011da177e4SLinus Torvalds  */
33021da177e4SLinus Torvalds void skb_queue_purge(struct sk_buff_head *list)
33031da177e4SLinus Torvalds {
33041da177e4SLinus Torvalds 	struct sk_buff *skb;
33051da177e4SLinus Torvalds 	while ((skb = skb_dequeue(list)) != NULL)
33061da177e4SLinus Torvalds 		kfree_skb(skb);
33071da177e4SLinus Torvalds }
3308b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_purge);
33091da177e4SLinus Torvalds 
33101da177e4SLinus Torvalds /**
33119f5afeaeSYaogong Wang  *	skb_rbtree_purge - empty a skb rbtree
33129f5afeaeSYaogong Wang  *	@root: root of the rbtree to empty
3313385114deSPeter Oskolkov  *	Return value: the sum of truesizes of all purged skbs.
33149f5afeaeSYaogong Wang  *
33159f5afeaeSYaogong Wang  *	Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
33169f5afeaeSYaogong Wang  *	the rbtree and one reference dropped. This function does not take
33179f5afeaeSYaogong Wang  *	any lock. Synchronization should be handled by the caller (e.g., TCP
33189f5afeaeSYaogong Wang  *	out-of-order queue is protected by the socket lock).
33199f5afeaeSYaogong Wang  */
3320385114deSPeter Oskolkov unsigned int skb_rbtree_purge(struct rb_root *root)
33219f5afeaeSYaogong Wang {
33227c90584cSEric Dumazet 	struct rb_node *p = rb_first(root);
3323385114deSPeter Oskolkov 	unsigned int sum = 0;
33249f5afeaeSYaogong Wang 
33257c90584cSEric Dumazet 	while (p) {
33267c90584cSEric Dumazet 		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
33277c90584cSEric Dumazet 
33287c90584cSEric Dumazet 		p = rb_next(p);
33297c90584cSEric Dumazet 		rb_erase(&skb->rbnode, root);
3330385114deSPeter Oskolkov 		sum += skb->truesize;
33319f5afeaeSYaogong Wang 		kfree_skb(skb);
33327c90584cSEric Dumazet 	}
3333385114deSPeter Oskolkov 	return sum;
33349f5afeaeSYaogong Wang }
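/* Hedged sketch of a caller: TCP purges its out-of-order rbtree under the
 * socket lock and uses the returned truesize sum for memory accounting.
 * The snippet below only illustrates the calling convention; it is not
 * the actual TCP code.
 */
static void example_purge_ofo_queue(struct sock *sk)
{
	unsigned int freed;

	/* the caller must hold the socket lock */
	freed = skb_rbtree_purge(&tcp_sk(sk)->out_of_order_queue);
	pr_debug("released %u bytes of truesize\n", freed);
}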
33359f5afeaeSYaogong Wang 
33369f5afeaeSYaogong Wang /**
33371da177e4SLinus Torvalds  *	skb_queue_head - queue a buffer at the list head
33381da177e4SLinus Torvalds  *	@list: list to use
33391da177e4SLinus Torvalds  *	@newsk: buffer to queue
33401da177e4SLinus Torvalds  *
33411da177e4SLinus Torvalds  *	Queue a buffer at the start of the list. This function takes the
33421da177e4SLinus Torvalds  *	list lock and can be used safely with other locking &sk_buff functions
33431da177e4SLinus Torvalds  *	list lock and can be used safely with other locking &sk_buff
33441da177e4SLinus Torvalds  *	functions.
33451da177e4SLinus Torvalds  *	A buffer cannot be placed on two lists at the same time.
33461da177e4SLinus Torvalds  */
33471da177e4SLinus Torvalds void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
33481da177e4SLinus Torvalds {
33491da177e4SLinus Torvalds 	unsigned long flags;
33501da177e4SLinus Torvalds 
33511da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
33521da177e4SLinus Torvalds 	__skb_queue_head(list, newsk);
33531da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
33541da177e4SLinus Torvalds }
3355b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_head);
33561da177e4SLinus Torvalds 
33571da177e4SLinus Torvalds /**
33581da177e4SLinus Torvalds  *	skb_queue_tail - queue a buffer at the list tail
33591da177e4SLinus Torvalds  *	@list: list to use
33601da177e4SLinus Torvalds  *	@newsk: buffer to queue
33611da177e4SLinus Torvalds  *
33621da177e4SLinus Torvalds  *	Queue a buffer at the tail of the list. This function takes the
33631da177e4SLinus Torvalds  *	list lock and can be used safely with other locking &sk_buff
33641da177e4SLinus Torvalds  *	functions.
33651da177e4SLinus Torvalds  *
33661da177e4SLinus Torvalds  *	A buffer cannot be placed on two lists at the same time.
33671da177e4SLinus Torvalds  */
33681da177e4SLinus Torvalds void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
33691da177e4SLinus Torvalds {
33701da177e4SLinus Torvalds 	unsigned long flags;
33711da177e4SLinus Torvalds 
33721da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
33731da177e4SLinus Torvalds 	__skb_queue_tail(list, newsk);
33741da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
33751da177e4SLinus Torvalds }
3376b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_tail);
33778728b834SDavid S. Miller 
33781da177e4SLinus Torvalds /**
33791da177e4SLinus Torvalds  *	skb_unlink	-	remove a buffer from a list
33801da177e4SLinus Torvalds  *	@skb: buffer to remove
33818728b834SDavid S. Miller  *	@list: list to use
33821da177e4SLinus Torvalds  *
33838728b834SDavid S. Miller  *	Remove a packet from a list. The list locks are taken and this
33848728b834SDavid S. Miller  *	function is atomic with respect to other list locked calls.
33851da177e4SLinus Torvalds  *
33868728b834SDavid S. Miller  *	You must know what list the SKB is on.
33871da177e4SLinus Torvalds  */
33888728b834SDavid S. Miller void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
33891da177e4SLinus Torvalds {
33901da177e4SLinus Torvalds 	unsigned long flags;
33911da177e4SLinus Torvalds 
33921da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
33938728b834SDavid S. Miller 	__skb_unlink(skb, list);
33941da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
33951da177e4SLinus Torvalds }
3396b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_unlink);
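/* Minimal sketch: once the owning list of an skb is known, skb_unlink()
 * detaches it under the list lock before it is freed or requeued. The
 * helper name is hypothetical.
 */
static void example_drop_from_list(struct sk_buff *skb, struct sk_buff_head *list)
{
	skb_unlink(skb, list);	/* takes and releases list->lock */
	kfree_skb(skb);
}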
33971da177e4SLinus Torvalds 
33981da177e4SLinus Torvalds /**
33991da177e4SLinus Torvalds  *	skb_append	-	append a buffer
34001da177e4SLinus Torvalds  *	@old: buffer to insert after
34011da177e4SLinus Torvalds  *	@newsk: buffer to insert
34028728b834SDavid S. Miller  *	@list: list to use
34031da177e4SLinus Torvalds  *
34041da177e4SLinus Torvalds  *	Place a packet after a given packet in a list. The list locks are taken
34051da177e4SLinus Torvalds  *	and this function is atomic with respect to other list locked calls.
34061da177e4SLinus Torvalds  *	A buffer cannot be placed on two lists at the same time.
34071da177e4SLinus Torvalds  */
34088728b834SDavid S. Miller void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
34091da177e4SLinus Torvalds {
34101da177e4SLinus Torvalds 	unsigned long flags;
34111da177e4SLinus Torvalds 
34128728b834SDavid S. Miller 	spin_lock_irqsave(&list->lock, flags);
34137de6c033SGerrit Renker 	__skb_queue_after(list, old, newsk);
34148728b834SDavid S. Miller 	spin_unlock_irqrestore(&list->lock, flags);
34151da177e4SLinus Torvalds }
3416b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append);
34171da177e4SLinus Torvalds 
34181da177e4SLinus Torvalds static inline void skb_split_inside_header(struct sk_buff *skb,
34191da177e4SLinus Torvalds 					   struct sk_buff* skb1,
34201da177e4SLinus Torvalds 					   const u32 len, const int pos)
34211da177e4SLinus Torvalds {
34221da177e4SLinus Torvalds 	int i;
34231da177e4SLinus Torvalds 
3424d626f62bSArnaldo Carvalho de Melo 	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
3425d626f62bSArnaldo Carvalho de Melo 					 pos - len);
34261da177e4SLinus Torvalds 	/* And move data appendix as is. */
34271da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
34281da177e4SLinus Torvalds 		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
34291da177e4SLinus Torvalds 
34301da177e4SLinus Torvalds 	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
34311da177e4SLinus Torvalds 	skb_shinfo(skb)->nr_frags  = 0;
34321da177e4SLinus Torvalds 	skb1->data_len		   = skb->data_len;
34331da177e4SLinus Torvalds 	skb1->len		   += skb1->data_len;
34341da177e4SLinus Torvalds 	skb->data_len		   = 0;
34351da177e4SLinus Torvalds 	skb->len		   = len;
343627a884dcSArnaldo Carvalho de Melo 	skb_set_tail_pointer(skb, len);
34371da177e4SLinus Torvalds }
34381da177e4SLinus Torvalds 
34391da177e4SLinus Torvalds static inline void skb_split_no_header(struct sk_buff *skb,
34401da177e4SLinus Torvalds 				       struct sk_buff* skb1,
34411da177e4SLinus Torvalds 				       const u32 len, int pos)
34421da177e4SLinus Torvalds {
34431da177e4SLinus Torvalds 	int i, k = 0;
34441da177e4SLinus Torvalds 	const int nfrags = skb_shinfo(skb)->nr_frags;
34451da177e4SLinus Torvalds 
34461da177e4SLinus Torvalds 	skb_shinfo(skb)->nr_frags = 0;
34471da177e4SLinus Torvalds 	skb1->len		  = skb1->data_len = skb->len - len;
34481da177e4SLinus Torvalds 	skb->len		  = len;
34491da177e4SLinus Torvalds 	skb->data_len		  = len - pos;
34501da177e4SLinus Torvalds 
34511da177e4SLinus Torvalds 	for (i = 0; i < nfrags; i++) {
34529e903e08SEric Dumazet 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
34531da177e4SLinus Torvalds 
34541da177e4SLinus Torvalds 		if (pos + size > len) {
34551da177e4SLinus Torvalds 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
34561da177e4SLinus Torvalds 
34571da177e4SLinus Torvalds 			if (pos < len) {
34581da177e4SLinus Torvalds 				/* Split frag.
34591da177e4SLinus Torvalds 				 * We have two variants in this case:
34601da177e4SLinus Torvalds 				 * 1. Move the whole frag to the second
34611da177e4SLinus Torvalds 				 *    part, if it is possible. F.e.
34621da177e4SLinus Torvalds 				 *    this approach is mandatory for TUX,
34631da177e4SLinus Torvalds 				 *    where splitting is expensive.
34641da177e4SLinus Torvalds 				 * 2. Split accurately. This is what we do here.
34651da177e4SLinus Torvalds 				 */
3466ea2ab693SIan Campbell 				skb_frag_ref(skb, i);
3467b54c9d5bSJonathan Lemon 				skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
34689e903e08SEric Dumazet 				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
34699e903e08SEric Dumazet 				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
34701da177e4SLinus Torvalds 				skb_shinfo(skb)->nr_frags++;
34711da177e4SLinus Torvalds 			}
34721da177e4SLinus Torvalds 			k++;
34731da177e4SLinus Torvalds 		} else
34741da177e4SLinus Torvalds 			skb_shinfo(skb)->nr_frags++;
34751da177e4SLinus Torvalds 		pos += size;
34761da177e4SLinus Torvalds 	}
34771da177e4SLinus Torvalds 	skb_shinfo(skb1)->nr_frags = k;
34781da177e4SLinus Torvalds }
34791da177e4SLinus Torvalds 
34801da177e4SLinus Torvalds /**
34811da177e4SLinus Torvalds  * skb_split - Split fragmented skb into two parts at length len.
34821da177e4SLinus Torvalds  * @skb: the buffer to split
34831da177e4SLinus Torvalds  * @skb1: the buffer to receive the second part
34841da177e4SLinus Torvalds  * @len: new length for skb
34851da177e4SLinus Torvalds  */
34861da177e4SLinus Torvalds void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
34871da177e4SLinus Torvalds {
34881da177e4SLinus Torvalds 	int pos = skb_headlen(skb);
34899b65b17dSTalal Ahmad 	const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY;
34901da177e4SLinus Torvalds 
34919b65b17dSTalal Ahmad 	skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags;
34921f8b977aSWillem de Bruijn 	skb_zerocopy_clone(skb1, skb, 0);
34931da177e4SLinus Torvalds 	if (len < pos)	/* Split line is inside header. */
34941da177e4SLinus Torvalds 		skb_split_inside_header(skb, skb1, len, pos);
34951da177e4SLinus Torvalds 	else		/* Second chunk has no header, nothing to copy. */
34961da177e4SLinus Torvalds 		skb_split_no_header(skb, skb1, len, pos);
34971da177e4SLinus Torvalds }
3498b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_split);
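/* Hedged sketch of how a caller, such as the TCP output path when it can
 * only transmit part of a queued buffer, splits one skb into two. The
 * tail skb is allocated here with enough linear space for an in-header
 * split; real callers size the second skb differently.
 */
static struct sk_buff *example_split_at(struct sk_buff *skb, u32 len)
{
	struct sk_buff *tail = alloc_skb(skb_headlen(skb), GFP_ATOMIC);

	if (!tail)
		return NULL;

	skb_split(skb, tail, len);	/* skb keeps [0, len), tail gets the rest */
	return tail;
}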
34991da177e4SLinus Torvalds 
35009f782db3SIlpo Järvinen /* Shifting from/to a cloned skb is a no-go.
35019f782db3SIlpo Järvinen  *
35029f782db3SIlpo Järvinen  * Caller cannot keep skb_shinfo related pointers past calling here!
35039f782db3SIlpo Järvinen  */
3504832d11c5SIlpo Järvinen static int skb_prepare_for_shift(struct sk_buff *skb)
3505832d11c5SIlpo Järvinen {
3506c4777efaSEric Dumazet 	return skb_unclone_keeptruesize(skb, GFP_ATOMIC);
3507832d11c5SIlpo Järvinen }
3508832d11c5SIlpo Järvinen 
3509832d11c5SIlpo Järvinen /**
3510832d11c5SIlpo Järvinen  * skb_shift - Shifts paged data partially from skb to another
3511832d11c5SIlpo Järvinen  * @tgt: buffer into which tail data gets added
3512832d11c5SIlpo Järvinen  * @skb: buffer from which the paged data comes from
3513832d11c5SIlpo Järvinen  * @shiftlen: shift up to this many bytes
3514832d11c5SIlpo Järvinen  *
3515832d11c5SIlpo Järvinen  * Attempts to shift up to shiftlen worth of bytes, which may be less than
351620e994a0SFeng King  * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
3517832d11c5SIlpo Järvinen  * It's up to caller to free skb if everything was shifted.
3518832d11c5SIlpo Järvinen  *
3519832d11c5SIlpo Järvinen  * If @tgt runs out of frags, the whole operation is aborted.
3520832d11c5SIlpo Järvinen  *
3521832d11c5SIlpo Järvinen  * The skb cannot include anything other than paged data, while tgt may
3522832d11c5SIlpo Järvinen  * have non-paged data as well.
3523832d11c5SIlpo Järvinen  *
3524832d11c5SIlpo Järvinen  * TODO: full sized shift could be optimized but that would need
3525832d11c5SIlpo Järvinen  * specialized skb free'er to handle frags without up-to-date nr_frags.
3526832d11c5SIlpo Järvinen  */
3527832d11c5SIlpo Järvinen int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
3528832d11c5SIlpo Järvinen {
3529832d11c5SIlpo Järvinen 	int from, to, merge, todo;
3530d8e18a51SMatthew Wilcox (Oracle) 	skb_frag_t *fragfrom, *fragto;
3531832d11c5SIlpo Järvinen 
3532832d11c5SIlpo Järvinen 	BUG_ON(shiftlen > skb->len);
3533f8071cdeSEric Dumazet 
3534f8071cdeSEric Dumazet 	if (skb_headlen(skb))
3535f8071cdeSEric Dumazet 		return 0;
35361f8b977aSWillem de Bruijn 	if (skb_zcopy(tgt) || skb_zcopy(skb))
35371f8b977aSWillem de Bruijn 		return 0;
3538832d11c5SIlpo Järvinen 
3539832d11c5SIlpo Järvinen 	todo = shiftlen;
3540832d11c5SIlpo Järvinen 	from = 0;
3541832d11c5SIlpo Järvinen 	to = skb_shinfo(tgt)->nr_frags;
3542832d11c5SIlpo Järvinen 	fragfrom = &skb_shinfo(skb)->frags[from];
3543832d11c5SIlpo Järvinen 
3544832d11c5SIlpo Järvinen 	/* Actual merge is delayed until the point when we know we can
3545832d11c5SIlpo Järvinen 	 * commit all, so that we don't have to undo partial changes
3546832d11c5SIlpo Järvinen 	 */
3547832d11c5SIlpo Järvinen 	if (!to ||
3548ea2ab693SIan Campbell 	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
3549b54c9d5bSJonathan Lemon 			      skb_frag_off(fragfrom))) {
3550832d11c5SIlpo Järvinen 		merge = -1;
3551832d11c5SIlpo Järvinen 	} else {
3552832d11c5SIlpo Järvinen 		merge = to - 1;
3553832d11c5SIlpo Järvinen 
35549e903e08SEric Dumazet 		todo -= skb_frag_size(fragfrom);
3555832d11c5SIlpo Järvinen 		if (todo < 0) {
3556832d11c5SIlpo Järvinen 			if (skb_prepare_for_shift(skb) ||
3557832d11c5SIlpo Järvinen 			    skb_prepare_for_shift(tgt))
3558832d11c5SIlpo Järvinen 				return 0;
3559832d11c5SIlpo Järvinen 
35609f782db3SIlpo Järvinen 			/* All previous frag pointers might be stale! */
35619f782db3SIlpo Järvinen 			fragfrom = &skb_shinfo(skb)->frags[from];
3562832d11c5SIlpo Järvinen 			fragto = &skb_shinfo(tgt)->frags[merge];
3563832d11c5SIlpo Järvinen 
35649e903e08SEric Dumazet 			skb_frag_size_add(fragto, shiftlen);
35659e903e08SEric Dumazet 			skb_frag_size_sub(fragfrom, shiftlen);
3566b54c9d5bSJonathan Lemon 			skb_frag_off_add(fragfrom, shiftlen);
3567832d11c5SIlpo Järvinen 
3568832d11c5SIlpo Järvinen 			goto onlymerged;
3569832d11c5SIlpo Järvinen 		}
3570832d11c5SIlpo Järvinen 
3571832d11c5SIlpo Järvinen 		from++;
3572832d11c5SIlpo Järvinen 	}
3573832d11c5SIlpo Järvinen 
3574832d11c5SIlpo Järvinen 	/* Skip full, not-fitting skb to avoid expensive operations */
3575832d11c5SIlpo Järvinen 	if ((shiftlen == skb->len) &&
3576832d11c5SIlpo Järvinen 	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
3577832d11c5SIlpo Järvinen 		return 0;
3578832d11c5SIlpo Järvinen 
3579832d11c5SIlpo Järvinen 	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
3580832d11c5SIlpo Järvinen 		return 0;
3581832d11c5SIlpo Järvinen 
3582832d11c5SIlpo Järvinen 	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
3583832d11c5SIlpo Järvinen 		if (to == MAX_SKB_FRAGS)
3584832d11c5SIlpo Järvinen 			return 0;
3585832d11c5SIlpo Järvinen 
3586832d11c5SIlpo Järvinen 		fragfrom = &skb_shinfo(skb)->frags[from];
3587832d11c5SIlpo Järvinen 		fragto = &skb_shinfo(tgt)->frags[to];
3588832d11c5SIlpo Järvinen 
35899e903e08SEric Dumazet 		if (todo >= skb_frag_size(fragfrom)) {
3590832d11c5SIlpo Järvinen 			*fragto = *fragfrom;
35919e903e08SEric Dumazet 			todo -= skb_frag_size(fragfrom);
3592832d11c5SIlpo Järvinen 			from++;
3593832d11c5SIlpo Järvinen 			to++;
3594832d11c5SIlpo Järvinen 
3595832d11c5SIlpo Järvinen 		} else {
3596ea2ab693SIan Campbell 			__skb_frag_ref(fragfrom);
3597b54c9d5bSJonathan Lemon 			skb_frag_page_copy(fragto, fragfrom);
3598b54c9d5bSJonathan Lemon 			skb_frag_off_copy(fragto, fragfrom);
35999e903e08SEric Dumazet 			skb_frag_size_set(fragto, todo);
3600832d11c5SIlpo Järvinen 
3601b54c9d5bSJonathan Lemon 			skb_frag_off_add(fragfrom, todo);
36029e903e08SEric Dumazet 			skb_frag_size_sub(fragfrom, todo);
3603832d11c5SIlpo Järvinen 			todo = 0;
3604832d11c5SIlpo Järvinen 
3605832d11c5SIlpo Järvinen 			to++;
3606832d11c5SIlpo Järvinen 			break;
3607832d11c5SIlpo Järvinen 		}
3608832d11c5SIlpo Järvinen 	}
3609832d11c5SIlpo Järvinen 
3610832d11c5SIlpo Järvinen 	/* Ready to "commit" this state change to tgt */
3611832d11c5SIlpo Järvinen 	skb_shinfo(tgt)->nr_frags = to;
3612832d11c5SIlpo Järvinen 
3613832d11c5SIlpo Järvinen 	if (merge >= 0) {
3614832d11c5SIlpo Järvinen 		fragfrom = &skb_shinfo(skb)->frags[0];
3615832d11c5SIlpo Järvinen 		fragto = &skb_shinfo(tgt)->frags[merge];
3616832d11c5SIlpo Järvinen 
36179e903e08SEric Dumazet 		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
36186a5bcd84SIlias Apalodimas 		__skb_frag_unref(fragfrom, skb->pp_recycle);
3619832d11c5SIlpo Järvinen 	}
3620832d11c5SIlpo Järvinen 
3621832d11c5SIlpo Järvinen 	/* Reposition in the original skb */
3622832d11c5SIlpo Järvinen 	to = 0;
3623832d11c5SIlpo Järvinen 	while (from < skb_shinfo(skb)->nr_frags)
3624832d11c5SIlpo Järvinen 		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
3625832d11c5SIlpo Järvinen 	skb_shinfo(skb)->nr_frags = to;
3626832d11c5SIlpo Järvinen 
3627832d11c5SIlpo Järvinen 	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
3628832d11c5SIlpo Järvinen 
3629832d11c5SIlpo Järvinen onlymerged:
3630832d11c5SIlpo Järvinen 	/* Most likely the tgt won't ever need its checksum anymore; skb, on
3631832d11c5SIlpo Järvinen 	 * the other hand, might need it if it has to be resent.
3632832d11c5SIlpo Järvinen 	 */
3633832d11c5SIlpo Järvinen 	tgt->ip_summed = CHECKSUM_PARTIAL;
3634832d11c5SIlpo Järvinen 	skb->ip_summed = CHECKSUM_PARTIAL;
3635832d11c5SIlpo Järvinen 
3636832d11c5SIlpo Järvinen 	/* Yak, is it really working this way? Some helper please? */
3637832d11c5SIlpo Järvinen 	skb->len -= shiftlen;
3638832d11c5SIlpo Järvinen 	skb->data_len -= shiftlen;
3639832d11c5SIlpo Järvinen 	skb->truesize -= shiftlen;
3640832d11c5SIlpo Järvinen 	tgt->len += shiftlen;
3641832d11c5SIlpo Järvinen 	tgt->data_len += shiftlen;
3642832d11c5SIlpo Järvinen 	tgt->truesize += shiftlen;
3643832d11c5SIlpo Järvinen 
3644832d11c5SIlpo Järvinen 	return shiftlen;
3645832d11c5SIlpo Järvinen }
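/* Hedged sketch: TCP's SACK processing uses skb_shift() to coalesce paged
 * data from one retransmit-queue skb into the previous one. The wrapper
 * below is illustrative only and assumes @skb is already unlinked from
 * any queue if it ends up empty.
 */
static bool example_coalesce(struct sk_buff *prev, struct sk_buff *skb, int want)
{
	int shifted = skb_shift(prev, skb, want);

	if (!shifted)
		return false;		/* nothing could be moved */
	if (!skb->len)
		kfree_skb(skb);		/* everything was shifted out of @skb */
	return true;
}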
3646832d11c5SIlpo Järvinen 
3647677e90edSThomas Graf /**
3648677e90edSThomas Graf  * skb_prepare_seq_read - Prepare a sequential read of skb data
3649677e90edSThomas Graf  * @skb: the buffer to read
3650677e90edSThomas Graf  * @from: lower offset of data to be read
3651677e90edSThomas Graf  * @to: upper offset of data to be read
3652677e90edSThomas Graf  * @st: state variable
3653677e90edSThomas Graf  *
3654677e90edSThomas Graf  * Initializes the specified state variable. Must be called before
3655677e90edSThomas Graf  * invoking skb_seq_read() for the first time.
3656677e90edSThomas Graf  */
3657677e90edSThomas Graf void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
3658677e90edSThomas Graf 			  unsigned int to, struct skb_seq_state *st)
3659677e90edSThomas Graf {
3660677e90edSThomas Graf 	st->lower_offset = from;
3661677e90edSThomas Graf 	st->upper_offset = to;
3662677e90edSThomas Graf 	st->root_skb = st->cur_skb = skb;
3663677e90edSThomas Graf 	st->frag_idx = st->stepped_offset = 0;
3664677e90edSThomas Graf 	st->frag_data = NULL;
366597550f6fSWillem de Bruijn 	st->frag_off = 0;
3666677e90edSThomas Graf }
3667b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_prepare_seq_read);
3668677e90edSThomas Graf 
3669677e90edSThomas Graf /**
3670677e90edSThomas Graf  * skb_seq_read - Sequentially read skb data
3671677e90edSThomas Graf  * @consumed: number of bytes consumed by the caller so far
3672677e90edSThomas Graf  * @data: destination pointer for data to be returned
3673677e90edSThomas Graf  * @st: state variable
3674677e90edSThomas Graf  *
3675bc32383cSMathias Krause  * Reads a block of skb data at @consumed relative to the
3676677e90edSThomas Graf  * lower offset specified to skb_prepare_seq_read(). Assigns
3677bc32383cSMathias Krause  * the head of the data block to @data and returns the length
3678677e90edSThomas Graf  * of the block or 0 if the end of the skb data or the upper
3679677e90edSThomas Graf  * offset has been reached.
3680677e90edSThomas Graf  *
3681677e90edSThomas Graf  * The caller is not required to consume all of the data
3682bc32383cSMathias Krause  * returned, i.e. @consumed is typically set to the number
3683677e90edSThomas Graf  * of bytes already consumed and the next call to
3684677e90edSThomas Graf  * skb_seq_read() will return the remaining part of the block.
3685677e90edSThomas Graf  *
368625985edcSLucas De Marchi  * Note 1: The size of each block of data returned can be arbitrary,
3687e793c0f7SMasanari Iida  *       this limitation is the cost for zerocopy sequential
3688677e90edSThomas Graf  *       reads of potentially non linear data.
3689677e90edSThomas Graf  *
3690bc2cda1eSRandy Dunlap  * Note 2: Fragment lists within fragments are not implemented
3691677e90edSThomas Graf  *       at the moment, state->root_skb could be replaced with
3692677e90edSThomas Graf  *       a stack for this purpose.
3693677e90edSThomas Graf  */
3694677e90edSThomas Graf unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
3695677e90edSThomas Graf 			  struct skb_seq_state *st)
3696677e90edSThomas Graf {
3697677e90edSThomas Graf 	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
3698677e90edSThomas Graf 	skb_frag_t *frag;
3699677e90edSThomas Graf 
3700aeb193eaSWedson Almeida Filho 	if (unlikely(abs_offset >= st->upper_offset)) {
3701aeb193eaSWedson Almeida Filho 		if (st->frag_data) {
3702aeb193eaSWedson Almeida Filho 			kunmap_atomic(st->frag_data);
3703aeb193eaSWedson Almeida Filho 			st->frag_data = NULL;
3704aeb193eaSWedson Almeida Filho 		}
3705677e90edSThomas Graf 		return 0;
3706aeb193eaSWedson Almeida Filho 	}
3707677e90edSThomas Graf 
3708677e90edSThomas Graf next_skb:
370995e3b24cSHerbert Xu 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
3710677e90edSThomas Graf 
3711995b3379SThomas Chenault 	if (abs_offset < block_limit && !st->frag_data) {
371295e3b24cSHerbert Xu 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
3713677e90edSThomas Graf 		return block_limit - abs_offset;
3714677e90edSThomas Graf 	}
3715677e90edSThomas Graf 
3716677e90edSThomas Graf 	if (st->frag_idx == 0 && !st->frag_data)
3717677e90edSThomas Graf 		st->stepped_offset += skb_headlen(st->cur_skb);
3718677e90edSThomas Graf 
3719677e90edSThomas Graf 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
372097550f6fSWillem de Bruijn 		unsigned int pg_idx, pg_off, pg_sz;
3721677e90edSThomas Graf 
372297550f6fSWillem de Bruijn 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
372397550f6fSWillem de Bruijn 
372497550f6fSWillem de Bruijn 		pg_idx = 0;
372597550f6fSWillem de Bruijn 		pg_off = skb_frag_off(frag);
372697550f6fSWillem de Bruijn 		pg_sz = skb_frag_size(frag);
372797550f6fSWillem de Bruijn 
372897550f6fSWillem de Bruijn 		if (skb_frag_must_loop(skb_frag_page(frag))) {
372997550f6fSWillem de Bruijn 			pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT;
373097550f6fSWillem de Bruijn 			pg_off = offset_in_page(pg_off + st->frag_off);
373197550f6fSWillem de Bruijn 			pg_sz = min_t(unsigned int, pg_sz - st->frag_off,
373297550f6fSWillem de Bruijn 						    PAGE_SIZE - pg_off);
373397550f6fSWillem de Bruijn 		}
373497550f6fSWillem de Bruijn 
373597550f6fSWillem de Bruijn 		block_limit = pg_sz + st->stepped_offset;
3736677e90edSThomas Graf 		if (abs_offset < block_limit) {
3737677e90edSThomas Graf 			if (!st->frag_data)
373897550f6fSWillem de Bruijn 				st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx);
3739677e90edSThomas Graf 
374097550f6fSWillem de Bruijn 			*data = (u8 *)st->frag_data + pg_off +
3741677e90edSThomas Graf 				(abs_offset - st->stepped_offset);
3742677e90edSThomas Graf 
3743677e90edSThomas Graf 			return block_limit - abs_offset;
3744677e90edSThomas Graf 		}
3745677e90edSThomas Graf 
3746677e90edSThomas Graf 		if (st->frag_data) {
374751c56b00SEric Dumazet 			kunmap_atomic(st->frag_data);
3748677e90edSThomas Graf 			st->frag_data = NULL;
3749677e90edSThomas Graf 		}
3750677e90edSThomas Graf 
375197550f6fSWillem de Bruijn 		st->stepped_offset += pg_sz;
375297550f6fSWillem de Bruijn 		st->frag_off += pg_sz;
375397550f6fSWillem de Bruijn 		if (st->frag_off == skb_frag_size(frag)) {
375497550f6fSWillem de Bruijn 			st->frag_off = 0;
3755677e90edSThomas Graf 			st->frag_idx++;
375697550f6fSWillem de Bruijn 		}
3757677e90edSThomas Graf 	}
3758677e90edSThomas Graf 
37595b5a60daSOlaf Kirch 	if (st->frag_data) {
376051c56b00SEric Dumazet 		kunmap_atomic(st->frag_data);
37615b5a60daSOlaf Kirch 		st->frag_data = NULL;
37625b5a60daSOlaf Kirch 	}
37635b5a60daSOlaf Kirch 
376421dc3301SDavid S. Miller 	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
3765677e90edSThomas Graf 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
376695e3b24cSHerbert Xu 		st->frag_idx = 0;
3767677e90edSThomas Graf 		goto next_skb;
376871b3346dSShyam Iyer 	} else if (st->cur_skb->next) {
376971b3346dSShyam Iyer 		st->cur_skb = st->cur_skb->next;
377071b3346dSShyam Iyer 		st->frag_idx = 0;
3771677e90edSThomas Graf 		goto next_skb;
3772677e90edSThomas Graf 	}
3773677e90edSThomas Graf 
3774677e90edSThomas Graf 	return 0;
3775677e90edSThomas Graf }
3776b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_seq_read);
3777677e90edSThomas Graf 
3778677e90edSThomas Graf /**
3779677e90edSThomas Graf  * skb_abort_seq_read - Abort a sequential read of skb data
3780677e90edSThomas Graf  * @st: state variable
3781677e90edSThomas Graf  *
3782677e90edSThomas Graf  * Must be called if skb_seq_read() was not called until it
3783677e90edSThomas Graf  * returned 0.
3784677e90edSThomas Graf  */
3785677e90edSThomas Graf void skb_abort_seq_read(struct skb_seq_state *st)
3786677e90edSThomas Graf {
3787677e90edSThomas Graf 	if (st->frag_data)
378851c56b00SEric Dumazet 		kunmap_atomic(st->frag_data);
3789677e90edSThomas Graf }
3790b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_abort_seq_read);
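/* Minimal sketch of the sequential-read API: walk a possibly non-linear
 * skb block by block without linearizing it. Because this loop runs until
 * skb_seq_read() returns 0, no skb_abort_seq_read() is needed; a caller
 * that stops early must call it. The helper name is hypothetical.
 */
static void example_hex_dump(struct sk_buff *skb)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		print_hex_dump_bytes("skb: ", DUMP_PREFIX_OFFSET, data, len);
		consumed += len;
	}
}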
3791677e90edSThomas Graf 
37923fc7e8a6SThomas Graf #define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
37933fc7e8a6SThomas Graf 
37943fc7e8a6SThomas Graf static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
37953fc7e8a6SThomas Graf 					  struct ts_config *conf,
37963fc7e8a6SThomas Graf 					  struct ts_state *state)
37973fc7e8a6SThomas Graf {
37983fc7e8a6SThomas Graf 	return skb_seq_read(offset, text, TS_SKB_CB(state));
37993fc7e8a6SThomas Graf }
38003fc7e8a6SThomas Graf 
38013fc7e8a6SThomas Graf static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
38023fc7e8a6SThomas Graf {
38033fc7e8a6SThomas Graf 	skb_abort_seq_read(TS_SKB_CB(state));
38043fc7e8a6SThomas Graf }
38053fc7e8a6SThomas Graf 
38063fc7e8a6SThomas Graf /**
38073fc7e8a6SThomas Graf  * skb_find_text - Find a text pattern in skb data
38083fc7e8a6SThomas Graf  * @skb: the buffer to look in
38093fc7e8a6SThomas Graf  * @from: search offset
38103fc7e8a6SThomas Graf  * @to: search limit
38113fc7e8a6SThomas Graf  * @config: textsearch configuration
38123fc7e8a6SThomas Graf  *
38133fc7e8a6SThomas Graf  * Finds a pattern in the skb data according to the specified
38143fc7e8a6SThomas Graf  * textsearch configuration. Use textsearch_next() to retrieve
38153fc7e8a6SThomas Graf  * subsequent occurrences of the pattern. Returns the offset
38163fc7e8a6SThomas Graf  * to the first occurrence or UINT_MAX if no match was found.
38173fc7e8a6SThomas Graf  */
38183fc7e8a6SThomas Graf unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
3819059a2440SBojan Prtvar 			   unsigned int to, struct ts_config *config)
38203fc7e8a6SThomas Graf {
3821059a2440SBojan Prtvar 	struct ts_state state;
3822f72b948dSPhil Oester 	unsigned int ret;
3823f72b948dSPhil Oester 
3824b228c9b0SWillem de Bruijn 	BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb));
3825b228c9b0SWillem de Bruijn 
38263fc7e8a6SThomas Graf 	config->get_next_block = skb_ts_get_next_block;
38273fc7e8a6SThomas Graf 	config->finish = skb_ts_finish;
38283fc7e8a6SThomas Graf 
3829059a2440SBojan Prtvar 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
38303fc7e8a6SThomas Graf 
3831059a2440SBojan Prtvar 	ret = textsearch_find(config, &state);
3832f72b948dSPhil Oester 	return (ret <= to - from ? ret : UINT_MAX);
38333fc7e8a6SThomas Graf }
3834b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_find_text);
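/* Hedged sketch modelled on netfilter's string match: the textsearch
 * configuration is normally prepared once (textsearch_prepare()) and then
 * reused for every packet; only the per-packet check is shown here.
 */
static bool example_payload_matches(struct sk_buff *skb, struct ts_config *conf)
{
	return skb_find_text(skb, 0, skb->len, conf) != UINT_MAX;
}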
38353fc7e8a6SThomas Graf 
3836be12a1feSHannes Frederic Sowa int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3837be12a1feSHannes Frederic Sowa 			 int offset, size_t size)
3838be12a1feSHannes Frederic Sowa {
3839be12a1feSHannes Frederic Sowa 	int i = skb_shinfo(skb)->nr_frags;
3840be12a1feSHannes Frederic Sowa 
3841be12a1feSHannes Frederic Sowa 	if (skb_can_coalesce(skb, i, page, offset)) {
3842be12a1feSHannes Frederic Sowa 		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
3843be12a1feSHannes Frederic Sowa 	} else if (i < MAX_SKB_FRAGS) {
3844be12a1feSHannes Frederic Sowa 		get_page(page);
3845be12a1feSHannes Frederic Sowa 		skb_fill_page_desc(skb, i, page, offset, size);
3846be12a1feSHannes Frederic Sowa 	} else {
3847be12a1feSHannes Frederic Sowa 		return -EMSGSIZE;
3848be12a1feSHannes Frederic Sowa 	}
3849be12a1feSHannes Frederic Sowa 
3850be12a1feSHannes Frederic Sowa 	return 0;
3851be12a1feSHannes Frederic Sowa }
3852be12a1feSHannes Frederic Sowa EXPORT_SYMBOL_GPL(skb_append_pagefrags);
3853be12a1feSHannes Frederic Sowa 
3854cbb042f9SHerbert Xu /**
3855cbb042f9SHerbert Xu  *	skb_pull_rcsum - pull skb and update receive checksum
3856cbb042f9SHerbert Xu  *	@skb: buffer to update
3857cbb042f9SHerbert Xu  *	@len: length of data pulled
3858cbb042f9SHerbert Xu  *
3859cbb042f9SHerbert Xu  *	This function performs an skb_pull on the packet and updates
3860fee54fa5SUrs Thuermann  *	the CHECKSUM_COMPLETE checksum.  It should be used on
386184fa7933SPatrick McHardy  *	receive path processing instead of skb_pull unless you know
386284fa7933SPatrick McHardy  *	that the checksum difference is zero (e.g., a valid IP header)
386384fa7933SPatrick McHardy  *	or you are setting ip_summed to CHECKSUM_NONE.
3864cbb042f9SHerbert Xu  */
3865af72868bSJohannes Berg void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
3866cbb042f9SHerbert Xu {
386731b33dfbSPravin B Shelar 	unsigned char *data = skb->data;
386831b33dfbSPravin B Shelar 
3869cbb042f9SHerbert Xu 	BUG_ON(len > skb->len);
387031b33dfbSPravin B Shelar 	__skb_pull(skb, len);
387131b33dfbSPravin B Shelar 	skb_postpull_rcsum(skb, data, len);
387231b33dfbSPravin B Shelar 	return skb->data;
3873cbb042f9SHerbert Xu }
3874f94691acSArnaldo Carvalho de Melo EXPORT_SYMBOL_GPL(skb_pull_rcsum);
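/* Illustrative receive-path sketch: strip a hypothetical 4-byte tag in
 * front of the payload while keeping a CHECKSUM_COMPLETE value valid.
 * The tag length and helper name are assumptions for the example.
 */
#define EXAMPLE_TAG_LEN	4

static int example_strip_tag(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, EXAMPLE_TAG_LEN))
		return -EINVAL;

	skb_pull_rcsum(skb, EXAMPLE_TAG_LEN);	/* also updates skb->csum */
	return 0;
}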
3875f94691acSArnaldo Carvalho de Melo 
387613acc94eSYonghong Song static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
387713acc94eSYonghong Song {
387813acc94eSYonghong Song 	skb_frag_t head_frag;
387913acc94eSYonghong Song 	struct page *page;
388013acc94eSYonghong Song 
388113acc94eSYonghong Song 	page = virt_to_head_page(frag_skb->head);
3882d8e18a51SMatthew Wilcox (Oracle) 	__skb_frag_set_page(&head_frag, page);
3883b54c9d5bSJonathan Lemon 	skb_frag_off_set(&head_frag, frag_skb->data -
3884b54c9d5bSJonathan Lemon 			 (unsigned char *)page_address(page));
3885d8e18a51SMatthew Wilcox (Oracle) 	skb_frag_size_set(&head_frag, skb_headlen(frag_skb));
388613acc94eSYonghong Song 	return head_frag;
388713acc94eSYonghong Song }
388813acc94eSYonghong Song 
38893a1296a3SSteffen Klassert struct sk_buff *skb_segment_list(struct sk_buff *skb,
38903a1296a3SSteffen Klassert 				 netdev_features_t features,
38913a1296a3SSteffen Klassert 				 unsigned int offset)
38923a1296a3SSteffen Klassert {
38933a1296a3SSteffen Klassert 	struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
38943a1296a3SSteffen Klassert 	unsigned int tnl_hlen = skb_tnl_header_len(skb);
38953a1296a3SSteffen Klassert 	unsigned int delta_truesize = 0;
38963a1296a3SSteffen Klassert 	unsigned int delta_len = 0;
38973a1296a3SSteffen Klassert 	struct sk_buff *tail = NULL;
389853475c5dSDongseok Yi 	struct sk_buff *nskb, *tmp;
389953475c5dSDongseok Yi 	int err;
39003a1296a3SSteffen Klassert 
39013a1296a3SSteffen Klassert 	skb_push(skb, -skb_network_offset(skb) + offset);
39023a1296a3SSteffen Klassert 
39033a1296a3SSteffen Klassert 	skb_shinfo(skb)->frag_list = NULL;
39043a1296a3SSteffen Klassert 
39053a1296a3SSteffen Klassert 	do {
39063a1296a3SSteffen Klassert 		nskb = list_skb;
39073a1296a3SSteffen Klassert 		list_skb = list_skb->next;
39083a1296a3SSteffen Klassert 
390953475c5dSDongseok Yi 		err = 0;
391053475c5dSDongseok Yi 		if (skb_shared(nskb)) {
391153475c5dSDongseok Yi 			tmp = skb_clone(nskb, GFP_ATOMIC);
391253475c5dSDongseok Yi 			if (tmp) {
391353475c5dSDongseok Yi 				consume_skb(nskb);
391453475c5dSDongseok Yi 				nskb = tmp;
391553475c5dSDongseok Yi 				err = skb_unclone(nskb, GFP_ATOMIC);
391653475c5dSDongseok Yi 			} else {
391753475c5dSDongseok Yi 				err = -ENOMEM;
391853475c5dSDongseok Yi 			}
391953475c5dSDongseok Yi 		}
392053475c5dSDongseok Yi 
39213a1296a3SSteffen Klassert 		if (!tail)
39223a1296a3SSteffen Klassert 			skb->next = nskb;
39233a1296a3SSteffen Klassert 		else
39243a1296a3SSteffen Klassert 			tail->next = nskb;
39253a1296a3SSteffen Klassert 
392653475c5dSDongseok Yi 		if (unlikely(err)) {
392753475c5dSDongseok Yi 			nskb->next = list_skb;
392853475c5dSDongseok Yi 			goto err_linearize;
392953475c5dSDongseok Yi 		}
393053475c5dSDongseok Yi 
39313a1296a3SSteffen Klassert 		tail = nskb;
39323a1296a3SSteffen Klassert 
39333a1296a3SSteffen Klassert 		delta_len += nskb->len;
39343a1296a3SSteffen Klassert 		delta_truesize += nskb->truesize;
39353a1296a3SSteffen Klassert 
39363a1296a3SSteffen Klassert 		skb_push(nskb, -skb_network_offset(nskb) + offset);
39373a1296a3SSteffen Klassert 
3938cf673ed0SFlorian Westphal 		skb_release_head_state(nskb);
39393a1296a3SSteffen Klassert 		__copy_skb_header(nskb, skb);
39403a1296a3SSteffen Klassert 
39413a1296a3SSteffen Klassert 		skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
39423a1296a3SSteffen Klassert 		skb_copy_from_linear_data_offset(skb, -tnl_hlen,
39433a1296a3SSteffen Klassert 						 nskb->data - tnl_hlen,
39443a1296a3SSteffen Klassert 						 offset + tnl_hlen);
39453a1296a3SSteffen Klassert 
39463a1296a3SSteffen Klassert 		if (skb_needs_linearize(nskb, features) &&
39473a1296a3SSteffen Klassert 		    __skb_linearize(nskb))
39483a1296a3SSteffen Klassert 			goto err_linearize;
39493a1296a3SSteffen Klassert 
39503a1296a3SSteffen Klassert 	} while (list_skb);
39513a1296a3SSteffen Klassert 
39523a1296a3SSteffen Klassert 	skb->truesize = skb->truesize - delta_truesize;
39533a1296a3SSteffen Klassert 	skb->data_len = skb->data_len - delta_len;
39543a1296a3SSteffen Klassert 	skb->len = skb->len - delta_len;
39553a1296a3SSteffen Klassert 
39563a1296a3SSteffen Klassert 	skb_gso_reset(skb);
39573a1296a3SSteffen Klassert 
39583a1296a3SSteffen Klassert 	skb->prev = tail;
39593a1296a3SSteffen Klassert 
39603a1296a3SSteffen Klassert 	if (skb_needs_linearize(skb, features) &&
39613a1296a3SSteffen Klassert 	    __skb_linearize(skb))
39623a1296a3SSteffen Klassert 		goto err_linearize;
39633a1296a3SSteffen Klassert 
39643a1296a3SSteffen Klassert 	skb_get(skb);
39653a1296a3SSteffen Klassert 
39663a1296a3SSteffen Klassert 	return skb;
39673a1296a3SSteffen Klassert 
39683a1296a3SSteffen Klassert err_linearize:
39693a1296a3SSteffen Klassert 	kfree_skb_list(skb->next);
39703a1296a3SSteffen Klassert 	skb->next = NULL;
39713a1296a3SSteffen Klassert 	return ERR_PTR(-ENOMEM);
39723a1296a3SSteffen Klassert }
39733a1296a3SSteffen Klassert EXPORT_SYMBOL_GPL(skb_segment_list);
39743a1296a3SSteffen Klassert 
3975f4c50d99SHerbert Xu /**
3976f4c50d99SHerbert Xu  *	skb_segment - Perform protocol segmentation on skb.
3977df5771ffSMichael S. Tsirkin  *	@head_skb: buffer to segment
3978576a30ebSHerbert Xu  *	@features: features for the output path (see dev->features)
3979f4c50d99SHerbert Xu  *
3980f4c50d99SHerbert Xu  *	This function performs segmentation on the given skb.  It returns
39814c821d75SBen Hutchings  *	a pointer to the first in a list of new skbs for the segments.
39824c821d75SBen Hutchings  *	In case of error it returns ERR_PTR(err).
3983f4c50d99SHerbert Xu  */
3984df5771ffSMichael S. Tsirkin struct sk_buff *skb_segment(struct sk_buff *head_skb,
3985df5771ffSMichael S. Tsirkin 			    netdev_features_t features)
3986f4c50d99SHerbert Xu {
3987f4c50d99SHerbert Xu 	struct sk_buff *segs = NULL;
3988f4c50d99SHerbert Xu 	struct sk_buff *tail = NULL;
39891a4cedafSMichael S. Tsirkin 	struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
3990df5771ffSMichael S. Tsirkin 	skb_frag_t *frag = skb_shinfo(head_skb)->frags;
3991df5771ffSMichael S. Tsirkin 	unsigned int mss = skb_shinfo(head_skb)->gso_size;
3992df5771ffSMichael S. Tsirkin 	unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
39931fd819ecSMichael S. Tsirkin 	struct sk_buff *frag_skb = head_skb;
3994f4c50d99SHerbert Xu 	unsigned int offset = doffset;
3995df5771ffSMichael S. Tsirkin 	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
3996802ab55aSAlexander Duyck 	unsigned int partial_segs = 0;
3997f4c50d99SHerbert Xu 	unsigned int headroom;
3998802ab55aSAlexander Duyck 	unsigned int len = head_skb->len;
3999ec5f0615SPravin B Shelar 	__be16 proto;
400036c98382SAlexander Duyck 	bool csum, sg;
4001df5771ffSMichael S. Tsirkin 	int nfrags = skb_shinfo(head_skb)->nr_frags;
4002f4c50d99SHerbert Xu 	int err = -ENOMEM;
4003f4c50d99SHerbert Xu 	int i = 0;
4004f4c50d99SHerbert Xu 	int pos;
4005f4c50d99SHerbert Xu 
40063dcbdb13SShmulik Ladkani 	if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
40073dcbdb13SShmulik Ladkani 	    (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
40083dcbdb13SShmulik Ladkani 		/* gso_size is untrusted, and we have a frag_list with a linear
40093dcbdb13SShmulik Ladkani 		 * non head_frag head.
40103dcbdb13SShmulik Ladkani 		 *
40113dcbdb13SShmulik Ladkani 		 * (we assume checking the first list_skb member suffices;
40123dcbdb13SShmulik Ladkani 		 * i.e. if any of the list_skb members has a non-head_frag
40133dcbdb13SShmulik Ladkani 		 * head, then the first one does too).
40143dcbdb13SShmulik Ladkani 		 *
40153dcbdb13SShmulik Ladkani 		 * If head_skb's headlen does not fit requested gso_size, it
40163dcbdb13SShmulik Ladkani 		 * means that the frag_list members do NOT terminate on exact
40173dcbdb13SShmulik Ladkani 		 * gso_size boundaries. Hence we cannot perform skb_frag_t page
40183dcbdb13SShmulik Ladkani 		 * sharing. Therefore we must fallback to copying the frag_list
40193dcbdb13SShmulik Ladkani 		 * skbs; we do so by disabling SG.
40203dcbdb13SShmulik Ladkani 		 */
40213dcbdb13SShmulik Ladkani 		if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
40223dcbdb13SShmulik Ladkani 			features &= ~NETIF_F_SG;
40233dcbdb13SShmulik Ladkani 	}
40243dcbdb13SShmulik Ladkani 
40255882a07cSWei-Chun Chao 	__skb_push(head_skb, doffset);
40262f631133SMiaohe Lin 	proto = skb_network_protocol(head_skb, NULL);
4027ec5f0615SPravin B Shelar 	if (unlikely(!proto))
4028ec5f0615SPravin B Shelar 		return ERR_PTR(-EINVAL);
4029ec5f0615SPravin B Shelar 
403036c98382SAlexander Duyck 	sg = !!(features & NETIF_F_SG);
4031f245d079SAlexander Duyck 	csum = !!can_checksum_protocol(features, proto);
40327e2b10c1STom Herbert 
403307b26c94SSteffen Klassert 	if (sg && csum && (mss != GSO_BY_FRAGS))  {
403407b26c94SSteffen Klassert 		if (!(features & NETIF_F_GSO_PARTIAL)) {
403507b26c94SSteffen Klassert 			struct sk_buff *iter;
403643170c4eSIlan Tayari 			unsigned int frag_len;
403707b26c94SSteffen Klassert 
403807b26c94SSteffen Klassert 			if (!list_skb ||
403907b26c94SSteffen Klassert 			    !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
404007b26c94SSteffen Klassert 				goto normal;
404107b26c94SSteffen Klassert 
404243170c4eSIlan Tayari 			/* If we get here then all the required
404343170c4eSIlan Tayari 			 * GSO features except frag_list are supported.
404443170c4eSIlan Tayari 			 * Try to split the SKB into multiple GSO SKBs
404543170c4eSIlan Tayari 			 * with no frag_list.
404643170c4eSIlan Tayari 			 * Currently we can do that only when the buffers don't
404743170c4eSIlan Tayari 			 * have a linear part and all the buffers except
404843170c4eSIlan Tayari 			 * the last are of the same length.
404907b26c94SSteffen Klassert 			 */
405043170c4eSIlan Tayari 			frag_len = list_skb->len;
405107b26c94SSteffen Klassert 			skb_walk_frags(head_skb, iter) {
405243170c4eSIlan Tayari 				if (frag_len != iter->len && iter->next)
405343170c4eSIlan Tayari 					goto normal;
4054eaffadbbSIlan Tayari 				if (skb_headlen(iter) && !iter->head_frag)
405507b26c94SSteffen Klassert 					goto normal;
405607b26c94SSteffen Klassert 
405707b26c94SSteffen Klassert 				len -= iter->len;
405807b26c94SSteffen Klassert 			}
405943170c4eSIlan Tayari 
406043170c4eSIlan Tayari 			if (len != frag_len)
406143170c4eSIlan Tayari 				goto normal;
406207b26c94SSteffen Klassert 		}
406307b26c94SSteffen Klassert 
4064802ab55aSAlexander Duyck 		/* GSO partial only requires that we trim off any excess that
4065802ab55aSAlexander Duyck 		 * doesn't fit into an MSS sized block, so take care of that
4066802ab55aSAlexander Duyck 		 * now.
4067802ab55aSAlexander Duyck 		 */
4068802ab55aSAlexander Duyck 		partial_segs = len / mss;
4069d7fb5a80SAlexander Duyck 		if (partial_segs > 1)
4070802ab55aSAlexander Duyck 			mss *= partial_segs;
4071d7fb5a80SAlexander Duyck 		else
4072d7fb5a80SAlexander Duyck 			partial_segs = 0;
4073802ab55aSAlexander Duyck 	}
4074802ab55aSAlexander Duyck 
407507b26c94SSteffen Klassert normal:
4076df5771ffSMichael S. Tsirkin 	headroom = skb_headroom(head_skb);
4077df5771ffSMichael S. Tsirkin 	pos = skb_headlen(head_skb);
4078f4c50d99SHerbert Xu 
4079f4c50d99SHerbert Xu 	do {
4080f4c50d99SHerbert Xu 		struct sk_buff *nskb;
40818cb19905SMichael S. Tsirkin 		skb_frag_t *nskb_frag;
4082c8884eddSHerbert Xu 		int hsize;
4083f4c50d99SHerbert Xu 		int size;
4084f4c50d99SHerbert Xu 
40853953c46cSMarcelo Ricardo Leitner 		if (unlikely(mss == GSO_BY_FRAGS)) {
40863953c46cSMarcelo Ricardo Leitner 			len = list_skb->len;
40873953c46cSMarcelo Ricardo Leitner 		} else {
4088df5771ffSMichael S. Tsirkin 			len = head_skb->len - offset;
4089f4c50d99SHerbert Xu 			if (len > mss)
4090f4c50d99SHerbert Xu 				len = mss;
40913953c46cSMarcelo Ricardo Leitner 		}
4092f4c50d99SHerbert Xu 
4093df5771ffSMichael S. Tsirkin 		hsize = skb_headlen(head_skb) - offset;
4094f4c50d99SHerbert Xu 
4095dbd50f23SXin Long 		if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) &&
40961a4cedafSMichael S. Tsirkin 		    (skb_headlen(list_skb) == len || sg)) {
40971a4cedafSMichael S. Tsirkin 			BUG_ON(skb_headlen(list_skb) > len);
409889319d38SHerbert Xu 
40999d8506ccSHerbert Xu 			i = 0;
41001a4cedafSMichael S. Tsirkin 			nfrags = skb_shinfo(list_skb)->nr_frags;
41011a4cedafSMichael S. Tsirkin 			frag = skb_shinfo(list_skb)->frags;
41021fd819ecSMichael S. Tsirkin 			frag_skb = list_skb;
41031a4cedafSMichael S. Tsirkin 			pos += skb_headlen(list_skb);
41049d8506ccSHerbert Xu 
41059d8506ccSHerbert Xu 			while (pos < offset + len) {
41069d8506ccSHerbert Xu 				BUG_ON(i >= nfrags);
41079d8506ccSHerbert Xu 
41084e1beba1SMichael S. Tsirkin 				size = skb_frag_size(frag);
41099d8506ccSHerbert Xu 				if (pos + size > offset + len)
41109d8506ccSHerbert Xu 					break;
41119d8506ccSHerbert Xu 
41129d8506ccSHerbert Xu 				i++;
41139d8506ccSHerbert Xu 				pos += size;
41144e1beba1SMichael S. Tsirkin 				frag++;
41159d8506ccSHerbert Xu 			}
41169d8506ccSHerbert Xu 
41171a4cedafSMichael S. Tsirkin 			nskb = skb_clone(list_skb, GFP_ATOMIC);
41181a4cedafSMichael S. Tsirkin 			list_skb = list_skb->next;
411989319d38SHerbert Xu 
4120f4c50d99SHerbert Xu 			if (unlikely(!nskb))
4121f4c50d99SHerbert Xu 				goto err;
4122f4c50d99SHerbert Xu 
41239d8506ccSHerbert Xu 			if (unlikely(pskb_trim(nskb, len))) {
41249d8506ccSHerbert Xu 				kfree_skb(nskb);
41259d8506ccSHerbert Xu 				goto err;
41269d8506ccSHerbert Xu 			}
41279d8506ccSHerbert Xu 
4128ec47ea82SAlexander Duyck 			hsize = skb_end_offset(nskb);
412989319d38SHerbert Xu 			if (skb_cow_head(nskb, doffset + headroom)) {
413089319d38SHerbert Xu 				kfree_skb(nskb);
413189319d38SHerbert Xu 				goto err;
413289319d38SHerbert Xu 			}
413389319d38SHerbert Xu 
4134ec47ea82SAlexander Duyck 			nskb->truesize += skb_end_offset(nskb) - hsize;
413589319d38SHerbert Xu 			skb_release_head_state(nskb);
413689319d38SHerbert Xu 			__skb_push(nskb, doffset);
413789319d38SHerbert Xu 		} else {
413800b229f7SPaolo Abeni 			if (hsize < 0)
413900b229f7SPaolo Abeni 				hsize = 0;
4140dbd50f23SXin Long 			if (hsize > len || !sg)
4141dbd50f23SXin Long 				hsize = len;
4142dbd50f23SXin Long 
4143c93bdd0eSMel Gorman 			nskb = __alloc_skb(hsize + doffset + headroom,
4144df5771ffSMichael S. Tsirkin 					   GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
4145c93bdd0eSMel Gorman 					   NUMA_NO_NODE);
414689319d38SHerbert Xu 
414789319d38SHerbert Xu 			if (unlikely(!nskb))
414889319d38SHerbert Xu 				goto err;
414989319d38SHerbert Xu 
415089319d38SHerbert Xu 			skb_reserve(nskb, headroom);
415189319d38SHerbert Xu 			__skb_put(nskb, doffset);
415289319d38SHerbert Xu 		}
415389319d38SHerbert Xu 
4154f4c50d99SHerbert Xu 		if (segs)
4155f4c50d99SHerbert Xu 			tail->next = nskb;
4156f4c50d99SHerbert Xu 		else
4157f4c50d99SHerbert Xu 			segs = nskb;
4158f4c50d99SHerbert Xu 		tail = nskb;
4159f4c50d99SHerbert Xu 
4160df5771ffSMichael S. Tsirkin 		__copy_skb_header(nskb, head_skb);
4161f4c50d99SHerbert Xu 
4162030737bcSEric Dumazet 		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
4163fcdfe3a7SVlad Yasevich 		skb_reset_mac_len(nskb);
416468c33163SPravin B Shelar 
4165df5771ffSMichael S. Tsirkin 		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
416668c33163SPravin B Shelar 						 nskb->data - tnl_hlen,
416768c33163SPravin B Shelar 						 doffset + tnl_hlen);
416889319d38SHerbert Xu 
41699d8506ccSHerbert Xu 		if (nskb->len == len + doffset)
41701cdbcb79SSimon Horman 			goto perform_csum_check;
417189319d38SHerbert Xu 
41727fbeffedSAlexander Duyck 		if (!sg) {
41731454c9faSYadu Kishore 			if (!csum) {
41747fbeffedSAlexander Duyck 				if (!nskb->remcsum_offload)
41756f85a124SHerbert Xu 					nskb->ip_summed = CHECKSUM_NONE;
417676443456SAlexander Duyck 				SKB_GSO_CB(nskb)->csum =
417776443456SAlexander Duyck 					skb_copy_and_csum_bits(head_skb, offset,
41781454c9faSYadu Kishore 							       skb_put(nskb,
41791454c9faSYadu Kishore 								       len),
41808d5930dfSAl Viro 							       len);
41817e2b10c1STom Herbert 				SKB_GSO_CB(nskb)->csum_start =
4182de843723STom Herbert 					skb_headroom(nskb) + doffset;
41831454c9faSYadu Kishore 			} else {
41841454c9faSYadu Kishore 				skb_copy_bits(head_skb, offset,
41851454c9faSYadu Kishore 					      skb_put(nskb, len),
41861454c9faSYadu Kishore 					      len);
41871454c9faSYadu Kishore 			}
4188f4c50d99SHerbert Xu 			continue;
4189f4c50d99SHerbert Xu 		}
4190f4c50d99SHerbert Xu 
41918cb19905SMichael S. Tsirkin 		nskb_frag = skb_shinfo(nskb)->frags;
4192f4c50d99SHerbert Xu 
4193df5771ffSMichael S. Tsirkin 		skb_copy_from_linear_data_offset(head_skb, offset,
4194d626f62bSArnaldo Carvalho de Melo 						 skb_put(nskb, hsize), hsize);
4195f4c50d99SHerbert Xu 
419606b4feb3SJonathan Lemon 		skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags &
419706b4feb3SJonathan Lemon 					   SKBFL_SHARED_FRAG;
4198cef401deSEric Dumazet 
4199bf5c25d6SWillem de Bruijn 		if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
4200bf5c25d6SWillem de Bruijn 		    skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
4201bf5c25d6SWillem de Bruijn 			goto err;
4202bf5c25d6SWillem de Bruijn 
42039d8506ccSHerbert Xu 		while (pos < offset + len) {
42049d8506ccSHerbert Xu 			if (i >= nfrags) {
42059d8506ccSHerbert Xu 				i = 0;
42061a4cedafSMichael S. Tsirkin 				nfrags = skb_shinfo(list_skb)->nr_frags;
42071a4cedafSMichael S. Tsirkin 				frag = skb_shinfo(list_skb)->frags;
42081fd819ecSMichael S. Tsirkin 				frag_skb = list_skb;
420913acc94eSYonghong Song 				if (!skb_headlen(list_skb)) {
42109d8506ccSHerbert Xu 					BUG_ON(!nfrags);
421113acc94eSYonghong Song 				} else {
421213acc94eSYonghong Song 					BUG_ON(!list_skb->head_frag);
42139d8506ccSHerbert Xu 
421413acc94eSYonghong Song 					/* to make room for head_frag. */
421513acc94eSYonghong Song 					i--;
421613acc94eSYonghong Song 					frag--;
421713acc94eSYonghong Song 				}
4218bf5c25d6SWillem de Bruijn 				if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
4219bf5c25d6SWillem de Bruijn 				    skb_zerocopy_clone(nskb, frag_skb,
4220bf5c25d6SWillem de Bruijn 						       GFP_ATOMIC))
4221bf5c25d6SWillem de Bruijn 					goto err;
4222bf5c25d6SWillem de Bruijn 
42231a4cedafSMichael S. Tsirkin 				list_skb = list_skb->next;
42249d8506ccSHerbert Xu 			}
42259d8506ccSHerbert Xu 
42269d8506ccSHerbert Xu 			if (unlikely(skb_shinfo(nskb)->nr_frags >=
42279d8506ccSHerbert Xu 				     MAX_SKB_FRAGS)) {
42289d8506ccSHerbert Xu 				net_warn_ratelimited(
42299d8506ccSHerbert Xu 					"skb_segment: too many frags: %u %u\n",
42309d8506ccSHerbert Xu 					pos, mss);
4231ff907a11SEric Dumazet 				err = -EINVAL;
42329d8506ccSHerbert Xu 				goto err;
42339d8506ccSHerbert Xu 			}
42349d8506ccSHerbert Xu 
423513acc94eSYonghong Song 			*nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
42368cb19905SMichael S. Tsirkin 			__skb_frag_ref(nskb_frag);
42378cb19905SMichael S. Tsirkin 			size = skb_frag_size(nskb_frag);
4238f4c50d99SHerbert Xu 
4239f4c50d99SHerbert Xu 			if (pos < offset) {
4240b54c9d5bSJonathan Lemon 				skb_frag_off_add(nskb_frag, offset - pos);
42418cb19905SMichael S. Tsirkin 				skb_frag_size_sub(nskb_frag, offset - pos);
4242f4c50d99SHerbert Xu 			}
4243f4c50d99SHerbert Xu 
424489319d38SHerbert Xu 			skb_shinfo(nskb)->nr_frags++;
4245f4c50d99SHerbert Xu 
4246f4c50d99SHerbert Xu 			if (pos + size <= offset + len) {
4247f4c50d99SHerbert Xu 				i++;
42484e1beba1SMichael S. Tsirkin 				frag++;
4249f4c50d99SHerbert Xu 				pos += size;
4250f4c50d99SHerbert Xu 			} else {
42518cb19905SMichael S. Tsirkin 				skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
425289319d38SHerbert Xu 				goto skip_fraglist;
4253f4c50d99SHerbert Xu 			}
4254f4c50d99SHerbert Xu 
42558cb19905SMichael S. Tsirkin 			nskb_frag++;
4256f4c50d99SHerbert Xu 		}
4257f4c50d99SHerbert Xu 
425889319d38SHerbert Xu skip_fraglist:
4259f4c50d99SHerbert Xu 		nskb->data_len = len - hsize;
4260f4c50d99SHerbert Xu 		nskb->len += nskb->data_len;
4261f4c50d99SHerbert Xu 		nskb->truesize += nskb->data_len;
4262ec5f0615SPravin B Shelar 
42631cdbcb79SSimon Horman perform_csum_check:
42647fbeffedSAlexander Duyck 		if (!csum) {
4265ff907a11SEric Dumazet 			if (skb_has_shared_frag(nskb) &&
4266ff907a11SEric Dumazet 			    __skb_linearize(nskb))
4267ddff00d4SAlexander Duyck 				goto err;
4268ff907a11SEric Dumazet 
42697fbeffedSAlexander Duyck 			if (!nskb->remcsum_offload)
4270ec5f0615SPravin B Shelar 				nskb->ip_summed = CHECKSUM_NONE;
427176443456SAlexander Duyck 			SKB_GSO_CB(nskb)->csum =
427276443456SAlexander Duyck 				skb_checksum(nskb, doffset,
427376443456SAlexander Duyck 					     nskb->len - doffset, 0);
42747e2b10c1STom Herbert 			SKB_GSO_CB(nskb)->csum_start =
42757e2b10c1STom Herbert 				skb_headroom(nskb) + doffset;
4276ec5f0615SPravin B Shelar 		}
4277df5771ffSMichael S. Tsirkin 	} while ((offset += len) < head_skb->len);
4278f4c50d99SHerbert Xu 
4279bec3cfdcSEric Dumazet 	/* Some callers want to get the end of the list.
4280bec3cfdcSEric Dumazet 	 * Put it in segs->prev to avoid walking the list.
4281bec3cfdcSEric Dumazet 	 * (see validate_xmit_skb_list() for example)
4282bec3cfdcSEric Dumazet 	 */
4283bec3cfdcSEric Dumazet 	segs->prev = tail;
4284432c856fSToshiaki Makita 
4285802ab55aSAlexander Duyck 	if (partial_segs) {
428607b26c94SSteffen Klassert 		struct sk_buff *iter;
4287802ab55aSAlexander Duyck 		int type = skb_shinfo(head_skb)->gso_type;
428807b26c94SSteffen Klassert 		unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
4289802ab55aSAlexander Duyck 
4290802ab55aSAlexander Duyck 		/* Update type to add partial and then remove dodgy if set */
429107b26c94SSteffen Klassert 		type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
4292802ab55aSAlexander Duyck 		type &= ~SKB_GSO_DODGY;
4293802ab55aSAlexander Duyck 
4294802ab55aSAlexander Duyck 		/* Update GSO info and prepare to start updating headers on
4295802ab55aSAlexander Duyck 		 * our way back down the stack of protocols.
4296802ab55aSAlexander Duyck 		 */
429707b26c94SSteffen Klassert 		for (iter = segs; iter; iter = iter->next) {
429807b26c94SSteffen Klassert 			skb_shinfo(iter)->gso_size = gso_size;
429907b26c94SSteffen Klassert 			skb_shinfo(iter)->gso_segs = partial_segs;
430007b26c94SSteffen Klassert 			skb_shinfo(iter)->gso_type = type;
430107b26c94SSteffen Klassert 			SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
430207b26c94SSteffen Klassert 		}
430307b26c94SSteffen Klassert 
430407b26c94SSteffen Klassert 		if (tail->len - doffset <= gso_size)
430507b26c94SSteffen Klassert 			skb_shinfo(tail)->gso_size = 0;
430607b26c94SSteffen Klassert 		else if (tail != segs)
430707b26c94SSteffen Klassert 			skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
4308802ab55aSAlexander Duyck 	}
4309802ab55aSAlexander Duyck 
4310432c856fSToshiaki Makita 	/* Following permits correct backpressure, for protocols
4311432c856fSToshiaki Makita 	 * using skb_set_owner_w().
4312432c856fSToshiaki Makita 	 * Idea is to transfer ownership from head_skb to the last segment.
4313432c856fSToshiaki Makita 	 */
4314432c856fSToshiaki Makita 	if (head_skb->destructor == sock_wfree) {
4315432c856fSToshiaki Makita 		swap(tail->truesize, head_skb->truesize);
4316432c856fSToshiaki Makita 		swap(tail->destructor, head_skb->destructor);
4317432c856fSToshiaki Makita 		swap(tail->sk, head_skb->sk);
4318432c856fSToshiaki Makita 	}
4319f4c50d99SHerbert Xu 	return segs;
4320f4c50d99SHerbert Xu 
4321f4c50d99SHerbert Xu err:
4322289dccbeSEric Dumazet 	kfree_skb_list(segs);
4323f4c50d99SHerbert Xu 	return ERR_PTR(err);
4324f4c50d99SHerbert Xu }
4325f4c50d99SHerbert Xu EXPORT_SYMBOL_GPL(skb_segment);
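
/* Illustrative sketch only (not a call site in this file): the usual way a
 * GSO path consumes skb_segment().  tx_one_segment() is a hypothetical
 * placeholder for whatever the caller does with each segment.
 *
 *	struct sk_buff *segs, *seg, *next;
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *
 *	consume_skb(skb);
 *	skb_list_walk_safe(segs, seg, next) {
 *		skb_mark_not_on_list(seg);
 *		tx_one_segment(seg);
 *	}
 */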
4326f4c50d99SHerbert Xu 
4327df5042f4SFlorian Westphal #ifdef CONFIG_SKB_EXTENSIONS
4328df5042f4SFlorian Westphal #define SKB_EXT_ALIGN_VALUE	8
4329df5042f4SFlorian Westphal #define SKB_EXT_CHUNKSIZEOF(x)	(ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
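
/* SKB_EXT_CHUNKSIZEOF() expresses a structure's size in SKB_EXT_ALIGN_VALUE
 * (8 byte) chunks, rounding up.  Worked example (illustrative numbers only):
 * a 20 byte extension needs ALIGN(20, 8) / 8 = 24 / 8 = 3 chunks.
 */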
4330df5042f4SFlorian Westphal 
4331df5042f4SFlorian Westphal static const u8 skb_ext_type_len[] = {
4332df5042f4SFlorian Westphal #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4333df5042f4SFlorian Westphal 	[SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info),
4334df5042f4SFlorian Westphal #endif
43354165079bSFlorian Westphal #ifdef CONFIG_XFRM
43364165079bSFlorian Westphal 	[SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path),
43374165079bSFlorian Westphal #endif
433895a7233cSPaul Blakey #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
433995a7233cSPaul Blakey 	[TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext),
434095a7233cSPaul Blakey #endif
43413ee17bc7SMat Martineau #if IS_ENABLED(CONFIG_MPTCP)
43423ee17bc7SMat Martineau 	[SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext),
43433ee17bc7SMat Martineau #endif
434478476d31SJeremy Kerr #if IS_ENABLED(CONFIG_MCTP_FLOWS)
434578476d31SJeremy Kerr 	[SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow),
434678476d31SJeremy Kerr #endif
4347df5042f4SFlorian Westphal };
4348df5042f4SFlorian Westphal 
4349df5042f4SFlorian Westphal static __always_inline unsigned int skb_ext_total_length(void)
4350df5042f4SFlorian Westphal {
4351df5042f4SFlorian Westphal 	return SKB_EXT_CHUNKSIZEOF(struct skb_ext) +
4352df5042f4SFlorian Westphal #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4353df5042f4SFlorian Westphal 		skb_ext_type_len[SKB_EXT_BRIDGE_NF] +
4354df5042f4SFlorian Westphal #endif
43554165079bSFlorian Westphal #ifdef CONFIG_XFRM
43564165079bSFlorian Westphal 		skb_ext_type_len[SKB_EXT_SEC_PATH] +
43574165079bSFlorian Westphal #endif
435895a7233cSPaul Blakey #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
435995a7233cSPaul Blakey 		skb_ext_type_len[TC_SKB_EXT] +
436095a7233cSPaul Blakey #endif
43613ee17bc7SMat Martineau #if IS_ENABLED(CONFIG_MPTCP)
43623ee17bc7SMat Martineau 		skb_ext_type_len[SKB_EXT_MPTCP] +
43633ee17bc7SMat Martineau #endif
436478476d31SJeremy Kerr #if IS_ENABLED(CONFIG_MCTP_FLOWS)
436578476d31SJeremy Kerr 		skb_ext_type_len[SKB_EXT_MCTP] +
436678476d31SJeremy Kerr #endif
4367df5042f4SFlorian Westphal 		0;
4368df5042f4SFlorian Westphal }
4369df5042f4SFlorian Westphal 
4370df5042f4SFlorian Westphal static void skb_extensions_init(void)
4371df5042f4SFlorian Westphal {
4372df5042f4SFlorian Westphal 	BUILD_BUG_ON(SKB_EXT_NUM >= 8);
4373df5042f4SFlorian Westphal 	BUILD_BUG_ON(skb_ext_total_length() > 255);
4374df5042f4SFlorian Westphal 
4375df5042f4SFlorian Westphal 	skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
4376df5042f4SFlorian Westphal 					     SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
4377df5042f4SFlorian Westphal 					     0,
4378df5042f4SFlorian Westphal 					     SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4379df5042f4SFlorian Westphal 					     NULL);
4380df5042f4SFlorian Westphal }
4381df5042f4SFlorian Westphal #else
4382df5042f4SFlorian Westphal static void skb_extensions_init(void) {}
4383df5042f4SFlorian Westphal #endif
4384df5042f4SFlorian Westphal 
43851da177e4SLinus Torvalds void __init skb_init(void)
43861da177e4SLinus Torvalds {
438779a8a642SKees Cook 	skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
43881da177e4SLinus Torvalds 					      sizeof(struct sk_buff),
43891da177e4SLinus Torvalds 					      0,
4390e5d679f3SAlexey Dobriyan 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
439179a8a642SKees Cook 					      offsetof(struct sk_buff, cb),
439279a8a642SKees Cook 					      sizeof_field(struct sk_buff, cb),
439320c2df83SPaul Mundt 					      NULL);
4394d179cd12SDavid S. Miller 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
4395d0bf4a9eSEric Dumazet 						sizeof(struct sk_buff_fclones),
4396d179cd12SDavid S. Miller 						0,
4397e5d679f3SAlexey Dobriyan 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
439820c2df83SPaul Mundt 						NULL);
4399df5042f4SFlorian Westphal 	skb_extensions_init();
44001da177e4SLinus Torvalds }
44011da177e4SLinus Torvalds 
440251c739d1SDavid S. Miller static int
440348a1df65SJason A. Donenfeld __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
440448a1df65SJason A. Donenfeld 	       unsigned int recursion_level)
4405716ea3a7SDavid Howells {
44061a028e50SDavid S. Miller 	int start = skb_headlen(skb);
44071a028e50SDavid S. Miller 	int i, copy = start - offset;
4408fbb398a8SDavid S. Miller 	struct sk_buff *frag_iter;
4409716ea3a7SDavid Howells 	int elt = 0;
4410716ea3a7SDavid Howells 
441148a1df65SJason A. Donenfeld 	if (unlikely(recursion_level >= 24))
441248a1df65SJason A. Donenfeld 		return -EMSGSIZE;
441348a1df65SJason A. Donenfeld 
4414716ea3a7SDavid Howells 	if (copy > 0) {
4415716ea3a7SDavid Howells 		if (copy > len)
4416716ea3a7SDavid Howells 			copy = len;
4417642f1490SJens Axboe 		sg_set_buf(sg, skb->data + offset, copy);
4418716ea3a7SDavid Howells 		elt++;
4419716ea3a7SDavid Howells 		if ((len -= copy) == 0)
4420716ea3a7SDavid Howells 			return elt;
4421716ea3a7SDavid Howells 		offset += copy;
4422716ea3a7SDavid Howells 	}
4423716ea3a7SDavid Howells 
4424716ea3a7SDavid Howells 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
44251a028e50SDavid S. Miller 		int end;
4426716ea3a7SDavid Howells 
4427547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
44281a028e50SDavid S. Miller 
44299e903e08SEric Dumazet 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
4430716ea3a7SDavid Howells 		if ((copy = end - offset) > 0) {
4431716ea3a7SDavid Howells 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
443248a1df65SJason A. Donenfeld 			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
443348a1df65SJason A. Donenfeld 				return -EMSGSIZE;
4434716ea3a7SDavid Howells 
4435716ea3a7SDavid Howells 			if (copy > len)
4436716ea3a7SDavid Howells 				copy = len;
4437ea2ab693SIan Campbell 			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
4438b54c9d5bSJonathan Lemon 				    skb_frag_off(frag) + offset - start);
4439716ea3a7SDavid Howells 			elt++;
4440716ea3a7SDavid Howells 			if (!(len -= copy))
4441716ea3a7SDavid Howells 				return elt;
4442716ea3a7SDavid Howells 			offset += copy;
4443716ea3a7SDavid Howells 		}
44441a028e50SDavid S. Miller 		start = end;
4445716ea3a7SDavid Howells 	}
4446716ea3a7SDavid Howells 
4447fbb398a8SDavid S. Miller 	skb_walk_frags(skb, frag_iter) {
444848a1df65SJason A. Donenfeld 		int end, ret;
4449716ea3a7SDavid Howells 
4450547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
44511a028e50SDavid S. Miller 
4452fbb398a8SDavid S. Miller 		end = start + frag_iter->len;
4453716ea3a7SDavid Howells 		if ((copy = end - offset) > 0) {
445448a1df65SJason A. Donenfeld 			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
445548a1df65SJason A. Donenfeld 				return -EMSGSIZE;
445648a1df65SJason A. Donenfeld 
4457716ea3a7SDavid Howells 			if (copy > len)
4458716ea3a7SDavid Howells 				copy = len;
445948a1df65SJason A. Donenfeld 			ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
446048a1df65SJason A. Donenfeld 					      copy, recursion_level + 1);
446148a1df65SJason A. Donenfeld 			if (unlikely(ret < 0))
446248a1df65SJason A. Donenfeld 				return ret;
446348a1df65SJason A. Donenfeld 			elt += ret;
4464716ea3a7SDavid Howells 			if ((len -= copy) == 0)
4465716ea3a7SDavid Howells 				return elt;
4466716ea3a7SDavid Howells 			offset += copy;
4467716ea3a7SDavid Howells 		}
44681a028e50SDavid S. Miller 		start = end;
4469716ea3a7SDavid Howells 	}
4470716ea3a7SDavid Howells 	BUG_ON(len);
4471716ea3a7SDavid Howells 	return elt;
4472716ea3a7SDavid Howells }
4473716ea3a7SDavid Howells 
447448a1df65SJason A. Donenfeld /**
447548a1df65SJason A. Donenfeld  *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
447648a1df65SJason A. Donenfeld  *	@skb: Socket buffer containing the buffers to be mapped
447748a1df65SJason A. Donenfeld  *	@sg: The scatter-gather list to map into
447848a1df65SJason A. Donenfeld  *	@offset: The offset into the buffer's contents to start mapping
447948a1df65SJason A. Donenfeld  *	@len: Length of buffer space to be mapped
448048a1df65SJason A. Donenfeld  *
448148a1df65SJason A. Donenfeld  *	Fill the specified scatter-gather list with mappings/pointers into a
448248a1df65SJason A. Donenfeld  *	region of the buffer space attached to a socket buffer. Returns either
448348a1df65SJason A. Donenfeld  *	the number of scatterlist items used, or -EMSGSIZE if the contents
448448a1df65SJason A. Donenfeld  *	could not fit.
448548a1df65SJason A. Donenfeld  */
448648a1df65SJason A. Donenfeld int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
448748a1df65SJason A. Donenfeld {
448848a1df65SJason A. Donenfeld 	int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
448948a1df65SJason A. Donenfeld 
449048a1df65SJason A. Donenfeld 	if (nsg <= 0)
449148a1df65SJason A. Donenfeld 		return nsg;
449248a1df65SJason A. Donenfeld 
449348a1df65SJason A. Donenfeld 	sg_mark_end(&sg[nsg - 1]);
449448a1df65SJason A. Donenfeld 
449548a1df65SJason A. Donenfeld 	return nsg;
449648a1df65SJason A. Donenfeld }
449748a1df65SJason A. Donenfeld EXPORT_SYMBOL_GPL(skb_to_sgvec);
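
/* Illustrative sketch (hypothetical caller; MAX_SG is not defined here):
 * mapping a whole skb into a fixed-size scatterlist, e.g. for a crypto
 * operation.
 *
 *	struct scatterlist sg[MAX_SG];
 *	int nents;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nents = skb_to_sgvec(skb, sg, 0, skb->len);
 *	if (nents < 0)
 *		return nents;
 *
 * A negative return (-EMSGSIZE) means sg[] was too small.  On success,
 * sg[0 .. nents - 1] describe the skb data and the last used entry has
 * been terminated with sg_mark_end().
 */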
449848a1df65SJason A. Donenfeld 
449925a91d8dSFan Du /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the given
450025a91d8dSFan Du  * sglist without marking the sg entry which contains the last skb data as the end.
450125a91d8dSFan Du  * So the caller can manipulate the sg list at will when appending new data after
450225a91d8dSFan Du  * the first call, without calling sg_unmark_end to expand the sg list.
450325a91d8dSFan Du  *
450425a91d8dSFan Du  * Scenario to use skb_to_sgvec_nomark:
450525a91d8dSFan Du  * 1. sg_init_table
450625a91d8dSFan Du  * 2. skb_to_sgvec_nomark(payload1)
450725a91d8dSFan Du  * 3. skb_to_sgvec_nomark(payload2)
450825a91d8dSFan Du  *
450925a91d8dSFan Du  * This is equivalent to:
451025a91d8dSFan Du  * 1. sg_init_table
451125a91d8dSFan Du  * 2. skb_to_sgvec(payload1)
451225a91d8dSFan Du  * 3. sg_unmark_end
451325a91d8dSFan Du  * 4. skb_to_sgvec(payload2)
451425a91d8dSFan Du  *
451525a91d8dSFan Du  * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
451625a91d8dSFan Du  * is preferable.
451725a91d8dSFan Du  */
451825a91d8dSFan Du int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
451925a91d8dSFan Du 			int offset, int len)
452025a91d8dSFan Du {
452148a1df65SJason A. Donenfeld 	return __skb_to_sgvec(skb, sg, offset, len, 0);
452225a91d8dSFan Du }
452325a91d8dSFan Du EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
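
/* Illustrative sketch of the scenario above (need_trailer, trailer and
 * trailer_len are hypothetical caller state):
 *
 *	sg_init_table(sg, sg_max);
 *	n = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
 *	if (n < 0)
 *		return n;
 *	if (need_trailer)
 *		sg_set_buf(&sg[n++], trailer, trailer_len);
 *	sg_mark_end(&sg[n - 1]);
 */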
452425a91d8dSFan Du 
452551c739d1SDavid S. Miller 
452651c739d1SDavid S. Miller 
4527716ea3a7SDavid Howells /**
4528716ea3a7SDavid Howells  *	skb_cow_data - Check that a socket buffer's data buffers are writable
4529716ea3a7SDavid Howells  *	@skb: The socket buffer to check.
4530716ea3a7SDavid Howells  *	@tailbits: Amount of trailing space to be added
4531716ea3a7SDavid Howells  *	@trailer: Returned pointer to the skb where the @tailbits space begins
4532716ea3a7SDavid Howells  *
4533716ea3a7SDavid Howells  *	Make sure that the data buffers attached to a socket buffer are
4534716ea3a7SDavid Howells  *	writable. If they are not, private copies are made of the data buffers
4535716ea3a7SDavid Howells  *	and the socket buffer is set to use these instead.
4536716ea3a7SDavid Howells  *
4537716ea3a7SDavid Howells  *	If @tailbits is given, make sure that there is space to write @tailbits
4538716ea3a7SDavid Howells  *	bytes of data beyond current end of socket buffer.  @trailer will be
4539716ea3a7SDavid Howells  *	set to point to the skb in which this space begins.
4540716ea3a7SDavid Howells  *
4541716ea3a7SDavid Howells  *	The number of scatterlist elements required to completely map the
4542716ea3a7SDavid Howells  *	COW'd and extended socket buffer will be returned.
4543716ea3a7SDavid Howells  */
4544716ea3a7SDavid Howells int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
4545716ea3a7SDavid Howells {
4546716ea3a7SDavid Howells 	int copyflag;
4547716ea3a7SDavid Howells 	int elt;
4548716ea3a7SDavid Howells 	struct sk_buff *skb1, **skb_p;
4549716ea3a7SDavid Howells 
4550716ea3a7SDavid Howells 	/* If skb is cloned or its head is paged, reallocate
4551716ea3a7SDavid Howells 	 * head pulling out all the pages (pages are considered not writable
4552716ea3a7SDavid Howells 	 * at the moment even if they are anonymous).
4553716ea3a7SDavid Howells 	 */
4554716ea3a7SDavid Howells 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
4555c15fc199SMiaohe Lin 	    !__pskb_pull_tail(skb, __skb_pagelen(skb)))
4556716ea3a7SDavid Howells 		return -ENOMEM;
4557716ea3a7SDavid Howells 
4558716ea3a7SDavid Howells 	/* Easy case. Most of packets will go this way. */
455921dc3301SDavid S. Miller 	if (!skb_has_frag_list(skb)) {
4560716ea3a7SDavid Howells 		/* A little trouble: not enough space for the trailer.
4561716ea3a7SDavid Howells 		 * This should not happen when the stack is tuned to generate
4562716ea3a7SDavid Howells 		 * good frames. OK, on a miss we reallocate and reserve even more
4563716ea3a7SDavid Howells 		 * space; 128 bytes is fair. */
4564716ea3a7SDavid Howells 
4565716ea3a7SDavid Howells 		if (skb_tailroom(skb) < tailbits &&
4566716ea3a7SDavid Howells 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
4567716ea3a7SDavid Howells 			return -ENOMEM;
4568716ea3a7SDavid Howells 
4569716ea3a7SDavid Howells 		/* Voila! */
4570716ea3a7SDavid Howells 		*trailer = skb;
4571716ea3a7SDavid Howells 		return 1;
4572716ea3a7SDavid Howells 	}
4573716ea3a7SDavid Howells 
4574716ea3a7SDavid Howells 	/* Misery. We are in trouble, going to mince the fragments... */
4575716ea3a7SDavid Howells 
4576716ea3a7SDavid Howells 	elt = 1;
4577716ea3a7SDavid Howells 	skb_p = &skb_shinfo(skb)->frag_list;
4578716ea3a7SDavid Howells 	copyflag = 0;
4579716ea3a7SDavid Howells 
4580716ea3a7SDavid Howells 	while ((skb1 = *skb_p) != NULL) {
4581716ea3a7SDavid Howells 		int ntail = 0;
4582716ea3a7SDavid Howells 
4583716ea3a7SDavid Howells 		/* The fragment is partially pulled by someone;
4584716ea3a7SDavid Howells 		 * this can happen on input. Copy it and everything
4585716ea3a7SDavid Howells 		 * after it. */
4586716ea3a7SDavid Howells 
4587716ea3a7SDavid Howells 		if (skb_shared(skb1))
4588716ea3a7SDavid Howells 			copyflag = 1;
4589716ea3a7SDavid Howells 
4590716ea3a7SDavid Howells 		/* If the skb is the last, worry about trailer. */
4591716ea3a7SDavid Howells 
4592716ea3a7SDavid Howells 		if (skb1->next == NULL && tailbits) {
4593716ea3a7SDavid Howells 			if (skb_shinfo(skb1)->nr_frags ||
459421dc3301SDavid S. Miller 			    skb_has_frag_list(skb1) ||
4595716ea3a7SDavid Howells 			    skb_tailroom(skb1) < tailbits)
4596716ea3a7SDavid Howells 				ntail = tailbits + 128;
4597716ea3a7SDavid Howells 		}
4598716ea3a7SDavid Howells 
4599716ea3a7SDavid Howells 		if (copyflag ||
4600716ea3a7SDavid Howells 		    skb_cloned(skb1) ||
4601716ea3a7SDavid Howells 		    ntail ||
4602716ea3a7SDavid Howells 		    skb_shinfo(skb1)->nr_frags ||
460321dc3301SDavid S. Miller 		    skb_has_frag_list(skb1)) {
4604716ea3a7SDavid Howells 			struct sk_buff *skb2;
4605716ea3a7SDavid Howells 
4606716ea3a7SDavid Howells 			/* Fuck, we are miserable poor guys... */
4607716ea3a7SDavid Howells 			if (ntail == 0)
4608716ea3a7SDavid Howells 				skb2 = skb_copy(skb1, GFP_ATOMIC);
4609716ea3a7SDavid Howells 			else
4610716ea3a7SDavid Howells 				skb2 = skb_copy_expand(skb1,
4611716ea3a7SDavid Howells 						       skb_headroom(skb1),
4612716ea3a7SDavid Howells 						       ntail,
4613716ea3a7SDavid Howells 						       GFP_ATOMIC);
4614716ea3a7SDavid Howells 			if (unlikely(skb2 == NULL))
4615716ea3a7SDavid Howells 				return -ENOMEM;
4616716ea3a7SDavid Howells 
4617716ea3a7SDavid Howells 			if (skb1->sk)
4618716ea3a7SDavid Howells 				skb_set_owner_w(skb2, skb1->sk);
4619716ea3a7SDavid Howells 
4620716ea3a7SDavid Howells 			/* Looking around. Are we still alive?
4621716ea3a7SDavid Howells 			 * OK, link new skb, drop old one */
4622716ea3a7SDavid Howells 
4623716ea3a7SDavid Howells 			skb2->next = skb1->next;
4624716ea3a7SDavid Howells 			*skb_p = skb2;
4625716ea3a7SDavid Howells 			kfree_skb(skb1);
4626716ea3a7SDavid Howells 			skb1 = skb2;
4627716ea3a7SDavid Howells 		}
4628716ea3a7SDavid Howells 		elt++;
4629716ea3a7SDavid Howells 		*trailer = skb1;
4630716ea3a7SDavid Howells 		skb_p = &skb1->next;
4631716ea3a7SDavid Howells 	}
4632716ea3a7SDavid Howells 
4633716ea3a7SDavid Howells 	return elt;
4634716ea3a7SDavid Howells }
4635b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_cow_data);
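
/* Illustrative sketch, loosely modelled on how IPsec-style transforms use
 * skb_cow_data() before appending a trailer (tlen is a hypothetical
 * trailer length):
 *
 *	struct sk_buff *trailer;
 *	int nsg;
 *
 *	nsg = skb_cow_data(skb, tlen, &trailer);
 *	if (nsg < 0)
 *		return nsg;
 *
 * All data buffers are now private and tlen bytes of tailroom are
 * available in 'trailer', e.g. for pskb_put(skb, trailer, tlen); nsg is
 * the scatterlist size needed to map the result.
 */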
4636716ea3a7SDavid Howells 
4637b1faf566SEric Dumazet static void sock_rmem_free(struct sk_buff *skb)
4638b1faf566SEric Dumazet {
4639b1faf566SEric Dumazet 	struct sock *sk = skb->sk;
4640b1faf566SEric Dumazet 
4641b1faf566SEric Dumazet 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
4642b1faf566SEric Dumazet }
4643b1faf566SEric Dumazet 
46448605330aSSoheil Hassas Yeganeh static void skb_set_err_queue(struct sk_buff *skb)
46458605330aSSoheil Hassas Yeganeh {
46468605330aSSoheil Hassas Yeganeh 	/* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
46478605330aSSoheil Hassas Yeganeh 	 * So, it is safe to (mis)use it to mark skbs on the error queue.
46488605330aSSoheil Hassas Yeganeh 	 */
46498605330aSSoheil Hassas Yeganeh 	skb->pkt_type = PACKET_OUTGOING;
46508605330aSSoheil Hassas Yeganeh 	BUILD_BUG_ON(PACKET_OUTGOING == 0);
46518605330aSSoheil Hassas Yeganeh }
46528605330aSSoheil Hassas Yeganeh 
4653b1faf566SEric Dumazet /*
4654b1faf566SEric Dumazet  * Note: We don't mem charge error packets (no sk_forward_alloc changes)
4655b1faf566SEric Dumazet  */
4656b1faf566SEric Dumazet int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
4657b1faf566SEric Dumazet {
4658b1faf566SEric Dumazet 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
4659ebb3b78dSEric Dumazet 	    (unsigned int)READ_ONCE(sk->sk_rcvbuf))
4660b1faf566SEric Dumazet 		return -ENOMEM;
4661b1faf566SEric Dumazet 
4662b1faf566SEric Dumazet 	skb_orphan(skb);
4663b1faf566SEric Dumazet 	skb->sk = sk;
4664b1faf566SEric Dumazet 	skb->destructor = sock_rmem_free;
4665b1faf566SEric Dumazet 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
46668605330aSSoheil Hassas Yeganeh 	skb_set_err_queue(skb);
4667b1faf566SEric Dumazet 
4668abb57ea4SEric Dumazet 	/* before exiting rcu section, make sure dst is refcounted */
4669abb57ea4SEric Dumazet 	skb_dst_force(skb);
4670abb57ea4SEric Dumazet 
4671b1faf566SEric Dumazet 	skb_queue_tail(&sk->sk_error_queue, skb);
4672b1faf566SEric Dumazet 	if (!sock_flag(sk, SOCK_DEAD))
4673e3ae2365SAlexander Aring 		sk_error_report(sk);
4674b1faf566SEric Dumazet 	return 0;
4675b1faf566SEric Dumazet }
4676b1faf566SEric Dumazet EXPORT_SYMBOL(sock_queue_err_skb);
4677b1faf566SEric Dumazet 
467883a1a1a7SSoheil Hassas Yeganeh static bool is_icmp_err_skb(const struct sk_buff *skb)
467983a1a1a7SSoheil Hassas Yeganeh {
468083a1a1a7SSoheil Hassas Yeganeh 	return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
468183a1a1a7SSoheil Hassas Yeganeh 		       SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
468283a1a1a7SSoheil Hassas Yeganeh }
468383a1a1a7SSoheil Hassas Yeganeh 
4684364a9e93SWillem de Bruijn struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
4685364a9e93SWillem de Bruijn {
4686364a9e93SWillem de Bruijn 	struct sk_buff_head *q = &sk->sk_error_queue;
468783a1a1a7SSoheil Hassas Yeganeh 	struct sk_buff *skb, *skb_next = NULL;
468883a1a1a7SSoheil Hassas Yeganeh 	bool icmp_next = false;
4689997d5c3fSEric Dumazet 	unsigned long flags;
4690364a9e93SWillem de Bruijn 
4691997d5c3fSEric Dumazet 	spin_lock_irqsave(&q->lock, flags);
4692364a9e93SWillem de Bruijn 	skb = __skb_dequeue(q);
469338b25793SSoheil Hassas Yeganeh 	if (skb && (skb_next = skb_peek(q))) {
469483a1a1a7SSoheil Hassas Yeganeh 		icmp_next = is_icmp_err_skb(skb_next);
469538b25793SSoheil Hassas Yeganeh 		if (icmp_next)
4696985f7337SWillem de Bruijn 			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
469738b25793SSoheil Hassas Yeganeh 	}
4698997d5c3fSEric Dumazet 	spin_unlock_irqrestore(&q->lock, flags);
4699364a9e93SWillem de Bruijn 
470083a1a1a7SSoheil Hassas Yeganeh 	if (is_icmp_err_skb(skb) && !icmp_next)
470183a1a1a7SSoheil Hassas Yeganeh 		sk->sk_err = 0;
470283a1a1a7SSoheil Hassas Yeganeh 
470383a1a1a7SSoheil Hassas Yeganeh 	if (skb_next)
4704e3ae2365SAlexander Aring 		sk_error_report(sk);
4705364a9e93SWillem de Bruijn 
4706364a9e93SWillem de Bruijn 	return skb;
4707364a9e93SWillem de Bruijn }
4708364a9e93SWillem de Bruijn EXPORT_SYMBOL(sock_dequeue_err_skb);
4709364a9e93SWillem de Bruijn 
4710cab41c47SAlexander Duyck /**
4711cab41c47SAlexander Duyck  * skb_clone_sk - create clone of skb, and take reference to socket
4712cab41c47SAlexander Duyck  * @skb: the skb to clone
4713cab41c47SAlexander Duyck  *
4714cab41c47SAlexander Duyck  * This function creates a clone of a buffer that holds a reference on
4715cab41c47SAlexander Duyck  * sk_refcnt.  Buffers created via this function are meant to be
4716cab41c47SAlexander Duyck  * returned using sock_queue_err_skb, or freed via kfree_skb.
4717cab41c47SAlexander Duyck  *
4718cab41c47SAlexander Duyck  * When passing buffers allocated with this function to sock_queue_err_skb
4719cab41c47SAlexander Duyck  * it is necessary to wrap the call with sock_hold/sock_put in order to
4720cab41c47SAlexander Duyck  * prevent the socket from being released prior to being enqueued on
4721cab41c47SAlexander Duyck  * the sk_error_queue.
4722cab41c47SAlexander Duyck  */
472362bccb8cSAlexander Duyck struct sk_buff *skb_clone_sk(struct sk_buff *skb)
472462bccb8cSAlexander Duyck {
472562bccb8cSAlexander Duyck 	struct sock *sk = skb->sk;
472662bccb8cSAlexander Duyck 	struct sk_buff *clone;
472762bccb8cSAlexander Duyck 
472841c6d650SReshetova, Elena 	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
472962bccb8cSAlexander Duyck 		return NULL;
473062bccb8cSAlexander Duyck 
473162bccb8cSAlexander Duyck 	clone = skb_clone(skb, GFP_ATOMIC);
473262bccb8cSAlexander Duyck 	if (!clone) {
473362bccb8cSAlexander Duyck 		sock_put(sk);
473462bccb8cSAlexander Duyck 		return NULL;
473562bccb8cSAlexander Duyck 	}
473662bccb8cSAlexander Duyck 
473762bccb8cSAlexander Duyck 	clone->sk = sk;
473862bccb8cSAlexander Duyck 	clone->destructor = sock_efree;
473962bccb8cSAlexander Duyck 
474062bccb8cSAlexander Duyck 	return clone;
474162bccb8cSAlexander Duyck }
474262bccb8cSAlexander Duyck EXPORT_SYMBOL(skb_clone_sk);
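
/* Illustrative sketch of a common driver pattern (priv and ns are
 * hypothetical driver state, not part of this file): clone at transmit
 * time, then complete the timestamp from the TX completion path.
 *
 *	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
 *		priv->tstamp_skb = skb_clone_sk(skb);
 *		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 *	}
 *
 *	...
 *
 *	struct skb_shared_hwtstamps hwts = { .hwtstamp = ns_to_ktime(ns) };
 *
 *	if (priv->tstamp_skb)
 *		skb_complete_tx_timestamp(priv->tstamp_skb, &hwts);
 */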
474362bccb8cSAlexander Duyck 
474437846ef0SAlexander Duyck static void __skb_complete_tx_timestamp(struct sk_buff *skb,
474537846ef0SAlexander Duyck 					struct sock *sk,
47464ef1b286SSoheil Hassas Yeganeh 					int tstype,
47474ef1b286SSoheil Hassas Yeganeh 					bool opt_stats)
4748ac45f602SPatrick Ohly {
4749ac45f602SPatrick Ohly 	struct sock_exterr_skb *serr;
4750ac45f602SPatrick Ohly 	int err;
4751ac45f602SPatrick Ohly 
47524ef1b286SSoheil Hassas Yeganeh 	BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
47534ef1b286SSoheil Hassas Yeganeh 
4754ac45f602SPatrick Ohly 	serr = SKB_EXT_ERR(skb);
4755ac45f602SPatrick Ohly 	memset(serr, 0, sizeof(*serr));
4756ac45f602SPatrick Ohly 	serr->ee.ee_errno = ENOMSG;
4757ac45f602SPatrick Ohly 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
4758e7fd2885SWillem de Bruijn 	serr->ee.ee_info = tstype;
47594ef1b286SSoheil Hassas Yeganeh 	serr->opt_stats = opt_stats;
47601862d620SWillem de Bruijn 	serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
47614ed2d765SWillem de Bruijn 	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
476209c2d251SWillem de Bruijn 		serr->ee.ee_data = skb_shinfo(skb)->tskey;
476342f67eeaSEric Dumazet 		if (sk_is_tcp(sk))
4764a1cdec57SEric Dumazet 			serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
47654ed2d765SWillem de Bruijn 	}
476629030374SEric Dumazet 
4767ac45f602SPatrick Ohly 	err = sock_queue_err_skb(sk, skb);
476829030374SEric Dumazet 
4769ac45f602SPatrick Ohly 	if (err)
4770ac45f602SPatrick Ohly 		kfree_skb(skb);
4771ac45f602SPatrick Ohly }
477237846ef0SAlexander Duyck 
4773b245be1fSWillem de Bruijn static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
4774b245be1fSWillem de Bruijn {
4775b245be1fSWillem de Bruijn 	bool ret;
4776b245be1fSWillem de Bruijn 
4777b245be1fSWillem de Bruijn 	if (likely(sysctl_tstamp_allow_data || tsonly))
4778b245be1fSWillem de Bruijn 		return true;
4779b245be1fSWillem de Bruijn 
4780b245be1fSWillem de Bruijn 	read_lock_bh(&sk->sk_callback_lock);
4781b245be1fSWillem de Bruijn 	ret = sk->sk_socket && sk->sk_socket->file &&
4782b245be1fSWillem de Bruijn 	      file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
4783b245be1fSWillem de Bruijn 	read_unlock_bh(&sk->sk_callback_lock);
4784b245be1fSWillem de Bruijn 	return ret;
4785b245be1fSWillem de Bruijn }
4786b245be1fSWillem de Bruijn 
478737846ef0SAlexander Duyck void skb_complete_tx_timestamp(struct sk_buff *skb,
478837846ef0SAlexander Duyck 			       struct skb_shared_hwtstamps *hwtstamps)
478937846ef0SAlexander Duyck {
479037846ef0SAlexander Duyck 	struct sock *sk = skb->sk;
479137846ef0SAlexander Duyck 
4792b245be1fSWillem de Bruijn 	if (!skb_may_tx_timestamp(sk, false))
479335b99dffSWillem de Bruijn 		goto err;
4794b245be1fSWillem de Bruijn 
47959ac25fc0SEric Dumazet 	/* Take a reference to prevent skb_orphan() from freeing the socket,
47969ac25fc0SEric Dumazet 	 * but only if the socket refcount is not zero.
47979ac25fc0SEric Dumazet 	 */
479841c6d650SReshetova, Elena 	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
479937846ef0SAlexander Duyck 		*skb_hwtstamps(skb) = *hwtstamps;
48004ef1b286SSoheil Hassas Yeganeh 		__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
480137846ef0SAlexander Duyck 		sock_put(sk);
480235b99dffSWillem de Bruijn 		return;
480337846ef0SAlexander Duyck 	}
480435b99dffSWillem de Bruijn 
480535b99dffSWillem de Bruijn err:
480635b99dffSWillem de Bruijn 	kfree_skb(skb);
48079ac25fc0SEric Dumazet }
480837846ef0SAlexander Duyck EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
480937846ef0SAlexander Duyck 
481037846ef0SAlexander Duyck void __skb_tstamp_tx(struct sk_buff *orig_skb,
4811e7ed11eeSYousuk Seung 		     const struct sk_buff *ack_skb,
481237846ef0SAlexander Duyck 		     struct skb_shared_hwtstamps *hwtstamps,
481337846ef0SAlexander Duyck 		     struct sock *sk, int tstype)
481437846ef0SAlexander Duyck {
481537846ef0SAlexander Duyck 	struct sk_buff *skb;
48164ef1b286SSoheil Hassas Yeganeh 	bool tsonly, opt_stats = false;
481737846ef0SAlexander Duyck 
48183a8dd971SWillem de Bruijn 	if (!sk)
48193a8dd971SWillem de Bruijn 		return;
48203a8dd971SWillem de Bruijn 
4821b50a5c70SMiroslav Lichvar 	if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
4822b50a5c70SMiroslav Lichvar 	    skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
4823b50a5c70SMiroslav Lichvar 		return;
4824b50a5c70SMiroslav Lichvar 
48253a8dd971SWillem de Bruijn 	tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
48263a8dd971SWillem de Bruijn 	if (!skb_may_tx_timestamp(sk, tsonly))
482737846ef0SAlexander Duyck 		return;
482837846ef0SAlexander Duyck 
48291c885808SFrancis Yan 	if (tsonly) {
48301c885808SFrancis Yan #ifdef CONFIG_INET
48311c885808SFrancis Yan 		if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
483242f67eeaSEric Dumazet 		    sk_is_tcp(sk)) {
4833e7ed11eeSYousuk Seung 			skb = tcp_get_timestamping_opt_stats(sk, orig_skb,
4834e7ed11eeSYousuk Seung 							     ack_skb);
48354ef1b286SSoheil Hassas Yeganeh 			opt_stats = true;
48364ef1b286SSoheil Hassas Yeganeh 		} else
48371c885808SFrancis Yan #endif
48381c885808SFrancis Yan 			skb = alloc_skb(0, GFP_ATOMIC);
48391c885808SFrancis Yan 	} else {
484037846ef0SAlexander Duyck 		skb = skb_clone(orig_skb, GFP_ATOMIC);
48411c885808SFrancis Yan 	}
484237846ef0SAlexander Duyck 	if (!skb)
484337846ef0SAlexander Duyck 		return;
484437846ef0SAlexander Duyck 
484549ca0d8bSWillem de Bruijn 	if (tsonly) {
4846fff88030SWillem de Bruijn 		skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
4847fff88030SWillem de Bruijn 					     SKBTX_ANY_TSTAMP;
484849ca0d8bSWillem de Bruijn 		skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
484949ca0d8bSWillem de Bruijn 	}
485049ca0d8bSWillem de Bruijn 
485149ca0d8bSWillem de Bruijn 	if (hwtstamps)
485249ca0d8bSWillem de Bruijn 		*skb_hwtstamps(skb) = *hwtstamps;
485349ca0d8bSWillem de Bruijn 	else
485449ca0d8bSWillem de Bruijn 		skb->tstamp = ktime_get_real();
485549ca0d8bSWillem de Bruijn 
48564ef1b286SSoheil Hassas Yeganeh 	__skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
485737846ef0SAlexander Duyck }
4858e7fd2885SWillem de Bruijn EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
4859e7fd2885SWillem de Bruijn 
4860e7fd2885SWillem de Bruijn void skb_tstamp_tx(struct sk_buff *orig_skb,
4861e7fd2885SWillem de Bruijn 		   struct skb_shared_hwtstamps *hwtstamps)
4862e7fd2885SWillem de Bruijn {
4863e7ed11eeSYousuk Seung 	return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk,
4864e7fd2885SWillem de Bruijn 			       SCM_TSTAMP_SND);
4865e7fd2885SWillem de Bruijn }
4866ac45f602SPatrick Ohly EXPORT_SYMBOL_GPL(skb_tstamp_tx);
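
/* Illustrative sketch: a driver that still owns the original skb at
 * completion time can report a hardware transmit timestamp directly
 * (ns is a hypothetical value read from the NIC):
 *
 *	struct skb_shared_hwtstamps hwtstamps = {
 *		.hwtstamp = ns_to_ktime(ns),
 *	};
 *
 *	skb_tstamp_tx(skb, &hwtstamps);
 */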
4867ac45f602SPatrick Ohly 
48686e3e939fSJohannes Berg void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
48696e3e939fSJohannes Berg {
48706e3e939fSJohannes Berg 	struct sock *sk = skb->sk;
48716e3e939fSJohannes Berg 	struct sock_exterr_skb *serr;
4872dd4f1072SEric Dumazet 	int err = 1;
48736e3e939fSJohannes Berg 
48746e3e939fSJohannes Berg 	skb->wifi_acked_valid = 1;
48756e3e939fSJohannes Berg 	skb->wifi_acked = acked;
48766e3e939fSJohannes Berg 
48776e3e939fSJohannes Berg 	serr = SKB_EXT_ERR(skb);
48786e3e939fSJohannes Berg 	memset(serr, 0, sizeof(*serr));
48796e3e939fSJohannes Berg 	serr->ee.ee_errno = ENOMSG;
48806e3e939fSJohannes Berg 	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
48816e3e939fSJohannes Berg 
4882dd4f1072SEric Dumazet 	/* Take a reference to prevent skb_orphan() from freeing the socket,
4883dd4f1072SEric Dumazet 	 * but only if the socket refcount is not zero.
4884dd4f1072SEric Dumazet 	 */
488541c6d650SReshetova, Elena 	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
48866e3e939fSJohannes Berg 		err = sock_queue_err_skb(sk, skb);
4887dd4f1072SEric Dumazet 		sock_put(sk);
4888dd4f1072SEric Dumazet 	}
48896e3e939fSJohannes Berg 	if (err)
48906e3e939fSJohannes Berg 		kfree_skb(skb);
48916e3e939fSJohannes Berg }
48926e3e939fSJohannes Berg EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
48936e3e939fSJohannes Berg 
4894f35d9d8aSRusty Russell /**
4895f35d9d8aSRusty Russell  * skb_partial_csum_set - set up and verify partial csum values for packet
4896f35d9d8aSRusty Russell  * @skb: the skb to set
4897f35d9d8aSRusty Russell  * @start: the number of bytes after skb->data to start checksumming.
4898f35d9d8aSRusty Russell  * @off: the offset from start to place the checksum.
4899f35d9d8aSRusty Russell  *
4900f35d9d8aSRusty Russell  * For untrusted partially-checksummed packets, we need to make sure the values
4901f35d9d8aSRusty Russell  * for skb->csum_start and skb->csum_offset are valid so we don't oops.
4902f35d9d8aSRusty Russell  *
4903f35d9d8aSRusty Russell  * This function checks and sets those values and skb->ip_summed: if this
4904f35d9d8aSRusty Russell  * returns false you should drop the packet.
4905f35d9d8aSRusty Russell  */
4906f35d9d8aSRusty Russell bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
4907f35d9d8aSRusty Russell {
490852b5d6f5SEric Dumazet 	u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
490952b5d6f5SEric Dumazet 	u32 csum_start = skb_headroom(skb) + (u32)start;
491052b5d6f5SEric Dumazet 
491152b5d6f5SEric Dumazet 	if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
491252b5d6f5SEric Dumazet 		net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
491352b5d6f5SEric Dumazet 				     start, off, skb_headroom(skb), skb_headlen(skb));
4914f35d9d8aSRusty Russell 		return false;
4915f35d9d8aSRusty Russell 	}
4916f35d9d8aSRusty Russell 	skb->ip_summed = CHECKSUM_PARTIAL;
491752b5d6f5SEric Dumazet 	skb->csum_start = csum_start;
4918f35d9d8aSRusty Russell 	skb->csum_offset = off;
4919e5d5decaSJason Wang 	skb_set_transport_header(skb, start);
4920f35d9d8aSRusty Russell 	return true;
4921f35d9d8aSRusty Russell }
4922b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_partial_csum_set);
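
/* Illustrative sketch, in the spirit of virtio-net style receive paths
 * where csum_start/csum_offset come from an untrusted (hypothetical)
 * header:
 *
 *	if (!skb_partial_csum_set(skb, csum_start, csum_offset)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 *
 * On success skb->ip_summed is CHECKSUM_PARTIAL with validated offsets.
 */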
4923f35d9d8aSRusty Russell 
4924ed1f50c3SPaul Durrant static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
4925ed1f50c3SPaul Durrant 			       unsigned int max)
4926ed1f50c3SPaul Durrant {
4927ed1f50c3SPaul Durrant 	if (skb_headlen(skb) >= len)
4928ed1f50c3SPaul Durrant 		return 0;
4929ed1f50c3SPaul Durrant 
4930ed1f50c3SPaul Durrant 	/* If we need to pullup then pullup to the max, so we
4931ed1f50c3SPaul Durrant 	 * won't need to do it again.
4932ed1f50c3SPaul Durrant 	 */
4933ed1f50c3SPaul Durrant 	if (max > skb->len)
4934ed1f50c3SPaul Durrant 		max = skb->len;
4935ed1f50c3SPaul Durrant 
4936ed1f50c3SPaul Durrant 	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
4937ed1f50c3SPaul Durrant 		return -ENOMEM;
4938ed1f50c3SPaul Durrant 
4939ed1f50c3SPaul Durrant 	if (skb_headlen(skb) < len)
4940ed1f50c3SPaul Durrant 		return -EPROTO;
4941ed1f50c3SPaul Durrant 
4942ed1f50c3SPaul Durrant 	return 0;
4943ed1f50c3SPaul Durrant }
4944ed1f50c3SPaul Durrant 
4945f9708b43SJan Beulich #define MAX_TCP_HDR_LEN (15 * 4)
4946f9708b43SJan Beulich 
4947f9708b43SJan Beulich static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
4948f9708b43SJan Beulich 				      typeof(IPPROTO_IP) proto,
4949f9708b43SJan Beulich 				      unsigned int off)
4950f9708b43SJan Beulich {
4951f9708b43SJan Beulich 	int err;
4952f9708b43SJan Beulich 
4953161d1792SKees Cook 	switch (proto) {
4954f9708b43SJan Beulich 	case IPPROTO_TCP:
4955f9708b43SJan Beulich 		err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
4956f9708b43SJan Beulich 					  off + MAX_TCP_HDR_LEN);
4957f9708b43SJan Beulich 		if (!err && !skb_partial_csum_set(skb, off,
4958f9708b43SJan Beulich 						  offsetof(struct tcphdr,
4959f9708b43SJan Beulich 							   check)))
4960f9708b43SJan Beulich 			err = -EPROTO;
4961f9708b43SJan Beulich 		return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
4962f9708b43SJan Beulich 
4963f9708b43SJan Beulich 	case IPPROTO_UDP:
4964f9708b43SJan Beulich 		err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
4965f9708b43SJan Beulich 					  off + sizeof(struct udphdr));
4966f9708b43SJan Beulich 		if (!err && !skb_partial_csum_set(skb, off,
4967f9708b43SJan Beulich 						  offsetof(struct udphdr,
4968f9708b43SJan Beulich 							   check)))
4969f9708b43SJan Beulich 			err = -EPROTO;
4970f9708b43SJan Beulich 		return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
4971f9708b43SJan Beulich 	}
4972f9708b43SJan Beulich 
4973f9708b43SJan Beulich 	return ERR_PTR(-EPROTO);
4974f9708b43SJan Beulich }
4975f9708b43SJan Beulich 
4976ed1f50c3SPaul Durrant /* This value should be large enough to cover a tagged ethernet header plus
4977ed1f50c3SPaul Durrant  * maximally sized IP and TCP or UDP headers.
4978ed1f50c3SPaul Durrant  */
4979ed1f50c3SPaul Durrant #define MAX_IP_HDR_LEN 128
4980ed1f50c3SPaul Durrant 
4981f9708b43SJan Beulich static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
4982ed1f50c3SPaul Durrant {
4983ed1f50c3SPaul Durrant 	unsigned int off;
4984ed1f50c3SPaul Durrant 	bool fragment;
4985f9708b43SJan Beulich 	__sum16 *csum;
4986ed1f50c3SPaul Durrant 	int err;
4987ed1f50c3SPaul Durrant 
4988ed1f50c3SPaul Durrant 	fragment = false;
4989ed1f50c3SPaul Durrant 
4990ed1f50c3SPaul Durrant 	err = skb_maybe_pull_tail(skb,
4991ed1f50c3SPaul Durrant 				  sizeof(struct iphdr),
4992ed1f50c3SPaul Durrant 				  MAX_IP_HDR_LEN);
4993ed1f50c3SPaul Durrant 	if (err < 0)
4994ed1f50c3SPaul Durrant 		goto out;
4995ed1f50c3SPaul Durrant 
499611f920d2SMiaohe Lin 	if (ip_is_fragment(ip_hdr(skb)))
4997ed1f50c3SPaul Durrant 		fragment = true;
4998ed1f50c3SPaul Durrant 
4999ed1f50c3SPaul Durrant 	off = ip_hdrlen(skb);
5000ed1f50c3SPaul Durrant 
5001ed1f50c3SPaul Durrant 	err = -EPROTO;
5002ed1f50c3SPaul Durrant 
5003ed1f50c3SPaul Durrant 	if (fragment)
5004ed1f50c3SPaul Durrant 		goto out;
5005ed1f50c3SPaul Durrant 
5006f9708b43SJan Beulich 	csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
5007f9708b43SJan Beulich 	if (IS_ERR(csum))
5008f9708b43SJan Beulich 		return PTR_ERR(csum);
5009ed1f50c3SPaul Durrant 
5010ed1f50c3SPaul Durrant 	if (recalculate)
5011f9708b43SJan Beulich 		*csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
5012ed1f50c3SPaul Durrant 					   ip_hdr(skb)->daddr,
5013ed1f50c3SPaul Durrant 					   skb->len - off,
5014f9708b43SJan Beulich 					   ip_hdr(skb)->protocol, 0);
5015ed1f50c3SPaul Durrant 	err = 0;
5016ed1f50c3SPaul Durrant 
5017ed1f50c3SPaul Durrant out:
5018ed1f50c3SPaul Durrant 	return err;
5019ed1f50c3SPaul Durrant }
5020ed1f50c3SPaul Durrant 
5021ed1f50c3SPaul Durrant /* This value should be large enough to cover a tagged ethernet header plus
5022ed1f50c3SPaul Durrant  * an IPv6 header, all options, and a maximal TCP or UDP header.
5023ed1f50c3SPaul Durrant  */
5024ed1f50c3SPaul Durrant #define MAX_IPV6_HDR_LEN 256
5025ed1f50c3SPaul Durrant 
5026ed1f50c3SPaul Durrant #define OPT_HDR(type, skb, off) \
5027ed1f50c3SPaul Durrant 	(type *)(skb_network_header(skb) + (off))
5028ed1f50c3SPaul Durrant 
5029ed1f50c3SPaul Durrant static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
5030ed1f50c3SPaul Durrant {
5031ed1f50c3SPaul Durrant 	int err;
5032ed1f50c3SPaul Durrant 	u8 nexthdr;
5033ed1f50c3SPaul Durrant 	unsigned int off;
5034ed1f50c3SPaul Durrant 	unsigned int len;
5035ed1f50c3SPaul Durrant 	bool fragment;
5036ed1f50c3SPaul Durrant 	bool done;
5037f9708b43SJan Beulich 	__sum16 *csum;
5038ed1f50c3SPaul Durrant 
5039ed1f50c3SPaul Durrant 	fragment = false;
5040ed1f50c3SPaul Durrant 	done = false;
5041ed1f50c3SPaul Durrant 
5042ed1f50c3SPaul Durrant 	off = sizeof(struct ipv6hdr);
5043ed1f50c3SPaul Durrant 
5044ed1f50c3SPaul Durrant 	err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
5045ed1f50c3SPaul Durrant 	if (err < 0)
5046ed1f50c3SPaul Durrant 		goto out;
5047ed1f50c3SPaul Durrant 
5048ed1f50c3SPaul Durrant 	nexthdr = ipv6_hdr(skb)->nexthdr;
5049ed1f50c3SPaul Durrant 
5050ed1f50c3SPaul Durrant 	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
5051ed1f50c3SPaul Durrant 	while (off <= len && !done) {
5052ed1f50c3SPaul Durrant 		switch (nexthdr) {
5053ed1f50c3SPaul Durrant 		case IPPROTO_DSTOPTS:
5054ed1f50c3SPaul Durrant 		case IPPROTO_HOPOPTS:
5055ed1f50c3SPaul Durrant 		case IPPROTO_ROUTING: {
5056ed1f50c3SPaul Durrant 			struct ipv6_opt_hdr *hp;
5057ed1f50c3SPaul Durrant 
5058ed1f50c3SPaul Durrant 			err = skb_maybe_pull_tail(skb,
5059ed1f50c3SPaul Durrant 						  off +
5060ed1f50c3SPaul Durrant 						  sizeof(struct ipv6_opt_hdr),
5061ed1f50c3SPaul Durrant 						  MAX_IPV6_HDR_LEN);
5062ed1f50c3SPaul Durrant 			if (err < 0)
5063ed1f50c3SPaul Durrant 				goto out;
5064ed1f50c3SPaul Durrant 
5065ed1f50c3SPaul Durrant 			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
5066ed1f50c3SPaul Durrant 			nexthdr = hp->nexthdr;
5067ed1f50c3SPaul Durrant 			off += ipv6_optlen(hp);
5068ed1f50c3SPaul Durrant 			break;
5069ed1f50c3SPaul Durrant 		}
5070ed1f50c3SPaul Durrant 		case IPPROTO_AH: {
5071ed1f50c3SPaul Durrant 			struct ip_auth_hdr *hp;
5072ed1f50c3SPaul Durrant 
5073ed1f50c3SPaul Durrant 			err = skb_maybe_pull_tail(skb,
5074ed1f50c3SPaul Durrant 						  off +
5075ed1f50c3SPaul Durrant 						  sizeof(struct ip_auth_hdr),
5076ed1f50c3SPaul Durrant 						  MAX_IPV6_HDR_LEN);
5077ed1f50c3SPaul Durrant 			if (err < 0)
5078ed1f50c3SPaul Durrant 				goto out;
5079ed1f50c3SPaul Durrant 
5080ed1f50c3SPaul Durrant 			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
5081ed1f50c3SPaul Durrant 			nexthdr = hp->nexthdr;
5082ed1f50c3SPaul Durrant 			off += ipv6_authlen(hp);
5083ed1f50c3SPaul Durrant 			break;
5084ed1f50c3SPaul Durrant 		}
5085ed1f50c3SPaul Durrant 		case IPPROTO_FRAGMENT: {
5086ed1f50c3SPaul Durrant 			struct frag_hdr *hp;
5087ed1f50c3SPaul Durrant 
5088ed1f50c3SPaul Durrant 			err = skb_maybe_pull_tail(skb,
5089ed1f50c3SPaul Durrant 						  off +
5090ed1f50c3SPaul Durrant 						  sizeof(struct frag_hdr),
5091ed1f50c3SPaul Durrant 						  MAX_IPV6_HDR_LEN);
5092ed1f50c3SPaul Durrant 			if (err < 0)
5093ed1f50c3SPaul Durrant 				goto out;
5094ed1f50c3SPaul Durrant 
5095ed1f50c3SPaul Durrant 			hp = OPT_HDR(struct frag_hdr, skb, off);
5096ed1f50c3SPaul Durrant 
5097ed1f50c3SPaul Durrant 			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
5098ed1f50c3SPaul Durrant 				fragment = true;
5099ed1f50c3SPaul Durrant 
5100ed1f50c3SPaul Durrant 			nexthdr = hp->nexthdr;
5101ed1f50c3SPaul Durrant 			off += sizeof(struct frag_hdr);
5102ed1f50c3SPaul Durrant 			break;
5103ed1f50c3SPaul Durrant 		}
5104ed1f50c3SPaul Durrant 		default:
5105ed1f50c3SPaul Durrant 			done = true;
5106ed1f50c3SPaul Durrant 			break;
5107ed1f50c3SPaul Durrant 		}
5108ed1f50c3SPaul Durrant 	}
5109ed1f50c3SPaul Durrant 
5110ed1f50c3SPaul Durrant 	err = -EPROTO;
5111ed1f50c3SPaul Durrant 
5112ed1f50c3SPaul Durrant 	if (!done || fragment)
5113ed1f50c3SPaul Durrant 		goto out;
5114ed1f50c3SPaul Durrant 
5115f9708b43SJan Beulich 	csum = skb_checksum_setup_ip(skb, nexthdr, off);
5116f9708b43SJan Beulich 	if (IS_ERR(csum))
5117f9708b43SJan Beulich 		return PTR_ERR(csum);
5118ed1f50c3SPaul Durrant 
5119ed1f50c3SPaul Durrant 	if (recalculate)
5120f9708b43SJan Beulich 		*csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5121ed1f50c3SPaul Durrant 					 &ipv6_hdr(skb)->daddr,
5122f9708b43SJan Beulich 					 skb->len - off, nexthdr, 0);
5123ed1f50c3SPaul Durrant 	err = 0;
5124ed1f50c3SPaul Durrant 
5125ed1f50c3SPaul Durrant out:
5126ed1f50c3SPaul Durrant 	return err;
5127ed1f50c3SPaul Durrant }
5128ed1f50c3SPaul Durrant 
5129ed1f50c3SPaul Durrant /**
5130ed1f50c3SPaul Durrant  * skb_checksum_setup - set up partial checksum offset
5131ed1f50c3SPaul Durrant  * @skb: the skb to set up
5132ed1f50c3SPaul Durrant  * @recalculate: if true the pseudo-header checksum will be recalculated
5133ed1f50c3SPaul Durrant  */
5134ed1f50c3SPaul Durrant int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
5135ed1f50c3SPaul Durrant {
5136ed1f50c3SPaul Durrant 	int err;
5137ed1f50c3SPaul Durrant 
5138ed1f50c3SPaul Durrant 	switch (skb->protocol) {
5139ed1f50c3SPaul Durrant 	case htons(ETH_P_IP):
5140f9708b43SJan Beulich 		err = skb_checksum_setup_ipv4(skb, recalculate);
5141ed1f50c3SPaul Durrant 		break;
5142ed1f50c3SPaul Durrant 
5143ed1f50c3SPaul Durrant 	case htons(ETH_P_IPV6):
5144ed1f50c3SPaul Durrant 		err = skb_checksum_setup_ipv6(skb, recalculate);
5145ed1f50c3SPaul Durrant 		break;
5146ed1f50c3SPaul Durrant 
5147ed1f50c3SPaul Durrant 	default:
5148ed1f50c3SPaul Durrant 		err = -EPROTO;
5149ed1f50c3SPaul Durrant 		break;
5150ed1f50c3SPaul Durrant 	}
5151ed1f50c3SPaul Durrant 
5152ed1f50c3SPaul Durrant 	return err;
5153ed1f50c3SPaul Durrant }
5154ed1f50c3SPaul Durrant EXPORT_SYMBOL(skb_checksum_setup);
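
/* Illustrative sketch: validating checksum metadata of a packet received
 * from an untrusted backend before handing it to the stack (error
 * handling is hypothetical):
 *
 *	err = skb_checksum_setup(skb, true);
 *	if (err) {
 *		kfree_skb(skb);
 *		return err;
 *	}
 */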
5155ed1f50c3SPaul Durrant 
51569afd85c9SLinus Lüssing /**
51579afd85c9SLinus Lüssing  * skb_checksum_maybe_trim - maybe trims the given skb
51589afd85c9SLinus Lüssing  * @skb: the skb to check
51599afd85c9SLinus Lüssing  * @transport_len: the data length beyond the network header
51609afd85c9SLinus Lüssing  *
51619afd85c9SLinus Lüssing  * Checks whether the given skb has data beyond the given transport length.
51629afd85c9SLinus Lüssing  * If so, returns a cloned skb trimmed to this transport length.
51639afd85c9SLinus Lüssing  * Otherwise returns the provided skb. Returns NULL in error cases
51649afd85c9SLinus Lüssing  * (e.g. transport_len exceeds skb length or out-of-memory).
51659afd85c9SLinus Lüssing  *
5166a516993fSLinus Lüssing  * Caller needs to set the skb transport header and free any returned skb if it
5167a516993fSLinus Lüssing  * differs from the provided skb.
51689afd85c9SLinus Lüssing  */
51699afd85c9SLinus Lüssing static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
51709afd85c9SLinus Lüssing 					       unsigned int transport_len)
51719afd85c9SLinus Lüssing {
51729afd85c9SLinus Lüssing 	struct sk_buff *skb_chk;
51739afd85c9SLinus Lüssing 	unsigned int len = skb_transport_offset(skb) + transport_len;
51749afd85c9SLinus Lüssing 	int ret;
51759afd85c9SLinus Lüssing 
5176a516993fSLinus Lüssing 	if (skb->len < len)
51779afd85c9SLinus Lüssing 		return NULL;
5178a516993fSLinus Lüssing 	else if (skb->len == len)
51799afd85c9SLinus Lüssing 		return skb;
51809afd85c9SLinus Lüssing 
51819afd85c9SLinus Lüssing 	skb_chk = skb_clone(skb, GFP_ATOMIC);
51829afd85c9SLinus Lüssing 	if (!skb_chk)
51839afd85c9SLinus Lüssing 		return NULL;
51849afd85c9SLinus Lüssing 
51859afd85c9SLinus Lüssing 	ret = pskb_trim_rcsum(skb_chk, len);
51869afd85c9SLinus Lüssing 	if (ret) {
51879afd85c9SLinus Lüssing 		kfree_skb(skb_chk);
51889afd85c9SLinus Lüssing 		return NULL;
51899afd85c9SLinus Lüssing 	}
51909afd85c9SLinus Lüssing 
51919afd85c9SLinus Lüssing 	return skb_chk;
51929afd85c9SLinus Lüssing }
51939afd85c9SLinus Lüssing 
51949afd85c9SLinus Lüssing /**
51959afd85c9SLinus Lüssing  * skb_checksum_trimmed - validate checksum of an skb
51969afd85c9SLinus Lüssing  * @skb: the skb to check
51979afd85c9SLinus Lüssing  * @transport_len: the data length beyond the network header
51989afd85c9SLinus Lüssing  * @skb_chkf: checksum function to use
51999afd85c9SLinus Lüssing  *
52009afd85c9SLinus Lüssing  * Applies the given checksum function skb_chkf to the provided skb.
52019afd85c9SLinus Lüssing  * Returns a checked and maybe trimmed skb. Returns NULL on error.
52029afd85c9SLinus Lüssing  *
52039afd85c9SLinus Lüssing  * If the skb has data beyond the given transport length, then a
52049afd85c9SLinus Lüssing  * trimmed & cloned skb is checked and returned.
52059afd85c9SLinus Lüssing  *
5206a516993fSLinus Lüssing  * Caller needs to set the skb transport header and free any returned skb if it
5207a516993fSLinus Lüssing  * differs from the provided skb.
52089afd85c9SLinus Lüssing  */
52099afd85c9SLinus Lüssing struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
52109afd85c9SLinus Lüssing 				     unsigned int transport_len,
52119afd85c9SLinus Lüssing 				     __sum16(*skb_chkf)(struct sk_buff *skb))
52129afd85c9SLinus Lüssing {
52139afd85c9SLinus Lüssing 	struct sk_buff *skb_chk;
52149afd85c9SLinus Lüssing 	unsigned int offset = skb_transport_offset(skb);
5215fcba67c9SLinus Lüssing 	__sum16 ret;
52169afd85c9SLinus Lüssing 
52179afd85c9SLinus Lüssing 	skb_chk = skb_checksum_maybe_trim(skb, transport_len);
52189afd85c9SLinus Lüssing 	if (!skb_chk)
5219a516993fSLinus Lüssing 		goto err;
52209afd85c9SLinus Lüssing 
5221a516993fSLinus Lüssing 	if (!pskb_may_pull(skb_chk, offset))
5222a516993fSLinus Lüssing 		goto err;
52239afd85c9SLinus Lüssing 
52249b368814SLinus Lüssing 	skb_pull_rcsum(skb_chk, offset);
52259afd85c9SLinus Lüssing 	ret = skb_chkf(skb_chk);
52269b368814SLinus Lüssing 	skb_push_rcsum(skb_chk, offset);
52279afd85c9SLinus Lüssing 
5228a516993fSLinus Lüssing 	if (ret)
5229a516993fSLinus Lüssing 		goto err;
52309afd85c9SLinus Lüssing 
52319afd85c9SLinus Lüssing 	return skb_chk;
5232a516993fSLinus Lüssing 
5233a516993fSLinus Lüssing err:
5234a516993fSLinus Lüssing 	if (skb_chk && skb_chk != skb)
5235a516993fSLinus Lüssing 		kfree_skb(skb_chk);
5236a516993fSLinus Lüssing 
5237a516993fSLinus Lüssing 	return NULL;
5238a516993fSLinus Lüssing 
52399afd85c9SLinus Lüssing }
52409afd85c9SLinus Lüssing EXPORT_SYMBOL(skb_checksum_trimmed);
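
/* Illustrative sketch, assuming a hypothetical my_chkf() with the
 * __sum16 (*)(struct sk_buff *) signature expected by @skb_chkf:
 *
 *	skb_set_transport_header(skb, hdr_len);
 *	skb_chk = skb_checksum_trimmed(skb, transport_len, my_chkf);
 *	if (!skb_chk)
 *		return -EINVAL;
 *
 *	... use skb_chk ...
 *
 *	if (skb_chk != skb)
 *		kfree_skb(skb_chk);
 */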
52419afd85c9SLinus Lüssing 
52424497b076SBen Hutchings void __skb_warn_lro_forwarding(const struct sk_buff *skb)
52434497b076SBen Hutchings {
5244e87cc472SJoe Perches 	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
5245e87cc472SJoe Perches 			     skb->dev->name);
52464497b076SBen Hutchings }
52474497b076SBen Hutchings EXPORT_SYMBOL(__skb_warn_lro_forwarding);
5248bad43ca8SEric Dumazet 
5249bad43ca8SEric Dumazet void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
5250bad43ca8SEric Dumazet {
52513d861f66SEric Dumazet 	if (head_stolen) {
52523d861f66SEric Dumazet 		skb_release_head_state(skb);
5253bad43ca8SEric Dumazet 		kmem_cache_free(skbuff_head_cache, skb);
52543d861f66SEric Dumazet 	} else {
5255bad43ca8SEric Dumazet 		__kfree_skb(skb);
5256bad43ca8SEric Dumazet 	}
52573d861f66SEric Dumazet }
5258bad43ca8SEric Dumazet EXPORT_SYMBOL(kfree_skb_partial);
5259bad43ca8SEric Dumazet 
5260bad43ca8SEric Dumazet /**
5261bad43ca8SEric Dumazet  * skb_try_coalesce - try to merge skb to prior one
5262bad43ca8SEric Dumazet  * @to: prior buffer
5263bad43ca8SEric Dumazet  * @from: buffer to add
5264bad43ca8SEric Dumazet  * @fragstolen: pointer to boolean
5265c6c4b97cSRandy Dunlap  * @delta_truesize: how much more was allocated than was requested
5266bad43ca8SEric Dumazet  */
5267bad43ca8SEric Dumazet bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
5268bad43ca8SEric Dumazet 		      bool *fragstolen, int *delta_truesize)
5269bad43ca8SEric Dumazet {
5270c818fa9eSEric Dumazet 	struct skb_shared_info *to_shinfo, *from_shinfo;
5271bad43ca8SEric Dumazet 	int i, delta, len = from->len;
5272bad43ca8SEric Dumazet 
5273bad43ca8SEric Dumazet 	*fragstolen = false;
5274bad43ca8SEric Dumazet 
5275bad43ca8SEric Dumazet 	if (skb_cloned(to))
5276bad43ca8SEric Dumazet 		return false;
5277bad43ca8SEric Dumazet 
52786a5bcd84SIlias Apalodimas 	/* The page pool signature of struct page will eventually let us figure out
52796a5bcd84SIlias Apalodimas 	 * which pages can be recycled or not, but for now let's prohibit slab-allocated
52806a5bcd84SIlias Apalodimas 	 * and page_pool-allocated SKBs from being coalesced.
52816a5bcd84SIlias Apalodimas 	 */
52826a5bcd84SIlias Apalodimas 	if (to->pp_recycle != from->pp_recycle)
52836a5bcd84SIlias Apalodimas 		return false;
52846a5bcd84SIlias Apalodimas 
5285bad43ca8SEric Dumazet 	if (len <= skb_tailroom(to)) {
5286e93a0435SEric Dumazet 		if (len)
5287bad43ca8SEric Dumazet 			BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
5288bad43ca8SEric Dumazet 		*delta_truesize = 0;
5289bad43ca8SEric Dumazet 		return true;
5290bad43ca8SEric Dumazet 	}
5291bad43ca8SEric Dumazet 
5292c818fa9eSEric Dumazet 	to_shinfo = skb_shinfo(to);
5293c818fa9eSEric Dumazet 	from_shinfo = skb_shinfo(from);
5294c818fa9eSEric Dumazet 	if (to_shinfo->frag_list || from_shinfo->frag_list)
5295bad43ca8SEric Dumazet 		return false;
52961f8b977aSWillem de Bruijn 	if (skb_zcopy(to) || skb_zcopy(from))
52971f8b977aSWillem de Bruijn 		return false;
5298bad43ca8SEric Dumazet 
5299bad43ca8SEric Dumazet 	if (skb_headlen(from) != 0) {
5300bad43ca8SEric Dumazet 		struct page *page;
5301bad43ca8SEric Dumazet 		unsigned int offset;
5302bad43ca8SEric Dumazet 
5303c818fa9eSEric Dumazet 		if (to_shinfo->nr_frags +
5304c818fa9eSEric Dumazet 		    from_shinfo->nr_frags >= MAX_SKB_FRAGS)
5305bad43ca8SEric Dumazet 			return false;
5306bad43ca8SEric Dumazet 
5307bad43ca8SEric Dumazet 		if (skb_head_is_locked(from))
5308bad43ca8SEric Dumazet 			return false;
5309bad43ca8SEric Dumazet 
5310bad43ca8SEric Dumazet 		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
5311bad43ca8SEric Dumazet 
5312bad43ca8SEric Dumazet 		page = virt_to_head_page(from->head);
5313bad43ca8SEric Dumazet 		offset = from->data - (unsigned char *)page_address(page);
5314bad43ca8SEric Dumazet 
5315c818fa9eSEric Dumazet 		skb_fill_page_desc(to, to_shinfo->nr_frags,
5316bad43ca8SEric Dumazet 				   page, offset, skb_headlen(from));
5317bad43ca8SEric Dumazet 		*fragstolen = true;
5318bad43ca8SEric Dumazet 	} else {
5319c818fa9eSEric Dumazet 		if (to_shinfo->nr_frags +
5320c818fa9eSEric Dumazet 		    from_shinfo->nr_frags > MAX_SKB_FRAGS)
5321bad43ca8SEric Dumazet 			return false;
5322bad43ca8SEric Dumazet 
5323f4b549a5SWeiping Pan 		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
5324bad43ca8SEric Dumazet 	}
5325bad43ca8SEric Dumazet 
5326bad43ca8SEric Dumazet 	WARN_ON_ONCE(delta < len);
5327bad43ca8SEric Dumazet 
5328c818fa9eSEric Dumazet 	memcpy(to_shinfo->frags + to_shinfo->nr_frags,
5329c818fa9eSEric Dumazet 	       from_shinfo->frags,
5330c818fa9eSEric Dumazet 	       from_shinfo->nr_frags * sizeof(skb_frag_t));
5331c818fa9eSEric Dumazet 	to_shinfo->nr_frags += from_shinfo->nr_frags;
5332bad43ca8SEric Dumazet 
5333bad43ca8SEric Dumazet 	if (!skb_cloned(from))
5334c818fa9eSEric Dumazet 		from_shinfo->nr_frags = 0;
5335bad43ca8SEric Dumazet 
53368ea853fdSLi RongQing 	/* if the skb is not cloned this does nothing
53378ea853fdSLi RongQing 	 * since we set nr_frags to 0.
53388ea853fdSLi RongQing 	 */
5339c818fa9eSEric Dumazet 	for (i = 0; i < from_shinfo->nr_frags; i++)
5340c818fa9eSEric Dumazet 		__skb_frag_ref(&from_shinfo->frags[i]);
5341bad43ca8SEric Dumazet 
5342bad43ca8SEric Dumazet 	to->truesize += delta;
5343bad43ca8SEric Dumazet 	to->len += len;
5344bad43ca8SEric Dumazet 	to->data_len += len;
5345bad43ca8SEric Dumazet 
5346bad43ca8SEric Dumazet 	*delta_truesize = delta;
5347bad43ca8SEric Dumazet 	return true;
5348bad43ca8SEric Dumazet }
5349bad43ca8SEric Dumazet EXPORT_SYMBOL(skb_try_coalesce);
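
/* Illustrative sketch of the usual receive-queue pattern (charging
 * 'delta' to the receive memory accounting is left to the hypothetical
 * caller):
 *
 *	bool fragstolen;
 *	int delta;
 *
 *	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
 *		kfree_skb_partial(skb, fragstolen);
 *		... account delta ...
 *	}
 */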
5350621e84d6SNicolas Dichtel 
5351621e84d6SNicolas Dichtel /**
53528b27f277SNicolas Dichtel  * skb_scrub_packet - scrub an skb
5353621e84d6SNicolas Dichtel  *
5354621e84d6SNicolas Dichtel  * @skb: buffer to clean
53558b27f277SNicolas Dichtel  * @xnet: packet is crossing netns
5356621e84d6SNicolas Dichtel  *
53578b27f277SNicolas Dichtel  * skb_scrub_packet can be used after encapsulating or decapsulating a packet
53588b27f277SNicolas Dichtel  * into/from a tunnel. Some information has to be cleared during these
53598b27f277SNicolas Dichtel  * operations.
53608b27f277SNicolas Dichtel  * skb_scrub_packet can also be used to clean a skb before injecting it in
53618b27f277SNicolas Dichtel  * another namespace (@xnet == true). We have to clear all information in the
53628b27f277SNicolas Dichtel  * skb that could impact namespace isolation.
5363621e84d6SNicolas Dichtel  */
53648b27f277SNicolas Dichtel void skb_scrub_packet(struct sk_buff *skb, bool xnet)
5365621e84d6SNicolas Dichtel {
5366621e84d6SNicolas Dichtel 	skb->pkt_type = PACKET_HOST;
5367621e84d6SNicolas Dichtel 	skb->skb_iif = 0;
536860ff7467SWANG Cong 	skb->ignore_df = 0;
5369621e84d6SNicolas Dichtel 	skb_dst_drop(skb);
5370174e2381SFlorian Westphal 	skb_ext_reset(skb);
5371895b5c9fSFlorian Westphal 	nf_reset_ct(skb);
5372621e84d6SNicolas Dichtel 	nf_reset_trace(skb);
5373213dd74aSHerbert Xu 
53746f9a5069SPetr Machata #ifdef CONFIG_NET_SWITCHDEV
53756f9a5069SPetr Machata 	skb->offload_fwd_mark = 0;
5376875e8939SIdo Schimmel 	skb->offload_l3_fwd_mark = 0;
53776f9a5069SPetr Machata #endif
53786f9a5069SPetr Machata 
5379213dd74aSHerbert Xu 	if (!xnet)
5380213dd74aSHerbert Xu 		return;
5381213dd74aSHerbert Xu 
53822b5ec1a5SYe Yin 	ipvs_reset(skb);
5383213dd74aSHerbert Xu 	skb->mark = 0;
5384*de799101SMartin KaFai Lau 	skb_clear_tstamp(skb);
5385621e84d6SNicolas Dichtel }
5386621e84d6SNicolas Dichtel EXPORT_SYMBOL_GPL(skb_scrub_packet);
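
/* Illustrative sketch, not part of the original file: how a tunnel receive
 * path might use skb_scrub_packet() when a packet may cross a network
 * namespace. The helper name example_tunnel_scrub() and both device
 * pointers are hypothetical; the xnet test mirrors common in-tree callers.
 */
static void example_tunnel_scrub(struct sk_buff *skb,
				 struct net_device *in_dev,
				 struct net_device *out_dev)
{
	bool xnet = !net_eq(dev_net(in_dev), dev_net(out_dev));

	/* clear state that must not leak across the tunnel / namespace */
	skb_scrub_packet(skb, xnet);
}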
5387de960aa9SFlorian Westphal 
5388de960aa9SFlorian Westphal /**
5389de960aa9SFlorian Westphal  * skb_gso_transport_seglen - Return length of individual segments of a gso packet
5390de960aa9SFlorian Westphal  *
5391de960aa9SFlorian Westphal  * @skb: GSO skb
5392de960aa9SFlorian Westphal  *
5393de960aa9SFlorian Westphal  * skb_gso_transport_seglen is used to determine the real size of the
5394de960aa9SFlorian Westphal  * individual segments, including Layer4 headers (TCP/UDP).
5395de960aa9SFlorian Westphal  *
5396de960aa9SFlorian Westphal  * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
5397de960aa9SFlorian Westphal  */
5398a4a77718SDaniel Axtens static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
5399de960aa9SFlorian Westphal {
5400de960aa9SFlorian Westphal 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
5401f993bc25SFlorian Westphal 	unsigned int thlen = 0;
5402f993bc25SFlorian Westphal 
5403f993bc25SFlorian Westphal 	if (skb->encapsulation) {
5404f993bc25SFlorian Westphal 		thlen = skb_inner_transport_header(skb) -
5405f993bc25SFlorian Westphal 			skb_transport_header(skb);
5406de960aa9SFlorian Westphal 
5407de960aa9SFlorian Westphal 		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
5408f993bc25SFlorian Westphal 			thlen += inner_tcp_hdrlen(skb);
5409f993bc25SFlorian Westphal 	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
5410f993bc25SFlorian Westphal 		thlen = tcp_hdrlen(skb);
54111dd27cdeSDaniel Axtens 	} else if (unlikely(skb_is_gso_sctp(skb))) {
541290017accSMarcelo Ricardo Leitner 		thlen = sizeof(struct sctphdr);
5413ee80d1ebSWillem de Bruijn 	} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
5414ee80d1ebSWillem de Bruijn 		thlen = sizeof(struct udphdr);
5415f993bc25SFlorian Westphal 	}
54166d39d589SFlorian Westphal 	/* UFO sets gso_size to the size of the fragmentation
54176d39d589SFlorian Westphal 	 * payload, i.e. the size of the L4 (UDP) header is already
54186d39d589SFlorian Westphal 	 * accounted for.
54196d39d589SFlorian Westphal 	 */
5420f993bc25SFlorian Westphal 	return thlen + shinfo->gso_size;
5421de960aa9SFlorian Westphal }
5422a4a77718SDaniel Axtens 
5423a4a77718SDaniel Axtens /**
5424a4a77718SDaniel Axtens  * skb_gso_network_seglen - Return length of individual segments of a gso packet
5425a4a77718SDaniel Axtens  *
5426a4a77718SDaniel Axtens  * @skb: GSO skb
5427a4a77718SDaniel Axtens  *
5428a4a77718SDaniel Axtens  * skb_gso_network_seglen is used to determine the real size of the
5429a4a77718SDaniel Axtens  * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
5430a4a77718SDaniel Axtens  *
5431a4a77718SDaniel Axtens  * The MAC/L2 header is not accounted for.
5432a4a77718SDaniel Axtens  */
5433a4a77718SDaniel Axtens static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
5434a4a77718SDaniel Axtens {
5435a4a77718SDaniel Axtens 	unsigned int hdr_len = skb_transport_header(skb) -
5436a4a77718SDaniel Axtens 			       skb_network_header(skb);
5437a4a77718SDaniel Axtens 
5438a4a77718SDaniel Axtens 	return hdr_len + skb_gso_transport_seglen(skb);
5439a4a77718SDaniel Axtens }
5440a4a77718SDaniel Axtens 
5441a4a77718SDaniel Axtens /**
5442a4a77718SDaniel Axtens  * skb_gso_mac_seglen - Return length of individual segments of a gso packet
5443a4a77718SDaniel Axtens  *
5444a4a77718SDaniel Axtens  * @skb: GSO skb
5445a4a77718SDaniel Axtens  *
5446a4a77718SDaniel Axtens  * skb_gso_mac_seglen is used to determine the real size of the
5447a4a77718SDaniel Axtens  * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
5448a4a77718SDaniel Axtens  * headers (TCP/UDP).
5449a4a77718SDaniel Axtens  */
5450a4a77718SDaniel Axtens static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
5451a4a77718SDaniel Axtens {
5452a4a77718SDaniel Axtens 	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
5453a4a77718SDaniel Axtens 
5454a4a77718SDaniel Axtens 	return hdr_len + skb_gso_transport_seglen(skb);
5455a4a77718SDaniel Axtens }
54560d5501c1SVlad Yasevich 
5457ae7ef81eSMarcelo Ricardo Leitner /**
54582b16f048SDaniel Axtens  * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
54592b16f048SDaniel Axtens  *
54602b16f048SDaniel Axtens  * There are a couple of instances where we have a GSO skb, and we
54612b16f048SDaniel Axtens  * want to determine what size it would be after it is segmented.
54622b16f048SDaniel Axtens  *
54632b16f048SDaniel Axtens  * We might want to check:
54642b16f048SDaniel Axtens  * -    L3+L4+payload size (e.g. IP forwarding)
54652b16f048SDaniel Axtens  * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
54662b16f048SDaniel Axtens  *
54672b16f048SDaniel Axtens  * This is a helper to do that correctly considering GSO_BY_FRAGS.
54682b16f048SDaniel Axtens  *
546949682bfaSMathieu Malaterre  * @skb: GSO skb
547049682bfaSMathieu Malaterre  *
54712b16f048SDaniel Axtens  * @seg_len: The segmented length (from skb_gso_*_seglen). In the
54722b16f048SDaniel Axtens  *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
54732b16f048SDaniel Axtens  *
54742b16f048SDaniel Axtens  * @max_len: The maximum permissible length.
54752b16f048SDaniel Axtens  *
54762b16f048SDaniel Axtens  * Returns true if the segmented length <= max length.
54772b16f048SDaniel Axtens  */
54782b16f048SDaniel Axtens static inline bool skb_gso_size_check(const struct sk_buff *skb,
54792b16f048SDaniel Axtens 				      unsigned int seg_len,
54802b16f048SDaniel Axtens 				      unsigned int max_len) {
54812b16f048SDaniel Axtens 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
54822b16f048SDaniel Axtens 	const struct sk_buff *iter;
54832b16f048SDaniel Axtens 
54842b16f048SDaniel Axtens 	if (shinfo->gso_size != GSO_BY_FRAGS)
54852b16f048SDaniel Axtens 		return seg_len <= max_len;
54862b16f048SDaniel Axtens 
54872b16f048SDaniel Axtens 	/* Undo this so we can re-use header sizes */
54882b16f048SDaniel Axtens 	seg_len -= GSO_BY_FRAGS;
54892b16f048SDaniel Axtens 
54902b16f048SDaniel Axtens 	skb_walk_frags(skb, iter) {
54912b16f048SDaniel Axtens 		if (seg_len + skb_headlen(iter) > max_len)
54922b16f048SDaniel Axtens 			return false;
54932b16f048SDaniel Axtens 	}
54942b16f048SDaniel Axtens 
54952b16f048SDaniel Axtens 	return true;
54962b16f048SDaniel Axtens }
54972b16f048SDaniel Axtens 
54982b16f048SDaniel Axtens /**
5499779b7931SDaniel Axtens  * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
5500ae7ef81eSMarcelo Ricardo Leitner  *
5501ae7ef81eSMarcelo Ricardo Leitner  * @skb: GSO skb
550276f21b99SDavid S. Miller  * @mtu: MTU to validate against
5503ae7ef81eSMarcelo Ricardo Leitner  *
5504779b7931SDaniel Axtens  * skb_gso_validate_network_len validates if a given skb will fit a
5505779b7931SDaniel Axtens  * wanted MTU once split. It considers L3 headers, L4 headers, and the
5506779b7931SDaniel Axtens  * payload.
5507ae7ef81eSMarcelo Ricardo Leitner  */
5508779b7931SDaniel Axtens bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
5509ae7ef81eSMarcelo Ricardo Leitner {
55102b16f048SDaniel Axtens 	return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
5511ae7ef81eSMarcelo Ricardo Leitner }
5512779b7931SDaniel Axtens EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
5513ae7ef81eSMarcelo Ricardo Leitner 
55142b16f048SDaniel Axtens /**
55152b16f048SDaniel Axtens  * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
55162b16f048SDaniel Axtens  *
55172b16f048SDaniel Axtens  * @skb: GSO skb
55182b16f048SDaniel Axtens  * @len: length to validate against
55192b16f048SDaniel Axtens  *
55202b16f048SDaniel Axtens  * skb_gso_validate_mac_len validates if a given skb will fit a wanted
55212b16f048SDaniel Axtens  * length once split, including L2, L3 and L4 headers and the payload.
55222b16f048SDaniel Axtens  */
55232b16f048SDaniel Axtens bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
55242b16f048SDaniel Axtens {
55252b16f048SDaniel Axtens 	return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
55262b16f048SDaniel Axtens }
55272b16f048SDaniel Axtens EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
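
/* Illustrative sketch, not part of the original file: checking whether a GSO
 * skb would still fit a path MTU after segmentation, as a forwarding path
 * might do. example_fits_mtu() is a hypothetical helper name.
 */
static bool example_fits_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return true;

	/* For GSO, compare the post-segmentation L3+L4+payload size. */
	if (skb_is_gso(skb))
		return skb_gso_validate_network_len(skb, mtu);

	return false;
}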
55282b16f048SDaniel Axtens 
55290d5501c1SVlad Yasevich static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
55300d5501c1SVlad Yasevich {
5531d85e8be2SYuya Kusakabe 	int mac_len, meta_len;
5532d85e8be2SYuya Kusakabe 	void *meta;
55334bbb3e0eSToshiaki Makita 
55340d5501c1SVlad Yasevich 	if (skb_cow(skb, skb_headroom(skb)) < 0) {
55350d5501c1SVlad Yasevich 		kfree_skb(skb);
55360d5501c1SVlad Yasevich 		return NULL;
55370d5501c1SVlad Yasevich 	}
55380d5501c1SVlad Yasevich 
55394bbb3e0eSToshiaki Makita 	mac_len = skb->data - skb_mac_header(skb);
5540ae474573SToshiaki Makita 	if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
55414bbb3e0eSToshiaki Makita 		memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
55424bbb3e0eSToshiaki Makita 			mac_len - VLAN_HLEN - ETH_TLEN);
5543ae474573SToshiaki Makita 	}
5544d85e8be2SYuya Kusakabe 
5545d85e8be2SYuya Kusakabe 	meta_len = skb_metadata_len(skb);
5546d85e8be2SYuya Kusakabe 	if (meta_len) {
5547d85e8be2SYuya Kusakabe 		meta = skb_metadata_end(skb) - meta_len;
5548d85e8be2SYuya Kusakabe 		memmove(meta + VLAN_HLEN, meta, meta_len);
5549d85e8be2SYuya Kusakabe 	}
5550d85e8be2SYuya Kusakabe 
55510d5501c1SVlad Yasevich 	skb->mac_header += VLAN_HLEN;
55520d5501c1SVlad Yasevich 	return skb;
55530d5501c1SVlad Yasevich }
55540d5501c1SVlad Yasevich 
55550d5501c1SVlad Yasevich struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
55560d5501c1SVlad Yasevich {
55570d5501c1SVlad Yasevich 	struct vlan_hdr *vhdr;
55580d5501c1SVlad Yasevich 	u16 vlan_tci;
55590d5501c1SVlad Yasevich 
5560df8a39deSJiri Pirko 	if (unlikely(skb_vlan_tag_present(skb))) {
55610d5501c1SVlad Yasevich 		/* vlan_tci is already set up, so leave this for another time */
55620d5501c1SVlad Yasevich 		return skb;
55630d5501c1SVlad Yasevich 	}
55640d5501c1SVlad Yasevich 
55650d5501c1SVlad Yasevich 	skb = skb_share_check(skb, GFP_ATOMIC);
55660d5501c1SVlad Yasevich 	if (unlikely(!skb))
55670d5501c1SVlad Yasevich 		goto err_free;
556855eff0ebSMiaohe Lin 	/* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
556955eff0ebSMiaohe Lin 	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
55700d5501c1SVlad Yasevich 		goto err_free;
55710d5501c1SVlad Yasevich 
55720d5501c1SVlad Yasevich 	vhdr = (struct vlan_hdr *)skb->data;
55730d5501c1SVlad Yasevich 	vlan_tci = ntohs(vhdr->h_vlan_TCI);
55740d5501c1SVlad Yasevich 	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
55750d5501c1SVlad Yasevich 
55760d5501c1SVlad Yasevich 	skb_pull_rcsum(skb, VLAN_HLEN);
55770d5501c1SVlad Yasevich 	vlan_set_encap_proto(skb, vhdr);
55780d5501c1SVlad Yasevich 
55790d5501c1SVlad Yasevich 	skb = skb_reorder_vlan_header(skb);
55800d5501c1SVlad Yasevich 	if (unlikely(!skb))
55810d5501c1SVlad Yasevich 		goto err_free;
55820d5501c1SVlad Yasevich 
55830d5501c1SVlad Yasevich 	skb_reset_network_header(skb);
55848be33ecfSAlexander Lobakin 	if (!skb_transport_header_was_set(skb))
55850d5501c1SVlad Yasevich 		skb_reset_transport_header(skb);
55860d5501c1SVlad Yasevich 	skb_reset_mac_len(skb);
55870d5501c1SVlad Yasevich 
55880d5501c1SVlad Yasevich 	return skb;
55890d5501c1SVlad Yasevich 
55900d5501c1SVlad Yasevich err_free:
55910d5501c1SVlad Yasevich 	kfree_skb(skb);
55920d5501c1SVlad Yasevich 	return NULL;
55930d5501c1SVlad Yasevich }
55940d5501c1SVlad Yasevich EXPORT_SYMBOL(skb_vlan_untag);
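
/* Illustrative sketch, not part of the original file: a minimal receive-path
 * caller of skb_vlan_untag(), in the spirit of __netif_receive_skb_core().
 * example_rx_untag() is a hypothetical helper name. Note that on failure the
 * skb has already been freed.
 */
static struct sk_buff *example_rx_untag(struct sk_buff *skb)
{
	if (eth_type_vlan(skb->protocol)) {
		skb = skb_vlan_untag(skb);	/* moves the tag into hwaccel metadata */
		if (unlikely(!skb))
			return NULL;		/* skb was consumed on error */
	}
	return skb;
}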
55952e4e4410SEric Dumazet 
5596e2195121SJiri Pirko int skb_ensure_writable(struct sk_buff *skb, int write_len)
5597e2195121SJiri Pirko {
5598e2195121SJiri Pirko 	if (!pskb_may_pull(skb, write_len))
5599e2195121SJiri Pirko 		return -ENOMEM;
5600e2195121SJiri Pirko 
5601e2195121SJiri Pirko 	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
5602e2195121SJiri Pirko 		return 0;
5603e2195121SJiri Pirko 
5604e2195121SJiri Pirko 	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5605e2195121SJiri Pirko }
5606e2195121SJiri Pirko EXPORT_SYMBOL(skb_ensure_writable);
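
/* Illustrative sketch, not part of the original file: making the first bytes
 * of a packet writable before editing them, assuming skb->data sits at the
 * mac header. example_set_dest() and new_dest are hypothetical.
 */
static int example_set_dest(struct sk_buff *skb, const unsigned char *new_dest)
{
	int err;

	/* Pulls the header into the linear area and unshares it if needed. */
	err = skb_ensure_writable(skb, ETH_HLEN);
	if (err)
		return err;

	ether_addr_copy(eth_hdr(skb)->h_dest, new_dest);
	return 0;
}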
5607e2195121SJiri Pirko 
5608bfca4c52SShmulik Ladkani /* Remove the VLAN header from the packet and update the csum accordingly.
5609bfca4c52SShmulik Ladkani  * Expects an skb without skb_vlan_tag_present, but with a VLAN tag in the payload.
5610bfca4c52SShmulik Ladkani  */
5611bfca4c52SShmulik Ladkani int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
561293515d53SJiri Pirko {
561393515d53SJiri Pirko 	struct vlan_hdr *vhdr;
5614b6a79208SShmulik Ladkani 	int offset = skb->data - skb_mac_header(skb);
561593515d53SJiri Pirko 	int err;
561693515d53SJiri Pirko 
5617b6a79208SShmulik Ladkani 	if (WARN_ONCE(offset,
5618b6a79208SShmulik Ladkani 		      "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
5619b6a79208SShmulik Ladkani 		      offset)) {
5620b6a79208SShmulik Ladkani 		return -EINVAL;
5621b6a79208SShmulik Ladkani 	}
5622b6a79208SShmulik Ladkani 
562393515d53SJiri Pirko 	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
562493515d53SJiri Pirko 	if (unlikely(err))
5625b6a79208SShmulik Ladkani 		return err;
562693515d53SJiri Pirko 
562793515d53SJiri Pirko 	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
562893515d53SJiri Pirko 
562993515d53SJiri Pirko 	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
563093515d53SJiri Pirko 	*vlan_tci = ntohs(vhdr->h_vlan_TCI);
563193515d53SJiri Pirko 
563293515d53SJiri Pirko 	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
563393515d53SJiri Pirko 	__skb_pull(skb, VLAN_HLEN);
563493515d53SJiri Pirko 
563593515d53SJiri Pirko 	vlan_set_encap_proto(skb, vhdr);
563693515d53SJiri Pirko 	skb->mac_header += VLAN_HLEN;
563793515d53SJiri Pirko 
563893515d53SJiri Pirko 	if (skb_network_offset(skb) < ETH_HLEN)
563993515d53SJiri Pirko 		skb_set_network_header(skb, ETH_HLEN);
564093515d53SJiri Pirko 
564193515d53SJiri Pirko 	skb_reset_mac_len(skb);
564293515d53SJiri Pirko 
564393515d53SJiri Pirko 	return err;
564493515d53SJiri Pirko }
5645bfca4c52SShmulik Ladkani EXPORT_SYMBOL(__skb_vlan_pop);
564693515d53SJiri Pirko 
5647b6a79208SShmulik Ladkani /* Pop a vlan tag either from hwaccel or from payload.
5648b6a79208SShmulik Ladkani  * Expects skb->data at mac header.
5649b6a79208SShmulik Ladkani  */
565093515d53SJiri Pirko int skb_vlan_pop(struct sk_buff *skb)
565193515d53SJiri Pirko {
565293515d53SJiri Pirko 	u16 vlan_tci;
565393515d53SJiri Pirko 	__be16 vlan_proto;
565493515d53SJiri Pirko 	int err;
565593515d53SJiri Pirko 
5656df8a39deSJiri Pirko 	if (likely(skb_vlan_tag_present(skb))) {
5657b1817524SMichał Mirosław 		__vlan_hwaccel_clear_tag(skb);
565893515d53SJiri Pirko 	} else {
5659ecf4ee41SShmulik Ladkani 		if (unlikely(!eth_type_vlan(skb->protocol)))
566093515d53SJiri Pirko 			return 0;
566193515d53SJiri Pirko 
566293515d53SJiri Pirko 		err = __skb_vlan_pop(skb, &vlan_tci);
566393515d53SJiri Pirko 		if (err)
566493515d53SJiri Pirko 			return err;
566593515d53SJiri Pirko 	}
566693515d53SJiri Pirko 	/* move next vlan tag to hw accel tag */
5667ecf4ee41SShmulik Ladkani 	if (likely(!eth_type_vlan(skb->protocol)))
566893515d53SJiri Pirko 		return 0;
566993515d53SJiri Pirko 
567093515d53SJiri Pirko 	vlan_proto = skb->protocol;
567193515d53SJiri Pirko 	err = __skb_vlan_pop(skb, &vlan_tci);
567293515d53SJiri Pirko 	if (unlikely(err))
567393515d53SJiri Pirko 		return err;
567493515d53SJiri Pirko 
567593515d53SJiri Pirko 	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
567693515d53SJiri Pirko 	return 0;
567793515d53SJiri Pirko }
567893515d53SJiri Pirko EXPORT_SYMBOL(skb_vlan_pop);
567993515d53SJiri Pirko 
5680b6a79208SShmulik Ladkani /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
5681b6a79208SShmulik Ladkani  * Expects skb->data at mac header.
5682b6a79208SShmulik Ladkani  */
568393515d53SJiri Pirko int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
568493515d53SJiri Pirko {
5685df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb)) {
5686b6a79208SShmulik Ladkani 		int offset = skb->data - skb_mac_header(skb);
568793515d53SJiri Pirko 		int err;
568893515d53SJiri Pirko 
5689b6a79208SShmulik Ladkani 		if (WARN_ONCE(offset,
5690b6a79208SShmulik Ladkani 			      "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
5691b6a79208SShmulik Ladkani 			      offset)) {
5692b6a79208SShmulik Ladkani 			return -EINVAL;
5693b6a79208SShmulik Ladkani 		}
5694b6a79208SShmulik Ladkani 
569593515d53SJiri Pirko 		err = __vlan_insert_tag(skb, skb->vlan_proto,
5696df8a39deSJiri Pirko 					skb_vlan_tag_get(skb));
5697b6a79208SShmulik Ladkani 		if (err)
569893515d53SJiri Pirko 			return err;
56999241e2dfSDaniel Borkmann 
570093515d53SJiri Pirko 		skb->protocol = skb->vlan_proto;
570193515d53SJiri Pirko 		skb->mac_len += VLAN_HLEN;
570293515d53SJiri Pirko 
57036b83d28aSDaniel Borkmann 		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
570493515d53SJiri Pirko 	}
570593515d53SJiri Pirko 	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
570693515d53SJiri Pirko 	return 0;
570793515d53SJiri Pirko }
570893515d53SJiri Pirko EXPORT_SYMBOL(skb_vlan_push);
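
/* Illustrative sketch, not part of the original file: rewriting the outermost
 * VLAN tag by combining skb_vlan_pop() and skb_vlan_push(). Both expect
 * skb->data at the mac header. example_set_vlan() is a hypothetical helper.
 */
static int example_set_vlan(struct sk_buff *skb, u16 new_tci)
{
	int err;

	err = skb_vlan_pop(skb);	/* drops hwaccel or in-payload tag */
	if (err)
		return err;

	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_tci);
}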
570993515d53SJiri Pirko 
571019fbcb36SGuillaume Nault /**
571119fbcb36SGuillaume Nault  * skb_eth_pop() - Drop the Ethernet header at the head of a packet
571219fbcb36SGuillaume Nault  *
571319fbcb36SGuillaume Nault  * @skb: Socket buffer to modify
571419fbcb36SGuillaume Nault  *
571519fbcb36SGuillaume Nault  * Drop the Ethernet header of @skb.
571619fbcb36SGuillaume Nault  *
571719fbcb36SGuillaume Nault  * Expects that skb->data points to the mac header and that no VLAN tags are
571819fbcb36SGuillaume Nault  * present.
571919fbcb36SGuillaume Nault  *
572019fbcb36SGuillaume Nault  * Returns 0 on success, -errno otherwise.
572119fbcb36SGuillaume Nault  */
572219fbcb36SGuillaume Nault int skb_eth_pop(struct sk_buff *skb)
572319fbcb36SGuillaume Nault {
572419fbcb36SGuillaume Nault 	if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) ||
572519fbcb36SGuillaume Nault 	    skb_network_offset(skb) < ETH_HLEN)
572619fbcb36SGuillaume Nault 		return -EPROTO;
572719fbcb36SGuillaume Nault 
572819fbcb36SGuillaume Nault 	skb_pull_rcsum(skb, ETH_HLEN);
572919fbcb36SGuillaume Nault 	skb_reset_mac_header(skb);
573019fbcb36SGuillaume Nault 	skb_reset_mac_len(skb);
573119fbcb36SGuillaume Nault 
573219fbcb36SGuillaume Nault 	return 0;
573319fbcb36SGuillaume Nault }
573419fbcb36SGuillaume Nault EXPORT_SYMBOL(skb_eth_pop);
573519fbcb36SGuillaume Nault 
573619fbcb36SGuillaume Nault /**
573719fbcb36SGuillaume Nault  * skb_eth_push() - Add a new Ethernet header at the head of a packet
573819fbcb36SGuillaume Nault  *
573919fbcb36SGuillaume Nault  * @skb: Socket buffer to modify
574019fbcb36SGuillaume Nault  * @dst: Destination MAC address of the new header
574119fbcb36SGuillaume Nault  * @src: Source MAC address of the new header
574219fbcb36SGuillaume Nault  *
574319fbcb36SGuillaume Nault  * Prepend @skb with a new Ethernet header.
574419fbcb36SGuillaume Nault  *
574519fbcb36SGuillaume Nault  * Expects that skb->data points to the mac header, which must be empty.
574619fbcb36SGuillaume Nault  *
574719fbcb36SGuillaume Nault  * Returns 0 on success, -errno otherwise.
574819fbcb36SGuillaume Nault  */
574919fbcb36SGuillaume Nault int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
575019fbcb36SGuillaume Nault 		 const unsigned char *src)
575119fbcb36SGuillaume Nault {
575219fbcb36SGuillaume Nault 	struct ethhdr *eth;
575319fbcb36SGuillaume Nault 	int err;
575419fbcb36SGuillaume Nault 
575519fbcb36SGuillaume Nault 	if (skb_network_offset(skb) || skb_vlan_tag_present(skb))
575619fbcb36SGuillaume Nault 		return -EPROTO;
575719fbcb36SGuillaume Nault 
575819fbcb36SGuillaume Nault 	err = skb_cow_head(skb, sizeof(*eth));
575919fbcb36SGuillaume Nault 	if (err < 0)
576019fbcb36SGuillaume Nault 		return err;
576119fbcb36SGuillaume Nault 
576219fbcb36SGuillaume Nault 	skb_push(skb, sizeof(*eth));
576319fbcb36SGuillaume Nault 	skb_reset_mac_header(skb);
576419fbcb36SGuillaume Nault 	skb_reset_mac_len(skb);
576519fbcb36SGuillaume Nault 
576619fbcb36SGuillaume Nault 	eth = eth_hdr(skb);
576719fbcb36SGuillaume Nault 	ether_addr_copy(eth->h_dest, dst);
576819fbcb36SGuillaume Nault 	ether_addr_copy(eth->h_source, src);
576919fbcb36SGuillaume Nault 	eth->h_proto = skb->protocol;
577019fbcb36SGuillaume Nault 
577119fbcb36SGuillaume Nault 	skb_postpush_rcsum(skb, eth, sizeof(*eth));
577219fbcb36SGuillaume Nault 
577319fbcb36SGuillaume Nault 	return 0;
577419fbcb36SGuillaume Nault }
577519fbcb36SGuillaume Nault EXPORT_SYMBOL(skb_eth_push);
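
/* Illustrative sketch, not part of the original file: swapping the Ethernet
 * header for new addresses with skb_eth_pop()/skb_eth_push(). Expects
 * skb->data at the mac header and no VLAN tags. example_swap_eth() is a
 * hypothetical helper name.
 */
static int example_swap_eth(struct sk_buff *skb, const unsigned char *dst,
			    const unsigned char *src)
{
	int err;

	err = skb_eth_pop(skb);
	if (err)
		return err;

	return skb_eth_push(skb, dst, src);
}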
577619fbcb36SGuillaume Nault 
57778822e270SJohn Hurley /* Update the ethertype of hdr and the skb csum value if required. */
57788822e270SJohn Hurley static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
57798822e270SJohn Hurley 			     __be16 ethertype)
57808822e270SJohn Hurley {
57818822e270SJohn Hurley 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
57828822e270SJohn Hurley 		__be16 diff[] = { ~hdr->h_proto, ethertype };
57838822e270SJohn Hurley 
57848822e270SJohn Hurley 		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
57858822e270SJohn Hurley 	}
57868822e270SJohn Hurley 
57878822e270SJohn Hurley 	hdr->h_proto = ethertype;
57888822e270SJohn Hurley }
57898822e270SJohn Hurley 
57908822e270SJohn Hurley /**
5791e7dbfed1SMartin Varghese  * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of
5792e7dbfed1SMartin Varghese  *                   the packet
57938822e270SJohn Hurley  *
57948822e270SJohn Hurley  * @skb: buffer
57958822e270SJohn Hurley  * @mpls_lse: MPLS label stack entry to push
57968822e270SJohn Hurley  * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
5797fa4e0f88SDavide Caratti  * @mac_len: length of the MAC header
5798e7dbfed1SMartin Varghese  * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is
5799e7dbfed1SMartin Varghese  *            ethernet
58008822e270SJohn Hurley  *
58018822e270SJohn Hurley  * Expects skb->data at mac header.
58028822e270SJohn Hurley  *
58038822e270SJohn Hurley  * Returns 0 on success, -errno otherwise.
58048822e270SJohn Hurley  */
5805fa4e0f88SDavide Caratti int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
5806d04ac224SMartin Varghese 		  int mac_len, bool ethernet)
58078822e270SJohn Hurley {
58088822e270SJohn Hurley 	struct mpls_shim_hdr *lse;
58098822e270SJohn Hurley 	int err;
58108822e270SJohn Hurley 
58118822e270SJohn Hurley 	if (unlikely(!eth_p_mpls(mpls_proto)))
58128822e270SJohn Hurley 		return -EINVAL;
58138822e270SJohn Hurley 
58148822e270SJohn Hurley 	/* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */
58158822e270SJohn Hurley 	if (skb->encapsulation)
58168822e270SJohn Hurley 		return -EINVAL;
58178822e270SJohn Hurley 
58188822e270SJohn Hurley 	err = skb_cow_head(skb, MPLS_HLEN);
58198822e270SJohn Hurley 	if (unlikely(err))
58208822e270SJohn Hurley 		return err;
58218822e270SJohn Hurley 
58228822e270SJohn Hurley 	if (!skb->inner_protocol) {
5823e7dbfed1SMartin Varghese 		skb_set_inner_network_header(skb, skb_network_offset(skb));
58248822e270SJohn Hurley 		skb_set_inner_protocol(skb, skb->protocol);
58258822e270SJohn Hurley 	}
58268822e270SJohn Hurley 
58278822e270SJohn Hurley 	skb_push(skb, MPLS_HLEN);
58288822e270SJohn Hurley 	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
5829fa4e0f88SDavide Caratti 		mac_len);
58308822e270SJohn Hurley 	skb_reset_mac_header(skb);
5831fa4e0f88SDavide Caratti 	skb_set_network_header(skb, mac_len);
5832e7dbfed1SMartin Varghese 	skb_reset_mac_len(skb);
58338822e270SJohn Hurley 
58348822e270SJohn Hurley 	lse = mpls_hdr(skb);
58358822e270SJohn Hurley 	lse->label_stack_entry = mpls_lse;
58368822e270SJohn Hurley 	skb_postpush_rcsum(skb, lse, MPLS_HLEN);
58378822e270SJohn Hurley 
58384296adc3SGuillaume Nault 	if (ethernet && mac_len >= ETH_HLEN)
58398822e270SJohn Hurley 		skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
58408822e270SJohn Hurley 	skb->protocol = mpls_proto;
58418822e270SJohn Hurley 
58428822e270SJohn Hurley 	return 0;
58438822e270SJohn Hurley }
58448822e270SJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_push);
58458822e270SJohn Hurley 
58462e4e4410SEric Dumazet /**
5847ed246ceeSJohn Hurley  * skb_mpls_pop() - pop the outermost MPLS header
5848ed246ceeSJohn Hurley  *
5849ed246ceeSJohn Hurley  * @skb: buffer
5850ed246ceeSJohn Hurley  * @next_proto: ethertype of header after popped MPLS header
5851fa4e0f88SDavide Caratti  * @mac_len: length of the MAC header
585276f99f98SMartin Varghese  * @ethernet: flag to indicate if the packet is ethernet
5853ed246ceeSJohn Hurley  *
5854ed246ceeSJohn Hurley  * Expects skb->data at mac header.
5855ed246ceeSJohn Hurley  *
5856ed246ceeSJohn Hurley  * Returns 0 on success, -errno otherwise.
5857ed246ceeSJohn Hurley  */
5858040b5cfbSMartin Varghese int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
5859040b5cfbSMartin Varghese 		 bool ethernet)
5860ed246ceeSJohn Hurley {
5861ed246ceeSJohn Hurley 	int err;
5862ed246ceeSJohn Hurley 
5863ed246ceeSJohn Hurley 	if (unlikely(!eth_p_mpls(skb->protocol)))
5864dedc5a08SDavide Caratti 		return 0;
5865ed246ceeSJohn Hurley 
5866fa4e0f88SDavide Caratti 	err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
5867ed246ceeSJohn Hurley 	if (unlikely(err))
5868ed246ceeSJohn Hurley 		return err;
5869ed246ceeSJohn Hurley 
5870ed246ceeSJohn Hurley 	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
5871ed246ceeSJohn Hurley 	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
5872fa4e0f88SDavide Caratti 		mac_len);
5873ed246ceeSJohn Hurley 
5874ed246ceeSJohn Hurley 	__skb_pull(skb, MPLS_HLEN);
5875ed246ceeSJohn Hurley 	skb_reset_mac_header(skb);
5876fa4e0f88SDavide Caratti 	skb_set_network_header(skb, mac_len);
5877ed246ceeSJohn Hurley 
58784296adc3SGuillaume Nault 	if (ethernet && mac_len >= ETH_HLEN) {
5879ed246ceeSJohn Hurley 		struct ethhdr *hdr;
5880ed246ceeSJohn Hurley 
5881ed246ceeSJohn Hurley 		/* use mpls_hdr() to get ethertype to account for VLANs. */
5882ed246ceeSJohn Hurley 		hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
5883ed246ceeSJohn Hurley 		skb_mod_eth_type(skb, hdr, next_proto);
5884ed246ceeSJohn Hurley 	}
5885ed246ceeSJohn Hurley 	skb->protocol = next_proto;
5886ed246ceeSJohn Hurley 
5887ed246ceeSJohn Hurley 	return 0;
5888ed246ceeSJohn Hurley }
5889ed246ceeSJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_pop);
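
/* Illustrative sketch, not part of the original file: terminating an MPLS
 * tunnel by popping the outermost label and restoring an IPv4 ethertype.
 * example_mpls_decap() is a hypothetical helper; it assumes an Ethernet
 * packet with skb->data at the mac header.
 */
static int example_mpls_decap(struct sk_buff *skb)
{
	if (!eth_p_mpls(skb->protocol))
		return 0;	/* nothing to pop */

	return skb_mpls_pop(skb, htons(ETH_P_IP), skb->mac_len, true);
}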
5890ed246ceeSJohn Hurley 
5891ed246ceeSJohn Hurley /**
5892d27cf5c5SJohn Hurley  * skb_mpls_update_lse() - modify outermost MPLS header and update csum
5893d27cf5c5SJohn Hurley  *
5894d27cf5c5SJohn Hurley  * @skb: buffer
5895d27cf5c5SJohn Hurley  * @mpls_lse: new MPLS label stack entry to update to
5896d27cf5c5SJohn Hurley  *
5897d27cf5c5SJohn Hurley  * Expects skb->data at mac header.
5898d27cf5c5SJohn Hurley  *
5899d27cf5c5SJohn Hurley  * Returns 0 on success, -errno otherwise.
5900d27cf5c5SJohn Hurley  */
5901d27cf5c5SJohn Hurley int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse)
5902d27cf5c5SJohn Hurley {
5903d27cf5c5SJohn Hurley 	int err;
5904d27cf5c5SJohn Hurley 
5905d27cf5c5SJohn Hurley 	if (unlikely(!eth_p_mpls(skb->protocol)))
5906d27cf5c5SJohn Hurley 		return -EINVAL;
5907d27cf5c5SJohn Hurley 
5908d27cf5c5SJohn Hurley 	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
5909d27cf5c5SJohn Hurley 	if (unlikely(err))
5910d27cf5c5SJohn Hurley 		return err;
5911d27cf5c5SJohn Hurley 
5912d27cf5c5SJohn Hurley 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
5913d27cf5c5SJohn Hurley 		__be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse };
5914d27cf5c5SJohn Hurley 
5915d27cf5c5SJohn Hurley 		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
5916d27cf5c5SJohn Hurley 	}
5917d27cf5c5SJohn Hurley 
5918d27cf5c5SJohn Hurley 	mpls_hdr(skb)->label_stack_entry = mpls_lse;
5919d27cf5c5SJohn Hurley 
5920d27cf5c5SJohn Hurley 	return 0;
5921d27cf5c5SJohn Hurley }
5922d27cf5c5SJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_update_lse);
5923d27cf5c5SJohn Hurley 
5924d27cf5c5SJohn Hurley /**
59252a2ea508SJohn Hurley  * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
59262a2ea508SJohn Hurley  *
59272a2ea508SJohn Hurley  * @skb: buffer
59282a2ea508SJohn Hurley  *
59292a2ea508SJohn Hurley  * Expects skb->data at mac header.
59302a2ea508SJohn Hurley  *
59312a2ea508SJohn Hurley  * Returns 0 on success, -errno otherwise.
59322a2ea508SJohn Hurley  */
59332a2ea508SJohn Hurley int skb_mpls_dec_ttl(struct sk_buff *skb)
59342a2ea508SJohn Hurley {
59352a2ea508SJohn Hurley 	u32 lse;
59362a2ea508SJohn Hurley 	u8 ttl;
59372a2ea508SJohn Hurley 
59382a2ea508SJohn Hurley 	if (unlikely(!eth_p_mpls(skb->protocol)))
59392a2ea508SJohn Hurley 		return -EINVAL;
59402a2ea508SJohn Hurley 
594113de4ed9SDavide Caratti 	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
594213de4ed9SDavide Caratti 		return -ENOMEM;
594313de4ed9SDavide Caratti 
59442a2ea508SJohn Hurley 	lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
59452a2ea508SJohn Hurley 	ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
59462a2ea508SJohn Hurley 	if (!--ttl)
59472a2ea508SJohn Hurley 		return -EINVAL;
59482a2ea508SJohn Hurley 
59492a2ea508SJohn Hurley 	lse &= ~MPLS_LS_TTL_MASK;
59502a2ea508SJohn Hurley 	lse |= ttl << MPLS_LS_TTL_SHIFT;
59512a2ea508SJohn Hurley 
59522a2ea508SJohn Hurley 	return skb_mpls_update_lse(skb, cpu_to_be32(lse));
59532a2ea508SJohn Hurley }
59542a2ea508SJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
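
/* Illustrative sketch, not part of the original file: decrementing the MPLS
 * TTL before forwarding and dropping the packet when the TTL would expire or
 * the header cannot be pulled. example_mpls_forward_ttl() is hypothetical.
 */
static int example_mpls_forward_ttl(struct sk_buff *skb)
{
	int err = skb_mpls_dec_ttl(skb);

	if (err) {
		kfree_skb(skb);
		return err;
	}
	return 0;
}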
59552a2ea508SJohn Hurley 
59562a2ea508SJohn Hurley /**
59572e4e4410SEric Dumazet  * alloc_skb_with_frags - allocate skb with page frags
59582e4e4410SEric Dumazet  *
5959de3f0d0eSMasanari Iida  * @header_len: size of linear part
5960de3f0d0eSMasanari Iida  * @data_len: needed length in frags
5961de3f0d0eSMasanari Iida  * @max_page_order: max page order desired.
5962de3f0d0eSMasanari Iida  * @errcode: pointer to error code if any
5963de3f0d0eSMasanari Iida  * @gfp_mask: allocation mask
59642e4e4410SEric Dumazet  *
59652e4e4410SEric Dumazet  * This can be used to allocate a paged skb, given a maximal order for frags.
59662e4e4410SEric Dumazet  */
59672e4e4410SEric Dumazet struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
59682e4e4410SEric Dumazet 				     unsigned long data_len,
59692e4e4410SEric Dumazet 				     int max_page_order,
59702e4e4410SEric Dumazet 				     int *errcode,
59712e4e4410SEric Dumazet 				     gfp_t gfp_mask)
59722e4e4410SEric Dumazet {
59732e4e4410SEric Dumazet 	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
59742e4e4410SEric Dumazet 	unsigned long chunk;
59752e4e4410SEric Dumazet 	struct sk_buff *skb;
59762e4e4410SEric Dumazet 	struct page *page;
59772e4e4410SEric Dumazet 	int i;
59782e4e4410SEric Dumazet 
59792e4e4410SEric Dumazet 	*errcode = -EMSGSIZE;
59802e4e4410SEric Dumazet 	/* Note this test could be relaxed, if we succeed in allocating
59812e4e4410SEric Dumazet 	 * high order pages...
59822e4e4410SEric Dumazet 	 */
59832e4e4410SEric Dumazet 	if (npages > MAX_SKB_FRAGS)
59842e4e4410SEric Dumazet 		return NULL;
59852e4e4410SEric Dumazet 
59862e4e4410SEric Dumazet 	*errcode = -ENOBUFS;
5987f8c468e8SDavid Rientjes 	skb = alloc_skb(header_len, gfp_mask);
59882e4e4410SEric Dumazet 	if (!skb)
59892e4e4410SEric Dumazet 		return NULL;
59902e4e4410SEric Dumazet 
59912e4e4410SEric Dumazet 	skb->truesize += npages << PAGE_SHIFT;
59922e4e4410SEric Dumazet 
59932e4e4410SEric Dumazet 	for (i = 0; npages > 0; i++) {
59942e4e4410SEric Dumazet 		int order = max_page_order;
59952e4e4410SEric Dumazet 
59962e4e4410SEric Dumazet 		while (order) {
59972e4e4410SEric Dumazet 			if (npages >= 1 << order) {
5998d0164adcSMel Gorman 				page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
59992e4e4410SEric Dumazet 						   __GFP_COMP |
6000d14b56f5SMichal Hocko 						   __GFP_NOWARN,
60012e4e4410SEric Dumazet 						   order);
60022e4e4410SEric Dumazet 				if (page)
60032e4e4410SEric Dumazet 					goto fill_page;
60042e4e4410SEric Dumazet 				/* Do not retry other high order allocations */
60052e4e4410SEric Dumazet 				order = 1;
60062e4e4410SEric Dumazet 				max_page_order = 0;
60072e4e4410SEric Dumazet 			}
60082e4e4410SEric Dumazet 			order--;
60092e4e4410SEric Dumazet 		}
60102e4e4410SEric Dumazet 		page = alloc_page(gfp_mask);
60112e4e4410SEric Dumazet 		if (!page)
60122e4e4410SEric Dumazet 			goto failure;
60132e4e4410SEric Dumazet fill_page:
60142e4e4410SEric Dumazet 		chunk = min_t(unsigned long, data_len,
60152e4e4410SEric Dumazet 			      PAGE_SIZE << order);
60162e4e4410SEric Dumazet 		skb_fill_page_desc(skb, i, page, 0, chunk);
60172e4e4410SEric Dumazet 		data_len -= chunk;
60182e4e4410SEric Dumazet 		npages -= 1 << order;
60192e4e4410SEric Dumazet 	}
60202e4e4410SEric Dumazet 	return skb;
60212e4e4410SEric Dumazet 
60222e4e4410SEric Dumazet failure:
60232e4e4410SEric Dumazet 	kfree_skb(skb);
60242e4e4410SEric Dumazet 	return NULL;
60252e4e4410SEric Dumazet }
60262e4e4410SEric Dumazet EXPORT_SYMBOL(alloc_skb_with_frags);
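
/* Illustrative sketch, not part of the original file: allocating an skb with
 * a small linear header and the payload in order-0 page frags, roughly in the
 * way sock_alloc_send_pskb() uses this helper. example_alloc_paged() is a
 * hypothetical helper name.
 */
static struct sk_buff *example_alloc_paged(unsigned long header_len,
					   unsigned long data_len)
{
	int errcode;
	struct sk_buff *skb;

	skb = alloc_skb_with_frags(header_len, data_len, 0 /* order-0 only */,
				   &errcode, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(errcode);	/* -EMSGSIZE or -ENOBUFS */

	return skb;
}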
60276fa01ccdSSowmini Varadhan 
60286fa01ccdSSowmini Varadhan /* carve out the first off bytes from skb when off < headlen */
60296fa01ccdSSowmini Varadhan static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
60306fa01ccdSSowmini Varadhan 				    const int headlen, gfp_t gfp_mask)
60316fa01ccdSSowmini Varadhan {
60326fa01ccdSSowmini Varadhan 	int i;
60336fa01ccdSSowmini Varadhan 	int size = skb_end_offset(skb);
60346fa01ccdSSowmini Varadhan 	int new_hlen = headlen - off;
60356fa01ccdSSowmini Varadhan 	u8 *data;
60366fa01ccdSSowmini Varadhan 
60376fa01ccdSSowmini Varadhan 	size = SKB_DATA_ALIGN(size);
60386fa01ccdSSowmini Varadhan 
60396fa01ccdSSowmini Varadhan 	if (skb_pfmemalloc(skb))
60406fa01ccdSSowmini Varadhan 		gfp_mask |= __GFP_MEMALLOC;
60416fa01ccdSSowmini Varadhan 	data = kmalloc_reserve(size +
60426fa01ccdSSowmini Varadhan 			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
60436fa01ccdSSowmini Varadhan 			       gfp_mask, NUMA_NO_NODE, NULL);
60446fa01ccdSSowmini Varadhan 	if (!data)
60456fa01ccdSSowmini Varadhan 		return -ENOMEM;
60466fa01ccdSSowmini Varadhan 
60476fa01ccdSSowmini Varadhan 	size = SKB_WITH_OVERHEAD(ksize(data));
60486fa01ccdSSowmini Varadhan 
60496fa01ccdSSowmini Varadhan 	/* Copy real data, and all frags */
60506fa01ccdSSowmini Varadhan 	skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
60516fa01ccdSSowmini Varadhan 	skb->len -= off;
60526fa01ccdSSowmini Varadhan 
60536fa01ccdSSowmini Varadhan 	memcpy((struct skb_shared_info *)(data + size),
60546fa01ccdSSowmini Varadhan 	       skb_shinfo(skb),
60556fa01ccdSSowmini Varadhan 	       offsetof(struct skb_shared_info,
60566fa01ccdSSowmini Varadhan 			frags[skb_shinfo(skb)->nr_frags]));
60576fa01ccdSSowmini Varadhan 	if (skb_cloned(skb)) {
60586fa01ccdSSowmini Varadhan 		/* drop the old head gracefully */
60596fa01ccdSSowmini Varadhan 		if (skb_orphan_frags(skb, gfp_mask)) {
60606fa01ccdSSowmini Varadhan 			kfree(data);
60616fa01ccdSSowmini Varadhan 			return -ENOMEM;
60626fa01ccdSSowmini Varadhan 		}
60636fa01ccdSSowmini Varadhan 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
60646fa01ccdSSowmini Varadhan 			skb_frag_ref(skb, i);
60656fa01ccdSSowmini Varadhan 		if (skb_has_frag_list(skb))
60666fa01ccdSSowmini Varadhan 			skb_clone_fraglist(skb);
60676fa01ccdSSowmini Varadhan 		skb_release_data(skb);
60686fa01ccdSSowmini Varadhan 	} else {
60696fa01ccdSSowmini Varadhan 		/* we can reuse existing recount- all we did was
60706fa01ccdSSowmini Varadhan 		/* we can reuse the existing refcount - all we did was
60716fa01ccdSSowmini Varadhan 		 */
60726fa01ccdSSowmini Varadhan 		skb_free_head(skb);
60736fa01ccdSSowmini Varadhan 	}
60746fa01ccdSSowmini Varadhan 
60756fa01ccdSSowmini Varadhan 	skb->head = data;
60766fa01ccdSSowmini Varadhan 	skb->data = data;
60776fa01ccdSSowmini Varadhan 	skb->head_frag = 0;
6078763087daSEric Dumazet 	skb_set_end_offset(skb, size);
60796fa01ccdSSowmini Varadhan 	skb_set_tail_pointer(skb, skb_headlen(skb));
60806fa01ccdSSowmini Varadhan 	skb_headers_offset_update(skb, 0);
60816fa01ccdSSowmini Varadhan 	skb->cloned = 0;
60826fa01ccdSSowmini Varadhan 	skb->hdr_len = 0;
60836fa01ccdSSowmini Varadhan 	skb->nohdr = 0;
60846fa01ccdSSowmini Varadhan 	atomic_set(&skb_shinfo(skb)->dataref, 1);
60856fa01ccdSSowmini Varadhan 
60866fa01ccdSSowmini Varadhan 	return 0;
60876fa01ccdSSowmini Varadhan }
60886fa01ccdSSowmini Varadhan 
60896fa01ccdSSowmini Varadhan static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
60906fa01ccdSSowmini Varadhan 
60916fa01ccdSSowmini Varadhan /* carve out the first eat bytes from skb's frag_list. May recurse into
60926fa01ccdSSowmini Varadhan  * pskb_carve()
60936fa01ccdSSowmini Varadhan  */
60946fa01ccdSSowmini Varadhan static int pskb_carve_frag_list(struct sk_buff *skb,
60956fa01ccdSSowmini Varadhan 				struct skb_shared_info *shinfo, int eat,
60966fa01ccdSSowmini Varadhan 				gfp_t gfp_mask)
60976fa01ccdSSowmini Varadhan {
60986fa01ccdSSowmini Varadhan 	struct sk_buff *list = shinfo->frag_list;
60996fa01ccdSSowmini Varadhan 	struct sk_buff *clone = NULL;
61006fa01ccdSSowmini Varadhan 	struct sk_buff *insp = NULL;
61016fa01ccdSSowmini Varadhan 
61026fa01ccdSSowmini Varadhan 	do {
61036fa01ccdSSowmini Varadhan 		if (!list) {
61046fa01ccdSSowmini Varadhan 			pr_err("Not enough bytes to eat. Want %d\n", eat);
61056fa01ccdSSowmini Varadhan 			return -EFAULT;
61066fa01ccdSSowmini Varadhan 		}
61076fa01ccdSSowmini Varadhan 		if (list->len <= eat) {
61086fa01ccdSSowmini Varadhan 			/* Eaten as whole. */
61096fa01ccdSSowmini Varadhan 			eat -= list->len;
61106fa01ccdSSowmini Varadhan 			list = list->next;
61116fa01ccdSSowmini Varadhan 			insp = list;
61126fa01ccdSSowmini Varadhan 		} else {
61136fa01ccdSSowmini Varadhan 			/* Eaten partially. */
61146fa01ccdSSowmini Varadhan 			if (skb_shared(list)) {
61156fa01ccdSSowmini Varadhan 				clone = skb_clone(list, gfp_mask);
61166fa01ccdSSowmini Varadhan 				if (!clone)
61176fa01ccdSSowmini Varadhan 					return -ENOMEM;
61186fa01ccdSSowmini Varadhan 				insp = list->next;
61196fa01ccdSSowmini Varadhan 				list = clone;
61206fa01ccdSSowmini Varadhan 			} else {
61216fa01ccdSSowmini Varadhan 				/* This may be pulled without problems. */
61226fa01ccdSSowmini Varadhan 				insp = list;
61236fa01ccdSSowmini Varadhan 			}
61246fa01ccdSSowmini Varadhan 			if (pskb_carve(list, eat, gfp_mask) < 0) {
61256fa01ccdSSowmini Varadhan 				kfree_skb(clone);
61266fa01ccdSSowmini Varadhan 				return -ENOMEM;
61276fa01ccdSSowmini Varadhan 			}
61286fa01ccdSSowmini Varadhan 			break;
61296fa01ccdSSowmini Varadhan 		}
61306fa01ccdSSowmini Varadhan 	} while (eat);
61316fa01ccdSSowmini Varadhan 
61326fa01ccdSSowmini Varadhan 	/* Free pulled out fragments. */
61336fa01ccdSSowmini Varadhan 	while ((list = shinfo->frag_list) != insp) {
61346fa01ccdSSowmini Varadhan 		shinfo->frag_list = list->next;
6135ef527f96SEric Dumazet 		consume_skb(list);
61366fa01ccdSSowmini Varadhan 	}
61376fa01ccdSSowmini Varadhan 	/* And insert new clone at head. */
61386fa01ccdSSowmini Varadhan 	if (clone) {
61396fa01ccdSSowmini Varadhan 		clone->next = list;
61406fa01ccdSSowmini Varadhan 		shinfo->frag_list = clone;
61416fa01ccdSSowmini Varadhan 	}
61426fa01ccdSSowmini Varadhan 	return 0;
61436fa01ccdSSowmini Varadhan }
61446fa01ccdSSowmini Varadhan 
61456fa01ccdSSowmini Varadhan /* carve off first len bytes from skb. Split line (off) is in the
61466fa01ccdSSowmini Varadhan  * non-linear part of skb
61476fa01ccdSSowmini Varadhan  */
61486fa01ccdSSowmini Varadhan static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
61496fa01ccdSSowmini Varadhan 				       int pos, gfp_t gfp_mask)
61506fa01ccdSSowmini Varadhan {
61516fa01ccdSSowmini Varadhan 	int i, k = 0;
61526fa01ccdSSowmini Varadhan 	int size = skb_end_offset(skb);
61536fa01ccdSSowmini Varadhan 	u8 *data;
61546fa01ccdSSowmini Varadhan 	const int nfrags = skb_shinfo(skb)->nr_frags;
61556fa01ccdSSowmini Varadhan 	struct skb_shared_info *shinfo;
61566fa01ccdSSowmini Varadhan 
61576fa01ccdSSowmini Varadhan 	size = SKB_DATA_ALIGN(size);
61586fa01ccdSSowmini Varadhan 
61596fa01ccdSSowmini Varadhan 	if (skb_pfmemalloc(skb))
61606fa01ccdSSowmini Varadhan 		gfp_mask |= __GFP_MEMALLOC;
61616fa01ccdSSowmini Varadhan 	data = kmalloc_reserve(size +
61626fa01ccdSSowmini Varadhan 			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
61636fa01ccdSSowmini Varadhan 			       gfp_mask, NUMA_NO_NODE, NULL);
61646fa01ccdSSowmini Varadhan 	if (!data)
61656fa01ccdSSowmini Varadhan 		return -ENOMEM;
61666fa01ccdSSowmini Varadhan 
61676fa01ccdSSowmini Varadhan 	size = SKB_WITH_OVERHEAD(ksize(data));
61686fa01ccdSSowmini Varadhan 
61696fa01ccdSSowmini Varadhan 	memcpy((struct skb_shared_info *)(data + size),
6170e3ec1e8cSMiaohe Lin 	       skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
61716fa01ccdSSowmini Varadhan 	if (skb_orphan_frags(skb, gfp_mask)) {
61726fa01ccdSSowmini Varadhan 		kfree(data);
61736fa01ccdSSowmini Varadhan 		return -ENOMEM;
61746fa01ccdSSowmini Varadhan 	}
61756fa01ccdSSowmini Varadhan 	shinfo = (struct skb_shared_info *)(data + size);
61766fa01ccdSSowmini Varadhan 	for (i = 0; i < nfrags; i++) {
61776fa01ccdSSowmini Varadhan 		int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
61786fa01ccdSSowmini Varadhan 
61796fa01ccdSSowmini Varadhan 		if (pos + fsize > off) {
61806fa01ccdSSowmini Varadhan 			shinfo->frags[k] = skb_shinfo(skb)->frags[i];
61816fa01ccdSSowmini Varadhan 
61826fa01ccdSSowmini Varadhan 			if (pos < off) {
61836fa01ccdSSowmini Varadhan 				/* Split frag.
61846fa01ccdSSowmini Varadhan 				 * We have two variants in this case:
61856fa01ccdSSowmini Varadhan 				 * 1. Move the whole frag to the second
61866fa01ccdSSowmini Varadhan 				 *    part, if it is possible. E.g.
61876fa01ccdSSowmini Varadhan 				 *    this approach is mandatory for TUX,
61886fa01ccdSSowmini Varadhan 				 *    where splitting is expensive.
61896fa01ccdSSowmini Varadhan 				 * 2. Split accurately, which is what we do here.
61906fa01ccdSSowmini Varadhan 				 */
6191b54c9d5bSJonathan Lemon 				skb_frag_off_add(&shinfo->frags[0], off - pos);
61926fa01ccdSSowmini Varadhan 				skb_frag_size_sub(&shinfo->frags[0], off - pos);
61936fa01ccdSSowmini Varadhan 			}
61946fa01ccdSSowmini Varadhan 			skb_frag_ref(skb, i);
61956fa01ccdSSowmini Varadhan 			k++;
61966fa01ccdSSowmini Varadhan 		}
61976fa01ccdSSowmini Varadhan 		pos += fsize;
61986fa01ccdSSowmini Varadhan 	}
61996fa01ccdSSowmini Varadhan 	shinfo->nr_frags = k;
62006fa01ccdSSowmini Varadhan 	if (skb_has_frag_list(skb))
62016fa01ccdSSowmini Varadhan 		skb_clone_fraglist(skb);
62026fa01ccdSSowmini Varadhan 
62036fa01ccdSSowmini Varadhan 	/* split line is in frag list */
6204eabe8618SMiaohe Lin 	if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
6205eabe8618SMiaohe Lin 		/* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
6206eabe8618SMiaohe Lin 		if (skb_has_frag_list(skb))
6207eabe8618SMiaohe Lin 			kfree_skb_list(skb_shinfo(skb)->frag_list);
6208eabe8618SMiaohe Lin 		kfree(data);
6209eabe8618SMiaohe Lin 		return -ENOMEM;
62106fa01ccdSSowmini Varadhan 	}
62116fa01ccdSSowmini Varadhan 	skb_release_data(skb);
62126fa01ccdSSowmini Varadhan 
62136fa01ccdSSowmini Varadhan 	skb->head = data;
62146fa01ccdSSowmini Varadhan 	skb->head_frag = 0;
62156fa01ccdSSowmini Varadhan 	skb->data = data;
6216763087daSEric Dumazet 	skb_set_end_offset(skb, size);
62176fa01ccdSSowmini Varadhan 	skb_reset_tail_pointer(skb);
62186fa01ccdSSowmini Varadhan 	skb_headers_offset_update(skb, 0);
62196fa01ccdSSowmini Varadhan 	skb->cloned   = 0;
62206fa01ccdSSowmini Varadhan 	skb->hdr_len  = 0;
62216fa01ccdSSowmini Varadhan 	skb->nohdr    = 0;
62226fa01ccdSSowmini Varadhan 	skb->len -= off;
62236fa01ccdSSowmini Varadhan 	skb->data_len = skb->len;
62246fa01ccdSSowmini Varadhan 	atomic_set(&skb_shinfo(skb)->dataref, 1);
62256fa01ccdSSowmini Varadhan 	return 0;
62266fa01ccdSSowmini Varadhan }
62276fa01ccdSSowmini Varadhan 
62286fa01ccdSSowmini Varadhan /* remove len bytes from the beginning of the skb */
62296fa01ccdSSowmini Varadhan static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
62306fa01ccdSSowmini Varadhan {
62316fa01ccdSSowmini Varadhan 	int headlen = skb_headlen(skb);
62326fa01ccdSSowmini Varadhan 
62336fa01ccdSSowmini Varadhan 	if (len < headlen)
62346fa01ccdSSowmini Varadhan 		return pskb_carve_inside_header(skb, len, headlen, gfp);
62356fa01ccdSSowmini Varadhan 	else
62366fa01ccdSSowmini Varadhan 		return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
62376fa01ccdSSowmini Varadhan }
62386fa01ccdSSowmini Varadhan 
62396fa01ccdSSowmini Varadhan /* Extract to_copy bytes starting at off from skb, and return this in
62406fa01ccdSSowmini Varadhan  * a new skb
62416fa01ccdSSowmini Varadhan  */
62426fa01ccdSSowmini Varadhan struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
62436fa01ccdSSowmini Varadhan 			     int to_copy, gfp_t gfp)
62446fa01ccdSSowmini Varadhan {
62456fa01ccdSSowmini Varadhan 	struct sk_buff  *clone = skb_clone(skb, gfp);
62466fa01ccdSSowmini Varadhan 
62476fa01ccdSSowmini Varadhan 	if (!clone)
62486fa01ccdSSowmini Varadhan 		return NULL;
62496fa01ccdSSowmini Varadhan 
62506fa01ccdSSowmini Varadhan 	if (pskb_carve(clone, off, gfp) < 0 ||
62516fa01ccdSSowmini Varadhan 	    pskb_trim(clone, to_copy)) {
62526fa01ccdSSowmini Varadhan 		kfree_skb(clone);
62536fa01ccdSSowmini Varadhan 		return NULL;
62546fa01ccdSSowmini Varadhan 	}
62556fa01ccdSSowmini Varadhan 	return clone;
62566fa01ccdSSowmini Varadhan }
62576fa01ccdSSowmini Varadhan EXPORT_SYMBOL(pskb_extract);
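
/* Illustrative sketch, not part of the original file: pulling a sub-range of
 * a received skb into its own skb, e.g. to hand one record out of a larger
 * buffer to another layer. example_extract_record() and the offsets are
 * hypothetical.
 */
static struct sk_buff *example_extract_record(struct sk_buff *skb, int off,
					      int len)
{
	struct sk_buff *rec;

	rec = pskb_extract(skb, off, len, GFP_ATOMIC);
	if (!rec)
		return NULL;	/* allocation failure; original skb untouched */

	return rec;
}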
6258c8c8b127SEric Dumazet 
6259c8c8b127SEric Dumazet /**
6260c8c8b127SEric Dumazet  * skb_condense - try to get rid of fragments/frag_list if possible
6261c8c8b127SEric Dumazet  * @skb: buffer
6262c8c8b127SEric Dumazet  *
6263c8c8b127SEric Dumazet  * Can be used to save memory before skb is added to a busy queue.
6264c8c8b127SEric Dumazet  * If packet has bytes in frags and enough tail room in skb->head,
6265c8c8b127SEric Dumazet  * pull all of them, so that we can free the frags right now and adjust
6266c8c8b127SEric Dumazet  * truesize.
6267c8c8b127SEric Dumazet  * Notes:
6268c8c8b127SEric Dumazet  *	We do not reallocate skb->head, thus this cannot fail.
6269c8c8b127SEric Dumazet  *	Caller must re-evaluate skb->truesize if needed.
6270c8c8b127SEric Dumazet  */
6271c8c8b127SEric Dumazet void skb_condense(struct sk_buff *skb)
6272c8c8b127SEric Dumazet {
62733174fed9SEric Dumazet 	if (skb->data_len) {
62743174fed9SEric Dumazet 		if (skb->data_len > skb->end - skb->tail ||
6275c8c8b127SEric Dumazet 		    skb_cloned(skb))
6276c8c8b127SEric Dumazet 			return;
6277c8c8b127SEric Dumazet 
6278c8c8b127SEric Dumazet 		/* Nice, we can free page frag(s) right now */
6279c8c8b127SEric Dumazet 		__pskb_pull_tail(skb, skb->data_len);
62803174fed9SEric Dumazet 	}
62813174fed9SEric Dumazet 	/* At this point, skb->truesize might be overestimated,
62823174fed9SEric Dumazet 	 * because skb had a fragment, and fragments do not tell
62833174fed9SEric Dumazet 	 * their truesize.
62843174fed9SEric Dumazet 	 * When we pulled its content into skb->head, fragment
62853174fed9SEric Dumazet 	 * was freed, but __pskb_pull_tail() could not possibly
62863174fed9SEric Dumazet 	 * adjust skb->truesize, not knowing the frag truesize.
6287c8c8b127SEric Dumazet 	 */
6288c8c8b127SEric Dumazet 	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
6289c8c8b127SEric Dumazet }
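
/* Illustrative sketch, not part of the original file: condensing a small skb
 * before queueing it and then re-reading skb->truesize, as the kernel-doc
 * above requires. example_fits_backlog() and the limit are hypothetical.
 */
static bool example_fits_backlog(struct sk_buff *skb, unsigned int limit)
{
	skb_condense(skb);

	/* skb_condense() may have lowered skb->truesize; use the new value */
	return skb->truesize <= limit;
}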
6290df5042f4SFlorian Westphal 
6291df5042f4SFlorian Westphal #ifdef CONFIG_SKB_EXTENSIONS
6292df5042f4SFlorian Westphal static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
6293df5042f4SFlorian Westphal {
6294df5042f4SFlorian Westphal 	return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
6295df5042f4SFlorian Westphal }
6296df5042f4SFlorian Westphal 
62978b69a803SPaolo Abeni /**
62988b69a803SPaolo Abeni  * __skb_ext_alloc - allocate a new skb extensions storage
62998b69a803SPaolo Abeni  *
63004930f483SFlorian Westphal  * @flags: See kmalloc().
63014930f483SFlorian Westphal  *
63028b69a803SPaolo Abeni  * Returns the newly allocated pointer. The pointer can later be attached to an
63038b69a803SPaolo Abeni  * skb via __skb_ext_set().
63048b69a803SPaolo Abeni  * Note: caller must handle the skb_ext as an opaque data.
63058b69a803SPaolo Abeni  */
63064930f483SFlorian Westphal struct skb_ext *__skb_ext_alloc(gfp_t flags)
6307df5042f4SFlorian Westphal {
63084930f483SFlorian Westphal 	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);
6309df5042f4SFlorian Westphal 
6310df5042f4SFlorian Westphal 	if (new) {
6311df5042f4SFlorian Westphal 		memset(new->offset, 0, sizeof(new->offset));
6312df5042f4SFlorian Westphal 		refcount_set(&new->refcnt, 1);
6313df5042f4SFlorian Westphal 	}
6314df5042f4SFlorian Westphal 
6315df5042f4SFlorian Westphal 	return new;
6316df5042f4SFlorian Westphal }
6317df5042f4SFlorian Westphal 
63184165079bSFlorian Westphal static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
63194165079bSFlorian Westphal 					 unsigned int old_active)
6320df5042f4SFlorian Westphal {
6321df5042f4SFlorian Westphal 	struct skb_ext *new;
6322df5042f4SFlorian Westphal 
6323df5042f4SFlorian Westphal 	if (refcount_read(&old->refcnt) == 1)
6324df5042f4SFlorian Westphal 		return old;
6325df5042f4SFlorian Westphal 
6326df5042f4SFlorian Westphal 	new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
6327df5042f4SFlorian Westphal 	if (!new)
6328df5042f4SFlorian Westphal 		return NULL;
6329df5042f4SFlorian Westphal 
6330df5042f4SFlorian Westphal 	memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
6331df5042f4SFlorian Westphal 	refcount_set(&new->refcnt, 1);
6332df5042f4SFlorian Westphal 
63334165079bSFlorian Westphal #ifdef CONFIG_XFRM
63344165079bSFlorian Westphal 	if (old_active & (1 << SKB_EXT_SEC_PATH)) {
63354165079bSFlorian Westphal 		struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
63364165079bSFlorian Westphal 		unsigned int i;
63374165079bSFlorian Westphal 
63384165079bSFlorian Westphal 		for (i = 0; i < sp->len; i++)
63394165079bSFlorian Westphal 			xfrm_state_hold(sp->xvec[i]);
63404165079bSFlorian Westphal 	}
63414165079bSFlorian Westphal #endif
6342df5042f4SFlorian Westphal 	__skb_ext_put(old);
6343df5042f4SFlorian Westphal 	return new;
6344df5042f4SFlorian Westphal }
6345df5042f4SFlorian Westphal 
6346df5042f4SFlorian Westphal /**
63478b69a803SPaolo Abeni  * __skb_ext_set - attach the specified extension storage to this skb
63488b69a803SPaolo Abeni  * @skb: buffer
63498b69a803SPaolo Abeni  * @id: extension id
63508b69a803SPaolo Abeni  * @ext: extension storage previously allocated via __skb_ext_alloc()
63518b69a803SPaolo Abeni  *
63528b69a803SPaolo Abeni  * Existing extensions, if any, are cleared.
63538b69a803SPaolo Abeni  *
63548b69a803SPaolo Abeni  * Returns the pointer to the extension.
63558b69a803SPaolo Abeni  */
63568b69a803SPaolo Abeni void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
63578b69a803SPaolo Abeni 		    struct skb_ext *ext)
63588b69a803SPaolo Abeni {
63598b69a803SPaolo Abeni 	unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext);
63608b69a803SPaolo Abeni 
63618b69a803SPaolo Abeni 	skb_ext_put(skb);
63628b69a803SPaolo Abeni 	newlen = newoff + skb_ext_type_len[id];
63638b69a803SPaolo Abeni 	ext->chunks = newlen;
63648b69a803SPaolo Abeni 	ext->offset[id] = newoff;
63658b69a803SPaolo Abeni 	skb->extensions = ext;
63668b69a803SPaolo Abeni 	skb->active_extensions = 1 << id;
63678b69a803SPaolo Abeni 	return skb_ext_get_ptr(ext, id);
63688b69a803SPaolo Abeni }
63698b69a803SPaolo Abeni 
63708b69a803SPaolo Abeni /**
6371df5042f4SFlorian Westphal  * skb_ext_add - allocate space for given extension, COW if needed
6372df5042f4SFlorian Westphal  * @skb: buffer
6373df5042f4SFlorian Westphal  * @id: extension to allocate space for
6374df5042f4SFlorian Westphal  *
6375df5042f4SFlorian Westphal  * Allocates enough space for the given extension.
6376df5042f4SFlorian Westphal  * If the extension is already present, a pointer to that extension
6377df5042f4SFlorian Westphal  * is returned.
6378df5042f4SFlorian Westphal  *
6379df5042f4SFlorian Westphal  * If the skb was cloned, COW applies and the returned memory can be
6380df5042f4SFlorian Westphal  * modified without changing the extension space of cloned buffers.
6381df5042f4SFlorian Westphal  *
6382df5042f4SFlorian Westphal  * Returns pointer to the extension or NULL on allocation failure.
6383df5042f4SFlorian Westphal  */
6384df5042f4SFlorian Westphal void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
6385df5042f4SFlorian Westphal {
6386df5042f4SFlorian Westphal 	struct skb_ext *new, *old = NULL;
6387df5042f4SFlorian Westphal 	unsigned int newlen, newoff;
6388df5042f4SFlorian Westphal 
6389df5042f4SFlorian Westphal 	if (skb->active_extensions) {
6390df5042f4SFlorian Westphal 		old = skb->extensions;
6391df5042f4SFlorian Westphal 
63924165079bSFlorian Westphal 		new = skb_ext_maybe_cow(old, skb->active_extensions);
6393df5042f4SFlorian Westphal 		if (!new)
6394df5042f4SFlorian Westphal 			return NULL;
6395df5042f4SFlorian Westphal 
6396682ec859SPaolo Abeni 		if (__skb_ext_exist(new, id))
6397df5042f4SFlorian Westphal 			goto set_active;
6398df5042f4SFlorian Westphal 
6399e94e50bdSPaolo Abeni 		newoff = new->chunks;
6400df5042f4SFlorian Westphal 	} else {
6401df5042f4SFlorian Westphal 		newoff = SKB_EXT_CHUNKSIZEOF(*new);
6402df5042f4SFlorian Westphal 
64034930f483SFlorian Westphal 		new = __skb_ext_alloc(GFP_ATOMIC);
6404df5042f4SFlorian Westphal 		if (!new)
6405df5042f4SFlorian Westphal 			return NULL;
6406df5042f4SFlorian Westphal 	}
6407df5042f4SFlorian Westphal 
6408df5042f4SFlorian Westphal 	newlen = newoff + skb_ext_type_len[id];
6409df5042f4SFlorian Westphal 	new->chunks = newlen;
6410df5042f4SFlorian Westphal 	new->offset[id] = newoff;
6411df5042f4SFlorian Westphal set_active:
6412b0999f38SPaolo Abeni 	skb->slow_gro = 1;
6413682ec859SPaolo Abeni 	skb->extensions = new;
6414df5042f4SFlorian Westphal 	skb->active_extensions |= 1 << id;
6415df5042f4SFlorian Westphal 	return skb_ext_get_ptr(new, id);
6416df5042f4SFlorian Westphal }
6417df5042f4SFlorian Westphal EXPORT_SYMBOL(skb_ext_add);
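
/* Illustrative sketch, not part of the original file, only meaningful with
 * CONFIG_XFRM: attaching a secpath extension via skb_ext_add(). A freshly
 * allocated extension area is not zeroed, so the caller initializes it;
 * secpath_set() in the xfrm code does the full version of this.
 * example_attach_secpath() is a hypothetical helper name.
 */
#ifdef CONFIG_XFRM
static struct sec_path *example_attach_secpath(struct sk_buff *skb)
{
	struct sec_path *sp, *old = skb_ext_find(skb, SKB_EXT_SEC_PATH);

	sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
	if (!sp)
		return NULL;	/* allocation failure, skb unchanged */

	if (!old)		/* new extension area: minimal init only */
		sp->len = 0;

	return sp;
}
#endif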
6418df5042f4SFlorian Westphal 
64194165079bSFlorian Westphal #ifdef CONFIG_XFRM
64204165079bSFlorian Westphal static void skb_ext_put_sp(struct sec_path *sp)
64214165079bSFlorian Westphal {
64224165079bSFlorian Westphal 	unsigned int i;
64234165079bSFlorian Westphal 
64244165079bSFlorian Westphal 	for (i = 0; i < sp->len; i++)
64254165079bSFlorian Westphal 		xfrm_state_put(sp->xvec[i]);
64264165079bSFlorian Westphal }
64274165079bSFlorian Westphal #endif
64284165079bSFlorian Westphal 
642978476d31SJeremy Kerr #ifdef CONFIG_MCTP_FLOWS
643078476d31SJeremy Kerr static void skb_ext_put_mctp(struct mctp_flow *flow)
643178476d31SJeremy Kerr {
643278476d31SJeremy Kerr 	if (flow->key)
643378476d31SJeremy Kerr 		mctp_key_unref(flow->key);
643478476d31SJeremy Kerr }
643578476d31SJeremy Kerr #endif
643678476d31SJeremy Kerr 
6437df5042f4SFlorian Westphal void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
6438df5042f4SFlorian Westphal {
6439df5042f4SFlorian Westphal 	struct skb_ext *ext = skb->extensions;
6440df5042f4SFlorian Westphal 
6441df5042f4SFlorian Westphal 	skb->active_extensions &= ~(1 << id);
6442df5042f4SFlorian Westphal 	if (skb->active_extensions == 0) {
6443df5042f4SFlorian Westphal 		skb->extensions = NULL;
6444df5042f4SFlorian Westphal 		__skb_ext_put(ext);
64454165079bSFlorian Westphal #ifdef CONFIG_XFRM
64464165079bSFlorian Westphal 	} else if (id == SKB_EXT_SEC_PATH &&
64474165079bSFlorian Westphal 		   refcount_read(&ext->refcnt) == 1) {
64484165079bSFlorian Westphal 		struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);
64494165079bSFlorian Westphal 
64504165079bSFlorian Westphal 		skb_ext_put_sp(sp);
64514165079bSFlorian Westphal 		sp->len = 0;
64524165079bSFlorian Westphal #endif
6453df5042f4SFlorian Westphal 	}
6454df5042f4SFlorian Westphal }
6455df5042f4SFlorian Westphal EXPORT_SYMBOL(__skb_ext_del);
6456df5042f4SFlorian Westphal 
6457df5042f4SFlorian Westphal void __skb_ext_put(struct skb_ext *ext)
6458df5042f4SFlorian Westphal {
6459df5042f4SFlorian Westphal 	/* If this is the last clone, nothing can increment
6460df5042f4SFlorian Westphal 	 * it after the check passes.  Avoids one atomic op.
6461df5042f4SFlorian Westphal 	 */
6462df5042f4SFlorian Westphal 	if (refcount_read(&ext->refcnt) == 1)
6463df5042f4SFlorian Westphal 		goto free_now;
6464df5042f4SFlorian Westphal 
6465df5042f4SFlorian Westphal 	if (!refcount_dec_and_test(&ext->refcnt))
6466df5042f4SFlorian Westphal 		return;
6467df5042f4SFlorian Westphal free_now:
64684165079bSFlorian Westphal #ifdef CONFIG_XFRM
64694165079bSFlorian Westphal 	if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
64704165079bSFlorian Westphal 		skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
64714165079bSFlorian Westphal #endif
647278476d31SJeremy Kerr #ifdef CONFIG_MCTP_FLOWS
647378476d31SJeremy Kerr 	if (__skb_ext_exist(ext, SKB_EXT_MCTP))
647478476d31SJeremy Kerr 		skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP));
647578476d31SJeremy Kerr #endif
64764165079bSFlorian Westphal 
6477df5042f4SFlorian Westphal 	kmem_cache_free(skbuff_ext_cache, ext);
6478df5042f4SFlorian Westphal }
6479df5042f4SFlorian Westphal EXPORT_SYMBOL(__skb_ext_put);
6480df5042f4SFlorian Westphal #endif /* CONFIG_SKB_EXTENSIONS */
6481