xref: /openbmc/linux/net/core/skbuff.c (revision af958a38)
1 /*
2  *	Routines having to do with the 'struct sk_buff' memory handlers.
3  *
4  *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
5  *			Florian La Roche <rzsfl@rz.uni-sb.de>
6  *
7  *	Fixes:
8  *		Alan Cox	:	Fixed the worst of the load
9  *					balancer bugs.
10  *		Dave Platt	:	Interrupt stacking fix.
11  *	Richard Kooijman	:	Timestamp fixes.
12  *		Alan Cox	:	Changed buffer format.
13  *		Alan Cox	:	destructor hook for AF_UNIX etc.
14  *		Linus Torvalds	:	Better skb_clone.
15  *		Alan Cox	:	Added skb_copy.
16  *		Alan Cox	:	Added all the changed routines Linus
17  *					only put in the headers
18  *		Ray VanTassle	:	Fixed --skb->lock in free
19  *		Alan Cox	:	skb_copy copy arp field
20  *		Andi Kleen	:	slabified it.
21  *		Robert Olsson	:	Removed skb_head_pool
22  *
23  *	NOTE:
24  *		The __skb_ routines should be called with interrupts
25  *	disabled, or you better be *real* sure that the operation is atomic
26  *	with respect to whatever list is being frobbed (e.g. via lock_sock()
27  *	or via disabling bottom half handlers, etc).
28  *
29  *	This program is free software; you can redistribute it and/or
30  *	modify it under the terms of the GNU General Public License
31  *	as published by the Free Software Foundation; either version
32  *	2 of the License, or (at your option) any later version.
33  */
34 
35 /*
36  *	The functions in this file will not compile correctly with gcc 2.4.x
37  */
38 
39 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
40 
41 #include <linux/module.h>
42 #include <linux/types.h>
43 #include <linux/kernel.h>
44 #include <linux/kmemcheck.h>
45 #include <linux/mm.h>
46 #include <linux/interrupt.h>
47 #include <linux/in.h>
48 #include <linux/inet.h>
49 #include <linux/slab.h>
50 #include <linux/tcp.h>
51 #include <linux/udp.h>
52 #include <linux/netdevice.h>
53 #ifdef CONFIG_NET_CLS_ACT
54 #include <net/pkt_sched.h>
55 #endif
56 #include <linux/string.h>
57 #include <linux/skbuff.h>
58 #include <linux/splice.h>
59 #include <linux/cache.h>
60 #include <linux/rtnetlink.h>
61 #include <linux/init.h>
62 #include <linux/scatterlist.h>
63 #include <linux/errqueue.h>
64 #include <linux/prefetch.h>
65 #include <linux/if_vlan.h>
66 
67 #include <net/protocol.h>
68 #include <net/dst.h>
69 #include <net/sock.h>
70 #include <net/checksum.h>
71 #include <net/ip6_checksum.h>
72 #include <net/xfrm.h>
73 
74 #include <asm/uaccess.h>
75 #include <trace/events/skb.h>
76 #include <linux/highmem.h>
77 
78 struct kmem_cache *skbuff_head_cache __read_mostly;
79 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
80 
81 /**
82  *	skb_panic - private function for out-of-line support
83  *	@skb:	buffer
84  *	@sz:	size
85  *	@addr:	address
86  *	@msg:	skb_over_panic or skb_under_panic
87  *
88  *	Out-of-line support for skb_put() and skb_push().
89  *	Called via the wrapper skb_over_panic() or skb_under_panic().
90  *	Keep out of line to prevent kernel bloat.
91  *	__builtin_return_address is not used because it is not always reliable.
92  */
93 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
94 		      const char msg[])
95 {
96 	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
97 		 msg, addr, skb->len, sz, skb->head, skb->data,
98 		 (unsigned long)skb->tail, (unsigned long)skb->end,
99 		 skb->dev ? skb->dev->name : "<NULL>");
100 	BUG();
101 }
102 
103 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
104 {
105 	skb_panic(skb, sz, addr, __func__);
106 }
107 
108 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
109 {
110 	skb_panic(skb, sz, addr, __func__);
111 }
112 
113 /*
114  * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
115  * the caller if emergency pfmemalloc reserves are being used. If it is and
116  * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
117  * may be used. Otherwise, the packet data may be discarded until enough
118  * memory is free
119  */
120 #define kmalloc_reserve(size, gfp, node, pfmemalloc) \
121 	 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
122 
123 static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
124 			       unsigned long ip, bool *pfmemalloc)
125 {
126 	void *obj;
127 	bool ret_pfmemalloc = false;
128 
129 	/*
130 	 * Try a regular allocation, when that fails and we're not entitled
131 	 * to the reserves, fail.
132 	 */
133 	obj = kmalloc_node_track_caller(size,
134 					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
135 					node);
136 	if (obj || !(gfp_pfmemalloc_allowed(flags)))
137 		goto out;
138 
139 	/* Try again but now we are using pfmemalloc reserves */
140 	ret_pfmemalloc = true;
141 	obj = kmalloc_node_track_caller(size, flags, node);
142 
143 out:
144 	if (pfmemalloc)
145 		*pfmemalloc = ret_pfmemalloc;
146 
147 	return obj;
148 }
149 
150 /* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
151  *	'private' fields and also do memory statistics to find all the
152  *	[BEEP] leaks.
153  *
154  */
155 
156 struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
157 {
158 	struct sk_buff *skb;
159 
160 	/* Get the HEAD */
161 	skb = kmem_cache_alloc_node(skbuff_head_cache,
162 				    gfp_mask & ~__GFP_DMA, node);
163 	if (!skb)
164 		goto out;
165 
166 	/*
167 	 * Only clear those fields we need to clear, not those that we will
168 	 * actually initialise below. Hence, don't put any more fields after
169 	 * the tail pointer in struct sk_buff!
170 	 */
171 	memset(skb, 0, offsetof(struct sk_buff, tail));
172 	skb->head = NULL;
173 	skb->truesize = sizeof(struct sk_buff);
174 	atomic_set(&skb->users, 1);
175 
176 	skb->mac_header = (typeof(skb->mac_header))~0U;
177 out:
178 	return skb;
179 }
180 
181 /**
182  *	__alloc_skb	-	allocate a network buffer
183  *	@size: size to allocate
184  *	@gfp_mask: allocation mask
185  *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
186  *		instead of head cache and allocate a cloned (child) skb.
187  *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
188  *		allocations in case the data is required for writeback
189  *	@node: numa node to allocate memory on
190  *
191  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
192  *	tail room of at least size bytes. The object has a reference count
193  *	of one. The return is the buffer. On a failure the return is %NULL.
194  *
195  *	Buffers may only be allocated from interrupts using a @gfp_mask of
196  *	%GFP_ATOMIC.
197  */
198 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
199 			    int flags, int node)
200 {
201 	struct kmem_cache *cache;
202 	struct skb_shared_info *shinfo;
203 	struct sk_buff *skb;
204 	u8 *data;
205 	bool pfmemalloc;
206 
207 	cache = (flags & SKB_ALLOC_FCLONE)
208 		? skbuff_fclone_cache : skbuff_head_cache;
209 
210 	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
211 		gfp_mask |= __GFP_MEMALLOC;
212 
213 	/* Get the HEAD */
214 	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
215 	if (!skb)
216 		goto out;
217 	prefetchw(skb);
218 
219 	/* We do our best to align skb_shared_info on a separate cache
220 	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
221 	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
222 	 * Both skb->head and skb_shared_info are cache line aligned.
223 	 */
224 	size = SKB_DATA_ALIGN(size);
225 	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
226 	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
227 	if (!data)
228 		goto nodata;
229 	/* kmalloc(size) might give us more room than requested.
230 	 * Put skb_shared_info exactly at the end of allocated zone,
231 	 * to allow max possible filling before reallocation.
232 	 */
233 	size = SKB_WITH_OVERHEAD(ksize(data));
234 	prefetchw(data + size);
235 
236 	/*
237 	 * Only clear those fields we need to clear, not those that we will
238 	 * actually initialise below. Hence, don't put any more fields after
239 	 * the tail pointer in struct sk_buff!
240 	 */
241 	memset(skb, 0, offsetof(struct sk_buff, tail));
242 	/* Account for allocated memory : skb + skb->head */
243 	skb->truesize = SKB_TRUESIZE(size);
244 	skb->pfmemalloc = pfmemalloc;
245 	atomic_set(&skb->users, 1);
246 	skb->head = data;
247 	skb->data = data;
248 	skb_reset_tail_pointer(skb);
249 	skb->end = skb->tail + size;
250 	skb->mac_header = (typeof(skb->mac_header))~0U;
251 	skb->transport_header = (typeof(skb->transport_header))~0U;
252 
253 	/* make sure we initialize shinfo sequentially */
254 	shinfo = skb_shinfo(skb);
255 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
256 	atomic_set(&shinfo->dataref, 1);
257 	kmemcheck_annotate_variable(shinfo->destructor_arg);
258 
259 	if (flags & SKB_ALLOC_FCLONE) {
260 		struct sk_buff *child = skb + 1;
261 		atomic_t *fclone_ref = (atomic_t *) (child + 1);
262 
263 		kmemcheck_annotate_bitfield(child, flags1);
264 		kmemcheck_annotate_bitfield(child, flags2);
265 		skb->fclone = SKB_FCLONE_ORIG;
266 		atomic_set(fclone_ref, 1);
267 
268 		child->fclone = SKB_FCLONE_UNAVAILABLE;
269 		child->pfmemalloc = pfmemalloc;
270 	}
271 out:
272 	return skb;
273 nodata:
274 	kmem_cache_free(cache, skb);
275 	skb = NULL;
276 	goto out;
277 }
278 EXPORT_SYMBOL(__alloc_skb);
279 
280 /**
281  * build_skb - build a network buffer
282  * @data: data buffer provided by caller
283  * @frag_size: size of fragment, or 0 if head was kmalloced
284  *
285  * Allocate a new &sk_buff. Caller provides space holding head and
286  * skb_shared_info. @data must have been allocated by kmalloc() only if
287  * @frag_size is 0, otherwise data should come from the page allocator.
288  * The return is the new skb buffer.
289  * On a failure the return is %NULL, and @data is not freed.
290  * Notes :
291  *  Before IO, driver allocates only data buffer where NIC put incoming frame
292  *  Driver should add room at head (NET_SKB_PAD) and
293  *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
294  *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
295  *  before giving packet to stack.
296  *  RX rings only contains data buffers, not full skbs.
297  */
298 struct sk_buff *build_skb(void *data, unsigned int frag_size)
299 {
300 	struct skb_shared_info *shinfo;
301 	struct sk_buff *skb;
302 	unsigned int size = frag_size ? : ksize(data);
303 
304 	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
305 	if (!skb)
306 		return NULL;
307 
308 	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
309 
310 	memset(skb, 0, offsetof(struct sk_buff, tail));
311 	skb->truesize = SKB_TRUESIZE(size);
312 	skb->head_frag = frag_size != 0;
313 	atomic_set(&skb->users, 1);
314 	skb->head = data;
315 	skb->data = data;
316 	skb_reset_tail_pointer(skb);
317 	skb->end = skb->tail + size;
318 	skb->mac_header = (typeof(skb->mac_header))~0U;
319 	skb->transport_header = (typeof(skb->transport_header))~0U;
320 
321 	/* make sure we initialize shinfo sequentially */
322 	shinfo = skb_shinfo(skb);
323 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
324 	atomic_set(&shinfo->dataref, 1);
325 	kmemcheck_annotate_variable(shinfo->destructor_arg);
326 
327 	return skb;
328 }
329 EXPORT_SYMBOL(build_skb);
330 
331 struct netdev_alloc_cache {
332 	struct page_frag	frag;
333 	/* we maintain a pagecount bias, so that we dont dirty cache line
334 	 * containing page->_count every time we allocate a fragment.
335 	 */
336 	unsigned int		pagecnt_bias;
337 };
338 static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
339 
340 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
341 {
342 	struct netdev_alloc_cache *nc;
343 	void *data = NULL;
344 	int order;
345 	unsigned long flags;
346 
347 	local_irq_save(flags);
348 	nc = &__get_cpu_var(netdev_alloc_cache);
349 	if (unlikely(!nc->frag.page)) {
350 refill:
351 		for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
352 			gfp_t gfp = gfp_mask;
353 
354 			if (order)
355 				gfp |= __GFP_COMP | __GFP_NOWARN;
356 			nc->frag.page = alloc_pages(gfp, order);
357 			if (likely(nc->frag.page))
358 				break;
359 			if (--order < 0)
360 				goto end;
361 		}
362 		nc->frag.size = PAGE_SIZE << order;
363 recycle:
364 		atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
365 		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
366 		nc->frag.offset = 0;
367 	}
368 
369 	if (nc->frag.offset + fragsz > nc->frag.size) {
370 		/* avoid unnecessary locked operations if possible */
371 		if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
372 		    atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
373 			goto recycle;
374 		goto refill;
375 	}
376 
377 	data = page_address(nc->frag.page) + nc->frag.offset;
378 	nc->frag.offset += fragsz;
379 	nc->pagecnt_bias--;
380 end:
381 	local_irq_restore(flags);
382 	return data;
383 }
384 
385 /**
386  * netdev_alloc_frag - allocate a page fragment
387  * @fragsz: fragment size
388  *
389  * Allocates a frag from a page for receive buffer.
390  * Uses GFP_ATOMIC allocations.
391  */
392 void *netdev_alloc_frag(unsigned int fragsz)
393 {
394 	return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
395 }
396 EXPORT_SYMBOL(netdev_alloc_frag);
397 
398 /**
399  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
400  *	@dev: network device to receive on
401  *	@length: length to allocate
402  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
403  *
404  *	Allocate a new &sk_buff and assign it a usage count of one. The
405  *	buffer has unspecified headroom built in. Users should allocate
406  *	the headroom they think they need without accounting for the
407  *	built in space. The built in space is used for optimisations.
408  *
409  *	%NULL is returned if there is no free memory.
410  */
411 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
412 				   unsigned int length, gfp_t gfp_mask)
413 {
414 	struct sk_buff *skb = NULL;
415 	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
416 			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
417 
418 	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
419 		void *data;
420 
421 		if (sk_memalloc_socks())
422 			gfp_mask |= __GFP_MEMALLOC;
423 
424 		data = __netdev_alloc_frag(fragsz, gfp_mask);
425 
426 		if (likely(data)) {
427 			skb = build_skb(data, fragsz);
428 			if (unlikely(!skb))
429 				put_page(virt_to_head_page(data));
430 		}
431 	} else {
432 		skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
433 				  SKB_ALLOC_RX, NUMA_NO_NODE);
434 	}
435 	if (likely(skb)) {
436 		skb_reserve(skb, NET_SKB_PAD);
437 		skb->dev = dev;
438 	}
439 	return skb;
440 }
441 EXPORT_SYMBOL(__netdev_alloc_skb);
442 
443 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
444 		     int size, unsigned int truesize)
445 {
446 	skb_fill_page_desc(skb, i, page, off, size);
447 	skb->len += size;
448 	skb->data_len += size;
449 	skb->truesize += truesize;
450 }
451 EXPORT_SYMBOL(skb_add_rx_frag);
452 
453 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
454 			  unsigned int truesize)
455 {
456 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
457 
458 	skb_frag_size_add(frag, size);
459 	skb->len += size;
460 	skb->data_len += size;
461 	skb->truesize += truesize;
462 }
463 EXPORT_SYMBOL(skb_coalesce_rx_frag);
464 
465 static void skb_drop_list(struct sk_buff **listp)
466 {
467 	kfree_skb_list(*listp);
468 	*listp = NULL;
469 }
470 
471 static inline void skb_drop_fraglist(struct sk_buff *skb)
472 {
473 	skb_drop_list(&skb_shinfo(skb)->frag_list);
474 }
475 
476 static void skb_clone_fraglist(struct sk_buff *skb)
477 {
478 	struct sk_buff *list;
479 
480 	skb_walk_frags(skb, list)
481 		skb_get(list);
482 }
483 
484 static void skb_free_head(struct sk_buff *skb)
485 {
486 	if (skb->head_frag)
487 		put_page(virt_to_head_page(skb->head));
488 	else
489 		kfree(skb->head);
490 }
491 
492 static void skb_release_data(struct sk_buff *skb)
493 {
494 	if (!skb->cloned ||
495 	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
496 			       &skb_shinfo(skb)->dataref)) {
497 		if (skb_shinfo(skb)->nr_frags) {
498 			int i;
499 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
500 				skb_frag_unref(skb, i);
501 		}
502 
503 		/*
504 		 * If skb buf is from userspace, we need to notify the caller
505 		 * the lower device DMA has done;
506 		 */
507 		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
508 			struct ubuf_info *uarg;
509 
510 			uarg = skb_shinfo(skb)->destructor_arg;
511 			if (uarg->callback)
512 				uarg->callback(uarg, true);
513 		}
514 
515 		if (skb_has_frag_list(skb))
516 			skb_drop_fraglist(skb);
517 
518 		skb_free_head(skb);
519 	}
520 }
521 
522 /*
523  *	Free an skbuff by memory without cleaning the state.
524  */
525 static void kfree_skbmem(struct sk_buff *skb)
526 {
527 	struct sk_buff *other;
528 	atomic_t *fclone_ref;
529 
530 	switch (skb->fclone) {
531 	case SKB_FCLONE_UNAVAILABLE:
532 		kmem_cache_free(skbuff_head_cache, skb);
533 		break;
534 
535 	case SKB_FCLONE_ORIG:
536 		fclone_ref = (atomic_t *) (skb + 2);
537 		if (atomic_dec_and_test(fclone_ref))
538 			kmem_cache_free(skbuff_fclone_cache, skb);
539 		break;
540 
541 	case SKB_FCLONE_CLONE:
542 		fclone_ref = (atomic_t *) (skb + 1);
543 		other = skb - 1;
544 
545 		/* The clone portion is available for
546 		 * fast-cloning again.
547 		 */
548 		skb->fclone = SKB_FCLONE_UNAVAILABLE;
549 
550 		if (atomic_dec_and_test(fclone_ref))
551 			kmem_cache_free(skbuff_fclone_cache, other);
552 		break;
553 	}
554 }
555 
556 static void skb_release_head_state(struct sk_buff *skb)
557 {
558 	skb_dst_drop(skb);
559 #ifdef CONFIG_XFRM
560 	secpath_put(skb->sp);
561 #endif
562 	if (skb->destructor) {
563 		WARN_ON(in_irq());
564 		skb->destructor(skb);
565 	}
566 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
567 	nf_conntrack_put(skb->nfct);
568 #endif
569 #ifdef CONFIG_BRIDGE_NETFILTER
570 	nf_bridge_put(skb->nf_bridge);
571 #endif
572 /* XXX: IS this still necessary? - JHS */
573 #ifdef CONFIG_NET_SCHED
574 	skb->tc_index = 0;
575 #ifdef CONFIG_NET_CLS_ACT
576 	skb->tc_verd = 0;
577 #endif
578 #endif
579 }
580 
581 /* Free everything but the sk_buff shell. */
582 static void skb_release_all(struct sk_buff *skb)
583 {
584 	skb_release_head_state(skb);
585 	if (likely(skb->head))
586 		skb_release_data(skb);
587 }
588 
589 /**
590  *	__kfree_skb - private function
591  *	@skb: buffer
592  *
593  *	Free an sk_buff. Release anything attached to the buffer.
594  *	Clean the state. This is an internal helper function. Users should
595  *	always call kfree_skb
596  */
597 
598 void __kfree_skb(struct sk_buff *skb)
599 {
600 	skb_release_all(skb);
601 	kfree_skbmem(skb);
602 }
603 EXPORT_SYMBOL(__kfree_skb);
604 
605 /**
606  *	kfree_skb - free an sk_buff
607  *	@skb: buffer to free
608  *
609  *	Drop a reference to the buffer and free it if the usage count has
610  *	hit zero.
611  */
612 void kfree_skb(struct sk_buff *skb)
613 {
614 	if (unlikely(!skb))
615 		return;
616 	if (likely(atomic_read(&skb->users) == 1))
617 		smp_rmb();
618 	else if (likely(!atomic_dec_and_test(&skb->users)))
619 		return;
620 	trace_kfree_skb(skb, __builtin_return_address(0));
621 	__kfree_skb(skb);
622 }
623 EXPORT_SYMBOL(kfree_skb);
624 
625 void kfree_skb_list(struct sk_buff *segs)
626 {
627 	while (segs) {
628 		struct sk_buff *next = segs->next;
629 
630 		kfree_skb(segs);
631 		segs = next;
632 	}
633 }
634 EXPORT_SYMBOL(kfree_skb_list);
635 
636 /**
637  *	skb_tx_error - report an sk_buff xmit error
638  *	@skb: buffer that triggered an error
639  *
640  *	Report xmit error if a device callback is tracking this skb.
641  *	skb must be freed afterwards.
642  */
643 void skb_tx_error(struct sk_buff *skb)
644 {
645 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
646 		struct ubuf_info *uarg;
647 
648 		uarg = skb_shinfo(skb)->destructor_arg;
649 		if (uarg->callback)
650 			uarg->callback(uarg, false);
651 		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
652 	}
653 }
654 EXPORT_SYMBOL(skb_tx_error);
655 
656 /**
657  *	consume_skb - free an skbuff
658  *	@skb: buffer to free
659  *
660  *	Drop a ref to the buffer and free it if the usage count has hit zero
661  *	Functions identically to kfree_skb, but kfree_skb assumes that the frame
662  *	is being dropped after a failure and notes that
663  */
664 void consume_skb(struct sk_buff *skb)
665 {
666 	if (unlikely(!skb))
667 		return;
668 	if (likely(atomic_read(&skb->users) == 1))
669 		smp_rmb();
670 	else if (likely(!atomic_dec_and_test(&skb->users)))
671 		return;
672 	trace_consume_skb(skb);
673 	__kfree_skb(skb);
674 }
675 EXPORT_SYMBOL(consume_skb);
676 
677 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
678 {
679 	new->tstamp		= old->tstamp;
680 	new->dev		= old->dev;
681 	new->transport_header	= old->transport_header;
682 	new->network_header	= old->network_header;
683 	new->mac_header		= old->mac_header;
684 	new->inner_protocol	= old->inner_protocol;
685 	new->inner_transport_header = old->inner_transport_header;
686 	new->inner_network_header = old->inner_network_header;
687 	new->inner_mac_header = old->inner_mac_header;
688 	skb_dst_copy(new, old);
689 	skb_copy_hash(new, old);
690 	new->ooo_okay		= old->ooo_okay;
691 	new->no_fcs		= old->no_fcs;
692 	new->encapsulation	= old->encapsulation;
693 	new->encap_hdr_csum	= old->encap_hdr_csum;
694 	new->csum_valid		= old->csum_valid;
695 	new->csum_complete_sw	= old->csum_complete_sw;
696 #ifdef CONFIG_XFRM
697 	new->sp			= secpath_get(old->sp);
698 #endif
699 	memcpy(new->cb, old->cb, sizeof(old->cb));
700 	new->csum		= old->csum;
701 	new->ignore_df		= old->ignore_df;
702 	new->pkt_type		= old->pkt_type;
703 	new->ip_summed		= old->ip_summed;
704 	skb_copy_queue_mapping(new, old);
705 	new->priority		= old->priority;
706 #if IS_ENABLED(CONFIG_IP_VS)
707 	new->ipvs_property	= old->ipvs_property;
708 #endif
709 	new->pfmemalloc		= old->pfmemalloc;
710 	new->protocol		= old->protocol;
711 	new->mark		= old->mark;
712 	new->skb_iif		= old->skb_iif;
713 	__nf_copy(new, old);
714 #ifdef CONFIG_NET_SCHED
715 	new->tc_index		= old->tc_index;
716 #ifdef CONFIG_NET_CLS_ACT
717 	new->tc_verd		= old->tc_verd;
718 #endif
719 #endif
720 	new->vlan_proto		= old->vlan_proto;
721 	new->vlan_tci		= old->vlan_tci;
722 
723 	skb_copy_secmark(new, old);
724 
725 #ifdef CONFIG_NET_RX_BUSY_POLL
726 	new->napi_id	= old->napi_id;
727 #endif
728 }
729 
730 /*
731  * You should not add any new code to this function.  Add it to
732  * __copy_skb_header above instead.
733  */
734 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
735 {
736 #define C(x) n->x = skb->x
737 
738 	n->next = n->prev = NULL;
739 	n->sk = NULL;
740 	__copy_skb_header(n, skb);
741 
742 	C(len);
743 	C(data_len);
744 	C(mac_len);
745 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
746 	n->cloned = 1;
747 	n->nohdr = 0;
748 	n->destructor = NULL;
749 	C(tail);
750 	C(end);
751 	C(head);
752 	C(head_frag);
753 	C(data);
754 	C(truesize);
755 	atomic_set(&n->users, 1);
756 
757 	atomic_inc(&(skb_shinfo(skb)->dataref));
758 	skb->cloned = 1;
759 
760 	return n;
761 #undef C
762 }
763 
764 /**
765  *	skb_morph	-	morph one skb into another
766  *	@dst: the skb to receive the contents
767  *	@src: the skb to supply the contents
768  *
769  *	This is identical to skb_clone except that the target skb is
770  *	supplied by the user.
771  *
772  *	The target skb is returned upon exit.
773  */
774 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
775 {
776 	skb_release_all(dst);
777 	return __skb_clone(dst, src);
778 }
779 EXPORT_SYMBOL_GPL(skb_morph);
780 
781 /**
782  *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
783  *	@skb: the skb to modify
784  *	@gfp_mask: allocation priority
785  *
786  *	This must be called on SKBTX_DEV_ZEROCOPY skb.
787  *	It will copy all frags into kernel and drop the reference
788  *	to userspace pages.
789  *
790  *	If this function is called from an interrupt gfp_mask() must be
791  *	%GFP_ATOMIC.
792  *
793  *	Returns 0 on success or a negative error code on failure
794  *	to allocate kernel memory to copy to.
795  */
796 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
797 {
798 	int i;
799 	int num_frags = skb_shinfo(skb)->nr_frags;
800 	struct page *page, *head = NULL;
801 	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;
802 
803 	for (i = 0; i < num_frags; i++) {
804 		u8 *vaddr;
805 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
806 
807 		page = alloc_page(gfp_mask);
808 		if (!page) {
809 			while (head) {
810 				struct page *next = (struct page *)page_private(head);
811 				put_page(head);
812 				head = next;
813 			}
814 			return -ENOMEM;
815 		}
816 		vaddr = kmap_atomic(skb_frag_page(f));
817 		memcpy(page_address(page),
818 		       vaddr + f->page_offset, skb_frag_size(f));
819 		kunmap_atomic(vaddr);
820 		set_page_private(page, (unsigned long)head);
821 		head = page;
822 	}
823 
824 	/* skb frags release userspace buffers */
825 	for (i = 0; i < num_frags; i++)
826 		skb_frag_unref(skb, i);
827 
828 	uarg->callback(uarg, false);
829 
830 	/* skb frags point to kernel buffers */
831 	for (i = num_frags - 1; i >= 0; i--) {
832 		__skb_fill_page_desc(skb, i, head, 0,
833 				     skb_shinfo(skb)->frags[i].size);
834 		head = (struct page *)page_private(head);
835 	}
836 
837 	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
838 	return 0;
839 }
840 EXPORT_SYMBOL_GPL(skb_copy_ubufs);
841 
842 /**
843  *	skb_clone	-	duplicate an sk_buff
844  *	@skb: buffer to clone
845  *	@gfp_mask: allocation priority
846  *
847  *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
848  *	copies share the same packet data but not structure. The new
849  *	buffer has a reference count of 1. If the allocation fails the
850  *	function returns %NULL otherwise the new buffer is returned.
851  *
852  *	If this function is called from an interrupt gfp_mask() must be
853  *	%GFP_ATOMIC.
854  */
855 
856 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
857 {
858 	struct sk_buff *n;
859 
860 	if (skb_orphan_frags(skb, gfp_mask))
861 		return NULL;
862 
863 	n = skb + 1;
864 	if (skb->fclone == SKB_FCLONE_ORIG &&
865 	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
866 		atomic_t *fclone_ref = (atomic_t *) (n + 1);
867 		n->fclone = SKB_FCLONE_CLONE;
868 		atomic_inc(fclone_ref);
869 	} else {
870 		if (skb_pfmemalloc(skb))
871 			gfp_mask |= __GFP_MEMALLOC;
872 
873 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
874 		if (!n)
875 			return NULL;
876 
877 		kmemcheck_annotate_bitfield(n, flags1);
878 		kmemcheck_annotate_bitfield(n, flags2);
879 		n->fclone = SKB_FCLONE_UNAVAILABLE;
880 	}
881 
882 	return __skb_clone(n, skb);
883 }
884 EXPORT_SYMBOL(skb_clone);
885 
886 static void skb_headers_offset_update(struct sk_buff *skb, int off)
887 {
888 	/* Only adjust this if it actually is csum_start rather than csum */
889 	if (skb->ip_summed == CHECKSUM_PARTIAL)
890 		skb->csum_start += off;
891 	/* {transport,network,mac}_header and tail are relative to skb->head */
892 	skb->transport_header += off;
893 	skb->network_header   += off;
894 	if (skb_mac_header_was_set(skb))
895 		skb->mac_header += off;
896 	skb->inner_transport_header += off;
897 	skb->inner_network_header += off;
898 	skb->inner_mac_header += off;
899 }
900 
901 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
902 {
903 	__copy_skb_header(new, old);
904 
905 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
906 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
907 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
908 }
909 
910 static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
911 {
912 	if (skb_pfmemalloc(skb))
913 		return SKB_ALLOC_RX;
914 	return 0;
915 }
916 
917 /**
918  *	skb_copy	-	create private copy of an sk_buff
919  *	@skb: buffer to copy
920  *	@gfp_mask: allocation priority
921  *
922  *	Make a copy of both an &sk_buff and its data. This is used when the
923  *	caller wishes to modify the data and needs a private copy of the
924  *	data to alter. Returns %NULL on failure or the pointer to the buffer
925  *	on success. The returned buffer has a reference count of 1.
926  *
927  *	As by-product this function converts non-linear &sk_buff to linear
928  *	one, so that &sk_buff becomes completely private and caller is allowed
929  *	to modify all the data of returned buffer. This means that this
930  *	function is not recommended for use in circumstances when only
931  *	header is going to be modified. Use pskb_copy() instead.
932  */
933 
934 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
935 {
936 	int headerlen = skb_headroom(skb);
937 	unsigned int size = skb_end_offset(skb) + skb->data_len;
938 	struct sk_buff *n = __alloc_skb(size, gfp_mask,
939 					skb_alloc_rx_flag(skb), NUMA_NO_NODE);
940 
941 	if (!n)
942 		return NULL;
943 
944 	/* Set the data pointer */
945 	skb_reserve(n, headerlen);
946 	/* Set the tail pointer and length */
947 	skb_put(n, skb->len);
948 
949 	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
950 		BUG();
951 
952 	copy_skb_header(n, skb);
953 	return n;
954 }
955 EXPORT_SYMBOL(skb_copy);
956 
957 /**
958  *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
959  *	@skb: buffer to copy
960  *	@headroom: headroom of new skb
961  *	@gfp_mask: allocation priority
962  *	@fclone: if true allocate the copy of the skb from the fclone
963  *	cache instead of the head cache; it is recommended to set this
964  *	to true for the cases where the copy will likely be cloned
965  *
966  *	Make a copy of both an &sk_buff and part of its data, located
967  *	in header. Fragmented data remain shared. This is used when
968  *	the caller wishes to modify only header of &sk_buff and needs
969  *	private copy of the header to alter. Returns %NULL on failure
970  *	or the pointer to the buffer on success.
971  *	The returned buffer has a reference count of 1.
972  */
973 
974 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
975 				   gfp_t gfp_mask, bool fclone)
976 {
977 	unsigned int size = skb_headlen(skb) + headroom;
978 	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
979 	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
980 
981 	if (!n)
982 		goto out;
983 
984 	/* Set the data pointer */
985 	skb_reserve(n, headroom);
986 	/* Set the tail pointer and length */
987 	skb_put(n, skb_headlen(skb));
988 	/* Copy the bytes */
989 	skb_copy_from_linear_data(skb, n->data, n->len);
990 
991 	n->truesize += skb->data_len;
992 	n->data_len  = skb->data_len;
993 	n->len	     = skb->len;
994 
995 	if (skb_shinfo(skb)->nr_frags) {
996 		int i;
997 
998 		if (skb_orphan_frags(skb, gfp_mask)) {
999 			kfree_skb(n);
1000 			n = NULL;
1001 			goto out;
1002 		}
1003 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1004 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
1005 			skb_frag_ref(skb, i);
1006 		}
1007 		skb_shinfo(n)->nr_frags = i;
1008 	}
1009 
1010 	if (skb_has_frag_list(skb)) {
1011 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
1012 		skb_clone_fraglist(n);
1013 	}
1014 
1015 	copy_skb_header(n, skb);
1016 out:
1017 	return n;
1018 }
1019 EXPORT_SYMBOL(__pskb_copy_fclone);
1020 
1021 /**
1022  *	pskb_expand_head - reallocate header of &sk_buff
1023  *	@skb: buffer to reallocate
1024  *	@nhead: room to add at head
1025  *	@ntail: room to add at tail
1026  *	@gfp_mask: allocation priority
1027  *
1028  *	Expands (or creates identical copy, if @nhead and @ntail are zero)
1029  *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
1030  *	reference count of 1. Returns zero in the case of success or error,
1031  *	if expansion failed. In the last case, &sk_buff is not changed.
1032  *
1033  *	All the pointers pointing into skb header may change and must be
1034  *	reloaded after call to this function.
1035  */
1036 
1037 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1038 		     gfp_t gfp_mask)
1039 {
1040 	int i;
1041 	u8 *data;
1042 	int size = nhead + skb_end_offset(skb) + ntail;
1043 	long off;
1044 
1045 	BUG_ON(nhead < 0);
1046 
1047 	if (skb_shared(skb))
1048 		BUG();
1049 
1050 	size = SKB_DATA_ALIGN(size);
1051 
1052 	if (skb_pfmemalloc(skb))
1053 		gfp_mask |= __GFP_MEMALLOC;
1054 	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1055 			       gfp_mask, NUMA_NO_NODE, NULL);
1056 	if (!data)
1057 		goto nodata;
1058 	size = SKB_WITH_OVERHEAD(ksize(data));
1059 
1060 	/* Copy only real data... and, alas, header. This should be
1061 	 * optimized for the cases when header is void.
1062 	 */
1063 	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
1064 
1065 	memcpy((struct skb_shared_info *)(data + size),
1066 	       skb_shinfo(skb),
1067 	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
1068 
1069 	/*
1070 	 * if shinfo is shared we must drop the old head gracefully, but if it
1071 	 * is not we can just drop the old head and let the existing refcount
1072 	 * be since all we did is relocate the values
1073 	 */
1074 	if (skb_cloned(skb)) {
1075 		/* copy this zero copy skb frags */
1076 		if (skb_orphan_frags(skb, gfp_mask))
1077 			goto nofrags;
1078 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1079 			skb_frag_ref(skb, i);
1080 
1081 		if (skb_has_frag_list(skb))
1082 			skb_clone_fraglist(skb);
1083 
1084 		skb_release_data(skb);
1085 	} else {
1086 		skb_free_head(skb);
1087 	}
1088 	off = (data + nhead) - skb->head;
1089 
1090 	skb->head     = data;
1091 	skb->head_frag = 0;
1092 	skb->data    += off;
1093 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1094 	skb->end      = size;
1095 	off           = nhead;
1096 #else
1097 	skb->end      = skb->head + size;
1098 #endif
1099 	skb->tail	      += off;
1100 	skb_headers_offset_update(skb, nhead);
1101 	skb->cloned   = 0;
1102 	skb->hdr_len  = 0;
1103 	skb->nohdr    = 0;
1104 	atomic_set(&skb_shinfo(skb)->dataref, 1);
1105 	return 0;
1106 
1107 nofrags:
1108 	kfree(data);
1109 nodata:
1110 	return -ENOMEM;
1111 }
1112 EXPORT_SYMBOL(pskb_expand_head);
1113 
1114 /* Make private copy of skb with writable head and some headroom */
1115 
1116 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
1117 {
1118 	struct sk_buff *skb2;
1119 	int delta = headroom - skb_headroom(skb);
1120 
1121 	if (delta <= 0)
1122 		skb2 = pskb_copy(skb, GFP_ATOMIC);
1123 	else {
1124 		skb2 = skb_clone(skb, GFP_ATOMIC);
1125 		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
1126 					     GFP_ATOMIC)) {
1127 			kfree_skb(skb2);
1128 			skb2 = NULL;
1129 		}
1130 	}
1131 	return skb2;
1132 }
1133 EXPORT_SYMBOL(skb_realloc_headroom);
1134 
1135 /**
1136  *	skb_copy_expand	-	copy and expand sk_buff
1137  *	@skb: buffer to copy
1138  *	@newheadroom: new free bytes at head
1139  *	@newtailroom: new free bytes at tail
1140  *	@gfp_mask: allocation priority
1141  *
1142  *	Make a copy of both an &sk_buff and its data and while doing so
1143  *	allocate additional space.
1144  *
1145  *	This is used when the caller wishes to modify the data and needs a
1146  *	private copy of the data to alter as well as more space for new fields.
1147  *	Returns %NULL on failure or the pointer to the buffer
1148  *	on success. The returned buffer has a reference count of 1.
1149  *
1150  *	You must pass %GFP_ATOMIC as the allocation priority if this function
1151  *	is called from an interrupt.
1152  */
1153 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1154 				int newheadroom, int newtailroom,
1155 				gfp_t gfp_mask)
1156 {
1157 	/*
1158 	 *	Allocate the copy buffer
1159 	 */
1160 	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1161 					gfp_mask, skb_alloc_rx_flag(skb),
1162 					NUMA_NO_NODE);
1163 	int oldheadroom = skb_headroom(skb);
1164 	int head_copy_len, head_copy_off;
1165 
1166 	if (!n)
1167 		return NULL;
1168 
1169 	skb_reserve(n, newheadroom);
1170 
1171 	/* Set the tail pointer and length */
1172 	skb_put(n, skb->len);
1173 
1174 	head_copy_len = oldheadroom;
1175 	head_copy_off = 0;
1176 	if (newheadroom <= head_copy_len)
1177 		head_copy_len = newheadroom;
1178 	else
1179 		head_copy_off = newheadroom - head_copy_len;
1180 
1181 	/* Copy the linear header and data. */
1182 	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1183 			  skb->len + head_copy_len))
1184 		BUG();
1185 
1186 	copy_skb_header(n, skb);
1187 
1188 	skb_headers_offset_update(n, newheadroom - oldheadroom);
1189 
1190 	return n;
1191 }
1192 EXPORT_SYMBOL(skb_copy_expand);
1193 
1194 /**
1195  *	skb_pad			-	zero pad the tail of an skb
1196  *	@skb: buffer to pad
1197  *	@pad: space to pad
1198  *
1199  *	Ensure that a buffer is followed by a padding area that is zero
1200  *	filled. Used by network drivers which may DMA or transfer data
1201  *	beyond the buffer end onto the wire.
1202  *
1203  *	May return error in out of memory cases. The skb is freed on error.
1204  */
1205 
1206 int skb_pad(struct sk_buff *skb, int pad)
1207 {
1208 	int err;
1209 	int ntail;
1210 
1211 	/* If the skbuff is non linear tailroom is always zero.. */
1212 	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
1213 		memset(skb->data+skb->len, 0, pad);
1214 		return 0;
1215 	}
1216 
1217 	ntail = skb->data_len + pad - (skb->end - skb->tail);
1218 	if (likely(skb_cloned(skb) || ntail > 0)) {
1219 		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
1220 		if (unlikely(err))
1221 			goto free_skb;
1222 	}
1223 
1224 	/* FIXME: The use of this function with non-linear skb's really needs
1225 	 * to be audited.
1226 	 */
1227 	err = skb_linearize(skb);
1228 	if (unlikely(err))
1229 		goto free_skb;
1230 
1231 	memset(skb->data + skb->len, 0, pad);
1232 	return 0;
1233 
1234 free_skb:
1235 	kfree_skb(skb);
1236 	return err;
1237 }
1238 EXPORT_SYMBOL(skb_pad);
1239 
1240 /**
1241  *	pskb_put - add data to the tail of a potentially fragmented buffer
1242  *	@skb: start of the buffer to use
1243  *	@tail: tail fragment of the buffer to use
1244  *	@len: amount of data to add
1245  *
1246  *	This function extends the used data area of the potentially
1247  *	fragmented buffer. @tail must be the last fragment of @skb -- or
1248  *	@skb itself. If this would exceed the total buffer size the kernel
1249  *	will panic. A pointer to the first byte of the extra data is
1250  *	returned.
1251  */
1252 
1253 unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
1254 {
1255 	if (tail != skb) {
1256 		skb->data_len += len;
1257 		skb->len += len;
1258 	}
1259 	return skb_put(tail, len);
1260 }
1261 EXPORT_SYMBOL_GPL(pskb_put);
1262 
1263 /**
1264  *	skb_put - add data to a buffer
1265  *	@skb: buffer to use
1266  *	@len: amount of data to add
1267  *
1268  *	This function extends the used data area of the buffer. If this would
1269  *	exceed the total buffer size the kernel will panic. A pointer to the
1270  *	first byte of the extra data is returned.
1271  */
1272 unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
1273 {
1274 	unsigned char *tmp = skb_tail_pointer(skb);
1275 	SKB_LINEAR_ASSERT(skb);
1276 	skb->tail += len;
1277 	skb->len  += len;
1278 	if (unlikely(skb->tail > skb->end))
1279 		skb_over_panic(skb, len, __builtin_return_address(0));
1280 	return tmp;
1281 }
1282 EXPORT_SYMBOL(skb_put);
1283 
1284 /**
1285  *	skb_push - add data to the start of a buffer
1286  *	@skb: buffer to use
1287  *	@len: amount of data to add
1288  *
1289  *	This function extends the used data area of the buffer at the buffer
1290  *	start. If this would exceed the total buffer headroom the kernel will
1291  *	panic. A pointer to the first byte of the extra data is returned.
1292  */
1293 unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
1294 {
1295 	skb->data -= len;
1296 	skb->len  += len;
1297 	if (unlikely(skb->data<skb->head))
1298 		skb_under_panic(skb, len, __builtin_return_address(0));
1299 	return skb->data;
1300 }
1301 EXPORT_SYMBOL(skb_push);
1302 
1303 /**
1304  *	skb_pull - remove data from the start of a buffer
1305  *	@skb: buffer to use
1306  *	@len: amount of data to remove
1307  *
1308  *	This function removes data from the start of a buffer, returning
1309  *	the memory to the headroom. A pointer to the next data in the buffer
1310  *	is returned. Once the data has been pulled future pushes will overwrite
1311  *	the old data.
1312  */
1313 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
1314 {
1315 	return skb_pull_inline(skb, len);
1316 }
1317 EXPORT_SYMBOL(skb_pull);
1318 
1319 /**
1320  *	skb_trim - remove end from a buffer
1321  *	@skb: buffer to alter
1322  *	@len: new length
1323  *
1324  *	Cut the length of a buffer down by removing data from the tail. If
1325  *	the buffer is already under the length specified it is not modified.
1326  *	The skb must be linear.
1327  */
1328 void skb_trim(struct sk_buff *skb, unsigned int len)
1329 {
1330 	if (skb->len > len)
1331 		__skb_trim(skb, len);
1332 }
1333 EXPORT_SYMBOL(skb_trim);
1334 
1335 /* Trims skb to length len. It can change skb pointers.
1336  */
1337 
1338 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1339 {
1340 	struct sk_buff **fragp;
1341 	struct sk_buff *frag;
1342 	int offset = skb_headlen(skb);
1343 	int nfrags = skb_shinfo(skb)->nr_frags;
1344 	int i;
1345 	int err;
1346 
1347 	if (skb_cloned(skb) &&
1348 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1349 		return err;
1350 
1351 	i = 0;
1352 	if (offset >= len)
1353 		goto drop_pages;
1354 
1355 	for (; i < nfrags; i++) {
1356 		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1357 
1358 		if (end < len) {
1359 			offset = end;
1360 			continue;
1361 		}
1362 
1363 		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
1364 
1365 drop_pages:
1366 		skb_shinfo(skb)->nr_frags = i;
1367 
1368 		for (; i < nfrags; i++)
1369 			skb_frag_unref(skb, i);
1370 
1371 		if (skb_has_frag_list(skb))
1372 			skb_drop_fraglist(skb);
1373 		goto done;
1374 	}
1375 
1376 	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1377 	     fragp = &frag->next) {
1378 		int end = offset + frag->len;
1379 
1380 		if (skb_shared(frag)) {
1381 			struct sk_buff *nfrag;
1382 
1383 			nfrag = skb_clone(frag, GFP_ATOMIC);
1384 			if (unlikely(!nfrag))
1385 				return -ENOMEM;
1386 
1387 			nfrag->next = frag->next;
1388 			consume_skb(frag);
1389 			frag = nfrag;
1390 			*fragp = frag;
1391 		}
1392 
1393 		if (end < len) {
1394 			offset = end;
1395 			continue;
1396 		}
1397 
1398 		if (end > len &&
1399 		    unlikely((err = pskb_trim(frag, len - offset))))
1400 			return err;
1401 
1402 		if (frag->next)
1403 			skb_drop_list(&frag->next);
1404 		break;
1405 	}
1406 
1407 done:
1408 	if (len > skb_headlen(skb)) {
1409 		skb->data_len -= skb->len - len;
1410 		skb->len       = len;
1411 	} else {
1412 		skb->len       = len;
1413 		skb->data_len  = 0;
1414 		skb_set_tail_pointer(skb, len);
1415 	}
1416 
1417 	return 0;
1418 }
1419 EXPORT_SYMBOL(___pskb_trim);
1420 
1421 /**
1422  *	__pskb_pull_tail - advance tail of skb header
1423  *	@skb: buffer to reallocate
1424  *	@delta: number of bytes to advance tail
1425  *
1426  *	The function makes a sense only on a fragmented &sk_buff,
1427  *	it expands header moving its tail forward and copying necessary
1428  *	data from fragmented part.
1429  *
1430  *	&sk_buff MUST have reference count of 1.
1431  *
1432  *	Returns %NULL (and &sk_buff does not change) if pull failed
1433  *	or value of new tail of skb in the case of success.
1434  *
1435  *	All the pointers pointing into skb header may change and must be
1436  *	reloaded after call to this function.
1437  */
1438 
1439 /* Moves tail of skb head forward, copying data from fragmented part,
1440  * when it is necessary.
1441  * 1. It may fail due to malloc failure.
1442  * 2. It may change skb pointers.
1443  *
1444  * It is pretty complicated. Luckily, it is called only in exceptional cases.
1445  */
1446 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1447 {
1448 	/* If skb has not enough free space at tail, get new one
1449 	 * plus 128 bytes for future expansions. If we have enough
1450 	 * room at tail, reallocate without expansion only if skb is cloned.
1451 	 */
1452 	int i, k, eat = (skb->tail + delta) - skb->end;
1453 
1454 	if (eat > 0 || skb_cloned(skb)) {
1455 		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1456 				     GFP_ATOMIC))
1457 			return NULL;
1458 	}
1459 
1460 	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
1461 		BUG();
1462 
1463 	/* Optimization: no fragments, no reasons to preestimate
1464 	 * size of pulled pages. Superb.
1465 	 */
1466 	if (!skb_has_frag_list(skb))
1467 		goto pull_pages;
1468 
1469 	/* Estimate size of pulled pages. */
1470 	eat = delta;
1471 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1472 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1473 
1474 		if (size >= eat)
1475 			goto pull_pages;
1476 		eat -= size;
1477 	}
1478 
1479 	/* If we need update frag list, we are in troubles.
1480 	 * Certainly, it possible to add an offset to skb data,
1481 	 * but taking into account that pulling is expected to
1482 	 * be very rare operation, it is worth to fight against
1483 	 * further bloating skb head and crucify ourselves here instead.
1484 	 * Pure masohism, indeed. 8)8)
1485 	 */
1486 	if (eat) {
1487 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1488 		struct sk_buff *clone = NULL;
1489 		struct sk_buff *insp = NULL;
1490 
1491 		do {
1492 			BUG_ON(!list);
1493 
1494 			if (list->len <= eat) {
1495 				/* Eaten as whole. */
1496 				eat -= list->len;
1497 				list = list->next;
1498 				insp = list;
1499 			} else {
1500 				/* Eaten partially. */
1501 
1502 				if (skb_shared(list)) {
1503 					/* Sucks! We need to fork list. :-( */
1504 					clone = skb_clone(list, GFP_ATOMIC);
1505 					if (!clone)
1506 						return NULL;
1507 					insp = list->next;
1508 					list = clone;
1509 				} else {
1510 					/* This may be pulled without
1511 					 * problems. */
1512 					insp = list;
1513 				}
1514 				if (!pskb_pull(list, eat)) {
1515 					kfree_skb(clone);
1516 					return NULL;
1517 				}
1518 				break;
1519 			}
1520 		} while (eat);
1521 
1522 		/* Free pulled out fragments. */
1523 		while ((list = skb_shinfo(skb)->frag_list) != insp) {
1524 			skb_shinfo(skb)->frag_list = list->next;
1525 			kfree_skb(list);
1526 		}
1527 		/* And insert new clone at head. */
1528 		if (clone) {
1529 			clone->next = list;
1530 			skb_shinfo(skb)->frag_list = clone;
1531 		}
1532 	}
1533 	/* Success! Now we may commit changes to skb data. */
1534 
1535 pull_pages:
1536 	eat = delta;
1537 	k = 0;
1538 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1539 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1540 
1541 		if (size <= eat) {
1542 			skb_frag_unref(skb, i);
1543 			eat -= size;
1544 		} else {
1545 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1546 			if (eat) {
1547 				skb_shinfo(skb)->frags[k].page_offset += eat;
1548 				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
1549 				eat = 0;
1550 			}
1551 			k++;
1552 		}
1553 	}
1554 	skb_shinfo(skb)->nr_frags = k;
1555 
1556 	skb->tail     += delta;
1557 	skb->data_len -= delta;
1558 
1559 	return skb_tail_pointer(skb);
1560 }
1561 EXPORT_SYMBOL(__pskb_pull_tail);
1562 
1563 /**
1564  *	skb_copy_bits - copy bits from skb to kernel buffer
1565  *	@skb: source skb
1566  *	@offset: offset in source
1567  *	@to: destination buffer
1568  *	@len: number of bytes to copy
1569  *
1570  *	Copy the specified number of bytes from the source skb to the
1571  *	destination buffer.
1572  *
1573  *	CAUTION ! :
1574  *		If its prototype is ever changed,
1575  *		check arch/{*}/net/{*}.S files,
1576  *		since it is called from BPF assembly code.
1577  */
1578 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1579 {
1580 	int start = skb_headlen(skb);
1581 	struct sk_buff *frag_iter;
1582 	int i, copy;
1583 
1584 	if (offset > (int)skb->len - len)
1585 		goto fault;
1586 
1587 	/* Copy header. */
1588 	if ((copy = start - offset) > 0) {
1589 		if (copy > len)
1590 			copy = len;
1591 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
1592 		if ((len -= copy) == 0)
1593 			return 0;
1594 		offset += copy;
1595 		to     += copy;
1596 	}
1597 
1598 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1599 		int end;
1600 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1601 
1602 		WARN_ON(start > offset + len);
1603 
1604 		end = start + skb_frag_size(f);
1605 		if ((copy = end - offset) > 0) {
1606 			u8 *vaddr;
1607 
1608 			if (copy > len)
1609 				copy = len;
1610 
1611 			vaddr = kmap_atomic(skb_frag_page(f));
1612 			memcpy(to,
1613 			       vaddr + f->page_offset + offset - start,
1614 			       copy);
1615 			kunmap_atomic(vaddr);
1616 
1617 			if ((len -= copy) == 0)
1618 				return 0;
1619 			offset += copy;
1620 			to     += copy;
1621 		}
1622 		start = end;
1623 	}
1624 
1625 	skb_walk_frags(skb, frag_iter) {
1626 		int end;
1627 
1628 		WARN_ON(start > offset + len);
1629 
1630 		end = start + frag_iter->len;
1631 		if ((copy = end - offset) > 0) {
1632 			if (copy > len)
1633 				copy = len;
1634 			if (skb_copy_bits(frag_iter, offset - start, to, copy))
1635 				goto fault;
1636 			if ((len -= copy) == 0)
1637 				return 0;
1638 			offset += copy;
1639 			to     += copy;
1640 		}
1641 		start = end;
1642 	}
1643 
1644 	if (!len)
1645 		return 0;
1646 
1647 fault:
1648 	return -EFAULT;
1649 }
1650 EXPORT_SYMBOL(skb_copy_bits);
1651 
1652 /*
1653  * Callback from splice_to_pipe(), if we need to release some pages
1654  * at the end of the spd in case we error'ed out in filling the pipe.
1655  */
1656 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1657 {
1658 	put_page(spd->pages[i]);
1659 }
1660 
1661 static struct page *linear_to_page(struct page *page, unsigned int *len,
1662 				   unsigned int *offset,
1663 				   struct sock *sk)
1664 {
1665 	struct page_frag *pfrag = sk_page_frag(sk);
1666 
1667 	if (!sk_page_frag_refill(sk, pfrag))
1668 		return NULL;
1669 
1670 	*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
1671 
1672 	memcpy(page_address(pfrag->page) + pfrag->offset,
1673 	       page_address(page) + *offset, *len);
1674 	*offset = pfrag->offset;
1675 	pfrag->offset += *len;
1676 
1677 	return pfrag->page;
1678 }
1679 
1680 static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
1681 			     struct page *page,
1682 			     unsigned int offset)
1683 {
1684 	return	spd->nr_pages &&
1685 		spd->pages[spd->nr_pages - 1] == page &&
1686 		(spd->partial[spd->nr_pages - 1].offset +
1687 		 spd->partial[spd->nr_pages - 1].len == offset);
1688 }
1689 
1690 /*
1691  * Fill page/offset/length into spd, if it can hold more pages.
1692  */
1693 static bool spd_fill_page(struct splice_pipe_desc *spd,
1694 			  struct pipe_inode_info *pipe, struct page *page,
1695 			  unsigned int *len, unsigned int offset,
1696 			  bool linear,
1697 			  struct sock *sk)
1698 {
1699 	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
1700 		return true;
1701 
1702 	if (linear) {
1703 		page = linear_to_page(page, len, &offset, sk);
1704 		if (!page)
1705 			return true;
1706 	}
1707 	if (spd_can_coalesce(spd, page, offset)) {
1708 		spd->partial[spd->nr_pages - 1].len += *len;
1709 		return false;
1710 	}
1711 	get_page(page);
1712 	spd->pages[spd->nr_pages] = page;
1713 	spd->partial[spd->nr_pages].len = *len;
1714 	spd->partial[spd->nr_pages].offset = offset;
1715 	spd->nr_pages++;
1716 
1717 	return false;
1718 }
1719 
1720 static bool __splice_segment(struct page *page, unsigned int poff,
1721 			     unsigned int plen, unsigned int *off,
1722 			     unsigned int *len,
1723 			     struct splice_pipe_desc *spd, bool linear,
1724 			     struct sock *sk,
1725 			     struct pipe_inode_info *pipe)
1726 {
1727 	if (!*len)
1728 		return true;
1729 
1730 	/* skip this segment if already processed */
1731 	if (*off >= plen) {
1732 		*off -= plen;
1733 		return false;
1734 	}
1735 
1736 	/* ignore any bits we already processed */
1737 	poff += *off;
1738 	plen -= *off;
1739 	*off = 0;
1740 
1741 	do {
1742 		unsigned int flen = min(*len, plen);
1743 
1744 		if (spd_fill_page(spd, pipe, page, &flen, poff,
1745 				  linear, sk))
1746 			return true;
1747 		poff += flen;
1748 		plen -= flen;
1749 		*len -= flen;
1750 	} while (*len && plen);
1751 
1752 	return false;
1753 }
1754 
1755 /*
1756  * Map linear and fragment data from the skb to spd. It reports true if the
1757  * pipe is full or if we already spliced the requested length.
1758  */
1759 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1760 			      unsigned int *offset, unsigned int *len,
1761 			      struct splice_pipe_desc *spd, struct sock *sk)
1762 {
1763 	int seg;
1764 
1765 	/* map the linear part :
1766 	 * If skb->head_frag is set, this 'linear' part is backed by a
1767 	 * fragment, and if the head is not shared with any clones then
1768 	 * we can avoid a copy since we own the head portion of this page.
1769 	 */
1770 	if (__splice_segment(virt_to_page(skb->data),
1771 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
1772 			     skb_headlen(skb),
1773 			     offset, len, spd,
1774 			     skb_head_is_locked(skb),
1775 			     sk, pipe))
1776 		return true;
1777 
1778 	/*
1779 	 * then map the fragments
1780 	 */
1781 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1782 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1783 
1784 		if (__splice_segment(skb_frag_page(f),
1785 				     f->page_offset, skb_frag_size(f),
1786 				     offset, len, spd, false, sk, pipe))
1787 			return true;
1788 	}
1789 
1790 	return false;
1791 }
1792 
1793 /*
1794  * Map data from the skb to a pipe. Should handle both the linear part,
1795  * the fragments, and the frag list. It does NOT handle frag lists within
1796  * the frag list, if such a thing exists. We'd probably need to recurse to
1797  * handle that cleanly.
1798  */
1799 int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1800 		    struct pipe_inode_info *pipe, unsigned int tlen,
1801 		    unsigned int flags)
1802 {
1803 	struct partial_page partial[MAX_SKB_FRAGS];
1804 	struct page *pages[MAX_SKB_FRAGS];
1805 	struct splice_pipe_desc spd = {
1806 		.pages = pages,
1807 		.partial = partial,
1808 		.nr_pages_max = MAX_SKB_FRAGS,
1809 		.flags = flags,
1810 		.ops = &nosteal_pipe_buf_ops,
1811 		.spd_release = sock_spd_release,
1812 	};
1813 	struct sk_buff *frag_iter;
1814 	struct sock *sk = skb->sk;
1815 	int ret = 0;
1816 
1817 	/*
1818 	 * __skb_splice_bits() only fails if the output has no room left,
1819 	 * so no point in going over the frag_list for the error case.
1820 	 */
1821 	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
1822 		goto done;
1823 	else if (!tlen)
1824 		goto done;
1825 
1826 	/*
1827 	 * now see if we have a frag_list to map
1828 	 */
1829 	skb_walk_frags(skb, frag_iter) {
1830 		if (!tlen)
1831 			break;
1832 		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
1833 			break;
1834 	}
1835 
1836 done:
1837 	if (spd.nr_pages) {
1838 		/*
1839 		 * Drop the socket lock, otherwise we have reverse
1840 		 * locking dependencies between sk_lock and i_mutex
1841 		 * here as compared to sendfile(). We enter here
1842 		 * with the socket lock held, and splice_to_pipe() will
1843 		 * grab the pipe inode lock. For sendfile() emulation,
1844 		 * we call into ->sendpage() with the i_mutex lock held
1845 		 * and networking will grab the socket lock.
1846 		 */
1847 		release_sock(sk);
1848 		ret = splice_to_pipe(pipe, &spd);
1849 		lock_sock(sk);
1850 	}
1851 
1852 	return ret;
1853 }
1854 
1855 /**
1856  *	skb_store_bits - store bits from kernel buffer to skb
1857  *	@skb: destination buffer
1858  *	@offset: offset in destination
1859  *	@from: source buffer
1860  *	@len: number of bytes to copy
1861  *
1862  *	Copy the specified number of bytes from the source buffer to the
1863  *	destination skb.  This function handles all the messy bits of
1864  *	traversing fragment lists and such.
1865  */
1866 
1867 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1868 {
1869 	int start = skb_headlen(skb);
1870 	struct sk_buff *frag_iter;
1871 	int i, copy;
1872 
1873 	if (offset > (int)skb->len - len)
1874 		goto fault;
1875 
1876 	if ((copy = start - offset) > 0) {
1877 		if (copy > len)
1878 			copy = len;
1879 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
1880 		if ((len -= copy) == 0)
1881 			return 0;
1882 		offset += copy;
1883 		from += copy;
1884 	}
1885 
1886 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1887 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1888 		int end;
1889 
1890 		WARN_ON(start > offset + len);
1891 
1892 		end = start + skb_frag_size(frag);
1893 		if ((copy = end - offset) > 0) {
1894 			u8 *vaddr;
1895 
1896 			if (copy > len)
1897 				copy = len;
1898 
1899 			vaddr = kmap_atomic(skb_frag_page(frag));
1900 			memcpy(vaddr + frag->page_offset + offset - start,
1901 			       from, copy);
1902 			kunmap_atomic(vaddr);
1903 
1904 			if ((len -= copy) == 0)
1905 				return 0;
1906 			offset += copy;
1907 			from += copy;
1908 		}
1909 		start = end;
1910 	}
1911 
1912 	skb_walk_frags(skb, frag_iter) {
1913 		int end;
1914 
1915 		WARN_ON(start > offset + len);
1916 
1917 		end = start + frag_iter->len;
1918 		if ((copy = end - offset) > 0) {
1919 			if (copy > len)
1920 				copy = len;
1921 			if (skb_store_bits(frag_iter, offset - start,
1922 					   from, copy))
1923 				goto fault;
1924 			if ((len -= copy) == 0)
1925 				return 0;
1926 			offset += copy;
1927 			from += copy;
1928 		}
1929 		start = end;
1930 	}
1931 	if (!len)
1932 		return 0;
1933 
1934 fault:
1935 	return -EFAULT;
1936 }
1937 EXPORT_SYMBOL(skb_store_bits);
1938 
1939 /* Checksum skb data. */
1940 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
1941 		      __wsum csum, const struct skb_checksum_ops *ops)
1942 {
1943 	int start = skb_headlen(skb);
1944 	int i, copy = start - offset;
1945 	struct sk_buff *frag_iter;
1946 	int pos = 0;
1947 
1948 	/* Checksum header. */
1949 	if (copy > 0) {
1950 		if (copy > len)
1951 			copy = len;
1952 		csum = ops->update(skb->data + offset, copy, csum);
1953 		if ((len -= copy) == 0)
1954 			return csum;
1955 		offset += copy;
1956 		pos	= copy;
1957 	}
1958 
1959 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1960 		int end;
1961 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1962 
1963 		WARN_ON(start > offset + len);
1964 
1965 		end = start + skb_frag_size(frag);
1966 		if ((copy = end - offset) > 0) {
1967 			__wsum csum2;
1968 			u8 *vaddr;
1969 
1970 			if (copy > len)
1971 				copy = len;
1972 			vaddr = kmap_atomic(skb_frag_page(frag));
1973 			csum2 = ops->update(vaddr + frag->page_offset +
1974 					    offset - start, copy, 0);
1975 			kunmap_atomic(vaddr);
1976 			csum = ops->combine(csum, csum2, pos, copy);
1977 			if (!(len -= copy))
1978 				return csum;
1979 			offset += copy;
1980 			pos    += copy;
1981 		}
1982 		start = end;
1983 	}
1984 
1985 	skb_walk_frags(skb, frag_iter) {
1986 		int end;
1987 
1988 		WARN_ON(start > offset + len);
1989 
1990 		end = start + frag_iter->len;
1991 		if ((copy = end - offset) > 0) {
1992 			__wsum csum2;
1993 			if (copy > len)
1994 				copy = len;
1995 			csum2 = __skb_checksum(frag_iter, offset - start,
1996 					       copy, 0, ops);
1997 			csum = ops->combine(csum, csum2, pos, copy);
1998 			if ((len -= copy) == 0)
1999 				return csum;
2000 			offset += copy;
2001 			pos    += copy;
2002 		}
2003 		start = end;
2004 	}
2005 	BUG_ON(len);
2006 
2007 	return csum;
2008 }
2009 EXPORT_SYMBOL(__skb_checksum);
2010 
2011 __wsum skb_checksum(const struct sk_buff *skb, int offset,
2012 		    int len, __wsum csum)
2013 {
2014 	const struct skb_checksum_ops ops = {
2015 		.update  = csum_partial_ext,
2016 		.combine = csum_block_add_ext,
2017 	};
2018 
2019 	return __skb_checksum(skb, offset, len, csum, &ops);
2020 }
2021 EXPORT_SYMBOL(skb_checksum);
2022 
2023 /* Both of above in one bottle. */
2024 
2025 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
2026 				    u8 *to, int len, __wsum csum)
2027 {
2028 	int start = skb_headlen(skb);
2029 	int i, copy = start - offset;
2030 	struct sk_buff *frag_iter;
2031 	int pos = 0;
2032 
2033 	/* Copy header. */
2034 	if (copy > 0) {
2035 		if (copy > len)
2036 			copy = len;
2037 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
2038 						 copy, csum);
2039 		if ((len -= copy) == 0)
2040 			return csum;
2041 		offset += copy;
2042 		to     += copy;
2043 		pos	= copy;
2044 	}
2045 
2046 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2047 		int end;
2048 
2049 		WARN_ON(start > offset + len);
2050 
2051 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2052 		if ((copy = end - offset) > 0) {
2053 			__wsum csum2;
2054 			u8 *vaddr;
2055 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2056 
2057 			if (copy > len)
2058 				copy = len;
2059 			vaddr = kmap_atomic(skb_frag_page(frag));
2060 			csum2 = csum_partial_copy_nocheck(vaddr +
2061 							  frag->page_offset +
2062 							  offset - start, to,
2063 							  copy, 0);
2064 			kunmap_atomic(vaddr);
2065 			csum = csum_block_add(csum, csum2, pos);
2066 			if (!(len -= copy))
2067 				return csum;
2068 			offset += copy;
2069 			to     += copy;
2070 			pos    += copy;
2071 		}
2072 		start = end;
2073 	}
2074 
2075 	skb_walk_frags(skb, frag_iter) {
2076 		__wsum csum2;
2077 		int end;
2078 
2079 		WARN_ON(start > offset + len);
2080 
2081 		end = start + frag_iter->len;
2082 		if ((copy = end - offset) > 0) {
2083 			if (copy > len)
2084 				copy = len;
2085 			csum2 = skb_copy_and_csum_bits(frag_iter,
2086 						       offset - start,
2087 						       to, copy, 0);
2088 			csum = csum_block_add(csum, csum2, pos);
2089 			if ((len -= copy) == 0)
2090 				return csum;
2091 			offset += copy;
2092 			to     += copy;
2093 			pos    += copy;
2094 		}
2095 		start = end;
2096 	}
2097 	BUG_ON(len);
2098 	return csum;
2099 }
2100 EXPORT_SYMBOL(skb_copy_and_csum_bits);
2101 
2102  /**
2103  *	skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
2104  *	@from: source buffer
2105  *
2106  *	Calculates the amount of linear headroom needed in the 'to' skb passed
2107  *	into skb_zerocopy().
2108  */
2109 unsigned int
2110 skb_zerocopy_headlen(const struct sk_buff *from)
2111 {
2112 	unsigned int hlen = 0;
2113 
2114 	if (!from->head_frag ||
2115 	    skb_headlen(from) < L1_CACHE_BYTES ||
2116 	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
2117 		hlen = skb_headlen(from);
2118 
2119 	if (skb_has_frag_list(from))
2120 		hlen = from->len;
2121 
2122 	return hlen;
2123 }
2124 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2125 
2126 /**
2127  *	skb_zerocopy - Zero copy skb to skb
2128  *	@to: destination buffer
2129  *	@from: source buffer
2130  *	@len: number of bytes to copy from source buffer
2131  *	@hlen: size of linear headroom in destination buffer
2132  *
2133  *	Copies up to `len` bytes from `from` to `to` by creating references
2134  *	to the frags in the source buffer.
2135  *
2136  *	The `hlen` as calculated by skb_zerocopy_headlen() specifies the
2137  *	headroom in the `to` buffer.
2138  *
2139  *	Return value:
2140  *	0: everything is OK
2141  *	-ENOMEM: couldn't orphan frags of @from due to lack of memory
2142  *	-EFAULT: skb_copy_bits() found some problem with skb geometry
2143  */
2144 int
2145 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
2146 {
2147 	int i, j = 0;
2148 	int plen = 0; /* length of skb->head fragment */
2149 	int ret;
2150 	struct page *page;
2151 	unsigned int offset;
2152 
2153 	BUG_ON(!from->head_frag && !hlen);
2154 
2155 	/* dont bother with small payloads */
2156 	if (len <= skb_tailroom(to))
2157 		return skb_copy_bits(from, 0, skb_put(to, len), len);
2158 
2159 	if (hlen) {
2160 		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
2161 		if (unlikely(ret))
2162 			return ret;
2163 		len -= hlen;
2164 	} else {
2165 		plen = min_t(int, skb_headlen(from), len);
2166 		if (plen) {
2167 			page = virt_to_head_page(from->head);
2168 			offset = from->data - (unsigned char *)page_address(page);
2169 			__skb_fill_page_desc(to, 0, page, offset, plen);
2170 			get_page(page);
2171 			j = 1;
2172 			len -= plen;
2173 		}
2174 	}
2175 
2176 	to->truesize += len + plen;
2177 	to->len += len + plen;
2178 	to->data_len += len + plen;
2179 
2180 	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
2181 		skb_tx_error(from);
2182 		return -ENOMEM;
2183 	}
2184 
2185 	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
2186 		if (!len)
2187 			break;
2188 		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
2189 		skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
2190 		len -= skb_shinfo(to)->frags[j].size;
2191 		skb_frag_ref(to, j);
2192 		j++;
2193 	}
2194 	skb_shinfo(to)->nr_frags = j;
2195 
2196 	return 0;
2197 }
2198 EXPORT_SYMBOL_GPL(skb_zerocopy);
2199 
2200 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
2201 {
2202 	__wsum csum;
2203 	long csstart;
2204 
2205 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2206 		csstart = skb_checksum_start_offset(skb);
2207 	else
2208 		csstart = skb_headlen(skb);
2209 
2210 	BUG_ON(csstart > skb_headlen(skb));
2211 
2212 	skb_copy_from_linear_data(skb, to, csstart);
2213 
2214 	csum = 0;
2215 	if (csstart != skb->len)
2216 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
2217 					      skb->len - csstart, 0);
2218 
2219 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2220 		long csstuff = csstart + skb->csum_offset;
2221 
2222 		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
2223 	}
2224 }
2225 EXPORT_SYMBOL(skb_copy_and_csum_dev);
2226 
2227 /**
2228  *	skb_dequeue - remove from the head of the queue
2229  *	@list: list to dequeue from
2230  *
2231  *	Remove the head of the list. The list lock is taken so the function
2232  *	may be used safely with other locking list functions. The head item is
2233  *	returned or %NULL if the list is empty.
2234  */
2235 
2236 struct sk_buff *skb_dequeue(struct sk_buff_head *list)
2237 {
2238 	unsigned long flags;
2239 	struct sk_buff *result;
2240 
2241 	spin_lock_irqsave(&list->lock, flags);
2242 	result = __skb_dequeue(list);
2243 	spin_unlock_irqrestore(&list->lock, flags);
2244 	return result;
2245 }
2246 EXPORT_SYMBOL(skb_dequeue);
2247 
2248 /**
2249  *	skb_dequeue_tail - remove from the tail of the queue
2250  *	@list: list to dequeue from
2251  *
2252  *	Remove the tail of the list. The list lock is taken so the function
2253  *	may be used safely with other locking list functions. The tail item is
2254  *	returned or %NULL if the list is empty.
2255  */
2256 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
2257 {
2258 	unsigned long flags;
2259 	struct sk_buff *result;
2260 
2261 	spin_lock_irqsave(&list->lock, flags);
2262 	result = __skb_dequeue_tail(list);
2263 	spin_unlock_irqrestore(&list->lock, flags);
2264 	return result;
2265 }
2266 EXPORT_SYMBOL(skb_dequeue_tail);
2267 
2268 /**
2269  *	skb_queue_purge - empty a list
2270  *	@list: list to empty
2271  *
2272  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
2273  *	the list and one reference dropped. This function takes the list
2274  *	lock and is atomic with respect to other list locking functions.
2275  */
2276 void skb_queue_purge(struct sk_buff_head *list)
2277 {
2278 	struct sk_buff *skb;
2279 	while ((skb = skb_dequeue(list)) != NULL)
2280 		kfree_skb(skb);
2281 }
2282 EXPORT_SYMBOL(skb_queue_purge);
2283 
2284 /**
2285  *	skb_queue_head - queue a buffer at the list head
2286  *	@list: list to use
2287  *	@newsk: buffer to queue
2288  *
2289  *	Queue a buffer at the start of the list. This function takes the
2290  *	list lock and can be used safely with other locking &sk_buff functions
2291  *	safely.
2292  *
2293  *	A buffer cannot be placed on two lists at the same time.
2294  */
2295 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
2296 {
2297 	unsigned long flags;
2298 
2299 	spin_lock_irqsave(&list->lock, flags);
2300 	__skb_queue_head(list, newsk);
2301 	spin_unlock_irqrestore(&list->lock, flags);
2302 }
2303 EXPORT_SYMBOL(skb_queue_head);
2304 
2305 /**
2306  *	skb_queue_tail - queue a buffer at the list tail
2307  *	@list: list to use
2308  *	@newsk: buffer to queue
2309  *
2310  *	Queue a buffer at the tail of the list. This function takes the
2311  *	list lock and can be used safely with other locking &sk_buff functions
2312  *	safely.
2313  *
2314  *	A buffer cannot be placed on two lists at the same time.
2315  */
2316 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
2317 {
2318 	unsigned long flags;
2319 
2320 	spin_lock_irqsave(&list->lock, flags);
2321 	__skb_queue_tail(list, newsk);
2322 	spin_unlock_irqrestore(&list->lock, flags);
2323 }
2324 EXPORT_SYMBOL(skb_queue_tail);
2325 
2326 /**
2327  *	skb_unlink	-	remove a buffer from a list
2328  *	@skb: buffer to remove
2329  *	@list: list to use
2330  *
2331  *	Remove a packet from a list. The list locks are taken and this
2332  *	function is atomic with respect to other list locked calls
2333  *
2334  *	You must know what list the SKB is on.
2335  */
2336 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2337 {
2338 	unsigned long flags;
2339 
2340 	spin_lock_irqsave(&list->lock, flags);
2341 	__skb_unlink(skb, list);
2342 	spin_unlock_irqrestore(&list->lock, flags);
2343 }
2344 EXPORT_SYMBOL(skb_unlink);
2345 
2346 /**
2347  *	skb_append	-	append a buffer
2348  *	@old: buffer to insert after
2349  *	@newsk: buffer to insert
2350  *	@list: list to use
2351  *
2352  *	Place a packet after a given packet in a list. The list locks are taken
2353  *	and this function is atomic with respect to other list locked calls.
2354  *	A buffer cannot be placed on two lists at the same time.
2355  */
2356 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2357 {
2358 	unsigned long flags;
2359 
2360 	spin_lock_irqsave(&list->lock, flags);
2361 	__skb_queue_after(list, old, newsk);
2362 	spin_unlock_irqrestore(&list->lock, flags);
2363 }
2364 EXPORT_SYMBOL(skb_append);
2365 
2366 /**
2367  *	skb_insert	-	insert a buffer
2368  *	@old: buffer to insert before
2369  *	@newsk: buffer to insert
2370  *	@list: list to use
2371  *
2372  *	Place a packet before a given packet in a list. The list locks are
2373  * 	taken and this function is atomic with respect to other list locked
2374  *	calls.
2375  *
2376  *	A buffer cannot be placed on two lists at the same time.
2377  */
2378 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2379 {
2380 	unsigned long flags;
2381 
2382 	spin_lock_irqsave(&list->lock, flags);
2383 	__skb_insert(newsk, old->prev, old, list);
2384 	spin_unlock_irqrestore(&list->lock, flags);
2385 }
2386 EXPORT_SYMBOL(skb_insert);
2387 
2388 static inline void skb_split_inside_header(struct sk_buff *skb,
2389 					   struct sk_buff* skb1,
2390 					   const u32 len, const int pos)
2391 {
2392 	int i;
2393 
2394 	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2395 					 pos - len);
2396 	/* And move data appendix as is. */
2397 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2398 		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2399 
2400 	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2401 	skb_shinfo(skb)->nr_frags  = 0;
2402 	skb1->data_len		   = skb->data_len;
2403 	skb1->len		   += skb1->data_len;
2404 	skb->data_len		   = 0;
2405 	skb->len		   = len;
2406 	skb_set_tail_pointer(skb, len);
2407 }
2408 
2409 static inline void skb_split_no_header(struct sk_buff *skb,
2410 				       struct sk_buff* skb1,
2411 				       const u32 len, int pos)
2412 {
2413 	int i, k = 0;
2414 	const int nfrags = skb_shinfo(skb)->nr_frags;
2415 
2416 	skb_shinfo(skb)->nr_frags = 0;
2417 	skb1->len		  = skb1->data_len = skb->len - len;
2418 	skb->len		  = len;
2419 	skb->data_len		  = len - pos;
2420 
2421 	for (i = 0; i < nfrags; i++) {
2422 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2423 
2424 		if (pos + size > len) {
2425 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2426 
2427 			if (pos < len) {
2428 				/* Split frag.
2429 				 * We have two variants in this case:
2430 				 * 1. Move all the frag to the second
2431 				 *    part, if it is possible. F.e.
2432 				 *    this approach is mandatory for TUX,
2433 				 *    where splitting is expensive.
2434 				 * 2. Split is accurately. We make this.
2435 				 */
2436 				skb_frag_ref(skb, i);
2437 				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2438 				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
2439 				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
2440 				skb_shinfo(skb)->nr_frags++;
2441 			}
2442 			k++;
2443 		} else
2444 			skb_shinfo(skb)->nr_frags++;
2445 		pos += size;
2446 	}
2447 	skb_shinfo(skb1)->nr_frags = k;
2448 }
2449 
2450 /**
2451  * skb_split - Split fragmented skb to two parts at length len.
2452  * @skb: the buffer to split
2453  * @skb1: the buffer to receive the second part
2454  * @len: new length for skb
2455  */
2456 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2457 {
2458 	int pos = skb_headlen(skb);
2459 
2460 	skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2461 	if (len < pos)	/* Split line is inside header. */
2462 		skb_split_inside_header(skb, skb1, len, pos);
2463 	else		/* Second chunk has no header, nothing to copy. */
2464 		skb_split_no_header(skb, skb1, len, pos);
2465 }
2466 EXPORT_SYMBOL(skb_split);
2467 
2468 /* Shifting from/to a cloned skb is a no-go.
2469  *
2470  * Caller cannot keep skb_shinfo related pointers past calling here!
2471  */
2472 static int skb_prepare_for_shift(struct sk_buff *skb)
2473 {
2474 	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2475 }
2476 
2477 /**
2478  * skb_shift - Shifts paged data partially from skb to another
2479  * @tgt: buffer into which tail data gets added
2480  * @skb: buffer from which the paged data comes from
2481  * @shiftlen: shift up to this many bytes
2482  *
2483  * Attempts to shift up to shiftlen worth of bytes, which may be less than
2484  * the length of the skb, from skb to tgt. Returns number bytes shifted.
2485  * It's up to caller to free skb if everything was shifted.
2486  *
2487  * If @tgt runs out of frags, the whole operation is aborted.
2488  *
2489  * Skb cannot include anything else but paged data while tgt is allowed
2490  * to have non-paged data as well.
2491  *
2492  * TODO: full sized shift could be optimized but that would need
2493  * specialized skb free'er to handle frags without up-to-date nr_frags.
2494  */
2495 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2496 {
2497 	int from, to, merge, todo;
2498 	struct skb_frag_struct *fragfrom, *fragto;
2499 
2500 	BUG_ON(shiftlen > skb->len);
2501 	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */
2502 
2503 	todo = shiftlen;
2504 	from = 0;
2505 	to = skb_shinfo(tgt)->nr_frags;
2506 	fragfrom = &skb_shinfo(skb)->frags[from];
2507 
2508 	/* Actual merge is delayed until the point when we know we can
2509 	 * commit all, so that we don't have to undo partial changes
2510 	 */
2511 	if (!to ||
2512 	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
2513 			      fragfrom->page_offset)) {
2514 		merge = -1;
2515 	} else {
2516 		merge = to - 1;
2517 
2518 		todo -= skb_frag_size(fragfrom);
2519 		if (todo < 0) {
2520 			if (skb_prepare_for_shift(skb) ||
2521 			    skb_prepare_for_shift(tgt))
2522 				return 0;
2523 
2524 			/* All previous frag pointers might be stale! */
2525 			fragfrom = &skb_shinfo(skb)->frags[from];
2526 			fragto = &skb_shinfo(tgt)->frags[merge];
2527 
2528 			skb_frag_size_add(fragto, shiftlen);
2529 			skb_frag_size_sub(fragfrom, shiftlen);
2530 			fragfrom->page_offset += shiftlen;
2531 
2532 			goto onlymerged;
2533 		}
2534 
2535 		from++;
2536 	}
2537 
2538 	/* Skip full, not-fitting skb to avoid expensive operations */
2539 	if ((shiftlen == skb->len) &&
2540 	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2541 		return 0;
2542 
2543 	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2544 		return 0;
2545 
2546 	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2547 		if (to == MAX_SKB_FRAGS)
2548 			return 0;
2549 
2550 		fragfrom = &skb_shinfo(skb)->frags[from];
2551 		fragto = &skb_shinfo(tgt)->frags[to];
2552 
2553 		if (todo >= skb_frag_size(fragfrom)) {
2554 			*fragto = *fragfrom;
2555 			todo -= skb_frag_size(fragfrom);
2556 			from++;
2557 			to++;
2558 
2559 		} else {
2560 			__skb_frag_ref(fragfrom);
2561 			fragto->page = fragfrom->page;
2562 			fragto->page_offset = fragfrom->page_offset;
2563 			skb_frag_size_set(fragto, todo);
2564 
2565 			fragfrom->page_offset += todo;
2566 			skb_frag_size_sub(fragfrom, todo);
2567 			todo = 0;
2568 
2569 			to++;
2570 			break;
2571 		}
2572 	}
2573 
2574 	/* Ready to "commit" this state change to tgt */
2575 	skb_shinfo(tgt)->nr_frags = to;
2576 
2577 	if (merge >= 0) {
2578 		fragfrom = &skb_shinfo(skb)->frags[0];
2579 		fragto = &skb_shinfo(tgt)->frags[merge];
2580 
2581 		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
2582 		__skb_frag_unref(fragfrom);
2583 	}
2584 
2585 	/* Reposition in the original skb */
2586 	to = 0;
2587 	while (from < skb_shinfo(skb)->nr_frags)
2588 		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2589 	skb_shinfo(skb)->nr_frags = to;
2590 
2591 	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2592 
2593 onlymerged:
2594 	/* Most likely the tgt won't ever need its checksum anymore, skb on
2595 	 * the other hand might need it if it needs to be resent
2596 	 */
2597 	tgt->ip_summed = CHECKSUM_PARTIAL;
2598 	skb->ip_summed = CHECKSUM_PARTIAL;
2599 
2600 	/* Yak, is it really working this way? Some helper please? */
2601 	skb->len -= shiftlen;
2602 	skb->data_len -= shiftlen;
2603 	skb->truesize -= shiftlen;
2604 	tgt->len += shiftlen;
2605 	tgt->data_len += shiftlen;
2606 	tgt->truesize += shiftlen;
2607 
2608 	return shiftlen;
2609 }
2610 
2611 /**
2612  * skb_prepare_seq_read - Prepare a sequential read of skb data
2613  * @skb: the buffer to read
2614  * @from: lower offset of data to be read
2615  * @to: upper offset of data to be read
2616  * @st: state variable
2617  *
2618  * Initializes the specified state variable. Must be called before
2619  * invoking skb_seq_read() for the first time.
2620  */
2621 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2622 			  unsigned int to, struct skb_seq_state *st)
2623 {
2624 	st->lower_offset = from;
2625 	st->upper_offset = to;
2626 	st->root_skb = st->cur_skb = skb;
2627 	st->frag_idx = st->stepped_offset = 0;
2628 	st->frag_data = NULL;
2629 }
2630 EXPORT_SYMBOL(skb_prepare_seq_read);
2631 
2632 /**
2633  * skb_seq_read - Sequentially read skb data
2634  * @consumed: number of bytes consumed by the caller so far
2635  * @data: destination pointer for data to be returned
2636  * @st: state variable
2637  *
2638  * Reads a block of skb data at @consumed relative to the
2639  * lower offset specified to skb_prepare_seq_read(). Assigns
2640  * the head of the data block to @data and returns the length
2641  * of the block or 0 if the end of the skb data or the upper
2642  * offset has been reached.
2643  *
2644  * The caller is not required to consume all of the data
2645  * returned, i.e. @consumed is typically set to the number
2646  * of bytes already consumed and the next call to
2647  * skb_seq_read() will return the remaining part of the block.
2648  *
2649  * Note 1: The size of each block of data returned can be arbitrary,
2650  *       this limitation is the cost for zerocopy sequential
2651  *       reads of potentially non linear data.
2652  *
2653  * Note 2: Fragment lists within fragments are not implemented
2654  *       at the moment, state->root_skb could be replaced with
2655  *       a stack for this purpose.
2656  */
2657 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2658 			  struct skb_seq_state *st)
2659 {
2660 	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2661 	skb_frag_t *frag;
2662 
2663 	if (unlikely(abs_offset >= st->upper_offset)) {
2664 		if (st->frag_data) {
2665 			kunmap_atomic(st->frag_data);
2666 			st->frag_data = NULL;
2667 		}
2668 		return 0;
2669 	}
2670 
2671 next_skb:
2672 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2673 
2674 	if (abs_offset < block_limit && !st->frag_data) {
2675 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2676 		return block_limit - abs_offset;
2677 	}
2678 
2679 	if (st->frag_idx == 0 && !st->frag_data)
2680 		st->stepped_offset += skb_headlen(st->cur_skb);
2681 
2682 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2683 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2684 		block_limit = skb_frag_size(frag) + st->stepped_offset;
2685 
2686 		if (abs_offset < block_limit) {
2687 			if (!st->frag_data)
2688 				st->frag_data = kmap_atomic(skb_frag_page(frag));
2689 
2690 			*data = (u8 *) st->frag_data + frag->page_offset +
2691 				(abs_offset - st->stepped_offset);
2692 
2693 			return block_limit - abs_offset;
2694 		}
2695 
2696 		if (st->frag_data) {
2697 			kunmap_atomic(st->frag_data);
2698 			st->frag_data = NULL;
2699 		}
2700 
2701 		st->frag_idx++;
2702 		st->stepped_offset += skb_frag_size(frag);
2703 	}
2704 
2705 	if (st->frag_data) {
2706 		kunmap_atomic(st->frag_data);
2707 		st->frag_data = NULL;
2708 	}
2709 
2710 	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2711 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2712 		st->frag_idx = 0;
2713 		goto next_skb;
2714 	} else if (st->cur_skb->next) {
2715 		st->cur_skb = st->cur_skb->next;
2716 		st->frag_idx = 0;
2717 		goto next_skb;
2718 	}
2719 
2720 	return 0;
2721 }
2722 EXPORT_SYMBOL(skb_seq_read);
2723 
2724 /**
2725  * skb_abort_seq_read - Abort a sequential read of skb data
2726  * @st: state variable
2727  *
2728  * Must be called if skb_seq_read() was not called until it
2729  * returned 0.
2730  */
2731 void skb_abort_seq_read(struct skb_seq_state *st)
2732 {
2733 	if (st->frag_data)
2734 		kunmap_atomic(st->frag_data);
2735 }
2736 EXPORT_SYMBOL(skb_abort_seq_read);
2737 
2738 #define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
2739 
2740 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2741 					  struct ts_config *conf,
2742 					  struct ts_state *state)
2743 {
2744 	return skb_seq_read(offset, text, TS_SKB_CB(state));
2745 }
2746 
2747 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2748 {
2749 	skb_abort_seq_read(TS_SKB_CB(state));
2750 }
2751 
2752 /**
2753  * skb_find_text - Find a text pattern in skb data
2754  * @skb: the buffer to look in
2755  * @from: search offset
2756  * @to: search limit
2757  * @config: textsearch configuration
2758  * @state: uninitialized textsearch state variable
2759  *
2760  * Finds a pattern in the skb data according to the specified
2761  * textsearch configuration. Use textsearch_next() to retrieve
2762  * subsequent occurrences of the pattern. Returns the offset
2763  * to the first occurrence or UINT_MAX if no match was found.
2764  */
2765 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2766 			   unsigned int to, struct ts_config *config,
2767 			   struct ts_state *state)
2768 {
2769 	unsigned int ret;
2770 
2771 	config->get_next_block = skb_ts_get_next_block;
2772 	config->finish = skb_ts_finish;
2773 
2774 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2775 
2776 	ret = textsearch_find(config, state);
2777 	return (ret <= to - from ? ret : UINT_MAX);
2778 }
2779 EXPORT_SYMBOL(skb_find_text);
2780 
2781 /**
2782  * skb_append_datato_frags - append the user data to a skb
2783  * @sk: sock  structure
2784  * @skb: skb structure to be appended with user data.
2785  * @getfrag: call back function to be used for getting the user data
2786  * @from: pointer to user message iov
2787  * @length: length of the iov message
2788  *
2789  * Description: This procedure append the user data in the fragment part
2790  * of the skb if any page alloc fails user this procedure returns  -ENOMEM
2791  */
2792 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2793 			int (*getfrag)(void *from, char *to, int offset,
2794 					int len, int odd, struct sk_buff *skb),
2795 			void *from, int length)
2796 {
2797 	int frg_cnt = skb_shinfo(skb)->nr_frags;
2798 	int copy;
2799 	int offset = 0;
2800 	int ret;
2801 	struct page_frag *pfrag = &current->task_frag;
2802 
2803 	do {
2804 		/* Return error if we don't have space for new frag */
2805 		if (frg_cnt >= MAX_SKB_FRAGS)
2806 			return -EMSGSIZE;
2807 
2808 		if (!sk_page_frag_refill(sk, pfrag))
2809 			return -ENOMEM;
2810 
2811 		/* copy the user data to page */
2812 		copy = min_t(int, length, pfrag->size - pfrag->offset);
2813 
2814 		ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
2815 			      offset, copy, 0, skb);
2816 		if (ret < 0)
2817 			return -EFAULT;
2818 
2819 		/* copy was successful so update the size parameters */
2820 		skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
2821 				   copy);
2822 		frg_cnt++;
2823 		pfrag->offset += copy;
2824 		get_page(pfrag->page);
2825 
2826 		skb->truesize += copy;
2827 		atomic_add(copy, &sk->sk_wmem_alloc);
2828 		skb->len += copy;
2829 		skb->data_len += copy;
2830 		offset += copy;
2831 		length -= copy;
2832 
2833 	} while (length > 0);
2834 
2835 	return 0;
2836 }
2837 EXPORT_SYMBOL(skb_append_datato_frags);
2838 
2839 /**
2840  *	skb_pull_rcsum - pull skb and update receive checksum
2841  *	@skb: buffer to update
2842  *	@len: length of data pulled
2843  *
2844  *	This function performs an skb_pull on the packet and updates
2845  *	the CHECKSUM_COMPLETE checksum.  It should be used on
2846  *	receive path processing instead of skb_pull unless you know
2847  *	that the checksum difference is zero (e.g., a valid IP header)
2848  *	or you are setting ip_summed to CHECKSUM_NONE.
2849  */
2850 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2851 {
2852 	BUG_ON(len > skb->len);
2853 	skb->len -= len;
2854 	BUG_ON(skb->len < skb->data_len);
2855 	skb_postpull_rcsum(skb, skb->data, len);
2856 	return skb->data += len;
2857 }
2858 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
2859 
2860 /**
2861  *	skb_segment - Perform protocol segmentation on skb.
2862  *	@head_skb: buffer to segment
2863  *	@features: features for the output path (see dev->features)
2864  *
2865  *	This function performs segmentation on the given skb.  It returns
2866  *	a pointer to the first in a list of new skbs for the segments.
2867  *	In case of error it returns ERR_PTR(err).
2868  */
2869 struct sk_buff *skb_segment(struct sk_buff *head_skb,
2870 			    netdev_features_t features)
2871 {
2872 	struct sk_buff *segs = NULL;
2873 	struct sk_buff *tail = NULL;
2874 	struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
2875 	skb_frag_t *frag = skb_shinfo(head_skb)->frags;
2876 	unsigned int mss = skb_shinfo(head_skb)->gso_size;
2877 	unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
2878 	struct sk_buff *frag_skb = head_skb;
2879 	unsigned int offset = doffset;
2880 	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
2881 	unsigned int headroom;
2882 	unsigned int len;
2883 	__be16 proto;
2884 	bool csum;
2885 	int sg = !!(features & NETIF_F_SG);
2886 	int nfrags = skb_shinfo(head_skb)->nr_frags;
2887 	int err = -ENOMEM;
2888 	int i = 0;
2889 	int pos;
2890 	int dummy;
2891 
2892 	__skb_push(head_skb, doffset);
2893 	proto = skb_network_protocol(head_skb, &dummy);
2894 	if (unlikely(!proto))
2895 		return ERR_PTR(-EINVAL);
2896 
2897 	csum = !head_skb->encap_hdr_csum &&
2898 	    !!can_checksum_protocol(features, proto);
2899 
2900 	headroom = skb_headroom(head_skb);
2901 	pos = skb_headlen(head_skb);
2902 
2903 	do {
2904 		struct sk_buff *nskb;
2905 		skb_frag_t *nskb_frag;
2906 		int hsize;
2907 		int size;
2908 
2909 		len = head_skb->len - offset;
2910 		if (len > mss)
2911 			len = mss;
2912 
2913 		hsize = skb_headlen(head_skb) - offset;
2914 		if (hsize < 0)
2915 			hsize = 0;
2916 		if (hsize > len || !sg)
2917 			hsize = len;
2918 
2919 		if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
2920 		    (skb_headlen(list_skb) == len || sg)) {
2921 			BUG_ON(skb_headlen(list_skb) > len);
2922 
2923 			i = 0;
2924 			nfrags = skb_shinfo(list_skb)->nr_frags;
2925 			frag = skb_shinfo(list_skb)->frags;
2926 			frag_skb = list_skb;
2927 			pos += skb_headlen(list_skb);
2928 
2929 			while (pos < offset + len) {
2930 				BUG_ON(i >= nfrags);
2931 
2932 				size = skb_frag_size(frag);
2933 				if (pos + size > offset + len)
2934 					break;
2935 
2936 				i++;
2937 				pos += size;
2938 				frag++;
2939 			}
2940 
2941 			nskb = skb_clone(list_skb, GFP_ATOMIC);
2942 			list_skb = list_skb->next;
2943 
2944 			if (unlikely(!nskb))
2945 				goto err;
2946 
2947 			if (unlikely(pskb_trim(nskb, len))) {
2948 				kfree_skb(nskb);
2949 				goto err;
2950 			}
2951 
2952 			hsize = skb_end_offset(nskb);
2953 			if (skb_cow_head(nskb, doffset + headroom)) {
2954 				kfree_skb(nskb);
2955 				goto err;
2956 			}
2957 
2958 			nskb->truesize += skb_end_offset(nskb) - hsize;
2959 			skb_release_head_state(nskb);
2960 			__skb_push(nskb, doffset);
2961 		} else {
2962 			nskb = __alloc_skb(hsize + doffset + headroom,
2963 					   GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
2964 					   NUMA_NO_NODE);
2965 
2966 			if (unlikely(!nskb))
2967 				goto err;
2968 
2969 			skb_reserve(nskb, headroom);
2970 			__skb_put(nskb, doffset);
2971 		}
2972 
2973 		if (segs)
2974 			tail->next = nskb;
2975 		else
2976 			segs = nskb;
2977 		tail = nskb;
2978 
2979 		__copy_skb_header(nskb, head_skb);
2980 
2981 		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
2982 		skb_reset_mac_len(nskb);
2983 
2984 		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
2985 						 nskb->data - tnl_hlen,
2986 						 doffset + tnl_hlen);
2987 
2988 		if (nskb->len == len + doffset)
2989 			goto perform_csum_check;
2990 
2991 		if (!sg) {
2992 			nskb->ip_summed = CHECKSUM_NONE;
2993 			nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
2994 							    skb_put(nskb, len),
2995 							    len, 0);
2996 			SKB_GSO_CB(nskb)->csum_start =
2997 			    skb_headroom(nskb) + doffset;
2998 			continue;
2999 		}
3000 
3001 		nskb_frag = skb_shinfo(nskb)->frags;
3002 
3003 		skb_copy_from_linear_data_offset(head_skb, offset,
3004 						 skb_put(nskb, hsize), hsize);
3005 
3006 		skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
3007 			SKBTX_SHARED_FRAG;
3008 
3009 		while (pos < offset + len) {
3010 			if (i >= nfrags) {
3011 				BUG_ON(skb_headlen(list_skb));
3012 
3013 				i = 0;
3014 				nfrags = skb_shinfo(list_skb)->nr_frags;
3015 				frag = skb_shinfo(list_skb)->frags;
3016 				frag_skb = list_skb;
3017 
3018 				BUG_ON(!nfrags);
3019 
3020 				list_skb = list_skb->next;
3021 			}
3022 
3023 			if (unlikely(skb_shinfo(nskb)->nr_frags >=
3024 				     MAX_SKB_FRAGS)) {
3025 				net_warn_ratelimited(
3026 					"skb_segment: too many frags: %u %u\n",
3027 					pos, mss);
3028 				goto err;
3029 			}
3030 
3031 			if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
3032 				goto err;
3033 
3034 			*nskb_frag = *frag;
3035 			__skb_frag_ref(nskb_frag);
3036 			size = skb_frag_size(nskb_frag);
3037 
3038 			if (pos < offset) {
3039 				nskb_frag->page_offset += offset - pos;
3040 				skb_frag_size_sub(nskb_frag, offset - pos);
3041 			}
3042 
3043 			skb_shinfo(nskb)->nr_frags++;
3044 
3045 			if (pos + size <= offset + len) {
3046 				i++;
3047 				frag++;
3048 				pos += size;
3049 			} else {
3050 				skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
3051 				goto skip_fraglist;
3052 			}
3053 
3054 			nskb_frag++;
3055 		}
3056 
3057 skip_fraglist:
3058 		nskb->data_len = len - hsize;
3059 		nskb->len += nskb->data_len;
3060 		nskb->truesize += nskb->data_len;
3061 
3062 perform_csum_check:
3063 		if (!csum) {
3064 			nskb->csum = skb_checksum(nskb, doffset,
3065 						  nskb->len - doffset, 0);
3066 			nskb->ip_summed = CHECKSUM_NONE;
3067 			SKB_GSO_CB(nskb)->csum_start =
3068 			    skb_headroom(nskb) + doffset;
3069 		}
3070 	} while ((offset += len) < head_skb->len);
3071 
3072 	return segs;
3073 
3074 err:
3075 	kfree_skb_list(segs);
3076 	return ERR_PTR(err);
3077 }
3078 EXPORT_SYMBOL_GPL(skb_segment);
3079 
3080 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
3081 {
3082 	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
3083 	unsigned int offset = skb_gro_offset(skb);
3084 	unsigned int headlen = skb_headlen(skb);
3085 	struct sk_buff *nskb, *lp, *p = *head;
3086 	unsigned int len = skb_gro_len(skb);
3087 	unsigned int delta_truesize;
3088 	unsigned int headroom;
3089 
3090 	if (unlikely(p->len + len >= 65536))
3091 		return -E2BIG;
3092 
3093 	lp = NAPI_GRO_CB(p)->last;
3094 	pinfo = skb_shinfo(lp);
3095 
3096 	if (headlen <= offset) {
3097 		skb_frag_t *frag;
3098 		skb_frag_t *frag2;
3099 		int i = skbinfo->nr_frags;
3100 		int nr_frags = pinfo->nr_frags + i;
3101 
3102 		if (nr_frags > MAX_SKB_FRAGS)
3103 			goto merge;
3104 
3105 		offset -= headlen;
3106 		pinfo->nr_frags = nr_frags;
3107 		skbinfo->nr_frags = 0;
3108 
3109 		frag = pinfo->frags + nr_frags;
3110 		frag2 = skbinfo->frags + i;
3111 		do {
3112 			*--frag = *--frag2;
3113 		} while (--i);
3114 
3115 		frag->page_offset += offset;
3116 		skb_frag_size_sub(frag, offset);
3117 
3118 		/* all fragments truesize : remove (head size + sk_buff) */
3119 		delta_truesize = skb->truesize -
3120 				 SKB_TRUESIZE(skb_end_offset(skb));
3121 
3122 		skb->truesize -= skb->data_len;
3123 		skb->len -= skb->data_len;
3124 		skb->data_len = 0;
3125 
3126 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
3127 		goto done;
3128 	} else if (skb->head_frag) {
3129 		int nr_frags = pinfo->nr_frags;
3130 		skb_frag_t *frag = pinfo->frags + nr_frags;
3131 		struct page *page = virt_to_head_page(skb->head);
3132 		unsigned int first_size = headlen - offset;
3133 		unsigned int first_offset;
3134 
3135 		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
3136 			goto merge;
3137 
3138 		first_offset = skb->data -
3139 			       (unsigned char *)page_address(page) +
3140 			       offset;
3141 
3142 		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
3143 
3144 		frag->page.p	  = page;
3145 		frag->page_offset = first_offset;
3146 		skb_frag_size_set(frag, first_size);
3147 
3148 		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
3149 		/* We dont need to clear skbinfo->nr_frags here */
3150 
3151 		delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
3152 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
3153 		goto done;
3154 	}
3155 	if (pinfo->frag_list)
3156 		goto merge;
3157 	if (skb_gro_len(p) != pinfo->gso_size)
3158 		return -E2BIG;
3159 
3160 	headroom = skb_headroom(p);
3161 	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
3162 	if (unlikely(!nskb))
3163 		return -ENOMEM;
3164 
3165 	__copy_skb_header(nskb, p);
3166 	nskb->mac_len = p->mac_len;
3167 
3168 	skb_reserve(nskb, headroom);
3169 	__skb_put(nskb, skb_gro_offset(p));
3170 
3171 	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
3172 	skb_set_network_header(nskb, skb_network_offset(p));
3173 	skb_set_transport_header(nskb, skb_transport_offset(p));
3174 
3175 	__skb_pull(p, skb_gro_offset(p));
3176 	memcpy(skb_mac_header(nskb), skb_mac_header(p),
3177 	       p->data - skb_mac_header(p));
3178 
3179 	skb_shinfo(nskb)->frag_list = p;
3180 	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
3181 	pinfo->gso_size = 0;
3182 	skb_header_release(p);
3183 	NAPI_GRO_CB(nskb)->last = p;
3184 
3185 	nskb->data_len += p->len;
3186 	nskb->truesize += p->truesize;
3187 	nskb->len += p->len;
3188 
3189 	*head = nskb;
3190 	nskb->next = p->next;
3191 	p->next = NULL;
3192 
3193 	p = nskb;
3194 
3195 merge:
3196 	delta_truesize = skb->truesize;
3197 	if (offset > headlen) {
3198 		unsigned int eat = offset - headlen;
3199 
3200 		skbinfo->frags[0].page_offset += eat;
3201 		skb_frag_size_sub(&skbinfo->frags[0], eat);
3202 		skb->data_len -= eat;
3203 		skb->len -= eat;
3204 		offset = headlen;
3205 	}
3206 
3207 	__skb_pull(skb, offset);
3208 
3209 	if (NAPI_GRO_CB(p)->last == p)
3210 		skb_shinfo(p)->frag_list = skb;
3211 	else
3212 		NAPI_GRO_CB(p)->last->next = skb;
3213 	NAPI_GRO_CB(p)->last = skb;
3214 	skb_header_release(skb);
3215 	lp = p;
3216 
3217 done:
3218 	NAPI_GRO_CB(p)->count++;
3219 	p->data_len += len;
3220 	p->truesize += delta_truesize;
3221 	p->len += len;
3222 	if (lp != p) {
3223 		lp->data_len += len;
3224 		lp->truesize += delta_truesize;
3225 		lp->len += len;
3226 	}
3227 	NAPI_GRO_CB(skb)->same_flow = 1;
3228 	return 0;
3229 }
3230 EXPORT_SYMBOL_GPL(skb_gro_receive);
3231 
3232 void __init skb_init(void)
3233 {
3234 	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
3235 					      sizeof(struct sk_buff),
3236 					      0,
3237 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3238 					      NULL);
3239 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
3240 						(2*sizeof(struct sk_buff)) +
3241 						sizeof(atomic_t),
3242 						0,
3243 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3244 						NULL);
3245 }
3246 
3247 /**
3248  *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
3249  *	@skb: Socket buffer containing the buffers to be mapped
3250  *	@sg: The scatter-gather list to map into
3251  *	@offset: The offset into the buffer's contents to start mapping
3252  *	@len: Length of buffer space to be mapped
3253  *
3254  *	Fill the specified scatter-gather list with mappings/pointers into a
3255  *	region of the buffer space attached to a socket buffer.
3256  */
3257 static int
3258 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3259 {
3260 	int start = skb_headlen(skb);
3261 	int i, copy = start - offset;
3262 	struct sk_buff *frag_iter;
3263 	int elt = 0;
3264 
3265 	if (copy > 0) {
3266 		if (copy > len)
3267 			copy = len;
3268 		sg_set_buf(sg, skb->data + offset, copy);
3269 		elt++;
3270 		if ((len -= copy) == 0)
3271 			return elt;
3272 		offset += copy;
3273 	}
3274 
3275 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3276 		int end;
3277 
3278 		WARN_ON(start > offset + len);
3279 
3280 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3281 		if ((copy = end - offset) > 0) {
3282 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3283 
3284 			if (copy > len)
3285 				copy = len;
3286 			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
3287 					frag->page_offset+offset-start);
3288 			elt++;
3289 			if (!(len -= copy))
3290 				return elt;
3291 			offset += copy;
3292 		}
3293 		start = end;
3294 	}
3295 
3296 	skb_walk_frags(skb, frag_iter) {
3297 		int end;
3298 
3299 		WARN_ON(start > offset + len);
3300 
3301 		end = start + frag_iter->len;
3302 		if ((copy = end - offset) > 0) {
3303 			if (copy > len)
3304 				copy = len;
3305 			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
3306 					      copy);
3307 			if ((len -= copy) == 0)
3308 				return elt;
3309 			offset += copy;
3310 		}
3311 		start = end;
3312 	}
3313 	BUG_ON(len);
3314 	return elt;
3315 }
3316 
3317 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given
3318  * sglist without mark the sg which contain last skb data as the end.
3319  * So the caller can mannipulate sg list as will when padding new data after
3320  * the first call without calling sg_unmark_end to expend sg list.
3321  *
3322  * Scenario to use skb_to_sgvec_nomark:
3323  * 1. sg_init_table
3324  * 2. skb_to_sgvec_nomark(payload1)
3325  * 3. skb_to_sgvec_nomark(payload2)
3326  *
3327  * This is equivalent to:
3328  * 1. sg_init_table
3329  * 2. skb_to_sgvec(payload1)
3330  * 3. sg_unmark_end
3331  * 4. skb_to_sgvec(payload2)
3332  *
3333  * When mapping mutilple payload conditionally, skb_to_sgvec_nomark
3334  * is more preferable.
3335  */
3336 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
3337 			int offset, int len)
3338 {
3339 	return __skb_to_sgvec(skb, sg, offset, len);
3340 }
3341 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
3342 
3343 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3344 {
3345 	int nsg = __skb_to_sgvec(skb, sg, offset, len);
3346 
3347 	sg_mark_end(&sg[nsg - 1]);
3348 
3349 	return nsg;
3350 }
3351 EXPORT_SYMBOL_GPL(skb_to_sgvec);
3352 
3353 /**
3354  *	skb_cow_data - Check that a socket buffer's data buffers are writable
3355  *	@skb: The socket buffer to check.
3356  *	@tailbits: Amount of trailing space to be added
3357  *	@trailer: Returned pointer to the skb where the @tailbits space begins
3358  *
3359  *	Make sure that the data buffers attached to a socket buffer are
3360  *	writable. If they are not, private copies are made of the data buffers
3361  *	and the socket buffer is set to use these instead.
3362  *
3363  *	If @tailbits is given, make sure that there is space to write @tailbits
3364  *	bytes of data beyond current end of socket buffer.  @trailer will be
3365  *	set to point to the skb in which this space begins.
3366  *
3367  *	The number of scatterlist elements required to completely map the
3368  *	COW'd and extended socket buffer will be returned.
3369  */
3370 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
3371 {
3372 	int copyflag;
3373 	int elt;
3374 	struct sk_buff *skb1, **skb_p;
3375 
3376 	/* If skb is cloned or its head is paged, reallocate
3377 	 * head pulling out all the pages (pages are considered not writable
3378 	 * at the moment even if they are anonymous).
3379 	 */
3380 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
3381 	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
3382 		return -ENOMEM;
3383 
3384 	/* Easy case. Most of packets will go this way. */
3385 	if (!skb_has_frag_list(skb)) {
3386 		/* A little of trouble, not enough of space for trailer.
3387 		 * This should not happen, when stack is tuned to generate
3388 		 * good frames. OK, on miss we reallocate and reserve even more
3389 		 * space, 128 bytes is fair. */
3390 
3391 		if (skb_tailroom(skb) < tailbits &&
3392 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
3393 			return -ENOMEM;
3394 
3395 		/* Voila! */
3396 		*trailer = skb;
3397 		return 1;
3398 	}
3399 
3400 	/* Misery. We are in troubles, going to mincer fragments... */
3401 
3402 	elt = 1;
3403 	skb_p = &skb_shinfo(skb)->frag_list;
3404 	copyflag = 0;
3405 
3406 	while ((skb1 = *skb_p) != NULL) {
3407 		int ntail = 0;
3408 
3409 		/* The fragment is partially pulled by someone,
3410 		 * this can happen on input. Copy it and everything
3411 		 * after it. */
3412 
3413 		if (skb_shared(skb1))
3414 			copyflag = 1;
3415 
3416 		/* If the skb is the last, worry about trailer. */
3417 
3418 		if (skb1->next == NULL && tailbits) {
3419 			if (skb_shinfo(skb1)->nr_frags ||
3420 			    skb_has_frag_list(skb1) ||
3421 			    skb_tailroom(skb1) < tailbits)
3422 				ntail = tailbits + 128;
3423 		}
3424 
3425 		if (copyflag ||
3426 		    skb_cloned(skb1) ||
3427 		    ntail ||
3428 		    skb_shinfo(skb1)->nr_frags ||
3429 		    skb_has_frag_list(skb1)) {
3430 			struct sk_buff *skb2;
3431 
3432 			/* Fuck, we are miserable poor guys... */
3433 			if (ntail == 0)
3434 				skb2 = skb_copy(skb1, GFP_ATOMIC);
3435 			else
3436 				skb2 = skb_copy_expand(skb1,
3437 						       skb_headroom(skb1),
3438 						       ntail,
3439 						       GFP_ATOMIC);
3440 			if (unlikely(skb2 == NULL))
3441 				return -ENOMEM;
3442 
3443 			if (skb1->sk)
3444 				skb_set_owner_w(skb2, skb1->sk);
3445 
3446 			/* Looking around. Are we still alive?
3447 			 * OK, link new skb, drop old one */
3448 
3449 			skb2->next = skb1->next;
3450 			*skb_p = skb2;
3451 			kfree_skb(skb1);
3452 			skb1 = skb2;
3453 		}
3454 		elt++;
3455 		*trailer = skb1;
3456 		skb_p = &skb1->next;
3457 	}
3458 
3459 	return elt;
3460 }
3461 EXPORT_SYMBOL_GPL(skb_cow_data);
3462 
3463 static void sock_rmem_free(struct sk_buff *skb)
3464 {
3465 	struct sock *sk = skb->sk;
3466 
3467 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
3468 }
3469 
3470 /*
3471  * Note: We dont mem charge error packets (no sk_forward_alloc changes)
3472  */
3473 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3474 {
3475 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
3476 	    (unsigned int)sk->sk_rcvbuf)
3477 		return -ENOMEM;
3478 
3479 	skb_orphan(skb);
3480 	skb->sk = sk;
3481 	skb->destructor = sock_rmem_free;
3482 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
3483 
3484 	/* before exiting rcu section, make sure dst is refcounted */
3485 	skb_dst_force(skb);
3486 
3487 	skb_queue_tail(&sk->sk_error_queue, skb);
3488 	if (!sock_flag(sk, SOCK_DEAD))
3489 		sk->sk_data_ready(sk);
3490 	return 0;
3491 }
3492 EXPORT_SYMBOL(sock_queue_err_skb);
3493 
3494 void __skb_tstamp_tx(struct sk_buff *orig_skb,
3495 		     struct skb_shared_hwtstamps *hwtstamps,
3496 		     struct sock *sk, int tstype)
3497 {
3498 	struct sock_exterr_skb *serr;
3499 	struct sk_buff *skb;
3500 	int err;
3501 
3502 	if (!sk)
3503 		return;
3504 
3505 	if (hwtstamps) {
3506 		*skb_hwtstamps(orig_skb) =
3507 			*hwtstamps;
3508 	} else {
3509 		/*
3510 		 * no hardware time stamps available,
3511 		 * so keep the shared tx_flags and only
3512 		 * store software time stamp
3513 		 */
3514 		orig_skb->tstamp = ktime_get_real();
3515 	}
3516 
3517 	skb = skb_clone(orig_skb, GFP_ATOMIC);
3518 	if (!skb)
3519 		return;
3520 
3521 	serr = SKB_EXT_ERR(skb);
3522 	memset(serr, 0, sizeof(*serr));
3523 	serr->ee.ee_errno = ENOMSG;
3524 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3525 	serr->ee.ee_info = tstype;
3526 	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
3527 		serr->ee.ee_data = skb_shinfo(skb)->tskey;
3528 		if (sk->sk_protocol == IPPROTO_TCP)
3529 			serr->ee.ee_data -= sk->sk_tskey;
3530 	}
3531 
3532 	err = sock_queue_err_skb(sk, skb);
3533 
3534 	if (err)
3535 		kfree_skb(skb);
3536 }
3537 EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
3538 
3539 void skb_tstamp_tx(struct sk_buff *orig_skb,
3540 		   struct skb_shared_hwtstamps *hwtstamps)
3541 {
3542 	return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
3543 			       SCM_TSTAMP_SND);
3544 }
3545 EXPORT_SYMBOL_GPL(skb_tstamp_tx);
3546 
3547 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3548 {
3549 	struct sock *sk = skb->sk;
3550 	struct sock_exterr_skb *serr;
3551 	int err;
3552 
3553 	skb->wifi_acked_valid = 1;
3554 	skb->wifi_acked = acked;
3555 
3556 	serr = SKB_EXT_ERR(skb);
3557 	memset(serr, 0, sizeof(*serr));
3558 	serr->ee.ee_errno = ENOMSG;
3559 	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
3560 
3561 	err = sock_queue_err_skb(sk, skb);
3562 	if (err)
3563 		kfree_skb(skb);
3564 }
3565 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
3566 
3567 
3568 /**
3569  * skb_partial_csum_set - set up and verify partial csum values for packet
3570  * @skb: the skb to set
3571  * @start: the number of bytes after skb->data to start checksumming.
3572  * @off: the offset from start to place the checksum.
3573  *
3574  * For untrusted partially-checksummed packets, we need to make sure the values
3575  * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3576  *
3577  * This function checks and sets those values and skb->ip_summed: if this
3578  * returns false you should drop the packet.
3579  */
3580 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3581 {
3582 	if (unlikely(start > skb_headlen(skb)) ||
3583 	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
3584 		net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
3585 				     start, off, skb_headlen(skb));
3586 		return false;
3587 	}
3588 	skb->ip_summed = CHECKSUM_PARTIAL;
3589 	skb->csum_start = skb_headroom(skb) + start;
3590 	skb->csum_offset = off;
3591 	skb_set_transport_header(skb, start);
3592 	return true;
3593 }
3594 EXPORT_SYMBOL_GPL(skb_partial_csum_set);
3595 
3596 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
3597 			       unsigned int max)
3598 {
3599 	if (skb_headlen(skb) >= len)
3600 		return 0;
3601 
3602 	/* If we need to pullup then pullup to the max, so we
3603 	 * won't need to do it again.
3604 	 */
3605 	if (max > skb->len)
3606 		max = skb->len;
3607 
3608 	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
3609 		return -ENOMEM;
3610 
3611 	if (skb_headlen(skb) < len)
3612 		return -EPROTO;
3613 
3614 	return 0;
3615 }
3616 
3617 #define MAX_TCP_HDR_LEN (15 * 4)
3618 
3619 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
3620 				      typeof(IPPROTO_IP) proto,
3621 				      unsigned int off)
3622 {
3623 	switch (proto) {
3624 		int err;
3625 
3626 	case IPPROTO_TCP:
3627 		err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
3628 					  off + MAX_TCP_HDR_LEN);
3629 		if (!err && !skb_partial_csum_set(skb, off,
3630 						  offsetof(struct tcphdr,
3631 							   check)))
3632 			err = -EPROTO;
3633 		return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
3634 
3635 	case IPPROTO_UDP:
3636 		err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
3637 					  off + sizeof(struct udphdr));
3638 		if (!err && !skb_partial_csum_set(skb, off,
3639 						  offsetof(struct udphdr,
3640 							   check)))
3641 			err = -EPROTO;
3642 		return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
3643 	}
3644 
3645 	return ERR_PTR(-EPROTO);
3646 }
3647 
3648 /* This value should be large enough to cover a tagged ethernet header plus
3649  * maximally sized IP and TCP or UDP headers.
3650  */
3651 #define MAX_IP_HDR_LEN 128
3652 
3653 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
3654 {
3655 	unsigned int off;
3656 	bool fragment;
3657 	__sum16 *csum;
3658 	int err;
3659 
3660 	fragment = false;
3661 
3662 	err = skb_maybe_pull_tail(skb,
3663 				  sizeof(struct iphdr),
3664 				  MAX_IP_HDR_LEN);
3665 	if (err < 0)
3666 		goto out;
3667 
3668 	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
3669 		fragment = true;
3670 
3671 	off = ip_hdrlen(skb);
3672 
3673 	err = -EPROTO;
3674 
3675 	if (fragment)
3676 		goto out;
3677 
3678 	csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
3679 	if (IS_ERR(csum))
3680 		return PTR_ERR(csum);
3681 
3682 	if (recalculate)
3683 		*csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3684 					   ip_hdr(skb)->daddr,
3685 					   skb->len - off,
3686 					   ip_hdr(skb)->protocol, 0);
3687 	err = 0;
3688 
3689 out:
3690 	return err;
3691 }
3692 
3693 /* This value should be large enough to cover a tagged ethernet header plus
3694  * an IPv6 header, all options, and a maximal TCP or UDP header.
3695  */
3696 #define MAX_IPV6_HDR_LEN 256
3697 
3698 #define OPT_HDR(type, skb, off) \
3699 	(type *)(skb_network_header(skb) + (off))
3700 
3701 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
3702 {
3703 	int err;
3704 	u8 nexthdr;
3705 	unsigned int off;
3706 	unsigned int len;
3707 	bool fragment;
3708 	bool done;
3709 	__sum16 *csum;
3710 
3711 	fragment = false;
3712 	done = false;
3713 
3714 	off = sizeof(struct ipv6hdr);
3715 
3716 	err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
3717 	if (err < 0)
3718 		goto out;
3719 
3720 	nexthdr = ipv6_hdr(skb)->nexthdr;
3721 
3722 	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
3723 	while (off <= len && !done) {
3724 		switch (nexthdr) {
3725 		case IPPROTO_DSTOPTS:
3726 		case IPPROTO_HOPOPTS:
3727 		case IPPROTO_ROUTING: {
3728 			struct ipv6_opt_hdr *hp;
3729 
3730 			err = skb_maybe_pull_tail(skb,
3731 						  off +
3732 						  sizeof(struct ipv6_opt_hdr),
3733 						  MAX_IPV6_HDR_LEN);
3734 			if (err < 0)
3735 				goto out;
3736 
3737 			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
3738 			nexthdr = hp->nexthdr;
3739 			off += ipv6_optlen(hp);
3740 			break;
3741 		}
3742 		case IPPROTO_AH: {
3743 			struct ip_auth_hdr *hp;
3744 
3745 			err = skb_maybe_pull_tail(skb,
3746 						  off +
3747 						  sizeof(struct ip_auth_hdr),
3748 						  MAX_IPV6_HDR_LEN);
3749 			if (err < 0)
3750 				goto out;
3751 
3752 			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
3753 			nexthdr = hp->nexthdr;
3754 			off += ipv6_authlen(hp);
3755 			break;
3756 		}
3757 		case IPPROTO_FRAGMENT: {
3758 			struct frag_hdr *hp;
3759 
3760 			err = skb_maybe_pull_tail(skb,
3761 						  off +
3762 						  sizeof(struct frag_hdr),
3763 						  MAX_IPV6_HDR_LEN);
3764 			if (err < 0)
3765 				goto out;
3766 
3767 			hp = OPT_HDR(struct frag_hdr, skb, off);
3768 
3769 			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
3770 				fragment = true;
3771 
3772 			nexthdr = hp->nexthdr;
3773 			off += sizeof(struct frag_hdr);
3774 			break;
3775 		}
3776 		default:
3777 			done = true;
3778 			break;
3779 		}
3780 	}
3781 
3782 	err = -EPROTO;
3783 
3784 	if (!done || fragment)
3785 		goto out;
3786 
3787 	csum = skb_checksum_setup_ip(skb, nexthdr, off);
3788 	if (IS_ERR(csum))
3789 		return PTR_ERR(csum);
3790 
3791 	if (recalculate)
3792 		*csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3793 					 &ipv6_hdr(skb)->daddr,
3794 					 skb->len - off, nexthdr, 0);
3795 	err = 0;
3796 
3797 out:
3798 	return err;
3799 }
3800 
3801 /**
3802  * skb_checksum_setup - set up partial checksum offset
3803  * @skb: the skb to set up
3804  * @recalculate: if true the pseudo-header checksum will be recalculated
3805  */
3806 int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
3807 {
3808 	int err;
3809 
3810 	switch (skb->protocol) {
3811 	case htons(ETH_P_IP):
3812 		err = skb_checksum_setup_ipv4(skb, recalculate);
3813 		break;
3814 
3815 	case htons(ETH_P_IPV6):
3816 		err = skb_checksum_setup_ipv6(skb, recalculate);
3817 		break;
3818 
3819 	default:
3820 		err = -EPROTO;
3821 		break;
3822 	}
3823 
3824 	return err;
3825 }
3826 EXPORT_SYMBOL(skb_checksum_setup);
3827 
3828 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3829 {
3830 	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
3831 			     skb->dev->name);
3832 }
3833 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
3834 
3835 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
3836 {
3837 	if (head_stolen) {
3838 		skb_release_head_state(skb);
3839 		kmem_cache_free(skbuff_head_cache, skb);
3840 	} else {
3841 		__kfree_skb(skb);
3842 	}
3843 }
3844 EXPORT_SYMBOL(kfree_skb_partial);
3845 
3846 /**
3847  * skb_try_coalesce - try to merge skb to prior one
3848  * @to: prior buffer
3849  * @from: buffer to add
3850  * @fragstolen: pointer to boolean
3851  * @delta_truesize: how much more was allocated than was requested
3852  */
3853 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
3854 		      bool *fragstolen, int *delta_truesize)
3855 {
3856 	int i, delta, len = from->len;
3857 
3858 	*fragstolen = false;
3859 
3860 	if (skb_cloned(to))
3861 		return false;
3862 
3863 	if (len <= skb_tailroom(to)) {
3864 		BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
3865 		*delta_truesize = 0;
3866 		return true;
3867 	}
3868 
3869 	if (skb_has_frag_list(to) || skb_has_frag_list(from))
3870 		return false;
3871 
3872 	if (skb_headlen(from) != 0) {
3873 		struct page *page;
3874 		unsigned int offset;
3875 
3876 		if (skb_shinfo(to)->nr_frags +
3877 		    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
3878 			return false;
3879 
3880 		if (skb_head_is_locked(from))
3881 			return false;
3882 
3883 		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
3884 
3885 		page = virt_to_head_page(from->head);
3886 		offset = from->data - (unsigned char *)page_address(page);
3887 
3888 		skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
3889 				   page, offset, skb_headlen(from));
3890 		*fragstolen = true;
3891 	} else {
3892 		if (skb_shinfo(to)->nr_frags +
3893 		    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
3894 			return false;
3895 
3896 		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
3897 	}
3898 
3899 	WARN_ON_ONCE(delta < len);
3900 
3901 	memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
3902 	       skb_shinfo(from)->frags,
3903 	       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
3904 	skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
3905 
3906 	if (!skb_cloned(from))
3907 		skb_shinfo(from)->nr_frags = 0;
3908 
3909 	/* if the skb is not cloned this does nothing
3910 	 * since we set nr_frags to 0.
3911 	 */
3912 	for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
3913 		skb_frag_ref(from, i);
3914 
3915 	to->truesize += delta;
3916 	to->len += len;
3917 	to->data_len += len;
3918 
3919 	*delta_truesize = delta;
3920 	return true;
3921 }
3922 EXPORT_SYMBOL(skb_try_coalesce);
3923 
3924 /**
3925  * skb_scrub_packet - scrub an skb
3926  *
3927  * @skb: buffer to clean
3928  * @xnet: packet is crossing netns
3929  *
3930  * skb_scrub_packet can be used after encapsulating or decapsulting a packet
3931  * into/from a tunnel. Some information have to be cleared during these
3932  * operations.
3933  * skb_scrub_packet can also be used to clean a skb before injecting it in
3934  * another namespace (@xnet == true). We have to clear all information in the
3935  * skb that could impact namespace isolation.
3936  */
3937 void skb_scrub_packet(struct sk_buff *skb, bool xnet)
3938 {
3939 	if (xnet)
3940 		skb_orphan(skb);
3941 	skb->tstamp.tv64 = 0;
3942 	skb->pkt_type = PACKET_HOST;
3943 	skb->skb_iif = 0;
3944 	skb->ignore_df = 0;
3945 	skb_dst_drop(skb);
3946 	skb->mark = 0;
3947 	secpath_reset(skb);
3948 	nf_reset(skb);
3949 	nf_reset_trace(skb);
3950 }
3951 EXPORT_SYMBOL_GPL(skb_scrub_packet);
3952 
3953 /**
3954  * skb_gso_transport_seglen - Return length of individual segments of a gso packet
3955  *
3956  * @skb: GSO skb
3957  *
3958  * skb_gso_transport_seglen is used to determine the real size of the
3959  * individual segments, including Layer4 headers (TCP/UDP).
3960  *
3961  * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
3962  */
3963 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
3964 {
3965 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
3966 
3967 	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3968 		return tcp_hdrlen(skb) + shinfo->gso_size;
3969 
3970 	/* UFO sets gso_size to the size of the fragmentation
3971 	 * payload, i.e. the size of the L4 (UDP) header is already
3972 	 * accounted for.
3973 	 */
3974 	return shinfo->gso_size;
3975 }
3976 EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
3977 
3978 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
3979 {
3980 	if (skb_cow(skb, skb_headroom(skb)) < 0) {
3981 		kfree_skb(skb);
3982 		return NULL;
3983 	}
3984 
3985 	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
3986 	skb->mac_header += VLAN_HLEN;
3987 	return skb;
3988 }
3989 
3990 struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
3991 {
3992 	struct vlan_hdr *vhdr;
3993 	u16 vlan_tci;
3994 
3995 	if (unlikely(vlan_tx_tag_present(skb))) {
3996 		/* vlan_tci is already set-up so leave this for another time */
3997 		return skb;
3998 	}
3999 
4000 	skb = skb_share_check(skb, GFP_ATOMIC);
4001 	if (unlikely(!skb))
4002 		goto err_free;
4003 
4004 	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
4005 		goto err_free;
4006 
4007 	vhdr = (struct vlan_hdr *)skb->data;
4008 	vlan_tci = ntohs(vhdr->h_vlan_TCI);
4009 	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
4010 
4011 	skb_pull_rcsum(skb, VLAN_HLEN);
4012 	vlan_set_encap_proto(skb, vhdr);
4013 
4014 	skb = skb_reorder_vlan_header(skb);
4015 	if (unlikely(!skb))
4016 		goto err_free;
4017 
4018 	skb_reset_network_header(skb);
4019 	skb_reset_transport_header(skb);
4020 	skb_reset_mac_len(skb);
4021 
4022 	return skb;
4023 
4024 err_free:
4025 	kfree_skb(skb);
4026 	return NULL;
4027 }
4028 EXPORT_SYMBOL(skb_vlan_untag);
4029