xref: /openbmc/linux/net/core/skbuff.c (revision d78c317f)
1 /*
2  *	Routines having to do with the 'struct sk_buff' memory handlers.
3  *
4  *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
5  *			Florian La Roche <rzsfl@rz.uni-sb.de>
6  *
7  *	Fixes:
8  *		Alan Cox	:	Fixed the worst of the load
9  *					balancer bugs.
10  *		Dave Platt	:	Interrupt stacking fix.
11  *	Richard Kooijman	:	Timestamp fixes.
12  *		Alan Cox	:	Changed buffer format.
13  *		Alan Cox	:	destructor hook for AF_UNIX etc.
14  *		Linus Torvalds	:	Better skb_clone.
15  *		Alan Cox	:	Added skb_copy.
16  *		Alan Cox	:	Added all the changed routines Linus
17  *					only put in the headers
18  *		Ray VanTassle	:	Fixed --skb->lock in free
19  *		Alan Cox	:	skb_copy copy arp field
20  *		Andi Kleen	:	slabified it.
21  *		Robert Olsson	:	Removed skb_head_pool
22  *
23  *	NOTE:
24  *		The __skb_ routines should be called with interrupts
25  *	disabled, or you better be *real* sure that the operation is atomic
26  *	with respect to whatever list is being frobbed (e.g. via lock_sock()
27  *	or via disabling bottom half handlers, etc).
28  *
29  *	This program is free software; you can redistribute it and/or
30  *	modify it under the terms of the GNU General Public License
31  *	as published by the Free Software Foundation; either version
32  *	2 of the License, or (at your option) any later version.
33  */
34 
35 /*
36  *	The functions in this file will not compile correctly with gcc 2.4.x
37  */
38 
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/kernel.h>
42 #include <linux/kmemcheck.h>
43 #include <linux/mm.h>
44 #include <linux/interrupt.h>
45 #include <linux/in.h>
46 #include <linux/inet.h>
47 #include <linux/slab.h>
48 #include <linux/netdevice.h>
49 #ifdef CONFIG_NET_CLS_ACT
50 #include <net/pkt_sched.h>
51 #endif
52 #include <linux/string.h>
53 #include <linux/skbuff.h>
54 #include <linux/splice.h>
55 #include <linux/cache.h>
56 #include <linux/rtnetlink.h>
57 #include <linux/init.h>
58 #include <linux/scatterlist.h>
59 #include <linux/errqueue.h>
60 #include <linux/prefetch.h>
61 
62 #include <net/protocol.h>
63 #include <net/dst.h>
64 #include <net/sock.h>
65 #include <net/checksum.h>
66 #include <net/xfrm.h>
67 
68 #include <asm/uaccess.h>
69 #include <asm/system.h>
70 #include <trace/events/skb.h>
71 
72 #include "kmap_skb.h"
73 
74 static struct kmem_cache *skbuff_head_cache __read_mostly;
75 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
76 
77 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
78 				  struct pipe_buffer *buf)
79 {
80 	put_page(buf->page);
81 }
82 
83 static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
84 				struct pipe_buffer *buf)
85 {
86 	get_page(buf->page);
87 }
88 
89 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
90 			       struct pipe_buffer *buf)
91 {
92 	return 1;
93 }
94 
95 
96 /* Pipe buffer operations for a socket. */
97 static const struct pipe_buf_operations sock_pipe_buf_ops = {
98 	.can_merge = 0,
99 	.map = generic_pipe_buf_map,
100 	.unmap = generic_pipe_buf_unmap,
101 	.confirm = generic_pipe_buf_confirm,
102 	.release = sock_pipe_buf_release,
103 	.steal = sock_pipe_buf_steal,
104 	.get = sock_pipe_buf_get,
105 };
106 
107 /*
108  *	Keep out-of-line to prevent kernel bloat.
109  *	__builtin_return_address is not used because it is not always
110  *	reliable.
111  */
112 
113 /**
114  *	skb_over_panic	- 	private function
115  *	@skb: buffer
116  *	@sz: size
117  *	@here: address
118  *
119  *	Out of line support code for skb_put(). Not user callable.
120  */
121 static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
122 {
123 	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
124 			  "data:%p tail:%#lx end:%#lx dev:%s\n",
125 	       here, skb->len, sz, skb->head, skb->data,
126 	       (unsigned long)skb->tail, (unsigned long)skb->end,
127 	       skb->dev ? skb->dev->name : "<NULL>");
128 	BUG();
129 }
130 
131 /**
132  *	skb_under_panic	- 	private function
133  *	@skb: buffer
134  *	@sz: size
135  *	@here: address
136  *
137  *	Out of line support code for skb_push(). Not user callable.
138  */
139 
140 static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
141 {
142 	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
143 			  "data:%p tail:%#lx end:%#lx dev:%s\n",
144 	       here, skb->len, sz, skb->head, skb->data,
145 	       (unsigned long)skb->tail, (unsigned long)skb->end,
146 	       skb->dev ? skb->dev->name : "<NULL>");
147 	BUG();
148 }
149 
150 /* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
151  *	'private' fields and also do memory statistics to find all the
152  *	[BEEP] leaks.
153  *
154  */
155 
156 /**
157  *	__alloc_skb	-	allocate a network buffer
158  *	@size: size to allocate
159  *	@gfp_mask: allocation mask
160  *	@fclone: allocate from fclone cache instead of head cache
161  *		and allocate a cloned (child) skb
162  *	@node: numa node to allocate memory on
163  *
164  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
165  *	tail room of @size bytes. The object has a reference count of one.
166  *	The return is the buffer. On a failure the return is %NULL.
167  *
168  *	Buffers may only be allocated from interrupts using a @gfp_mask of
169  *	%GFP_ATOMIC.
170  */
171 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
172 			    int fclone, int node)
173 {
174 	struct kmem_cache *cache;
175 	struct skb_shared_info *shinfo;
176 	struct sk_buff *skb;
177 	u8 *data;
178 
179 	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
180 
181 	/* Get the HEAD */
182 	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
183 	if (!skb)
184 		goto out;
185 	prefetchw(skb);
186 
187 	/* We do our best to align skb_shared_info on a separate cache
188 	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
189 	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
190 	 * Both skb->head and skb_shared_info are cache line aligned.
191 	 */
192 	size = SKB_DATA_ALIGN(size);
193 	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
194 	data = kmalloc_node_track_caller(size, gfp_mask, node);
195 	if (!data)
196 		goto nodata;
197 	/* kmalloc(size) might give us more room than requested.
198 	 * Put skb_shared_info exactly at the end of the allocated zone,
199 	 * to allow the maximum possible filling before reallocation.
200 	 */
201 	size = SKB_WITH_OVERHEAD(ksize(data));
202 	prefetchw(data + size);
203 
204 	/*
205 	 * Only clear those fields we need to clear, not those that we will
206 	 * actually initialise below. Hence, don't put any more fields after
207 	 * the tail pointer in struct sk_buff!
208 	 */
209 	memset(skb, 0, offsetof(struct sk_buff, tail));
210 	/* Account for allocated memory : skb + skb->head */
211 	skb->truesize = SKB_TRUESIZE(size);
212 	atomic_set(&skb->users, 1);
213 	skb->head = data;
214 	skb->data = data;
215 	skb_reset_tail_pointer(skb);
216 	skb->end = skb->tail + size;
217 #ifdef NET_SKBUFF_DATA_USES_OFFSET
218 	skb->mac_header = ~0U;
219 #endif
220 
221 	/* make sure we initialize shinfo sequentially */
222 	shinfo = skb_shinfo(skb);
223 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
224 	atomic_set(&shinfo->dataref, 1);
225 	kmemcheck_annotate_variable(shinfo->destructor_arg);
226 
227 	if (fclone) {
228 		struct sk_buff *child = skb + 1;
229 		atomic_t *fclone_ref = (atomic_t *) (child + 1);
230 
231 		kmemcheck_annotate_bitfield(child, flags1);
232 		kmemcheck_annotate_bitfield(child, flags2);
233 		skb->fclone = SKB_FCLONE_ORIG;
234 		atomic_set(fclone_ref, 1);
235 
236 		child->fclone = SKB_FCLONE_UNAVAILABLE;
237 	}
238 out:
239 	return skb;
240 nodata:
241 	kmem_cache_free(cache, skb);
242 	skb = NULL;
243 	goto out;
244 }
245 EXPORT_SYMBOL(__alloc_skb);
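/* A minimal usage sketch (hypothetical helper): the returned buffer has
 * no headroom and @size bytes of tailroom, so a typical caller reserves
 * headroom first and then appends data with skb_put(). alloc_skb() is
 * the usual wrapper around __alloc_skb().
 */
static struct sk_buff *example_alloc_and_fill(const void *payload,
					      unsigned int len)
{
	struct sk_buff *skb = alloc_skb(NET_SKB_PAD + len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, NET_SKB_PAD);		 /* create headroom */
	memcpy(skb_put(skb, len), payload, len); /* append payload at tail */
	return skb;
}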
246 
247 /**
248  * build_skb - build a network buffer
249  * @data: data buffer provided by caller
250  *
251  * Allocate a new &sk_buff. Caller provides space holding head and
252  * skb_shared_info. @data must have been allocated by kmalloc().
253  * The return is the new skb buffer.
254  * On a failure the return is %NULL, and @data is not freed.
255  * Notes :
256  *  Before IO, the driver allocates only the data buffer where the
257  *  NIC puts the incoming frame. The driver should add room at the
258  *  head (NET_SKB_PAD) and MUST add room at the tail
259  *  (SKB_DATA_ALIGN(skb_shared_info)). After IO, the driver calls
260  *  build_skb() to allocate the sk_buff and populate it before giving
261  *  the packet to the stack. RX rings only contain data buffers, not full skbs.
262  */
263 struct sk_buff *build_skb(void *data)
264 {
265 	struct skb_shared_info *shinfo;
266 	struct sk_buff *skb;
267 	unsigned int size;
268 
269 	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
270 	if (!skb)
271 		return NULL;
272 
273 	size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
274 
275 	memset(skb, 0, offsetof(struct sk_buff, tail));
276 	skb->truesize = SKB_TRUESIZE(size);
277 	atomic_set(&skb->users, 1);
278 	skb->head = data;
279 	skb->data = data;
280 	skb_reset_tail_pointer(skb);
281 	skb->end = skb->tail + size;
282 #ifdef NET_SKBUFF_DATA_USES_OFFSET
283 	skb->mac_header = ~0U;
284 #endif
285 
286 	/* make sure we initialize shinfo sequentially */
287 	shinfo = skb_shinfo(skb);
288 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
289 	atomic_set(&shinfo->dataref, 1);
290 	kmemcheck_annotate_variable(shinfo->destructor_arg);
291 
292 	return skb;
293 }
294 EXPORT_SYMBOL(build_skb);
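/* Sketch of the RX pattern described in the notes above (hypothetical
 * driver helper): @data was kmalloc()ed with NET_SKB_PAD of headroom plus
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) of tailroom, and the NIC
 * has already DMAed @frame_len bytes in after the headroom.
 */
static struct sk_buff *example_build_rx_skb(void *data, unsigned int frame_len)
{
	struct sk_buff *skb = build_skb(data);

	if (!skb)
		return NULL;		/* on failure the caller still owns @data */
	skb_reserve(skb, NET_SKB_PAD);	/* skip the reserved headroom */
	skb_put(skb, frame_len);	/* account for the received bytes */
	return skb;
}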
295 
296 /**
297  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
298  *	@dev: network device to receive on
299  *	@length: length to allocate
300  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
301  *
302  *	Allocate a new &sk_buff and assign it a usage count of one. The
303  *	buffer has unspecified headroom built in. Users should allocate
304  *	the headroom they think they need without accounting for the
305  *	built in space. The built in space is used for optimisations.
306  *
307  *	%NULL is returned if there is no free memory.
308  */
309 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
310 		unsigned int length, gfp_t gfp_mask)
311 {
312 	struct sk_buff *skb;
313 
314 	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
315 	if (likely(skb)) {
316 		skb_reserve(skb, NET_SKB_PAD);
317 		skb->dev = dev;
318 	}
319 	return skb;
320 }
321 EXPORT_SYMBOL(__netdev_alloc_skb);
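/* Illustrative RX allocation (hypothetical helper): drivers normally use
 * the netdev_alloc_skb() wrapper, and may reserve NET_IP_ALIGN on top of
 * the built-in NET_SKB_PAD so the IP header ends up aligned.
 */
static struct sk_buff *example_rx_alloc(struct net_device *dev,
					unsigned int frame_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, frame_len + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}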
322 
323 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
324 		int size)
325 {
326 	skb_fill_page_desc(skb, i, page, off, size);
327 	skb->len += size;
328 	skb->data_len += size;
329 	skb->truesize += size;
330 }
331 EXPORT_SYMBOL(skb_add_rx_frag);
332 
333 /**
334  *	dev_alloc_skb - allocate an skbuff for receiving
335  *	@length: length to allocate
336  *
337  *	Allocate a new &sk_buff and assign it a usage count of one. The
338  *	buffer has unspecified headroom built in. Users should allocate
339  *	the headroom they think they need without accounting for the
340  *	built in space. The built in space is used for optimisations.
341  *
342  *	%NULL is returned if there is no free memory. Although this function
343  *	allocates memory it can be called from an interrupt.
344  */
345 struct sk_buff *dev_alloc_skb(unsigned int length)
346 {
347 	/*
348 	 * There is more code here than it seems:
349 	 * __dev_alloc_skb is an inline
350 	 */
351 	return __dev_alloc_skb(length, GFP_ATOMIC);
352 }
353 EXPORT_SYMBOL(dev_alloc_skb);
354 
355 static void skb_drop_list(struct sk_buff **listp)
356 {
357 	struct sk_buff *list = *listp;
358 
359 	*listp = NULL;
360 
361 	do {
362 		struct sk_buff *this = list;
363 		list = list->next;
364 		kfree_skb(this);
365 	} while (list);
366 }
367 
368 static inline void skb_drop_fraglist(struct sk_buff *skb)
369 {
370 	skb_drop_list(&skb_shinfo(skb)->frag_list);
371 }
372 
373 static void skb_clone_fraglist(struct sk_buff *skb)
374 {
375 	struct sk_buff *list;
376 
377 	skb_walk_frags(skb, list)
378 		skb_get(list);
379 }
380 
381 static void skb_release_data(struct sk_buff *skb)
382 {
383 	if (!skb->cloned ||
384 	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
385 			       &skb_shinfo(skb)->dataref)) {
386 		if (skb_shinfo(skb)->nr_frags) {
387 			int i;
388 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
389 				skb_frag_unref(skb, i);
390 		}
391 
392 		/*
393 		 * If the skb buffer is from userspace, we need to notify the
394 		 * caller that the lower device DMA has completed;
395 		 */
396 		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
397 			struct ubuf_info *uarg;
398 
399 			uarg = skb_shinfo(skb)->destructor_arg;
400 			if (uarg->callback)
401 				uarg->callback(uarg);
402 		}
403 
404 		if (skb_has_frag_list(skb))
405 			skb_drop_fraglist(skb);
406 
407 		kfree(skb->head);
408 	}
409 }
410 
411 /*
412  *	Free an skbuff by memory without cleaning the state.
413  */
414 static void kfree_skbmem(struct sk_buff *skb)
415 {
416 	struct sk_buff *other;
417 	atomic_t *fclone_ref;
418 
419 	switch (skb->fclone) {
420 	case SKB_FCLONE_UNAVAILABLE:
421 		kmem_cache_free(skbuff_head_cache, skb);
422 		break;
423 
424 	case SKB_FCLONE_ORIG:
425 		fclone_ref = (atomic_t *) (skb + 2);
426 		if (atomic_dec_and_test(fclone_ref))
427 			kmem_cache_free(skbuff_fclone_cache, skb);
428 		break;
429 
430 	case SKB_FCLONE_CLONE:
431 		fclone_ref = (atomic_t *) (skb + 1);
432 		other = skb - 1;
433 
434 		/* The clone portion is available for
435 		 * fast-cloning again.
436 		 */
437 		skb->fclone = SKB_FCLONE_UNAVAILABLE;
438 
439 		if (atomic_dec_and_test(fclone_ref))
440 			kmem_cache_free(skbuff_fclone_cache, other);
441 		break;
442 	}
443 }
444 
445 static void skb_release_head_state(struct sk_buff *skb)
446 {
447 	skb_dst_drop(skb);
448 #ifdef CONFIG_XFRM
449 	secpath_put(skb->sp);
450 #endif
451 	if (skb->destructor) {
452 		WARN_ON(in_irq());
453 		skb->destructor(skb);
454 	}
455 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
456 	nf_conntrack_put(skb->nfct);
457 #endif
458 #ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
459 	nf_conntrack_put_reasm(skb->nfct_reasm);
460 #endif
461 #ifdef CONFIG_BRIDGE_NETFILTER
462 	nf_bridge_put(skb->nf_bridge);
463 #endif
464 /* XXX: Is this still necessary? - JHS */
465 #ifdef CONFIG_NET_SCHED
466 	skb->tc_index = 0;
467 #ifdef CONFIG_NET_CLS_ACT
468 	skb->tc_verd = 0;
469 #endif
470 #endif
471 }
472 
473 /* Free everything but the sk_buff shell. */
474 static void skb_release_all(struct sk_buff *skb)
475 {
476 	skb_release_head_state(skb);
477 	skb_release_data(skb);
478 }
479 
480 /**
481  *	__kfree_skb - private function
482  *	@skb: buffer
483  *
484  *	Free an sk_buff. Release anything attached to the buffer.
485  *	Clean the state. This is an internal helper function. Users should
486  *	always call kfree_skb
487  */
488 
489 void __kfree_skb(struct sk_buff *skb)
490 {
491 	skb_release_all(skb);
492 	kfree_skbmem(skb);
493 }
494 EXPORT_SYMBOL(__kfree_skb);
495 
496 /**
497  *	kfree_skb - free an sk_buff
498  *	@skb: buffer to free
499  *
500  *	Drop a reference to the buffer and free it if the usage count has
501  *	hit zero.
502  */
503 void kfree_skb(struct sk_buff *skb)
504 {
505 	if (unlikely(!skb))
506 		return;
507 	if (likely(atomic_read(&skb->users) == 1))
508 		smp_rmb();
509 	else if (likely(!atomic_dec_and_test(&skb->users)))
510 		return;
511 	trace_kfree_skb(skb, __builtin_return_address(0));
512 	__kfree_skb(skb);
513 }
514 EXPORT_SYMBOL(kfree_skb);
515 
516 /**
517  *	consume_skb - free an skbuff
518  *	@skb: buffer to free
519  *
520  *	Drop a reference to the buffer and free it if the usage count has
521  *	hit zero. Functions identically to kfree_skb, but kfree_skb assumes
522  *	that the frame is being dropped after a failure and notes that.
523  */
524 void consume_skb(struct sk_buff *skb)
525 {
526 	if (unlikely(!skb))
527 		return;
528 	if (likely(atomic_read(&skb->users) == 1))
529 		smp_rmb();
530 	else if (likely(!atomic_dec_and_test(&skb->users)))
531 		return;
532 	trace_consume_skb(skb);
533 	__kfree_skb(skb);
534 }
535 EXPORT_SYMBOL(consume_skb);
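/* The difference between kfree_skb() and consume_skb() only matters for
 * tracing: use consume_skb() when a packet left the system normally and
 * kfree_skb() when it is dropped, so drop monitors stay meaningful. A
 * hypothetical TX-completion sketch:
 */
static void example_tx_complete(struct sk_buff *skb, bool tx_ok)
{
	if (tx_ok)
		consume_skb(skb);	/* normal end of life */
	else
		kfree_skb(skb);		/* counted as a drop */
}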
536 
537 /**
538  * 	skb_recycle - clean up an skb for reuse
539  * 	@skb: buffer
540  *
541  * 	Recycles the skb to be reused as a receive buffer. This
542  * 	function does any necessary reference count dropping, and
543  * 	cleans up the skbuff as if it just came from __alloc_skb().
544  */
545 void skb_recycle(struct sk_buff *skb)
546 {
547 	struct skb_shared_info *shinfo;
548 
549 	skb_release_head_state(skb);
550 
551 	shinfo = skb_shinfo(skb);
552 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
553 	atomic_set(&shinfo->dataref, 1);
554 
555 	memset(skb, 0, offsetof(struct sk_buff, tail));
556 	skb->data = skb->head + NET_SKB_PAD;
557 	skb_reset_tail_pointer(skb);
558 }
559 EXPORT_SYMBOL(skb_recycle);
560 
561 /**
562  *	skb_recycle_check - check if skb can be reused for receive
563  *	@skb: buffer
564  *	@skb_size: minimum receive buffer size
565  *
566  *	Checks that the skb passed in is not shared or cloned, and
567  *	that it is linear and its head portion is at least as large as
568  *	skb_size so that it can be recycled as a receive buffer.
569  *	If these conditions are met, this function does any necessary
570  *	reference count dropping and cleans up the skbuff as if it
571  *	just came from __alloc_skb().
572  */
573 bool skb_recycle_check(struct sk_buff *skb, int skb_size)
574 {
575 	if (!skb_is_recycleable(skb, skb_size))
576 		return false;
577 
578 	skb_recycle(skb);
579 
580 	return true;
581 }
582 EXPORT_SYMBOL(skb_recycle_check);
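/* Hypothetical completion-path sketch: a driver holding the last
 * reference may try to turn a finished skb into a fresh RX buffer
 * instead of freeing and reallocating.
 */
static struct sk_buff *example_reuse_or_alloc(struct net_device *dev,
					      struct sk_buff *skb,
					      int rx_buf_size)
{
	if (skb_recycle_check(skb, rx_buf_size))
		return skb;		/* cleaned up, ready for RX again */
	kfree_skb(skb);			/* cannot be recycled */
	return netdev_alloc_skb(dev, rx_buf_size);
}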
583 
584 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
585 {
586 	new->tstamp		= old->tstamp;
587 	new->dev		= old->dev;
588 	new->transport_header	= old->transport_header;
589 	new->network_header	= old->network_header;
590 	new->mac_header		= old->mac_header;
591 	skb_dst_copy(new, old);
592 	new->rxhash		= old->rxhash;
593 	new->ooo_okay		= old->ooo_okay;
594 	new->l4_rxhash		= old->l4_rxhash;
595 #ifdef CONFIG_XFRM
596 	new->sp			= secpath_get(old->sp);
597 #endif
598 	memcpy(new->cb, old->cb, sizeof(old->cb));
599 	new->csum		= old->csum;
600 	new->local_df		= old->local_df;
601 	new->pkt_type		= old->pkt_type;
602 	new->ip_summed		= old->ip_summed;
603 	skb_copy_queue_mapping(new, old);
604 	new->priority		= old->priority;
605 #if IS_ENABLED(CONFIG_IP_VS)
606 	new->ipvs_property	= old->ipvs_property;
607 #endif
608 	new->protocol		= old->protocol;
609 	new->mark		= old->mark;
610 	new->skb_iif		= old->skb_iif;
611 	__nf_copy(new, old);
612 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
613 	new->nf_trace		= old->nf_trace;
614 #endif
615 #ifdef CONFIG_NET_SCHED
616 	new->tc_index		= old->tc_index;
617 #ifdef CONFIG_NET_CLS_ACT
618 	new->tc_verd		= old->tc_verd;
619 #endif
620 #endif
621 	new->vlan_tci		= old->vlan_tci;
622 
623 	skb_copy_secmark(new, old);
624 }
625 
626 /*
627  * You should not add any new code to this function.  Add it to
628  * __copy_skb_header above instead.
629  */
630 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
631 {
632 #define C(x) n->x = skb->x
633 
634 	n->next = n->prev = NULL;
635 	n->sk = NULL;
636 	__copy_skb_header(n, skb);
637 
638 	C(len);
639 	C(data_len);
640 	C(mac_len);
641 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
642 	n->cloned = 1;
643 	n->nohdr = 0;
644 	n->destructor = NULL;
645 	C(tail);
646 	C(end);
647 	C(head);
648 	C(data);
649 	C(truesize);
650 	atomic_set(&n->users, 1);
651 
652 	atomic_inc(&(skb_shinfo(skb)->dataref));
653 	skb->cloned = 1;
654 
655 	return n;
656 #undef C
657 }
658 
659 /**
660  *	skb_morph	-	morph one skb into another
661  *	@dst: the skb to receive the contents
662  *	@src: the skb to supply the contents
663  *
664  *	This is identical to skb_clone except that the target skb is
665  *	supplied by the user.
666  *
667  *	The target skb is returned upon exit.
668  */
669 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
670 {
671 	skb_release_all(dst);
672 	return __skb_clone(dst, src);
673 }
674 EXPORT_SYMBOL_GPL(skb_morph);
675 
676 /**	skb_copy_ubufs	-	copy userspace skb frag buffers to kernel
677  *	@skb: the skb to modify
678  *	@gfp_mask: allocation priority
679  *
680  *	This must be called on an SKBTX_DEV_ZEROCOPY skb.
681  *	It will copy all frags into kernel memory and drop the reference
682  *	to the userspace pages.
683  *
684  *	If this function is called from an interrupt, @gfp_mask must be
685  *	%GFP_ATOMIC.
686  *
687  *	Returns 0 on success or a negative error code on failure
688  *	to allocate kernel memory to copy to.
689  */
690 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
691 {
692 	int i;
693 	int num_frags = skb_shinfo(skb)->nr_frags;
694 	struct page *page, *head = NULL;
695 	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;
696 
697 	for (i = 0; i < num_frags; i++) {
698 		u8 *vaddr;
699 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
700 
701 		page = alloc_page(GFP_ATOMIC);
702 		if (!page) {
703 			while (head) {
704 				struct page *next = (struct page *)head->private;
705 				put_page(head);
706 				head = next;
707 			}
708 			return -ENOMEM;
709 		}
710 		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
711 		memcpy(page_address(page),
712 		       vaddr + f->page_offset, skb_frag_size(f));
713 		kunmap_skb_frag(vaddr);
714 		page->private = (unsigned long)head;
715 		head = page;
716 	}
717 
718 	/* skb frags release userspace buffers */
719 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
720 		skb_frag_unref(skb, i);
721 
722 	uarg->callback(uarg);
723 
724 	/* skb frags point to kernel buffers */
725 	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
726 		__skb_fill_page_desc(skb, i-1, head, 0,
727 				     skb_shinfo(skb)->frags[i - 1].size);
728 		head = (struct page *)head->private;
729 	}
730 
731 	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
732 	return 0;
733 }
734 
735 
736 /**
737  *	skb_clone	-	duplicate an sk_buff
738  *	@skb: buffer to clone
739  *	@gfp_mask: allocation priority
740  *
741  *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
742  *	copies share the same packet data but not structure. The new
743  *	buffer has a reference count of 1. If the allocation fails the
744  *	function returns %NULL otherwise the new buffer is returned.
745  *
746  *	If this function is called from an interrupt, @gfp_mask must be
747  *	%GFP_ATOMIC.
748  */
749 
750 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
751 {
752 	struct sk_buff *n;
753 
754 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
755 		if (skb_copy_ubufs(skb, gfp_mask))
756 			return NULL;
757 	}
758 
759 	n = skb + 1;
760 	if (skb->fclone == SKB_FCLONE_ORIG &&
761 	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
762 		atomic_t *fclone_ref = (atomic_t *) (n + 1);
763 		n->fclone = SKB_FCLONE_CLONE;
764 		atomic_inc(fclone_ref);
765 	} else {
766 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
767 		if (!n)
768 			return NULL;
769 
770 		kmemcheck_annotate_bitfield(n, flags1);
771 		kmemcheck_annotate_bitfield(n, flags2);
772 		n->fclone = SKB_FCLONE_UNAVAILABLE;
773 	}
774 
775 	return __skb_clone(n, skb);
776 }
777 EXPORT_SYMBOL(skb_clone);
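/* Minimal sketch (hypothetical helper): cloning is the cheap way to hand
 * the same payload to a second consumer. Both skbs share the packet data,
 * so neither side may modify it without first making it private (see
 * skb_copy() and pskb_copy() below).
 */
static int example_tap_packet(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return -ENOMEM;
	/* ... hand @clone to a tap/monitor, keep using @skb ... */
	kfree_skb(clone);		/* stand-in for the real consumer */
	return 0;
}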
778 
779 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
780 {
781 #ifndef NET_SKBUFF_DATA_USES_OFFSET
782 	/*
783 	 *	Shift between the two data areas in bytes
784 	 */
785 	unsigned long offset = new->data - old->data;
786 #endif
787 
788 	__copy_skb_header(new, old);
789 
790 #ifndef NET_SKBUFF_DATA_USES_OFFSET
791 	/* {transport,network,mac}_header are relative to skb->head */
792 	new->transport_header += offset;
793 	new->network_header   += offset;
794 	if (skb_mac_header_was_set(new))
795 		new->mac_header	      += offset;
796 #endif
797 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
798 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
799 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
800 }
801 
802 /**
803  *	skb_copy	-	create private copy of an sk_buff
804  *	@skb: buffer to copy
805  *	@gfp_mask: allocation priority
806  *
807  *	Make a copy of both an &sk_buff and its data. This is used when the
808  *	caller wishes to modify the data and needs a private copy of the
809  *	data to alter. Returns %NULL on failure or the pointer to the buffer
810  *	on success. The returned buffer has a reference count of 1.
811  *
812  *	As a by-product this function converts a non-linear &sk_buff to a
813  *	linear one, so that the &sk_buff becomes completely private and the
814  *	caller is allowed to modify all the data of the returned buffer. This
815  *	means that this function is not recommended for use in circumstances
816  *	when only the header is going to be modified. Use pskb_copy() instead.
817  */
818 
819 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
820 {
821 	int headerlen = skb_headroom(skb);
822 	unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
823 	struct sk_buff *n = alloc_skb(size, gfp_mask);
824 
825 	if (!n)
826 		return NULL;
827 
828 	/* Set the data pointer */
829 	skb_reserve(n, headerlen);
830 	/* Set the tail pointer and length */
831 	skb_put(n, skb->len);
832 
833 	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
834 		BUG();
835 
836 	copy_skb_header(n, skb);
837 	return n;
838 }
839 EXPORT_SYMBOL(skb_copy);
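/* Rule of thumb following from the comment above (hypothetical helper):
 * take the cheapest copy that makes the bytes you will write private.
 */
static struct sk_buff *example_private_copy(struct sk_buff *skb,
					    bool touch_payload)
{
	if (touch_payload)
		return skb_copy(skb, GFP_ATOMIC);  /* linearizes everything */
	return pskb_copy(skb, GFP_ATOMIC);	   /* header only, frags shared */
}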
840 
841 /**
842  *	__pskb_copy	-	create copy of an sk_buff with private head.
843  *	@skb: buffer to copy
844  *	@headroom: headroom of new skb
845  *	@gfp_mask: allocation priority
846  *
847  *	Make a copy of both an &sk_buff and part of its data, located
848  *	in its header. Fragmented data remain shared. This is used when
849  *	the caller wishes to modify only the header of the &sk_buff and
850  *	needs a private copy of the header to alter. Returns %NULL on failure
851  *	or the pointer to the buffer on success.
852  *	The returned buffer has a reference count of 1.
853  */
854 
855 struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
856 {
857 	unsigned int size = skb_headlen(skb) + headroom;
858 	struct sk_buff *n = alloc_skb(size, gfp_mask);
859 
860 	if (!n)
861 		goto out;
862 
863 	/* Set the data pointer */
864 	skb_reserve(n, headroom);
865 	/* Set the tail pointer and length */
866 	skb_put(n, skb_headlen(skb));
867 	/* Copy the bytes */
868 	skb_copy_from_linear_data(skb, n->data, n->len);
869 
870 	n->truesize += skb->data_len;
871 	n->data_len  = skb->data_len;
872 	n->len	     = skb->len;
873 
874 	if (skb_shinfo(skb)->nr_frags) {
875 		int i;
876 
877 		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
878 			if (skb_copy_ubufs(skb, gfp_mask)) {
879 				kfree_skb(n);
880 				n = NULL;
881 				goto out;
882 			}
883 		}
884 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
885 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
886 			skb_frag_ref(skb, i);
887 		}
888 		skb_shinfo(n)->nr_frags = i;
889 	}
890 
891 	if (skb_has_frag_list(skb)) {
892 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
893 		skb_clone_fraglist(n);
894 	}
895 
896 	copy_skb_header(n, skb);
897 out:
898 	return n;
899 }
900 EXPORT_SYMBOL(__pskb_copy);
901 
902 /**
903  *	pskb_expand_head - reallocate header of &sk_buff
904  *	@skb: buffer to reallocate
905  *	@nhead: room to add at head
906  *	@ntail: room to add at tail
907  *	@gfp_mask: allocation priority
908  *
909  *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
910  *	the header of the skb. The &sk_buff itself is not changed and MUST
911  *	have a reference count of 1. Returns zero on success or a negative
912  *	error code if expansion failed, in which case the &sk_buff is unchanged.
913  *
914  *	All the pointers pointing into skb header may change and must be
915  *	reloaded after call to this function.
916  */
917 
918 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
919 		     gfp_t gfp_mask)
920 {
921 	int i;
922 	u8 *data;
923 	int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
924 	long off;
925 	bool fastpath;
926 
927 	BUG_ON(nhead < 0);
928 
929 	if (skb_shared(skb))
930 		BUG();
931 
932 	size = SKB_DATA_ALIGN(size);
933 
934 	/* Check if we can avoid taking references on fragments if we own
935 	 * the last reference on skb->head. (see skb_release_data())
936 	 */
937 	if (!skb->cloned)
938 		fastpath = true;
939 	else {
940 		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
941 		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
942 	}
943 
944 	if (fastpath &&
945 	    size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
946 		memmove(skb->head + size, skb_shinfo(skb),
947 			offsetof(struct skb_shared_info,
948 				 frags[skb_shinfo(skb)->nr_frags]));
949 		memmove(skb->head + nhead, skb->head,
950 			skb_tail_pointer(skb) - skb->head);
951 		off = nhead;
952 		goto adjust_others;
953 	}
954 
955 	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
956 	if (!data)
957 		goto nodata;
958 
959 	/* Copy only real data... and, alas, header. This should be
960 	 * optimized for the cases when header is void.
961 	 */
962 	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
963 
964 	memcpy((struct skb_shared_info *)(data + size),
965 	       skb_shinfo(skb),
966 	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
967 
968 	if (fastpath) {
969 		kfree(skb->head);
970 	} else {
971 		/* copy the zero-copy skb frags */
972 		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
973 			if (skb_copy_ubufs(skb, gfp_mask))
974 				goto nofrags;
975 		}
976 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
977 			skb_frag_ref(skb, i);
978 
979 		if (skb_has_frag_list(skb))
980 			skb_clone_fraglist(skb);
981 
982 		skb_release_data(skb);
983 	}
984 	off = (data + nhead) - skb->head;
985 
986 	skb->head     = data;
987 adjust_others:
988 	skb->data    += off;
989 #ifdef NET_SKBUFF_DATA_USES_OFFSET
990 	skb->end      = size;
991 	off           = nhead;
992 #else
993 	skb->end      = skb->head + size;
994 #endif
995 	/* {transport,network,mac}_header and tail are relative to skb->head */
996 	skb->tail	      += off;
997 	skb->transport_header += off;
998 	skb->network_header   += off;
999 	if (skb_mac_header_was_set(skb))
1000 		skb->mac_header += off;
1001 	/* Only adjust this if it actually is csum_start rather than csum */
1002 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1003 		skb->csum_start += nhead;
1004 	skb->cloned   = 0;
1005 	skb->hdr_len  = 0;
1006 	skb->nohdr    = 0;
1007 	atomic_set(&skb_shinfo(skb)->dataref, 1);
1008 	return 0;
1009 
1010 nofrags:
1011 	kfree(data);
1012 nodata:
1013 	return -ENOMEM;
1014 }
1015 EXPORT_SYMBOL(pskb_expand_head);
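/* Hypothetical sketch of the reload rule above: any pointer cached into
 * the old header is stale after a successful call and must be re-derived
 * from skb->head/skb->data.
 */
static int example_grow_headroom(struct sk_buff *skb, int needed)
{
	int add = needed - (int)skb_headroom(skb);

	if (add > 0 &&
	    pskb_expand_head(skb, SKB_DATA_ALIGN(add), 0, GFP_ATOMIC))
		return -ENOMEM;
	/* skb->head may have moved: recompute any saved pointers here */
	return 0;
}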
1016 
1017 /* Make private copy of skb with writable head and some headroom */
1018 
1019 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
1020 {
1021 	struct sk_buff *skb2;
1022 	int delta = headroom - skb_headroom(skb);
1023 
1024 	if (delta <= 0)
1025 		skb2 = pskb_copy(skb, GFP_ATOMIC);
1026 	else {
1027 		skb2 = skb_clone(skb, GFP_ATOMIC);
1028 		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
1029 					     GFP_ATOMIC)) {
1030 			kfree_skb(skb2);
1031 			skb2 = NULL;
1032 		}
1033 	}
1034 	return skb2;
1035 }
1036 EXPORT_SYMBOL(skb_realloc_headroom);
1037 
1038 /**
1039  *	skb_copy_expand	-	copy and expand sk_buff
1040  *	@skb: buffer to copy
1041  *	@newheadroom: new free bytes at head
1042  *	@newtailroom: new free bytes at tail
1043  *	@gfp_mask: allocation priority
1044  *
1045  *	Make a copy of both an &sk_buff and its data and while doing so
1046  *	allocate additional space.
1047  *
1048  *	This is used when the caller wishes to modify the data and needs a
1049  *	private copy of the data to alter as well as more space for new fields.
1050  *	Returns %NULL on failure or the pointer to the buffer
1051  *	on success. The returned buffer has a reference count of 1.
1052  *
1053  *	You must pass %GFP_ATOMIC as the allocation priority if this function
1054  *	is called from an interrupt.
1055  */
1056 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1057 				int newheadroom, int newtailroom,
1058 				gfp_t gfp_mask)
1059 {
1060 	/*
1061 	 *	Allocate the copy buffer
1062 	 */
1063 	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
1064 				      gfp_mask);
1065 	int oldheadroom = skb_headroom(skb);
1066 	int head_copy_len, head_copy_off;
1067 	int off;
1068 
1069 	if (!n)
1070 		return NULL;
1071 
1072 	skb_reserve(n, newheadroom);
1073 
1074 	/* Set the tail pointer and length */
1075 	skb_put(n, skb->len);
1076 
1077 	head_copy_len = oldheadroom;
1078 	head_copy_off = 0;
1079 	if (newheadroom <= head_copy_len)
1080 		head_copy_len = newheadroom;
1081 	else
1082 		head_copy_off = newheadroom - head_copy_len;
1083 
1084 	/* Copy the linear header and data. */
1085 	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1086 			  skb->len + head_copy_len))
1087 		BUG();
1088 
1089 	copy_skb_header(n, skb);
1090 
1091 	off                  = newheadroom - oldheadroom;
1092 	if (n->ip_summed == CHECKSUM_PARTIAL)
1093 		n->csum_start += off;
1094 #ifdef NET_SKBUFF_DATA_USES_OFFSET
1095 	n->transport_header += off;
1096 	n->network_header   += off;
1097 	if (skb_mac_header_was_set(skb))
1098 		n->mac_header += off;
1099 #endif
1100 
1101 	return n;
1102 }
1103 EXPORT_SYMBOL(skb_copy_expand);
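/* Hypothetical encapsulation sketch: copy the packet while growing the
 * headroom, then push the new outer header into the extra space.
 */
static struct sk_buff *example_encap(const struct sk_buff *skb,
				     const void *hdr, unsigned int hdr_len)
{
	struct sk_buff *n = skb_copy_expand(skb, hdr_len, 0, GFP_ATOMIC);

	if (!n)
		return NULL;
	memcpy(skb_push(n, hdr_len), hdr, hdr_len);
	return n;
}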
1104 
1105 /**
1106  *	skb_pad			-	zero pad the tail of an skb
1107  *	@skb: buffer to pad
1108  *	@pad: space to pad
1109  *
1110  *	Ensure that a buffer is followed by a padding area that is zero
1111  *	filled. Used by network drivers which may DMA or transfer data
1112  *	beyond the buffer end onto the wire.
1113  *
1114  *	May return an error in out-of-memory cases. The skb is freed on error.
1115  */
1116 
1117 int skb_pad(struct sk_buff *skb, int pad)
1118 {
1119 	int err;
1120 	int ntail;
1121 
1122 	/* If the skbuff is non-linear, tailroom is always zero. */
1123 	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
1124 		memset(skb->data+skb->len, 0, pad);
1125 		return 0;
1126 	}
1127 
1128 	ntail = skb->data_len + pad - (skb->end - skb->tail);
1129 	if (likely(skb_cloned(skb) || ntail > 0)) {
1130 		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
1131 		if (unlikely(err))
1132 			goto free_skb;
1133 	}
1134 
1135 	/* FIXME: The use of this function with non-linear skb's really needs
1136 	 * to be audited.
1137 	 */
1138 	err = skb_linearize(skb);
1139 	if (unlikely(err))
1140 		goto free_skb;
1141 
1142 	memset(skb->data + skb->len, 0, pad);
1143 	return 0;
1144 
1145 free_skb:
1146 	kfree_skb(skb);
1147 	return err;
1148 }
1149 EXPORT_SYMBOL(skb_pad);
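/* Typical consumer (hypothetical TX sketch): Ethernet frames must be at
 * least ETH_ZLEN bytes on the wire. skb_padto() wraps skb_pad() and is a
 * no-op when the frame is long enough; note that neither helper updates
 * skb->len, so the driver transmits max(skb->len, ETH_ZLEN) bytes.
 */
static int example_pad_short_frame(struct sk_buff *skb)
{
	if (skb->len < ETH_ZLEN && skb_padto(skb, ETH_ZLEN))
		return -ENOMEM;		/* skb has already been freed */
	return 0;
}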
1150 
1151 /**
1152  *	skb_put - add data to a buffer
1153  *	@skb: buffer to use
1154  *	@len: amount of data to add
1155  *
1156  *	This function extends the used data area of the buffer. If this would
1157  *	exceed the total buffer size the kernel will panic. A pointer to the
1158  *	first byte of the extra data is returned.
1159  */
1160 unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
1161 {
1162 	unsigned char *tmp = skb_tail_pointer(skb);
1163 	SKB_LINEAR_ASSERT(skb);
1164 	skb->tail += len;
1165 	skb->len  += len;
1166 	if (unlikely(skb->tail > skb->end))
1167 		skb_over_panic(skb, len, __builtin_return_address(0));
1168 	return tmp;
1169 }
1170 EXPORT_SYMBOL(skb_put);
1171 
1172 /**
1173  *	skb_push - add data to the start of a buffer
1174  *	@skb: buffer to use
1175  *	@len: amount of data to add
1176  *
1177  *	This function extends the used data area of the buffer at the buffer
1178  *	start. If this would exceed the total buffer headroom the kernel will
1179  *	panic. A pointer to the first byte of the extra data is returned.
1180  */
1181 unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
1182 {
1183 	skb->data -= len;
1184 	skb->len  += len;
1185 	if (unlikely(skb->data<skb->head))
1186 		skb_under_panic(skb, len, __builtin_return_address(0));
1187 	return skb->data;
1188 }
1189 EXPORT_SYMBOL(skb_push);
1190 
1191 /**
1192  *	skb_pull - remove data from the start of a buffer
1193  *	@skb: buffer to use
1194  *	@len: amount of data to remove
1195  *
1196  *	This function removes data from the start of a buffer, returning
1197  *	the memory to the headroom. A pointer to the next data in the buffer
1198  *	is returned. Once the data has been pulled future pushes will overwrite
1199  *	the old data.
1200  */
1201 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
1202 {
1203 	return skb_pull_inline(skb, len);
1204 }
1205 EXPORT_SYMBOL(skb_pull);
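/* skb_push()/skb_pull() in one hypothetical round trip: prepend a header
 * on the transmit side, strip it again on receive. struct example_hdr
 * stands in for a real protocol header and assumes sufficient headroom.
 */
struct example_hdr {
	__be16 type;
};

static void example_push_and_pull(struct sk_buff *skb)
{
	struct example_hdr *hdr;

	hdr = (struct example_hdr *)skb_push(skb, sizeof(*hdr));
	hdr->type = htons(0x88b5);	/* local experimental ethertype */

	/* ... later, the receive side removes the header again ... */
	skb_pull(skb, sizeof(*hdr));
}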
1206 
1207 /**
1208  *	skb_trim - remove end from a buffer
1209  *	@skb: buffer to alter
1210  *	@len: new length
1211  *
1212  *	Cut the length of a buffer down by removing data from the tail. If
1213  *	the buffer is already under the length specified it is not modified.
1214  *	The skb must be linear.
1215  */
1216 void skb_trim(struct sk_buff *skb, unsigned int len)
1217 {
1218 	if (skb->len > len)
1219 		__skb_trim(skb, len);
1220 }
1221 EXPORT_SYMBOL(skb_trim);
1222 
1223 /* Trims skb to length len. It can change skb pointers.
1224  */
1225 
1226 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1227 {
1228 	struct sk_buff **fragp;
1229 	struct sk_buff *frag;
1230 	int offset = skb_headlen(skb);
1231 	int nfrags = skb_shinfo(skb)->nr_frags;
1232 	int i;
1233 	int err;
1234 
1235 	if (skb_cloned(skb) &&
1236 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1237 		return err;
1238 
1239 	i = 0;
1240 	if (offset >= len)
1241 		goto drop_pages;
1242 
1243 	for (; i < nfrags; i++) {
1244 		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1245 
1246 		if (end < len) {
1247 			offset = end;
1248 			continue;
1249 		}
1250 
1251 		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
1252 
1253 drop_pages:
1254 		skb_shinfo(skb)->nr_frags = i;
1255 
1256 		for (; i < nfrags; i++)
1257 			skb_frag_unref(skb, i);
1258 
1259 		if (skb_has_frag_list(skb))
1260 			skb_drop_fraglist(skb);
1261 		goto done;
1262 	}
1263 
1264 	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1265 	     fragp = &frag->next) {
1266 		int end = offset + frag->len;
1267 
1268 		if (skb_shared(frag)) {
1269 			struct sk_buff *nfrag;
1270 
1271 			nfrag = skb_clone(frag, GFP_ATOMIC);
1272 			if (unlikely(!nfrag))
1273 				return -ENOMEM;
1274 
1275 			nfrag->next = frag->next;
1276 			kfree_skb(frag);
1277 			frag = nfrag;
1278 			*fragp = frag;
1279 		}
1280 
1281 		if (end < len) {
1282 			offset = end;
1283 			continue;
1284 		}
1285 
1286 		if (end > len &&
1287 		    unlikely((err = pskb_trim(frag, len - offset))))
1288 			return err;
1289 
1290 		if (frag->next)
1291 			skb_drop_list(&frag->next);
1292 		break;
1293 	}
1294 
1295 done:
1296 	if (len > skb_headlen(skb)) {
1297 		skb->data_len -= skb->len - len;
1298 		skb->len       = len;
1299 	} else {
1300 		skb->len       = len;
1301 		skb->data_len  = 0;
1302 		skb_set_tail_pointer(skb, len);
1303 	}
1304 
1305 	return 0;
1306 }
1307 EXPORT_SYMBOL(___pskb_trim);
1308 
1309 /**
1310  *	__pskb_pull_tail - advance tail of skb header
1311  *	@skb: buffer to reallocate
1312  *	@delta: number of bytes to advance tail
1313  *
1314  *	The function makes sense only on a fragmented &sk_buff: it expands
1315  *	the header, moving its tail forward and copying the necessary
1316  *	data from the fragmented part.
1317  *
1318  *	&sk_buff MUST have reference count of 1.
1319  *
1320  *	Returns %NULL (and &sk_buff does not change) if pull failed
1321  *	or value of new tail of skb in the case of success.
1322  *
1323  *	All the pointers pointing into skb header may change and must be
1324  *	reloaded after call to this function.
1325  */
1326 
1327 /* Moves tail of skb head forward, copying data from fragmented part,
1328  * when it is necessary.
1329  * 1. It may fail due to malloc failure.
1330  * 2. It may change skb pointers.
1331  *
1332  * It is pretty complicated. Luckily, it is called only in exceptional cases.
1333  */
1334 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1335 {
1336 	/* If the skb does not have enough free space at the tail, get a new
1337 	 * one plus 128 bytes for future expansions. If we have enough room
1338 	 * at the tail, reallocate without expansion only if skb is cloned.
1339 	 */
1340 	int i, k, eat = (skb->tail + delta) - skb->end;
1341 
1342 	if (eat > 0 || skb_cloned(skb)) {
1343 		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1344 				     GFP_ATOMIC))
1345 			return NULL;
1346 	}
1347 
1348 	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
1349 		BUG();
1350 
1351 	/* Optimization: no fragments, no reason to pre-estimate the
1352 	 * size of pulled pages. Superb.
1353 	 */
1354 	if (!skb_has_frag_list(skb))
1355 		goto pull_pages;
1356 
1357 	/* Estimate size of pulled pages. */
1358 	eat = delta;
1359 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1360 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1361 
1362 		if (size >= eat)
1363 			goto pull_pages;
1364 		eat -= size;
1365 	}
1366 
1367 	/* If we need to update the frag list, we are in trouble.
1368 	 * Certainly, it is possible to add an offset to the skb data,
1369 	 * but taking into account that pulling is expected to be a
1370 	 * very rare operation, it is worth fighting against further
1371 	 * bloating of the skb head and crucifying ourselves here instead.
1372 	 * Pure masochism, indeed. 8)8)
1373 	 */
1374 	if (eat) {
1375 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1376 		struct sk_buff *clone = NULL;
1377 		struct sk_buff *insp = NULL;
1378 
1379 		do {
1380 			BUG_ON(!list);
1381 
1382 			if (list->len <= eat) {
1383 				/* Eaten as whole. */
1384 				eat -= list->len;
1385 				list = list->next;
1386 				insp = list;
1387 			} else {
1388 				/* Eaten partially. */
1389 
1390 				if (skb_shared(list)) {
1391 					/* Sucks! We need to fork list. :-( */
1392 					clone = skb_clone(list, GFP_ATOMIC);
1393 					if (!clone)
1394 						return NULL;
1395 					insp = list->next;
1396 					list = clone;
1397 				} else {
1398 					/* This may be pulled without
1399 					 * problems. */
1400 					insp = list;
1401 				}
1402 				if (!pskb_pull(list, eat)) {
1403 					kfree_skb(clone);
1404 					return NULL;
1405 				}
1406 				break;
1407 			}
1408 		} while (eat);
1409 
1410 		/* Free pulled out fragments. */
1411 		while ((list = skb_shinfo(skb)->frag_list) != insp) {
1412 			skb_shinfo(skb)->frag_list = list->next;
1413 			kfree_skb(list);
1414 		}
1415 		/* And insert new clone at head. */
1416 		if (clone) {
1417 			clone->next = list;
1418 			skb_shinfo(skb)->frag_list = clone;
1419 		}
1420 	}
1421 	/* Success! Now we may commit changes to skb data. */
1422 
1423 pull_pages:
1424 	eat = delta;
1425 	k = 0;
1426 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1427 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1428 
1429 		if (size <= eat) {
1430 			skb_frag_unref(skb, i);
1431 			eat -= size;
1432 		} else {
1433 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1434 			if (eat) {
1435 				skb_shinfo(skb)->frags[k].page_offset += eat;
1436 				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
1437 				eat = 0;
1438 			}
1439 			k++;
1440 		}
1441 	}
1442 	skb_shinfo(skb)->nr_frags = k;
1443 
1444 	skb->tail     += delta;
1445 	skb->data_len -= delta;
1446 
1447 	return skb_tail_pointer(skb);
1448 }
1449 EXPORT_SYMBOL(__pskb_pull_tail);
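/* Most code never calls __pskb_pull_tail() directly; it runs behind
 * pskb_may_pull() when a header straddles the fragments. A hypothetical
 * parser sketch:
 */
static int example_parse_header(struct sk_buff *skb, unsigned int hdr_len)
{
	/* make sure the first @hdr_len bytes are linear before access */
	if (!pskb_may_pull(skb, hdr_len))
		return -EINVAL;		/* packet too short */
	/* skb->data now points at @hdr_len contiguous header bytes */
	return 0;
}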
1450 
1451 /**
1452  *	skb_copy_bits - copy bits from skb to kernel buffer
1453  *	@skb: source skb
1454  *	@offset: offset in source
1455  *	@to: destination buffer
1456  *	@len: number of bytes to copy
1457  *
1458  *	Copy the specified number of bytes from the source skb to the
1459  *	destination buffer.
1460  *
1461  *	CAUTION ! :
1462  *		If its prototype is ever changed,
1463  *		check arch/{*}/net/{*}.S files,
1464  *		since it is called from BPF assembly code.
1465  */
1466 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1467 {
1468 	int start = skb_headlen(skb);
1469 	struct sk_buff *frag_iter;
1470 	int i, copy;
1471 
1472 	if (offset > (int)skb->len - len)
1473 		goto fault;
1474 
1475 	/* Copy header. */
1476 	if ((copy = start - offset) > 0) {
1477 		if (copy > len)
1478 			copy = len;
1479 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
1480 		if ((len -= copy) == 0)
1481 			return 0;
1482 		offset += copy;
1483 		to     += copy;
1484 	}
1485 
1486 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1487 		int end;
1488 
1489 		WARN_ON(start > offset + len);
1490 
1491 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1492 		if ((copy = end - offset) > 0) {
1493 			u8 *vaddr;
1494 
1495 			if (copy > len)
1496 				copy = len;
1497 
1498 			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
1499 			memcpy(to,
1500 			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
1501 			       offset - start, copy);
1502 			kunmap_skb_frag(vaddr);
1503 
1504 			if ((len -= copy) == 0)
1505 				return 0;
1506 			offset += copy;
1507 			to     += copy;
1508 		}
1509 		start = end;
1510 	}
1511 
1512 	skb_walk_frags(skb, frag_iter) {
1513 		int end;
1514 
1515 		WARN_ON(start > offset + len);
1516 
1517 		end = start + frag_iter->len;
1518 		if ((copy = end - offset) > 0) {
1519 			if (copy > len)
1520 				copy = len;
1521 			if (skb_copy_bits(frag_iter, offset - start, to, copy))
1522 				goto fault;
1523 			if ((len -= copy) == 0)
1524 				return 0;
1525 			offset += copy;
1526 			to     += copy;
1527 		}
1528 		start = end;
1529 	}
1530 
1531 	if (!len)
1532 		return 0;
1533 
1534 fault:
1535 	return -EFAULT;
1536 }
1537 EXPORT_SYMBOL(skb_copy_bits);
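/* Hypothetical sketch: skb_copy_bits() is the safe way to read a header
 * into a stack buffer when the skb may be non-linear; a negative @offset
 * reads from the headroom, as skb_copy() above relies on.
 */
static int example_peek_ethhdr(const struct sk_buff *skb, struct ethhdr *eth)
{
	return skb_copy_bits(skb, 0, eth, sizeof(*eth));
}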
1538 
1539 /*
1540  * Callback from splice_to_pipe(), if we need to release some pages
1541  * at the end of the spd in case we errored out in filling the pipe.
1542  */
1543 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1544 {
1545 	put_page(spd->pages[i]);
1546 }
1547 
1548 static inline struct page *linear_to_page(struct page *page, unsigned int *len,
1549 					  unsigned int *offset,
1550 					  struct sk_buff *skb, struct sock *sk)
1551 {
1552 	struct page *p = sk->sk_sndmsg_page;
1553 	unsigned int off;
1554 
1555 	if (!p) {
1556 new_page:
1557 		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
1558 		if (!p)
1559 			return NULL;
1560 
1561 		off = sk->sk_sndmsg_off = 0;
1562 		/* hold one ref to this page until it's full */
1563 	} else {
1564 		unsigned int mlen;
1565 
1566 		off = sk->sk_sndmsg_off;
1567 		mlen = PAGE_SIZE - off;
1568 		if (mlen < 64 && mlen < *len) {
1569 			put_page(p);
1570 			goto new_page;
1571 		}
1572 
1573 		*len = min_t(unsigned int, *len, mlen);
1574 	}
1575 
1576 	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
1577 	sk->sk_sndmsg_off += *len;
1578 	*offset = off;
1579 	get_page(p);
1580 
1581 	return p;
1582 }
1583 
1584 /*
1585  * Fill page/offset/length into spd, if it can hold more pages.
1586  */
1587 static inline int spd_fill_page(struct splice_pipe_desc *spd,
1588 				struct pipe_inode_info *pipe, struct page *page,
1589 				unsigned int *len, unsigned int offset,
1590 				struct sk_buff *skb, int linear,
1591 				struct sock *sk)
1592 {
1593 	if (unlikely(spd->nr_pages == pipe->buffers))
1594 		return 1;
1595 
1596 	if (linear) {
1597 		page = linear_to_page(page, len, &offset, skb, sk);
1598 		if (!page)
1599 			return 1;
1600 	} else
1601 		get_page(page);
1602 
1603 	spd->pages[spd->nr_pages] = page;
1604 	spd->partial[spd->nr_pages].len = *len;
1605 	spd->partial[spd->nr_pages].offset = offset;
1606 	spd->nr_pages++;
1607 
1608 	return 0;
1609 }
1610 
1611 static inline void __segment_seek(struct page **page, unsigned int *poff,
1612 				  unsigned int *plen, unsigned int off)
1613 {
1614 	unsigned long n;
1615 
1616 	*poff += off;
1617 	n = *poff / PAGE_SIZE;
1618 	if (n)
1619 		*page = nth_page(*page, n);
1620 
1621 	*poff = *poff % PAGE_SIZE;
1622 	*plen -= off;
1623 }
1624 
1625 static inline int __splice_segment(struct page *page, unsigned int poff,
1626 				   unsigned int plen, unsigned int *off,
1627 				   unsigned int *len, struct sk_buff *skb,
1628 				   struct splice_pipe_desc *spd, int linear,
1629 				   struct sock *sk,
1630 				   struct pipe_inode_info *pipe)
1631 {
1632 	if (!*len)
1633 		return 1;
1634 
1635 	/* skip this segment if already processed */
1636 	if (*off >= plen) {
1637 		*off -= plen;
1638 		return 0;
1639 	}
1640 
1641 	/* ignore any bits we already processed */
1642 	if (*off) {
1643 		__segment_seek(&page, &poff, &plen, *off);
1644 		*off = 0;
1645 	}
1646 
1647 	do {
1648 		unsigned int flen = min(*len, plen);
1649 
1650 		/* the linear region may spread across several pages  */
1651 		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
1652 
1653 		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
1654 			return 1;
1655 
1656 		__segment_seek(&page, &poff, &plen, flen);
1657 		*len -= flen;
1658 
1659 	} while (*len && plen);
1660 
1661 	return 0;
1662 }
1663 
1664 /*
1665  * Map linear and fragment data from the skb to spd. It reports failure if the
1666  * pipe is full or if we already spliced the requested length.
1667  */
1668 static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1669 			     unsigned int *offset, unsigned int *len,
1670 			     struct splice_pipe_desc *spd, struct sock *sk)
1671 {
1672 	int seg;
1673 
1674 	/*
1675 	 * map the linear part
1676 	 */
1677 	if (__splice_segment(virt_to_page(skb->data),
1678 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
1679 			     skb_headlen(skb),
1680 			     offset, len, skb, spd, 1, sk, pipe))
1681 		return 1;
1682 
1683 	/*
1684 	 * then map the fragments
1685 	 */
1686 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1687 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1688 
1689 		if (__splice_segment(skb_frag_page(f),
1690 				     f->page_offset, skb_frag_size(f),
1691 				     offset, len, skb, spd, 0, sk, pipe))
1692 			return 1;
1693 	}
1694 
1695 	return 0;
1696 }
1697 
1698 /*
1699  * Map data from the skb to a pipe. Should handle the linear part,
1700  * the fragments, and the frag list. It does NOT handle frag lists within
1701  * the frag list, if such a thing exists. We'd probably need to recurse to
1702  * handle that cleanly.
1703  */
1704 int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1705 		    struct pipe_inode_info *pipe, unsigned int tlen,
1706 		    unsigned int flags)
1707 {
1708 	struct partial_page partial[PIPE_DEF_BUFFERS];
1709 	struct page *pages[PIPE_DEF_BUFFERS];
1710 	struct splice_pipe_desc spd = {
1711 		.pages = pages,
1712 		.partial = partial,
1713 		.flags = flags,
1714 		.ops = &sock_pipe_buf_ops,
1715 		.spd_release = sock_spd_release,
1716 	};
1717 	struct sk_buff *frag_iter;
1718 	struct sock *sk = skb->sk;
1719 	int ret = 0;
1720 
1721 	if (splice_grow_spd(pipe, &spd))
1722 		return -ENOMEM;
1723 
1724 	/*
1725 	 * __skb_splice_bits() only fails if the output has no room left,
1726 	 * so no point in going over the frag_list for the error case.
1727 	 */
1728 	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
1729 		goto done;
1730 	else if (!tlen)
1731 		goto done;
1732 
1733 	/*
1734 	 * now see if we have a frag_list to map
1735 	 */
1736 	skb_walk_frags(skb, frag_iter) {
1737 		if (!tlen)
1738 			break;
1739 		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
1740 			break;
1741 	}
1742 
1743 done:
1744 	if (spd.nr_pages) {
1745 		/*
1746 		 * Drop the socket lock, otherwise we have reverse
1747 		 * locking dependencies between sk_lock and i_mutex
1748 		 * here as compared to sendfile(). We enter here
1749 		 * with the socket lock held, and splice_to_pipe() will
1750 		 * grab the pipe inode lock. For sendfile() emulation,
1751 		 * we call into ->sendpage() with the i_mutex lock held
1752 		 * and networking will grab the socket lock.
1753 		 */
1754 		release_sock(sk);
1755 		ret = splice_to_pipe(pipe, &spd);
1756 		lock_sock(sk);
1757 	}
1758 
1759 	splice_shrink_spd(pipe, &spd);
1760 	return ret;
1761 }
1762 
1763 /**
1764  *	skb_store_bits - store bits from kernel buffer to skb
1765  *	@skb: destination buffer
1766  *	@offset: offset in destination
1767  *	@from: source buffer
1768  *	@len: number of bytes to copy
1769  *
1770  *	Copy the specified number of bytes from the source buffer to the
1771  *	destination skb.  This function handles all the messy bits of
1772  *	traversing fragment lists and such.
1773  */
1774 
1775 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1776 {
1777 	int start = skb_headlen(skb);
1778 	struct sk_buff *frag_iter;
1779 	int i, copy;
1780 
1781 	if (offset > (int)skb->len - len)
1782 		goto fault;
1783 
1784 	if ((copy = start - offset) > 0) {
1785 		if (copy > len)
1786 			copy = len;
1787 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
1788 		if ((len -= copy) == 0)
1789 			return 0;
1790 		offset += copy;
1791 		from += copy;
1792 	}
1793 
1794 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1795 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1796 		int end;
1797 
1798 		WARN_ON(start > offset + len);
1799 
1800 		end = start + skb_frag_size(frag);
1801 		if ((copy = end - offset) > 0) {
1802 			u8 *vaddr;
1803 
1804 			if (copy > len)
1805 				copy = len;
1806 
1807 			vaddr = kmap_skb_frag(frag);
1808 			memcpy(vaddr + frag->page_offset + offset - start,
1809 			       from, copy);
1810 			kunmap_skb_frag(vaddr);
1811 
1812 			if ((len -= copy) == 0)
1813 				return 0;
1814 			offset += copy;
1815 			from += copy;
1816 		}
1817 		start = end;
1818 	}
1819 
1820 	skb_walk_frags(skb, frag_iter) {
1821 		int end;
1822 
1823 		WARN_ON(start > offset + len);
1824 
1825 		end = start + frag_iter->len;
1826 		if ((copy = end - offset) > 0) {
1827 			if (copy > len)
1828 				copy = len;
1829 			if (skb_store_bits(frag_iter, offset - start,
1830 					   from, copy))
1831 				goto fault;
1832 			if ((len -= copy) == 0)
1833 				return 0;
1834 			offset += copy;
1835 			from += copy;
1836 		}
1837 		start = end;
1838 	}
1839 	if (!len)
1840 		return 0;
1841 
1842 fault:
1843 	return -EFAULT;
1844 }
1845 EXPORT_SYMBOL(skb_store_bits);
1846 
1847 /* Checksum skb data. */
1848 
1849 __wsum skb_checksum(const struct sk_buff *skb, int offset,
1850 			  int len, __wsum csum)
1851 {
1852 	int start = skb_headlen(skb);
1853 	int i, copy = start - offset;
1854 	struct sk_buff *frag_iter;
1855 	int pos = 0;
1856 
1857 	/* Checksum header. */
1858 	if (copy > 0) {
1859 		if (copy > len)
1860 			copy = len;
1861 		csum = csum_partial(skb->data + offset, copy, csum);
1862 		if ((len -= copy) == 0)
1863 			return csum;
1864 		offset += copy;
1865 		pos	= copy;
1866 	}
1867 
1868 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1869 		int end;
1870 
1871 		WARN_ON(start > offset + len);
1872 
1873 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1874 		if ((copy = end - offset) > 0) {
1875 			__wsum csum2;
1876 			u8 *vaddr;
1877 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1878 
1879 			if (copy > len)
1880 				copy = len;
1881 			vaddr = kmap_skb_frag(frag);
1882 			csum2 = csum_partial(vaddr + frag->page_offset +
1883 					     offset - start, copy, 0);
1884 			kunmap_skb_frag(vaddr);
1885 			csum = csum_block_add(csum, csum2, pos);
1886 			if (!(len -= copy))
1887 				return csum;
1888 			offset += copy;
1889 			pos    += copy;
1890 		}
1891 		start = end;
1892 	}
1893 
1894 	skb_walk_frags(skb, frag_iter) {
1895 		int end;
1896 
1897 		WARN_ON(start > offset + len);
1898 
1899 		end = start + frag_iter->len;
1900 		if ((copy = end - offset) > 0) {
1901 			__wsum csum2;
1902 			if (copy > len)
1903 				copy = len;
1904 			csum2 = skb_checksum(frag_iter, offset - start,
1905 					     copy, 0);
1906 			csum = csum_block_add(csum, csum2, pos);
1907 			if ((len -= copy) == 0)
1908 				return csum;
1909 			offset += copy;
1910 			pos    += copy;
1911 		}
1912 		start = end;
1913 	}
1914 	BUG_ON(len);
1915 
1916 	return csum;
1917 }
1918 EXPORT_SYMBOL(skb_checksum);
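/* Hypothetical sketch: fold the 32-bit running sum that skb_checksum()
 * returns into the 16-bit ones'-complement form carried in protocol
 * headers.
 */
static __sum16 example_checksum_from(const struct sk_buff *skb, int offset)
{
	__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

	return csum_fold(csum);
}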
1919 
1920 /* Both of above in one bottle. */
1921 
1922 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1923 				    u8 *to, int len, __wsum csum)
1924 {
1925 	int start = skb_headlen(skb);
1926 	int i, copy = start - offset;
1927 	struct sk_buff *frag_iter;
1928 	int pos = 0;
1929 
1930 	/* Copy header. */
1931 	if (copy > 0) {
1932 		if (copy > len)
1933 			copy = len;
1934 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
1935 						 copy, csum);
1936 		if ((len -= copy) == 0)
1937 			return csum;
1938 		offset += copy;
1939 		to     += copy;
1940 		pos	= copy;
1941 	}
1942 
1943 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1944 		int end;
1945 
1946 		WARN_ON(start > offset + len);
1947 
1948 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1949 		if ((copy = end - offset) > 0) {
1950 			__wsum csum2;
1951 			u8 *vaddr;
1952 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1953 
1954 			if (copy > len)
1955 				copy = len;
1956 			vaddr = kmap_skb_frag(frag);
1957 			csum2 = csum_partial_copy_nocheck(vaddr +
1958 							  frag->page_offset +
1959 							  offset - start, to,
1960 							  copy, 0);
1961 			kunmap_skb_frag(vaddr);
1962 			csum = csum_block_add(csum, csum2, pos);
1963 			if (!(len -= copy))
1964 				return csum;
1965 			offset += copy;
1966 			to     += copy;
1967 			pos    += copy;
1968 		}
1969 		start = end;
1970 	}
1971 
1972 	skb_walk_frags(skb, frag_iter) {
1973 		__wsum csum2;
1974 		int end;
1975 
1976 		WARN_ON(start > offset + len);
1977 
1978 		end = start + frag_iter->len;
1979 		if ((copy = end - offset) > 0) {
1980 			if (copy > len)
1981 				copy = len;
1982 			csum2 = skb_copy_and_csum_bits(frag_iter,
1983 						       offset - start,
1984 						       to, copy, 0);
1985 			csum = csum_block_add(csum, csum2, pos);
1986 			if ((len -= copy) == 0)
1987 				return csum;
1988 			offset += copy;
1989 			to     += copy;
1990 			pos    += copy;
1991 		}
1992 		start = end;
1993 	}
1994 	BUG_ON(len);
1995 	return csum;
1996 }
1997 EXPORT_SYMBOL(skb_copy_and_csum_bits);
1998 
1999 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
2000 {
2001 	__wsum csum;
2002 	long csstart;
2003 
2004 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2005 		csstart = skb_checksum_start_offset(skb);
2006 	else
2007 		csstart = skb_headlen(skb);
2008 
2009 	BUG_ON(csstart > skb_headlen(skb));
2010 
2011 	skb_copy_from_linear_data(skb, to, csstart);
2012 
2013 	csum = 0;
2014 	if (csstart != skb->len)
2015 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
2016 					      skb->len - csstart, 0);
2017 
2018 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2019 		long csstuff = csstart + skb->csum_offset;
2020 
2021 		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
2022 	}
2023 }
2024 EXPORT_SYMBOL(skb_copy_and_csum_dev);
2025 
2026 /**
2027  *	skb_dequeue - remove from the head of the queue
2028  *	@list: list to dequeue from
2029  *
2030  *	Remove the head of the list. The list lock is taken so the function
2031  *	may be used safely with other locking list functions. The head item is
2032  *	returned or %NULL if the list is empty.
2033  */
2034 
2035 struct sk_buff *skb_dequeue(struct sk_buff_head *list)
2036 {
2037 	unsigned long flags;
2038 	struct sk_buff *result;
2039 
2040 	spin_lock_irqsave(&list->lock, flags);
2041 	result = __skb_dequeue(list);
2042 	spin_unlock_irqrestore(&list->lock, flags);
2043 	return result;
2044 }
2045 EXPORT_SYMBOL(skb_dequeue);
2046 
2047 /**
2048  *	skb_dequeue_tail - remove from the tail of the queue
2049  *	@list: list to dequeue from
2050  *
2051  *	Remove the tail of the list. The list lock is taken so the function
2052  *	may be used safely with other locking list functions. The tail item is
2053  *	returned or %NULL if the list is empty.
2054  */
2055 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
2056 {
2057 	unsigned long flags;
2058 	struct sk_buff *result;
2059 
2060 	spin_lock_irqsave(&list->lock, flags);
2061 	result = __skb_dequeue_tail(list);
2062 	spin_unlock_irqrestore(&list->lock, flags);
2063 	return result;
2064 }
2065 EXPORT_SYMBOL(skb_dequeue_tail);
2066 
2067 /**
2068  *	skb_queue_purge - empty a list
2069  *	@list: list to empty
2070  *
2071  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
2072  *	the list and one reference dropped. This function takes the list
2073  *	lock and is atomic with respect to other list locking functions.
2074  */
2075 void skb_queue_purge(struct sk_buff_head *list)
2076 {
2077 	struct sk_buff *skb;
2078 	while ((skb = skb_dequeue(list)) != NULL)
2079 		kfree_skb(skb);
2080 }
2081 EXPORT_SYMBOL(skb_queue_purge);
2082 
2083 /**
2084  *	skb_queue_head - queue a buffer at the list head
2085  *	@list: list to use
2086  *	@newsk: buffer to queue
2087  *
2088  *	Queue a buffer at the start of the list. This function takes the
2089  *	list lock and so can be used safely with other locking &sk_buff
2090  *	functions.
2091  *
2092  *	A buffer cannot be placed on two lists at the same time.
2093  */
2094 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
2095 {
2096 	unsigned long flags;
2097 
2098 	spin_lock_irqsave(&list->lock, flags);
2099 	__skb_queue_head(list, newsk);
2100 	spin_unlock_irqrestore(&list->lock, flags);
2101 }
2102 EXPORT_SYMBOL(skb_queue_head);
2103 
2104 /**
2105  *	skb_queue_tail - queue a buffer at the list tail
2106  *	@list: list to use
2107  *	@newsk: buffer to queue
2108  *
2109  *	Queue a buffer at the tail of the list. This function takes the
2110  *	list lock and so can be used safely with other locking &sk_buff
2111  *	functions.
2112  *
2113  *	A buffer cannot be placed on two lists at the same time.
2114  */
2115 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
2116 {
2117 	unsigned long flags;
2118 
2119 	spin_lock_irqsave(&list->lock, flags);
2120 	__skb_queue_tail(list, newsk);
2121 	spin_unlock_irqrestore(&list->lock, flags);
2122 }
2123 EXPORT_SYMBOL(skb_queue_tail);
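
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * the locked helpers above pair naturally as a simple FIFO between a
 * producer and a consumer running in different contexts. The queue head
 * must first be set up with skb_queue_head_init().
 *
 *	static struct sk_buff_head example_queue;
 *
 *	producer: skb_queue_tail(&example_queue, skb);
 *
 *	consumer: while ((skb = skb_dequeue(&example_queue)) != NULL)
 *			... process skb, then kfree_skb(skb) ...
 */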
2124 
2125 /**
2126  *	skb_unlink	-	remove a buffer from a list
2127  *	@skb: buffer to remove
2128  *	@list: list to use
2129  *
2130  *	Remove a packet from a list. The list locks are taken and this
2131  *	function is atomic with respect to other list locked calls.
2132  *
2133  *	You must know what list the SKB is on.
2134  */
2135 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2136 {
2137 	unsigned long flags;
2138 
2139 	spin_lock_irqsave(&list->lock, flags);
2140 	__skb_unlink(skb, list);
2141 	spin_unlock_irqrestore(&list->lock, flags);
2142 }
2143 EXPORT_SYMBOL(skb_unlink);
2144 
2145 /**
2146  *	skb_append	-	append a buffer
2147  *	@old: buffer to insert after
2148  *	@newsk: buffer to insert
2149  *	@list: list to use
2150  *
2151  *	Place a packet after a given packet in a list. The list locks are taken
2152  *	and this function is atomic with respect to other list locked calls.
2153  *	A buffer cannot be placed on two lists at the same time.
2154  */
2155 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2156 {
2157 	unsigned long flags;
2158 
2159 	spin_lock_irqsave(&list->lock, flags);
2160 	__skb_queue_after(list, old, newsk);
2161 	spin_unlock_irqrestore(&list->lock, flags);
2162 }
2163 EXPORT_SYMBOL(skb_append);
2164 
2165 /**
2166  *	skb_insert	-	insert a buffer
2167  *	@old: buffer to insert before
2168  *	@newsk: buffer to insert
2169  *	@list: list to use
2170  *
2171  *	Place a packet before a given packet in a list. The list locks are
2172  * 	taken and this function is atomic with respect to other list locked
2173  *	calls.
2174  *
2175  *	A buffer cannot be placed on two lists at the same time.
2176  */
2177 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2178 {
2179 	unsigned long flags;
2180 
2181 	spin_lock_irqsave(&list->lock, flags);
2182 	__skb_insert(newsk, old->prev, old, list);
2183 	spin_unlock_irqrestore(&list->lock, flags);
2184 }
2185 EXPORT_SYMBOL(skb_insert);
2186 
2187 static inline void skb_split_inside_header(struct sk_buff *skb,
2188 					   struct sk_buff* skb1,
2189 					   const u32 len, const int pos)
2190 {
2191 	int i;
2192 
2193 	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2194 					 pos - len);
2195 	/* And move data appendix as is. */
2196 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2197 		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2198 
2199 	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2200 	skb_shinfo(skb)->nr_frags  = 0;
2201 	skb1->data_len		   = skb->data_len;
2202 	skb1->len		   += skb1->data_len;
2203 	skb->data_len		   = 0;
2204 	skb->len		   = len;
2205 	skb_set_tail_pointer(skb, len);
2206 }
2207 
2208 static inline void skb_split_no_header(struct sk_buff *skb,
2209 				       struct sk_buff* skb1,
2210 				       const u32 len, int pos)
2211 {
2212 	int i, k = 0;
2213 	const int nfrags = skb_shinfo(skb)->nr_frags;
2214 
2215 	skb_shinfo(skb)->nr_frags = 0;
2216 	skb1->len		  = skb1->data_len = skb->len - len;
2217 	skb->len		  = len;
2218 	skb->data_len		  = len - pos;
2219 
2220 	for (i = 0; i < nfrags; i++) {
2221 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2222 
2223 		if (pos + size > len) {
2224 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2225 
2226 			if (pos < len) {
2227 				/* Split frag.
2228 				 * We have two options in this case:
2229 				 * 1. Move the whole frag to the second
2230 				 *    part if possible, e.g. this is
2231 				 *    mandatory for TUX, where splitting
2232 				 *    is expensive.
2233 				 * 2. Split accurately at the boundary;
2234 				 *    this is what we do here. */
2235 				skb_frag_ref(skb, i);
2236 				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2237 				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
2238 				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
2239 				skb_shinfo(skb)->nr_frags++;
2240 			}
2241 			k++;
2242 		} else
2243 			skb_shinfo(skb)->nr_frags++;
2244 		pos += size;
2245 	}
2246 	skb_shinfo(skb1)->nr_frags = k;
2247 }
2248 
2249 /**
2250  * skb_split - Split fragmented skb into two parts at length len.
2251  * @skb: the buffer to split
2252  * @skb1: the buffer to receive the second part
2253  * @len: new length for skb
2254  */
2255 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2256 {
2257 	int pos = skb_headlen(skb);
2258 
2259 	if (len < pos)	/* Split line is inside header. */
2260 		skb_split_inside_header(skb, skb1, len, pos);
2261 	else		/* Second chunk has no header, nothing to copy. */
2262 		skb_split_no_header(skb, skb1, len, pos);
2263 }
2264 EXPORT_SYMBOL(skb_split);
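
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * give the tail of a buffer to a freshly allocated skb. Allocating
 * skb_headlen() bytes of space guarantees room even when the split point
 * falls inside the linear header; "cut" is a hypothetical split point.
 *
 *	struct sk_buff *tail = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
 *
 *	if (tail)
 *		skb_split(skb, tail, cut);
 *
 * Afterwards skb holds the first cut bytes and tail holds the rest.
 */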
2265 
2266 /* Shifting from/to a cloned skb is a no-go.
2267  *
2268  * Caller cannot keep skb_shinfo related pointers past calling here!
2269  */
2270 static int skb_prepare_for_shift(struct sk_buff *skb)
2271 {
2272 	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2273 }
2274 
2275 /**
2276  * skb_shift - Shifts paged data partially from skb to another
2277  * @tgt: buffer into which tail data gets added
2278  * @skb: buffer from which the paged data comes from
2279  * @shiftlen: shift up to this many bytes
2280  *
2281  * Attempts to shift up to shiftlen worth of bytes, which may be less than
2282  * the length of the skb, from skb to tgt. Returns the number of bytes
2283  * shifted. It's up to the caller to free skb if everything was shifted.
2284  *
2285  * If @tgt runs out of frags, the whole operation is aborted.
2286  *
2287  * The skb may contain nothing but paged data, while tgt is allowed
2288  * to have non-paged data as well.
2289  *
2290  * TODO: full sized shift could be optimized but that would need
2291  * specialized skb free'er to handle frags without up-to-date nr_frags.
2292  */
2293 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2294 {
2295 	int from, to, merge, todo;
2296 	struct skb_frag_struct *fragfrom, *fragto;
2297 
2298 	BUG_ON(shiftlen > skb->len);
2299 	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */
2300 
2301 	todo = shiftlen;
2302 	from = 0;
2303 	to = skb_shinfo(tgt)->nr_frags;
2304 	fragfrom = &skb_shinfo(skb)->frags[from];
2305 
2306 	/* Actual merge is delayed until the point when we know we can
2307 	 * commit all, so that we don't have to undo partial changes
2308 	 */
2309 	if (!to ||
2310 	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
2311 			      fragfrom->page_offset)) {
2312 		merge = -1;
2313 	} else {
2314 		merge = to - 1;
2315 
2316 		todo -= skb_frag_size(fragfrom);
2317 		if (todo < 0) {
2318 			if (skb_prepare_for_shift(skb) ||
2319 			    skb_prepare_for_shift(tgt))
2320 				return 0;
2321 
2322 			/* All previous frag pointers might be stale! */
2323 			fragfrom = &skb_shinfo(skb)->frags[from];
2324 			fragto = &skb_shinfo(tgt)->frags[merge];
2325 
2326 			skb_frag_size_add(fragto, shiftlen);
2327 			skb_frag_size_sub(fragfrom, shiftlen);
2328 			fragfrom->page_offset += shiftlen;
2329 
2330 			goto onlymerged;
2331 		}
2332 
2333 		from++;
2334 	}
2335 
2336 	/* Skip full, not-fitting skb to avoid expensive operations */
2337 	if ((shiftlen == skb->len) &&
2338 	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2339 		return 0;
2340 
2341 	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2342 		return 0;
2343 
2344 	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2345 		if (to == MAX_SKB_FRAGS)
2346 			return 0;
2347 
2348 		fragfrom = &skb_shinfo(skb)->frags[from];
2349 		fragto = &skb_shinfo(tgt)->frags[to];
2350 
2351 		if (todo >= skb_frag_size(fragfrom)) {
2352 			*fragto = *fragfrom;
2353 			todo -= skb_frag_size(fragfrom);
2354 			from++;
2355 			to++;
2356 
2357 		} else {
2358 			__skb_frag_ref(fragfrom);
2359 			fragto->page = fragfrom->page;
2360 			fragto->page_offset = fragfrom->page_offset;
2361 			skb_frag_size_set(fragto, todo);
2362 
2363 			fragfrom->page_offset += todo;
2364 			skb_frag_size_sub(fragfrom, todo);
2365 			todo = 0;
2366 
2367 			to++;
2368 			break;
2369 		}
2370 	}
2371 
2372 	/* Ready to "commit" this state change to tgt */
2373 	skb_shinfo(tgt)->nr_frags = to;
2374 
2375 	if (merge >= 0) {
2376 		fragfrom = &skb_shinfo(skb)->frags[0];
2377 		fragto = &skb_shinfo(tgt)->frags[merge];
2378 
2379 		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
2380 		__skb_frag_unref(fragfrom);
2381 	}
2382 
2383 	/* Reposition in the original skb */
2384 	to = 0;
2385 	while (from < skb_shinfo(skb)->nr_frags)
2386 		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2387 	skb_shinfo(skb)->nr_frags = to;
2388 
2389 	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2390 
2391 onlymerged:
2392 	/* Most likely the tgt won't ever need its checksum anymore; skb, on
2393 	 * the other hand, might need it if it has to be resent
2394 	 */
2395 	tgt->ip_summed = CHECKSUM_PARTIAL;
2396 	skb->ip_summed = CHECKSUM_PARTIAL;
2397 
2398 	/* Yak, is it really working this way? Some helper please? */
2399 	skb->len -= shiftlen;
2400 	skb->data_len -= shiftlen;
2401 	skb->truesize -= shiftlen;
2402 	tgt->len += shiftlen;
2403 	tgt->data_len += shiftlen;
2404 	tgt->truesize += shiftlen;
2405 
2406 	return shiftlen;
2407 }
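
/*
 * Example usage (an illustrative sketch, not part of the original file),
 * in the style of the TCP SACK collapsing code that calls skb_shift():
 * since skb->len is reduced by the shifted amount, a length of zero
 * afterwards means everything moved and skb must be unlinked and freed
 * by the caller.
 *
 *	shifted = skb_shift(prev, skb, len);
 *	if (shifted && !skb->len)
 *		... unlink skb from its queue and kfree_skb(skb) ...
 */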
2408 
2409 /**
2410  * skb_prepare_seq_read - Prepare a sequential read of skb data
2411  * @skb: the buffer to read
2412  * @from: lower offset of data to be read
2413  * @to: upper offset of data to be read
2414  * @st: state variable
2415  *
2416  * Initializes the specified state variable. Must be called before
2417  * invoking skb_seq_read() for the first time.
2418  */
2419 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2420 			  unsigned int to, struct skb_seq_state *st)
2421 {
2422 	st->lower_offset = from;
2423 	st->upper_offset = to;
2424 	st->root_skb = st->cur_skb = skb;
2425 	st->frag_idx = st->stepped_offset = 0;
2426 	st->frag_data = NULL;
2427 }
2428 EXPORT_SYMBOL(skb_prepare_seq_read);
2429 
2430 /**
2431  * skb_seq_read - Sequentially read skb data
2432  * @consumed: number of bytes consumed by the caller so far
2433  * @data: destination pointer for data to be returned
2434  * @st: state variable
2435  *
2436  * Reads a block of skb data at &consumed relative to the
2437  * lower offset specified to skb_prepare_seq_read(). Assigns
2438  * the head of the data block to &data and returns the length
2439  * of the block or 0 if the end of the skb data or the upper
2440  * offset has been reached.
2441  *
2442  * The caller is not required to consume all of the data
2443  * returned, i.e. &consumed is typically set to the number
2444  * of bytes already consumed and the next call to
2445  * skb_seq_read() will return the remaining part of the block.
2446  *
2447  * Note 1: The size of each block of data returned can be arbitrary;
2448  *       this limitation is the cost of zerocopy sequential
2449  *       reads of potentially non-linear data.
2450  *
2451  * Note 2: Fragment lists within fragments are not implemented
2452  *       at the moment, state->root_skb could be replaced with
2453  *       a stack for this purpose.
2454  */
2455 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2456 			  struct skb_seq_state *st)
2457 {
2458 	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2459 	skb_frag_t *frag;
2460 
2461 	if (unlikely(abs_offset >= st->upper_offset))
2462 		return 0;
2463 
2464 next_skb:
2465 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2466 
2467 	if (abs_offset < block_limit && !st->frag_data) {
2468 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2469 		return block_limit - abs_offset;
2470 	}
2471 
2472 	if (st->frag_idx == 0 && !st->frag_data)
2473 		st->stepped_offset += skb_headlen(st->cur_skb);
2474 
2475 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2476 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2477 		block_limit = skb_frag_size(frag) + st->stepped_offset;
2478 
2479 		if (abs_offset < block_limit) {
2480 			if (!st->frag_data)
2481 				st->frag_data = kmap_skb_frag(frag);
2482 
2483 			*data = (u8 *) st->frag_data + frag->page_offset +
2484 				(abs_offset - st->stepped_offset);
2485 
2486 			return block_limit - abs_offset;
2487 		}
2488 
2489 		if (st->frag_data) {
2490 			kunmap_skb_frag(st->frag_data);
2491 			st->frag_data = NULL;
2492 		}
2493 
2494 		st->frag_idx++;
2495 		st->stepped_offset += skb_frag_size(frag);
2496 	}
2497 
2498 	if (st->frag_data) {
2499 		kunmap_skb_frag(st->frag_data);
2500 		st->frag_data = NULL;
2501 	}
2502 
2503 	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2504 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2505 		st->frag_idx = 0;
2506 		goto next_skb;
2507 	} else if (st->cur_skb->next) {
2508 		st->cur_skb = st->cur_skb->next;
2509 		st->frag_idx = 0;
2510 		goto next_skb;
2511 	}
2512 
2513 	return 0;
2514 }
2515 EXPORT_SYMBOL(skb_seq_read);
2516 
2517 /**
2518  * skb_abort_seq_read - Abort a sequential read of skb data
2519  * @st: state variable
2520  *
2521  * Must be called if the sequential read was abandoned before
2522  * skb_seq_read() returned 0.
2523  */
2524 void skb_abort_seq_read(struct skb_seq_state *st)
2525 {
2526 	if (st->frag_data)
2527 		kunmap_skb_frag(st->frag_data);
2528 }
2529 EXPORT_SYMBOL(skb_abort_seq_read);
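
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * a typical zerocopy read loop over an arbitrary skb region.
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int consumed = 0, avail;
 *
 *	skb_prepare_seq_read(skb, from, to, &st);
 *	while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
 *		... look at avail bytes at data ...
 *		consumed += avail;
 *	}
 *
 * If the loop is abandoned before skb_seq_read() returns 0, call
 * skb_abort_seq_read(&st) to drop any fragment mapping still held.
 */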
2530 
2531 #define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
2532 
2533 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2534 					  struct ts_config *conf,
2535 					  struct ts_state *state)
2536 {
2537 	return skb_seq_read(offset, text, TS_SKB_CB(state));
2538 }
2539 
2540 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2541 {
2542 	skb_abort_seq_read(TS_SKB_CB(state));
2543 }
2544 
2545 /**
2546  * skb_find_text - Find a text pattern in skb data
2547  * @skb: the buffer to look in
2548  * @from: search offset
2549  * @to: search limit
2550  * @config: textsearch configuration
2551  * @state: uninitialized textsearch state variable
2552  *
2553  * Finds a pattern in the skb data according to the specified
2554  * textsearch configuration. Use textsearch_next() to retrieve
2555  * subsequent occurrences of the pattern. Returns the offset
2556  * to the first occurrence or UINT_MAX if no match was found.
2557  */
2558 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2559 			   unsigned int to, struct ts_config *config,
2560 			   struct ts_state *state)
2561 {
2562 	unsigned int ret;
2563 
2564 	config->get_next_block = skb_ts_get_next_block;
2565 	config->finish = skb_ts_finish;
2566 
2567 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2568 
2569 	ret = textsearch_find(config, state);
2570 	return (ret <= to - from ? ret : UINT_MAX);
2571 }
2572 EXPORT_SYMBOL(skb_find_text);
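
/*
 * Example usage (an illustrative sketch, not part of the original file),
 * following the pattern documented in lib/textsearch.c; the algorithm
 * name and pattern below are arbitrary.
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "abc", 3, GFP_KERNEL, TS_AUTOLOAD);
 *	if (IS_ERR(conf))
 *		return PTR_ERR(conf);
 *
 *	pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *	if (pos != UINT_MAX)
 *		... pattern found at offset pos ...
 *
 *	textsearch_destroy(conf);
 */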
2573 
2574 /**
2575  * skb_append_datato_frags - append user data to an skb
2576  * @sk: sock structure
2577  * @skb: skb to which the user data is appended
2578  * @getfrag: callback function used to fetch the user data
2579  * @from: pointer to the user message iov
2580  * @length: length of the iov message
2581  *
2582  * Description: This procedure appends user data to the fragment part
2583  * of the skb. If any page allocation fails, it returns -ENOMEM.
2584  */
2585 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2586 			int (*getfrag)(void *from, char *to, int offset,
2587 					int len, int odd, struct sk_buff *skb),
2588 			void *from, int length)
2589 {
2590 	int frg_cnt = 0;
2591 	skb_frag_t *frag = NULL;
2592 	struct page *page = NULL;
2593 	int copy, left;
2594 	int offset = 0;
2595 	int ret;
2596 
2597 	do {
2598 		/* Return error if we don't have space for new frag */
2599 		frg_cnt = skb_shinfo(skb)->nr_frags;
2600 		if (frg_cnt >= MAX_SKB_FRAGS)
2601 			return -EFAULT;
2602 
2603 		/* allocate a new page for next frag */
2604 		page = alloc_pages(sk->sk_allocation, 0);
2605 
2606 		/* If alloc_pages() fails, just return failure; the caller
2607 		 * will free previously allocated pages by doing kfree_skb()
2608 		 */
2609 		if (page == NULL)
2610 			return -ENOMEM;
2611 
2612 		/* initialize the next frag */
2613 		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2614 		skb->truesize += PAGE_SIZE;
2615 		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
2616 
2617 		/* get the new initialized frag */
2618 		frg_cnt = skb_shinfo(skb)->nr_frags;
2619 		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
2620 
2621 		/* copy the user data to page */
2622 		left = PAGE_SIZE - frag->page_offset;
2623 		copy = (length > left)? left : length;
2624 
2625 		ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag),
2626 			    offset, copy, 0, skb);
2627 		if (ret < 0)
2628 			return -EFAULT;
2629 
2630 		/* copy was successful so update the size parameters */
2631 		skb_frag_size_add(frag, copy);
2632 		skb->len += copy;
2633 		skb->data_len += copy;
2634 		offset += copy;
2635 		length -= copy;
2636 
2637 	} while (length > 0);
2638 
2639 	return 0;
2640 }
2641 EXPORT_SYMBOL(skb_append_datato_frags);
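
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * a minimal @getfrag callback copying from a plain kernel buffer. Real
 * callers usually pass a helper such as ip_generic_getfrag() to copy
 * from a user iovec instead.
 *
 *	static int example_getfrag(void *from, char *to, int offset,
 *				   int len, int odd, struct sk_buff *skb)
 *	{
 *		memcpy(to, (char *)from + offset, len);
 *		return 0;
 *	}
 */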
2642 
2643 /**
2644  *	skb_pull_rcsum - pull skb and update receive checksum
2645  *	@skb: buffer to update
2646  *	@len: length of data pulled
2647  *
2648  *	This function performs an skb_pull on the packet and updates
2649  *	the CHECKSUM_COMPLETE checksum.  It should be used on
2650  *	receive path processing instead of skb_pull unless you know
2651  *	that the checksum difference is zero (e.g., a valid IP header)
2652  *	or you are setting ip_summed to CHECKSUM_NONE.
2653  */
2654 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2655 {
2656 	BUG_ON(len > skb->len);
2657 	skb->len -= len;
2658 	BUG_ON(skb->len < skb->data_len);
2659 	skb_postpull_rcsum(skb, skb->data, len);
2660 	return skb->data += len;
2661 }
2662 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
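
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * stripping an encapsulation header on the receive path while keeping a
 * CHECKSUM_COMPLETE value correct, e.g. for a VLAN tag:
 *
 *	skb_pull_rcsum(skb, VLAN_HLEN);
 */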
2663 
2664 /**
2665  *	skb_segment - Perform protocol segmentation on skb.
2666  *	@skb: buffer to segment
2667  *	@features: features for the output path (see dev->features)
2668  *
2669  *	This function performs segmentation on the given skb.  It returns
2670  *	a pointer to the first in a list of new skbs for the segments.
2671  *	In case of error it returns ERR_PTR(err).
2672  */
2673 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2674 {
2675 	struct sk_buff *segs = NULL;
2676 	struct sk_buff *tail = NULL;
2677 	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
2678 	unsigned int mss = skb_shinfo(skb)->gso_size;
2679 	unsigned int doffset = skb->data - skb_mac_header(skb);
2680 	unsigned int offset = doffset;
2681 	unsigned int headroom;
2682 	unsigned int len;
2683 	int sg = !!(features & NETIF_F_SG);
2684 	int nfrags = skb_shinfo(skb)->nr_frags;
2685 	int err = -ENOMEM;
2686 	int i = 0;
2687 	int pos;
2688 
2689 	__skb_push(skb, doffset);
2690 	headroom = skb_headroom(skb);
2691 	pos = skb_headlen(skb);
2692 
2693 	do {
2694 		struct sk_buff *nskb;
2695 		skb_frag_t *frag;
2696 		int hsize;
2697 		int size;
2698 
2699 		len = skb->len - offset;
2700 		if (len > mss)
2701 			len = mss;
2702 
2703 		hsize = skb_headlen(skb) - offset;
2704 		if (hsize < 0)
2705 			hsize = 0;
2706 		if (hsize > len || !sg)
2707 			hsize = len;
2708 
2709 		if (!hsize && i >= nfrags) {
2710 			BUG_ON(fskb->len != len);
2711 
2712 			pos += len;
2713 			nskb = skb_clone(fskb, GFP_ATOMIC);
2714 			fskb = fskb->next;
2715 
2716 			if (unlikely(!nskb))
2717 				goto err;
2718 
2719 			hsize = skb_end_pointer(nskb) - nskb->head;
2720 			if (skb_cow_head(nskb, doffset + headroom)) {
2721 				kfree_skb(nskb);
2722 				goto err;
2723 			}
2724 
2725 			nskb->truesize += skb_end_pointer(nskb) - nskb->head -
2726 					  hsize;
2727 			skb_release_head_state(nskb);
2728 			__skb_push(nskb, doffset);
2729 		} else {
2730 			nskb = alloc_skb(hsize + doffset + headroom,
2731 					 GFP_ATOMIC);
2732 
2733 			if (unlikely(!nskb))
2734 				goto err;
2735 
2736 			skb_reserve(nskb, headroom);
2737 			__skb_put(nskb, doffset);
2738 		}
2739 
2740 		if (segs)
2741 			tail->next = nskb;
2742 		else
2743 			segs = nskb;
2744 		tail = nskb;
2745 
2746 		__copy_skb_header(nskb, skb);
2747 		nskb->mac_len = skb->mac_len;
2748 
2749 		/* nskb and skb might have different headroom */
2750 		if (nskb->ip_summed == CHECKSUM_PARTIAL)
2751 			nskb->csum_start += skb_headroom(nskb) - headroom;
2752 
2753 		skb_reset_mac_header(nskb);
2754 		skb_set_network_header(nskb, skb->mac_len);
2755 		nskb->transport_header = (nskb->network_header +
2756 					  skb_network_header_len(skb));
2757 		skb_copy_from_linear_data(skb, nskb->data, doffset);
2758 
2759 		if (fskb != skb_shinfo(skb)->frag_list)
2760 			continue;
2761 
2762 		if (!sg) {
2763 			nskb->ip_summed = CHECKSUM_NONE;
2764 			nskb->csum = skb_copy_and_csum_bits(skb, offset,
2765 							    skb_put(nskb, len),
2766 							    len, 0);
2767 			continue;
2768 		}
2769 
2770 		frag = skb_shinfo(nskb)->frags;
2771 
2772 		skb_copy_from_linear_data_offset(skb, offset,
2773 						 skb_put(nskb, hsize), hsize);
2774 
2775 		while (pos < offset + len && i < nfrags) {
2776 			*frag = skb_shinfo(skb)->frags[i];
2777 			__skb_frag_ref(frag);
2778 			size = skb_frag_size(frag);
2779 
2780 			if (pos < offset) {
2781 				frag->page_offset += offset - pos;
2782 				skb_frag_size_sub(frag, offset - pos);
2783 			}
2784 
2785 			skb_shinfo(nskb)->nr_frags++;
2786 
2787 			if (pos + size <= offset + len) {
2788 				i++;
2789 				pos += size;
2790 			} else {
2791 				skb_frag_size_sub(frag, pos + size - (offset + len));
2792 				goto skip_fraglist;
2793 			}
2794 
2795 			frag++;
2796 		}
2797 
2798 		if (pos < offset + len) {
2799 			struct sk_buff *fskb2 = fskb;
2800 
2801 			BUG_ON(pos + fskb->len != offset + len);
2802 
2803 			pos += fskb->len;
2804 			fskb = fskb->next;
2805 
2806 			if (fskb2->next) {
2807 				fskb2 = skb_clone(fskb2, GFP_ATOMIC);
2808 				if (!fskb2)
2809 					goto err;
2810 			} else
2811 				skb_get(fskb2);
2812 
2813 			SKB_FRAG_ASSERT(nskb);
2814 			skb_shinfo(nskb)->frag_list = fskb2;
2815 		}
2816 
2817 skip_fraglist:
2818 		nskb->data_len = len - hsize;
2819 		nskb->len += nskb->data_len;
2820 		nskb->truesize += nskb->data_len;
2821 	} while ((offset += len) < skb->len);
2822 
2823 	return segs;
2824 
2825 err:
2826 	while ((skb = segs)) {
2827 		segs = skb->next;
2828 		kfree_skb(skb);
2829 	}
2830 	return ERR_PTR(err);
2831 }
2832 EXPORT_SYMBOL_GPL(skb_segment);
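
/*
 * Example usage (an illustrative sketch, not part of the original file),
 * in the style of the dev_gso_segment() path: segment, then walk the
 * returned singly linked list of segments.
 *
 *	struct sk_buff *segs = skb_segment(skb, features);
 *
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	while (segs) {
 *		struct sk_buff *next = segs->next;
 *
 *		segs->next = NULL;
 *		... transmit segs ...
 *		segs = next;
 *	}
 */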
2833 
2834 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2835 {
2836 	struct sk_buff *p = *head;
2837 	struct sk_buff *nskb;
2838 	struct skb_shared_info *skbinfo = skb_shinfo(skb);
2839 	struct skb_shared_info *pinfo = skb_shinfo(p);
2840 	unsigned int headroom;
2841 	unsigned int len = skb_gro_len(skb);
2842 	unsigned int offset = skb_gro_offset(skb);
2843 	unsigned int headlen = skb_headlen(skb);
2844 
2845 	if (p->len + len >= 65536)
2846 		return -E2BIG;
2847 
2848 	if (pinfo->frag_list)
2849 		goto merge;
2850 	else if (headlen <= offset) {
2851 		skb_frag_t *frag;
2852 		skb_frag_t *frag2;
2853 		int i = skbinfo->nr_frags;
2854 		int nr_frags = pinfo->nr_frags + i;
2855 
2856 		offset -= headlen;
2857 
2858 		if (nr_frags > MAX_SKB_FRAGS)
2859 			return -E2BIG;
2860 
2861 		pinfo->nr_frags = nr_frags;
2862 		skbinfo->nr_frags = 0;
2863 
2864 		frag = pinfo->frags + nr_frags;
2865 		frag2 = skbinfo->frags + i;
2866 		do {
2867 			*--frag = *--frag2;
2868 		} while (--i);
2869 
2870 		frag->page_offset += offset;
2871 		skb_frag_size_sub(frag, offset);
2872 
2873 		skb->truesize -= skb->data_len;
2874 		skb->len -= skb->data_len;
2875 		skb->data_len = 0;
2876 
2877 		NAPI_GRO_CB(skb)->free = 1;
2878 		goto done;
2879 	} else if (skb_gro_len(p) != pinfo->gso_size)
2880 		return -E2BIG;
2881 
2882 	headroom = skb_headroom(p);
2883 	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
2884 	if (unlikely(!nskb))
2885 		return -ENOMEM;
2886 
2887 	__copy_skb_header(nskb, p);
2888 	nskb->mac_len = p->mac_len;
2889 
2890 	skb_reserve(nskb, headroom);
2891 	__skb_put(nskb, skb_gro_offset(p));
2892 
2893 	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
2894 	skb_set_network_header(nskb, skb_network_offset(p));
2895 	skb_set_transport_header(nskb, skb_transport_offset(p));
2896 
2897 	__skb_pull(p, skb_gro_offset(p));
2898 	memcpy(skb_mac_header(nskb), skb_mac_header(p),
2899 	       p->data - skb_mac_header(p));
2900 
2901 	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
2902 	skb_shinfo(nskb)->frag_list = p;
2903 	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
2904 	pinfo->gso_size = 0;
2905 	skb_header_release(p);
2906 	nskb->prev = p;
2907 
2908 	nskb->data_len += p->len;
2909 	nskb->truesize += p->len;
2910 	nskb->len += p->len;
2911 
2912 	*head = nskb;
2913 	nskb->next = p->next;
2914 	p->next = NULL;
2915 
2916 	p = nskb;
2917 
2918 merge:
2919 	if (offset > headlen) {
2920 		unsigned int eat = offset - headlen;
2921 
2922 		skbinfo->frags[0].page_offset += eat;
2923 		skb_frag_size_sub(&skbinfo->frags[0], eat);
2924 		skb->data_len -= eat;
2925 		skb->len -= eat;
2926 		offset = headlen;
2927 	}
2928 
2929 	__skb_pull(skb, offset);
2930 
2931 	p->prev->next = skb;
2932 	p->prev = skb;
2933 	skb_header_release(skb);
2934 
2935 done:
2936 	NAPI_GRO_CB(p)->count++;
2937 	p->data_len += len;
2938 	p->truesize += len;
2939 	p->len += len;
2940 
2941 	NAPI_GRO_CB(skb)->same_flow = 1;
2942 	return 0;
2943 }
2944 EXPORT_SYMBOL_GPL(skb_gro_receive);
2945 
2946 void __init skb_init(void)
2947 {
2948 	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
2949 					      sizeof(struct sk_buff),
2950 					      0,
2951 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2952 					      NULL);
2953 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
2954 						(2*sizeof(struct sk_buff)) +
2955 						sizeof(atomic_t),
2956 						0,
2957 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2958 						NULL);
2959 }
2960 
2961 /**
2962  *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
2963  *	@skb: Socket buffer containing the buffers to be mapped
2964  *	@sg: The scatter-gather list to map into
2965  *	@offset: The offset into the buffer's contents to start mapping
2966  *	@len: Length of buffer space to be mapped
2967  *
2968  *	Fill the specified scatter-gather list with mappings/pointers into a
2969  *	region of the buffer space attached to a socket buffer.
2970  */
2971 static int
2972 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2973 {
2974 	int start = skb_headlen(skb);
2975 	int i, copy = start - offset;
2976 	struct sk_buff *frag_iter;
2977 	int elt = 0;
2978 
2979 	if (copy > 0) {
2980 		if (copy > len)
2981 			copy = len;
2982 		sg_set_buf(sg, skb->data + offset, copy);
2983 		elt++;
2984 		if ((len -= copy) == 0)
2985 			return elt;
2986 		offset += copy;
2987 	}
2988 
2989 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2990 		int end;
2991 
2992 		WARN_ON(start > offset + len);
2993 
2994 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2995 		if ((copy = end - offset) > 0) {
2996 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2997 
2998 			if (copy > len)
2999 				copy = len;
3000 			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
3001 					frag->page_offset+offset-start);
3002 			elt++;
3003 			if (!(len -= copy))
3004 				return elt;
3005 			offset += copy;
3006 		}
3007 		start = end;
3008 	}
3009 
3010 	skb_walk_frags(skb, frag_iter) {
3011 		int end;
3012 
3013 		WARN_ON(start > offset + len);
3014 
3015 		end = start + frag_iter->len;
3016 		if ((copy = end - offset) > 0) {
3017 			if (copy > len)
3018 				copy = len;
3019 			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
3020 					      copy);
3021 			if ((len -= copy) == 0)
3022 				return elt;
3023 			offset += copy;
3024 		}
3025 		start = end;
3026 	}
3027 	BUG_ON(len);
3028 	return elt;
3029 }
3030 
3031 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3032 {
3033 	int nsg = __skb_to_sgvec(skb, sg, offset, len);
3034 
3035 	sg_mark_end(&sg[nsg - 1]);
3036 
3037 	return nsg;
3038 }
3039 EXPORT_SYMBOL_GPL(skb_to_sgvec);
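
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * mapping a whole skb for a crypto or DMA operation. MAX_SKB_FRAGS + 1
 * entries are enough for the linear head plus every page fragment of a
 * buffer without a frag list.
 *
 *	struct scatterlist sg[MAX_SKB_FRAGS + 1];
 *	int nsg;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 */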
3040 
3041 /**
3042  *	skb_cow_data - Check that a socket buffer's data buffers are writable
3043  *	@skb: The socket buffer to check.
3044  *	@tailbits: Amount of trailing space to be added
3045  *	@trailer: Returned pointer to the skb where the @tailbits space begins
3046  *
3047  *	Make sure that the data buffers attached to a socket buffer are
3048  *	writable. If they are not, private copies are made of the data buffers
3049  *	and the socket buffer is set to use these instead.
3050  *
3051  *	If @tailbits is given, make sure that there is space to write @tailbits
3052  *	bytes of data beyond current end of socket buffer.  @trailer will be
3053  *	set to point to the skb in which this space begins.
3054  *
3055  *	The number of scatterlist elements required to completely map the
3056  *	COW'd and extended socket buffer will be returned.
3057  */
3058 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
3059 {
3060 	int copyflag;
3061 	int elt;
3062 	struct sk_buff *skb1, **skb_p;
3063 
3064 	/* If skb is cloned or its head is paged, reallocate the
3065 	 * head, pulling out all the pages (pages are considered not
3066 	 * writable at the moment even if they are anonymous).
3067 	 */
3068 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
3069 	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
3070 		return -ENOMEM;
3071 
3072 	/* Easy case. Most of packets will go this way. */
3073 	if (!skb_has_frag_list(skb)) {
3074 		/* A little trouble: not enough space for the trailer.
3075 		 * This should not happen when the stack is tuned to generate
3076 		 * good frames. OK, on a miss we reallocate and reserve even
3077 		 * more space; 128 bytes is fair. */
3078 
3079 		if (skb_tailroom(skb) < tailbits &&
3080 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
3081 			return -ENOMEM;
3082 
3083 		/* Voila! */
3084 		*trailer = skb;
3085 		return 1;
3086 	}
3087 
3088 	/* Misery. We are in trouble, going to mince fragments... */
3089 
3090 	elt = 1;
3091 	skb_p = &skb_shinfo(skb)->frag_list;
3092 	copyflag = 0;
3093 
3094 	while ((skb1 = *skb_p) != NULL) {
3095 		int ntail = 0;
3096 
3097 		/* The fragment is partially pulled by someone;
3098 		 * this can happen on input. Copy it and everything
3099 		 * after it. */
3100 
3101 		if (skb_shared(skb1))
3102 			copyflag = 1;
3103 
3104 		/* If the skb is the last, worry about trailer. */
3105 
3106 		if (skb1->next == NULL && tailbits) {
3107 			if (skb_shinfo(skb1)->nr_frags ||
3108 			    skb_has_frag_list(skb1) ||
3109 			    skb_tailroom(skb1) < tailbits)
3110 				ntail = tailbits + 128;
3111 		}
3112 
3113 		if (copyflag ||
3114 		    skb_cloned(skb1) ||
3115 		    ntail ||
3116 		    skb_shinfo(skb1)->nr_frags ||
3117 		    skb_has_frag_list(skb1)) {
3118 			struct sk_buff *skb2;
3119 
3120 			/* No way around it; we have to copy. */
3121 			if (ntail == 0)
3122 				skb2 = skb_copy(skb1, GFP_ATOMIC);
3123 			else
3124 				skb2 = skb_copy_expand(skb1,
3125 						       skb_headroom(skb1),
3126 						       ntail,
3127 						       GFP_ATOMIC);
3128 			if (unlikely(skb2 == NULL))
3129 				return -ENOMEM;
3130 
3131 			if (skb1->sk)
3132 				skb_set_owner_w(skb2, skb1->sk);
3133 
3134 			/* Looking around. Are we still alive?
3135 			 * OK, link new skb, drop old one */
3136 
3137 			skb2->next = skb1->next;
3138 			*skb_p = skb2;
3139 			kfree_skb(skb1);
3140 			skb1 = skb2;
3141 		}
3142 		elt++;
3143 		*trailer = skb1;
3144 		skb_p = &skb1->next;
3145 	}
3146 
3147 	return elt;
3148 }
3149 EXPORT_SYMBOL_GPL(skb_cow_data);
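
/*
 * Example usage (an illustrative sketch, not part of the original file),
 * in the style of the IPsec ESP output path: make the buffer writable,
 * reserve room for a trailer, and learn how many scatterlist entries
 * will be needed.
 *
 *	struct sk_buff *trailer;
 *	int nfrags = skb_cow_data(skb, tailbits, &trailer);
 *
 *	if (nfrags < 0)
 *		return nfrags;
 *	... append the tailbits bytes at the end of trailer, then map the
 *	    buffer with a scatterlist of nfrags entries ...
 */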
3150 
3151 static void sock_rmem_free(struct sk_buff *skb)
3152 {
3153 	struct sock *sk = skb->sk;
3154 
3155 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
3156 }
3157 
3158 /*
3159  * Note: We don't mem charge error packets (no sk_forward_alloc changes)
3160  */
3161 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3162 {
3163 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
3164 	    (unsigned)sk->sk_rcvbuf)
3165 		return -ENOMEM;
3166 
3167 	skb_orphan(skb);
3168 	skb->sk = sk;
3169 	skb->destructor = sock_rmem_free;
3170 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
3171 
3172 	/* before exiting rcu section, make sure dst is refcounted */
3173 	skb_dst_force(skb);
3174 
3175 	skb_queue_tail(&sk->sk_error_queue, skb);
3176 	if (!sock_flag(sk, SOCK_DEAD))
3177 		sk->sk_data_ready(sk, skb->len);
3178 	return 0;
3179 }
3180 EXPORT_SYMBOL(sock_queue_err_skb);
3181 
3182 void skb_tstamp_tx(struct sk_buff *orig_skb,
3183 		struct skb_shared_hwtstamps *hwtstamps)
3184 {
3185 	struct sock *sk = orig_skb->sk;
3186 	struct sock_exterr_skb *serr;
3187 	struct sk_buff *skb;
3188 	int err;
3189 
3190 	if (!sk)
3191 		return;
3192 
3193 	skb = skb_clone(orig_skb, GFP_ATOMIC);
3194 	if (!skb)
3195 		return;
3196 
3197 	if (hwtstamps) {
3198 		*skb_hwtstamps(skb) =
3199 			*hwtstamps;
3200 	} else {
3201 		/*
3202 		 * no hardware time stamps available,
3203 		 * so keep the shared tx_flags and only
3204 		 * store software time stamp
3205 		 */
3206 		skb->tstamp = ktime_get_real();
3207 	}
3208 
3209 	serr = SKB_EXT_ERR(skb);
3210 	memset(serr, 0, sizeof(*serr));
3211 	serr->ee.ee_errno = ENOMSG;
3212 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3213 
3214 	err = sock_queue_err_skb(sk, skb);
3215 
3216 	if (err)
3217 		kfree_skb(skb);
3218 }
3219 EXPORT_SYMBOL_GPL(skb_tstamp_tx);
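
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * a driver reporting a hardware transmit timestamp from its TX
 * completion handler; hw_ns stands for a hypothetical counter value
 * already converted to nanoseconds.
 *
 *	struct skb_shared_hwtstamps hwts;
 *
 *	memset(&hwts, 0, sizeof(hwts));
 *	hwts.hwtstamp = ns_to_ktime(hw_ns);
 *	skb_tstamp_tx(skb, &hwts);
 */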
3220 
3221 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3222 {
3223 	struct sock *sk = skb->sk;
3224 	struct sock_exterr_skb *serr;
3225 	int err;
3226 
3227 	skb->wifi_acked_valid = 1;
3228 	skb->wifi_acked = acked;
3229 
3230 	serr = SKB_EXT_ERR(skb);
3231 	memset(serr, 0, sizeof(*serr));
3232 	serr->ee.ee_errno = ENOMSG;
3233 	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
3234 
3235 	err = sock_queue_err_skb(sk, skb);
3236 	if (err)
3237 		kfree_skb(skb);
3238 }
3239 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
3240 
3241 
3242 /**
3243  * skb_partial_csum_set - set up and verify partial csum values for packet
3244  * @skb: the skb to set
3245  * @start: the number of bytes after skb->data to start checksumming.
3246  * @off: the offset from start to place the checksum.
3247  *
3248  * For untrusted partially-checksummed packets, we need to make sure the values
3249  * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3250  *
3251  * This function checks and sets those values and skb->ip_summed: if this
3252  * returns false you should drop the packet.
3253  */
3254 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3255 {
3256 	if (unlikely(start > skb_headlen(skb)) ||
3257 	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
3258 		if (net_ratelimit())
3259 			printk(KERN_WARNING
3260 			       "bad partial csum: csum=%u/%u len=%u\n",
3261 			       start, off, skb_headlen(skb));
3262 		return false;
3263 	}
3264 	skb->ip_summed = CHECKSUM_PARTIAL;
3265 	skb->csum_start = skb_headroom(skb) + start;
3266 	skb->csum_offset = off;
3267 	return true;
3268 }
3269 EXPORT_SYMBOL_GPL(skb_partial_csum_set);
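
/*
 * Example usage (an illustrative sketch, not part of the original file),
 * in the style of the virtio_net receive path, where csum_start and
 * csum_offset arrive in an untrusted header; on failure the packet
 * must be dropped.
 *
 *	if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset))
 *		goto frame_err;
 */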
3270 
3271 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3272 {
3273 	if (net_ratelimit())
3274 		pr_warning("%s: received packets cannot be forwarded"
3275 			   " while LRO is enabled\n", skb->dev->name);
3276 }
3277 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
3278