xref: /openbmc/linux/net/core/skbuff.c (revision a09d2831)
1 /*
2  *	Routines having to do with the 'struct sk_buff' memory handlers.
3  *
4  *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
5  *			Florian La Roche <rzsfl@rz.uni-sb.de>
6  *
7  *	Fixes:
8  *		Alan Cox	:	Fixed the worst of the load
9  *					balancer bugs.
10  *		Dave Platt	:	Interrupt stacking fix.
11  *	Richard Kooijman	:	Timestamp fixes.
12  *		Alan Cox	:	Changed buffer format.
13  *		Alan Cox	:	destructor hook for AF_UNIX etc.
14  *		Linus Torvalds	:	Better skb_clone.
15  *		Alan Cox	:	Added skb_copy.
16  *		Alan Cox	:	Added all the changed routines Linus
17  *					only put in the headers
18  *		Ray VanTassle	:	Fixed --skb->lock in free
19  *		Alan Cox	:	skb_copy copy arp field
20  *		Andi Kleen	:	slabified it.
21  *		Robert Olsson	:	Removed skb_head_pool
22  *
23  *	NOTE:
24  *		The __skb_ routines should be called with interrupts
25  *	disabled, or you better be *real* sure that the operation is atomic
26  *	with respect to whatever list is being frobbed (e.g. via lock_sock()
27  *	or via disabling bottom half handlers, etc).
28  *
29  *	This program is free software; you can redistribute it and/or
30  *	modify it under the terms of the GNU General Public License
31  *	as published by the Free Software Foundation; either version
32  *	2 of the License, or (at your option) any later version.
33  */
34 
35 /*
36  *	The functions in this file will not compile correctly with gcc 2.4.x
37  */
38 
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/kernel.h>
42 #include <linux/kmemcheck.h>
43 #include <linux/mm.h>
44 #include <linux/interrupt.h>
45 #include <linux/in.h>
46 #include <linux/inet.h>
47 #include <linux/slab.h>
48 #include <linux/netdevice.h>
49 #ifdef CONFIG_NET_CLS_ACT
50 #include <net/pkt_sched.h>
51 #endif
52 #include <linux/string.h>
53 #include <linux/skbuff.h>
54 #include <linux/splice.h>
55 #include <linux/cache.h>
56 #include <linux/rtnetlink.h>
57 #include <linux/init.h>
58 #include <linux/scatterlist.h>
59 #include <linux/errqueue.h>
60 
61 #include <net/protocol.h>
62 #include <net/dst.h>
63 #include <net/sock.h>
64 #include <net/checksum.h>
65 #include <net/xfrm.h>
66 
67 #include <asm/uaccess.h>
68 #include <asm/system.h>
69 #include <trace/events/skb.h>
70 
71 #include "kmap_skb.h"
72 
73 static struct kmem_cache *skbuff_head_cache __read_mostly;
74 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
75 
76 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
77 				  struct pipe_buffer *buf)
78 {
79 	put_page(buf->page);
80 }
81 
82 static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
83 				struct pipe_buffer *buf)
84 {
85 	get_page(buf->page);
86 }
87 
88 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
89 			       struct pipe_buffer *buf)
90 {
91 	return 1;
92 }
93 
94 
95 /* Pipe buffer operations for a socket. */
96 static const struct pipe_buf_operations sock_pipe_buf_ops = {
97 	.can_merge = 0,
98 	.map = generic_pipe_buf_map,
99 	.unmap = generic_pipe_buf_unmap,
100 	.confirm = generic_pipe_buf_confirm,
101 	.release = sock_pipe_buf_release,
102 	.steal = sock_pipe_buf_steal,
103 	.get = sock_pipe_buf_get,
104 };
105 
106 /*
107  *	Keep out-of-line to prevent kernel bloat.
108  *	__builtin_return_address is not used because it is not always
109  *	reliable.
110  */
111 
112 /**
113  *	skb_over_panic	- 	private function
114  *	@skb: buffer
115  *	@sz: size
116  *	@here: address
117  *
118  *	Out of line support code for skb_put(). Not user callable.
119  */
120 void skb_over_panic(struct sk_buff *skb, int sz, void *here)
121 {
122 	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
123 			  "data:%p tail:%#lx end:%#lx dev:%s\n",
124 	       here, skb->len, sz, skb->head, skb->data,
125 	       (unsigned long)skb->tail, (unsigned long)skb->end,
126 	       skb->dev ? skb->dev->name : "<NULL>");
127 	BUG();
128 }
129 EXPORT_SYMBOL(skb_over_panic);
130 
131 /**
132  *	skb_under_panic	- 	private function
133  *	@skb: buffer
134  *	@sz: size
135  *	@here: address
136  *
137  *	Out of line support code for skb_push(). Not user callable.
138  */
139 
140 void skb_under_panic(struct sk_buff *skb, int sz, void *here)
141 {
142 	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
143 			  "data:%p tail:%#lx end:%#lx dev:%s\n",
144 	       here, skb->len, sz, skb->head, skb->data,
145 	       (unsigned long)skb->tail, (unsigned long)skb->end,
146 	       skb->dev ? skb->dev->name : "<NULL>");
147 	BUG();
148 }
149 EXPORT_SYMBOL(skb_under_panic);
150 
151 /* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
152  *	'private' fields and also do memory statistics to find all the
153  *	[BEEP] leaks.
154  *
155  */
156 
157 /**
158  *	__alloc_skb	-	allocate a network buffer
159  *	@size: size to allocate
160  *	@gfp_mask: allocation mask
161  *	@fclone: allocate from fclone cache instead of head cache
162  *		and allocate a cloned (child) skb
163  *	@node: numa node to allocate memory on
164  *
165  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
166  *	tail room of @size bytes. The object has a reference count of one.
167  *	On success the buffer is returned; on failure the return is %NULL.
168  *
169  *	Buffers may only be allocated from interrupts using a @gfp_mask of
170  *	%GFP_ATOMIC.
171  */
172 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
173 			    int fclone, int node)
174 {
175 	struct kmem_cache *cache;
176 	struct skb_shared_info *shinfo;
177 	struct sk_buff *skb;
178 	u8 *data;
179 
180 	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
181 
182 	/* Get the HEAD */
183 	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
184 	if (!skb)
185 		goto out;
186 
187 	size = SKB_DATA_ALIGN(size);
188 	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
189 			gfp_mask, node);
190 	if (!data)
191 		goto nodata;
192 
193 	/*
194 	 * Only clear those fields we need to clear, not those that we will
195 	 * actually initialise below. Hence, don't put any more fields after
196 	 * the tail pointer in struct sk_buff!
197 	 */
198 	memset(skb, 0, offsetof(struct sk_buff, tail));
199 	skb->truesize = size + sizeof(struct sk_buff);
200 	atomic_set(&skb->users, 1);
201 	skb->head = data;
202 	skb->data = data;
203 	skb_reset_tail_pointer(skb);
204 	skb->end = skb->tail + size;
205 	kmemcheck_annotate_bitfield(skb, flags1);
206 	kmemcheck_annotate_bitfield(skb, flags2);
207 #ifdef NET_SKBUFF_DATA_USES_OFFSET
208 	skb->mac_header = ~0U;
209 #endif
210 
211 	/* make sure we initialize shinfo sequentially */
212 	shinfo = skb_shinfo(skb);
213 	atomic_set(&shinfo->dataref, 1);
214 	shinfo->nr_frags  = 0;
215 	shinfo->gso_size = 0;
216 	shinfo->gso_segs = 0;
217 	shinfo->gso_type = 0;
218 	shinfo->ip6_frag_id = 0;
219 	shinfo->tx_flags.flags = 0;
220 	skb_frag_list_init(skb);
221 	memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
222 
223 	if (fclone) {
224 		struct sk_buff *child = skb + 1;
225 		atomic_t *fclone_ref = (atomic_t *) (child + 1);
226 
227 		kmemcheck_annotate_bitfield(child, flags1);
228 		kmemcheck_annotate_bitfield(child, flags2);
229 		skb->fclone = SKB_FCLONE_ORIG;
230 		atomic_set(fclone_ref, 1);
231 
232 		child->fclone = SKB_FCLONE_UNAVAILABLE;
233 	}
234 out:
235 	return skb;
236 nodata:
237 	kmem_cache_free(cache, skb);
238 	skb = NULL;
239 	goto out;
240 }
241 EXPORT_SYMBOL(__alloc_skb);
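
/*
 * Example (editorial sketch, not part of this file): most callers go
 * through the alloc_skb() wrapper rather than __alloc_skb() directly,
 * reserve headroom for lower-layer headers, then fill the data area.
 * "headroom", "payload" and "payload_len" are hypothetical.
 *
 *	struct sk_buff *skb = alloc_skb(headroom + payload_len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, headroom);
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 */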
242 
243 /**
244  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
245  *	@dev: network device to receive on
246  *	@length: length to allocate
247  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
248  *
249  *	Allocate a new &sk_buff and assign it a usage count of one. The
250  *	buffer has unspecified headroom built in. Users should allocate
251  *	the headroom they think they need without accounting for the
252  *	built-in space. The built-in space is used for optimisations.
253  *
254  *	%NULL is returned if there is no free memory.
255  */
256 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
257 		unsigned int length, gfp_t gfp_mask)
258 {
259 	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
260 	struct sk_buff *skb;
261 
262 	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
263 	if (likely(skb)) {
264 		skb_reserve(skb, NET_SKB_PAD);
265 		skb->dev = dev;
266 	}
267 	return skb;
268 }
269 EXPORT_SYMBOL(__netdev_alloc_skb);
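
/*
 * Example (sketch): a typical driver RX path uses the netdev_alloc_skb()
 * wrapper, which supplies GFP_ATOMIC for it, and reserves NET_IP_ALIGN
 * so the IP header lands on an aligned boundary. "rx_len" is
 * hypothetical.
 *
 *	struct sk_buff *skb = netdev_alloc_skb(dev, rx_len + NET_IP_ALIGN);
 *
 *	if (!skb)
 *		return NULL;
 *	skb_reserve(skb, NET_IP_ALIGN);
 */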
270 
271 struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
272 {
273 	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
274 	struct page *page;
275 
276 	page = alloc_pages_node(node, gfp_mask, 0);
277 	return page;
278 }
279 EXPORT_SYMBOL(__netdev_alloc_page);
280 
281 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
282 		int size)
283 {
284 	skb_fill_page_desc(skb, i, page, off, size);
285 	skb->len += size;
286 	skb->data_len += size;
287 	skb->truesize += size;
288 }
289 EXPORT_SYMBOL(skb_add_rx_frag);
290 
291 /**
292  *	dev_alloc_skb - allocate an skbuff for receiving
293  *	@length: length to allocate
294  *
295  *	Allocate a new &sk_buff and assign it a usage count of one. The
296  *	buffer has unspecified headroom built in. Users should allocate
297  *	the headroom they think they need without accounting for the
298  *	built-in space. The built-in space is used for optimisations.
299  *
300  *	%NULL is returned if there is no free memory. Although this function
301  *	allocates memory it can be called from an interrupt.
302  */
303 struct sk_buff *dev_alloc_skb(unsigned int length)
304 {
305 	/*
306 	 * There is more code here than it seems:
307 	 * __dev_alloc_skb is an inline
308 	 */
309 	return __dev_alloc_skb(length, GFP_ATOMIC);
310 }
311 EXPORT_SYMBOL(dev_alloc_skb);
312 
313 static void skb_drop_list(struct sk_buff **listp)
314 {
315 	struct sk_buff *list = *listp;
316 
317 	*listp = NULL;
318 
319 	do {
320 		struct sk_buff *this = list;
321 		list = list->next;
322 		kfree_skb(this);
323 	} while (list);
324 }
325 
326 static inline void skb_drop_fraglist(struct sk_buff *skb)
327 {
328 	skb_drop_list(&skb_shinfo(skb)->frag_list);
329 }
330 
331 static void skb_clone_fraglist(struct sk_buff *skb)
332 {
333 	struct sk_buff *list;
334 
335 	skb_walk_frags(skb, list)
336 		skb_get(list);
337 }
338 
339 static void skb_release_data(struct sk_buff *skb)
340 {
341 	if (!skb->cloned ||
342 	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
343 			       &skb_shinfo(skb)->dataref)) {
344 		if (skb_shinfo(skb)->nr_frags) {
345 			int i;
346 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
347 				put_page(skb_shinfo(skb)->frags[i].page);
348 		}
349 
350 		if (skb_has_frags(skb))
351 			skb_drop_fraglist(skb);
352 
353 		kfree(skb->head);
354 	}
355 }
356 
357 /*
358  *	Free the memory of an skbuff without cleaning its state.
359  */
360 static void kfree_skbmem(struct sk_buff *skb)
361 {
362 	struct sk_buff *other;
363 	atomic_t *fclone_ref;
364 
365 	switch (skb->fclone) {
366 	case SKB_FCLONE_UNAVAILABLE:
367 		kmem_cache_free(skbuff_head_cache, skb);
368 		break;
369 
370 	case SKB_FCLONE_ORIG:
371 		fclone_ref = (atomic_t *) (skb + 2);
372 		if (atomic_dec_and_test(fclone_ref))
373 			kmem_cache_free(skbuff_fclone_cache, skb);
374 		break;
375 
376 	case SKB_FCLONE_CLONE:
377 		fclone_ref = (atomic_t *) (skb + 1);
378 		other = skb - 1;
379 
380 		/* The clone portion is available for
381 		 * fast-cloning again.
382 		 */
383 		skb->fclone = SKB_FCLONE_UNAVAILABLE;
384 
385 		if (atomic_dec_and_test(fclone_ref))
386 			kmem_cache_free(skbuff_fclone_cache, other);
387 		break;
388 	}
389 }
390 
391 static void skb_release_head_state(struct sk_buff *skb)
392 {
393 	skb_dst_drop(skb);
394 #ifdef CONFIG_XFRM
395 	secpath_put(skb->sp);
396 #endif
397 	if (skb->destructor) {
398 		WARN_ON(in_irq());
399 		skb->destructor(skb);
400 	}
401 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
402 	nf_conntrack_put(skb->nfct);
403 	nf_conntrack_put_reasm(skb->nfct_reasm);
404 #endif
405 #ifdef CONFIG_BRIDGE_NETFILTER
406 	nf_bridge_put(skb->nf_bridge);
407 #endif
408 /* XXX: IS this still necessary? - JHS */
409 #ifdef CONFIG_NET_SCHED
410 	skb->tc_index = 0;
411 #ifdef CONFIG_NET_CLS_ACT
412 	skb->tc_verd = 0;
413 #endif
414 #endif
415 }
416 
417 /* Free everything but the sk_buff shell. */
418 static void skb_release_all(struct sk_buff *skb)
419 {
420 	skb_release_head_state(skb);
421 	skb_release_data(skb);
422 }
423 
424 /**
425  *	__kfree_skb - private function
426  *	@skb: buffer
427  *
428  *	Free an sk_buff. Release anything attached to the buffer.
429  *	Clean the state. This is an internal helper function. Users should
430  *	always call kfree_skb().
431  */
432 
433 void __kfree_skb(struct sk_buff *skb)
434 {
435 	skb_release_all(skb);
436 	kfree_skbmem(skb);
437 }
438 EXPORT_SYMBOL(__kfree_skb);
439 
440 /**
441  *	kfree_skb - free an sk_buff
442  *	@skb: buffer to free
443  *
444  *	Drop a reference to the buffer and free it if the usage count has
445  *	hit zero.
446  */
447 void kfree_skb(struct sk_buff *skb)
448 {
449 	if (unlikely(!skb))
450 		return;
451 	if (likely(atomic_read(&skb->users) == 1))
452 		smp_rmb();
453 	else if (likely(!atomic_dec_and_test(&skb->users)))
454 		return;
455 	trace_kfree_skb(skb, __builtin_return_address(0));
456 	__kfree_skb(skb);
457 }
458 EXPORT_SYMBOL(kfree_skb);
459 
460 /**
461  *	consume_skb - free an skbuff
462  *	@skb: buffer to free
463  *
464  *	Drop a ref to the buffer and free it if the usage count has hit zero.
465  *	Functions identically to kfree_skb, but kfree_skb assumes that the frame
466  *	is being dropped after a failure and notes that.
467  */
468 void consume_skb(struct sk_buff *skb)
469 {
470 	if (unlikely(!skb))
471 		return;
472 	if (likely(atomic_read(&skb->users) == 1))
473 		smp_rmb();
474 	else if (likely(!atomic_dec_and_test(&skb->users)))
475 		return;
476 	__kfree_skb(skb);
477 }
478 EXPORT_SYMBOL(consume_skb);
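
/*
 * Example (sketch): the split matters mainly for tracing. A driver
 * TX-completion handler might do the following so that only dropped
 * frames show up via the kfree_skb trace event ("tx_ok" is
 * hypothetical).
 *
 *	if (tx_ok)
 *		consume_skb(skb);
 *	else
 *		kfree_skb(skb);
 */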
479 
480 /**
481  *	skb_recycle_check - check if skb can be reused for receive
482  *	@skb: buffer
483  *	@skb_size: minimum receive buffer size
484  *
485  *	Checks that the skb passed in is not shared or cloned, and
486  *	that it is linear and that its head portion is at least as large as
487  *	skb_size so that it can be recycled as a receive buffer.
488  *	If these conditions are met, this function does any necessary
489  *	reference count dropping and cleans up the skbuff as if it
490  *	just came from __alloc_skb().
491  */
492 int skb_recycle_check(struct sk_buff *skb, int skb_size)
493 {
494 	struct skb_shared_info *shinfo;
495 
496 	if (irqs_disabled())
497 		return 0;
498 
499 	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
500 		return 0;
501 
502 	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
503 	if (skb_end_pointer(skb) - skb->head < skb_size)
504 		return 0;
505 
506 	if (skb_shared(skb) || skb_cloned(skb))
507 		return 0;
508 
509 	skb_release_head_state(skb);
510 	shinfo = skb_shinfo(skb);
511 	atomic_set(&shinfo->dataref, 1);
512 	shinfo->nr_frags = 0;
513 	shinfo->gso_size = 0;
514 	shinfo->gso_segs = 0;
515 	shinfo->gso_type = 0;
516 	shinfo->ip6_frag_id = 0;
517 	shinfo->tx_flags.flags = 0;
518 	skb_frag_list_init(skb);
519 	memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
520 
521 	memset(skb, 0, offsetof(struct sk_buff, tail));
522 	skb->data = skb->head + NET_SKB_PAD;
523 	skb_reset_tail_pointer(skb);
524 
525 	return 1;
526 }
527 EXPORT_SYMBOL(skb_recycle_check);
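
/*
 * Example (sketch, loosely modelled on ethernet drivers that recycle
 * buffers): on TX completion, try to reuse the just-sent skb for the RX
 * ring instead of freeing it. "priv", "rx_recycle" and "rx_buffer_size"
 * are hypothetical driver fields.
 *
 *	if (skb_recycle_check(skb, priv->rx_buffer_size))
 *		__skb_queue_head(&priv->rx_recycle, skb);
 *	else
 *		consume_skb(skb);
 */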
528 
529 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
530 {
531 	new->tstamp		= old->tstamp;
532 	new->dev		= old->dev;
533 	new->transport_header	= old->transport_header;
534 	new->network_header	= old->network_header;
535 	new->mac_header		= old->mac_header;
536 	skb_dst_set(new, dst_clone(skb_dst(old)));
537 #ifdef CONFIG_XFRM
538 	new->sp			= secpath_get(old->sp);
539 #endif
540 	memcpy(new->cb, old->cb, sizeof(old->cb));
541 	new->csum		= old->csum;
542 	new->local_df		= old->local_df;
543 	new->pkt_type		= old->pkt_type;
544 	new->ip_summed		= old->ip_summed;
545 	skb_copy_queue_mapping(new, old);
546 	new->priority		= old->priority;
547 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
548 	new->ipvs_property	= old->ipvs_property;
549 #endif
550 	new->protocol		= old->protocol;
551 	new->mark		= old->mark;
552 	new->skb_iif		= old->skb_iif;
553 	__nf_copy(new, old);
554 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
555     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
556 	new->nf_trace		= old->nf_trace;
557 #endif
558 #ifdef CONFIG_NET_SCHED
559 	new->tc_index		= old->tc_index;
560 #ifdef CONFIG_NET_CLS_ACT
561 	new->tc_verd		= old->tc_verd;
562 #endif
563 #endif
564 	new->vlan_tci		= old->vlan_tci;
565 
566 	skb_copy_secmark(new, old);
567 }
568 
569 /*
570  * You should not add any new code to this function.  Add it to
571  * __copy_skb_header above instead.
572  */
573 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
574 {
575 #define C(x) n->x = skb->x
576 
577 	n->next = n->prev = NULL;
578 	n->sk = NULL;
579 	__copy_skb_header(n, skb);
580 
581 	C(len);
582 	C(data_len);
583 	C(mac_len);
584 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
585 	n->cloned = 1;
586 	n->nohdr = 0;
587 	n->destructor = NULL;
588 	C(tail);
589 	C(end);
590 	C(head);
591 	C(data);
592 	C(truesize);
593 	atomic_set(&n->users, 1);
594 
595 	atomic_inc(&(skb_shinfo(skb)->dataref));
596 	skb->cloned = 1;
597 
598 	return n;
599 #undef C
600 }
601 
602 /**
603  *	skb_morph	-	morph one skb into another
604  *	@dst: the skb to receive the contents
605  *	@src: the skb to supply the contents
606  *
607  *	This is identical to skb_clone except that the target skb is
608  *	supplied by the user.
609  *
610  *	The target skb is returned upon exit.
611  */
612 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
613 {
614 	skb_release_all(dst);
615 	return __skb_clone(dst, src);
616 }
617 EXPORT_SYMBOL_GPL(skb_morph);
618 
619 /**
620  *	skb_clone	-	duplicate an sk_buff
621  *	@skb: buffer to clone
622  *	@gfp_mask: allocation priority
623  *
624  *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
625  *	copies share the same packet data but not structure. The new
626  *	buffer has a reference count of 1. If the allocation fails the
627  *	function returns %NULL otherwise the new buffer is returned.
628  *
629  *	If this function is called from an interrupt, @gfp_mask must be
630  *	%GFP_ATOMIC.
631  */
632 
633 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
634 {
635 	struct sk_buff *n;
636 
637 	n = skb + 1;
638 	if (skb->fclone == SKB_FCLONE_ORIG &&
639 	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
640 		atomic_t *fclone_ref = (atomic_t *) (n + 1);
641 		n->fclone = SKB_FCLONE_CLONE;
642 		atomic_inc(fclone_ref);
643 	} else {
644 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
645 		if (!n)
646 			return NULL;
647 
648 		kmemcheck_annotate_bitfield(n, flags1);
649 		kmemcheck_annotate_bitfield(n, flags2);
650 		n->fclone = SKB_FCLONE_UNAVAILABLE;
651 	}
652 
653 	return __skb_clone(n, skb);
654 }
655 EXPORT_SYMBOL(skb_clone);
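
/*
 * Example (sketch): cloning is the cheap way to hand the same packet
 * data to a second consumer, e.g. a tap. A clone shares its data with
 * the original, so neither may be written without a private copy first.
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (nskb)
 *		netif_rx(nskb);
 */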
656 
657 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
658 {
659 #ifndef NET_SKBUFF_DATA_USES_OFFSET
660 	/*
661 	 *	Shift between the two data areas in bytes
662 	 */
663 	unsigned long offset = new->data - old->data;
664 #endif
665 
666 	__copy_skb_header(new, old);
667 
668 #ifndef NET_SKBUFF_DATA_USES_OFFSET
669 	/* {transport,network,mac}_header are relative to skb->head */
670 	new->transport_header += offset;
671 	new->network_header   += offset;
672 	if (skb_mac_header_was_set(new))
673 		new->mac_header	      += offset;
674 #endif
675 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
676 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
677 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
678 }
679 
680 /**
681  *	skb_copy	-	create private copy of an sk_buff
682  *	@skb: buffer to copy
683  *	@gfp_mask: allocation priority
684  *
685  *	Make a copy of both an &sk_buff and its data. This is used when the
686  *	caller wishes to modify the data and needs a private copy of the
687  *	data to alter. Returns %NULL on failure or the pointer to the buffer
688  *	on success. The returned buffer has a reference count of 1.
689  *
690  *	As a by-product this function converts a non-linear &sk_buff into a
691  *	linear one, so that the &sk_buff becomes completely private and the
692  *	caller is allowed to modify all the data of the returned buffer. This
693  *	means that this function is not recommended for use in circumstances
694  *	when only the header is going to be modified. Use pskb_copy() instead.
695  */
696 
697 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
698 {
699 	int headerlen = skb->data - skb->head;
700 	/*
701 	 *	Allocate the copy buffer
702 	 */
703 	struct sk_buff *n;
704 #ifdef NET_SKBUFF_DATA_USES_OFFSET
705 	n = alloc_skb(skb->end + skb->data_len, gfp_mask);
706 #else
707 	n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
708 #endif
709 	if (!n)
710 		return NULL;
711 
712 	/* Set the data pointer */
713 	skb_reserve(n, headerlen);
714 	/* Set the tail pointer and length */
715 	skb_put(n, skb->len);
716 
717 	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
718 		BUG();
719 
720 	copy_skb_header(n, skb);
721 	return n;
722 }
723 EXPORT_SYMBOL(skb_copy);
724 
725 /**
726  *	pskb_copy	-	create copy of an sk_buff with private head.
727  *	@skb: buffer to copy
728  *	@gfp_mask: allocation priority
729  *
730  *	Make a copy of both an &sk_buff and part of its data, located
731  *	in the header. Fragmented data remains shared. This is used when
732  *	the caller wishes to modify only the header of the &sk_buff and needs
733  *	a private copy of the header to alter. Returns %NULL on failure
734  *	or the pointer to the buffer on success.
735  *	The returned buffer has a reference count of 1.
736  */
737 
738 struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
739 {
740 	/*
741 	 *	Allocate the copy buffer
742 	 */
743 	struct sk_buff *n;
744 #ifdef NET_SKBUFF_DATA_USES_OFFSET
745 	n = alloc_skb(skb->end, gfp_mask);
746 #else
747 	n = alloc_skb(skb->end - skb->head, gfp_mask);
748 #endif
749 	if (!n)
750 		goto out;
751 
752 	/* Set the data pointer */
753 	skb_reserve(n, skb->data - skb->head);
754 	/* Set the tail pointer and length */
755 	skb_put(n, skb_headlen(skb));
756 	/* Copy the bytes */
757 	skb_copy_from_linear_data(skb, n->data, n->len);
758 
759 	n->truesize += skb->data_len;
760 	n->data_len  = skb->data_len;
761 	n->len	     = skb->len;
762 
763 	if (skb_shinfo(skb)->nr_frags) {
764 		int i;
765 
766 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
767 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
768 			get_page(skb_shinfo(n)->frags[i].page);
769 		}
770 		skb_shinfo(n)->nr_frags = i;
771 	}
772 
773 	if (skb_has_frags(skb)) {
774 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
775 		skb_clone_fraglist(n);
776 	}
777 
778 	copy_skb_header(n, skb);
779 out:
780 	return n;
781 }
782 EXPORT_SYMBOL(pskb_copy);
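
/*
 * Example (sketch): choosing between the copy helpers. When only the
 * headers will be rewritten, pskb_copy() avoids duplicating the paged
 * data; skb_copy() is needed only when the payload itself must become
 * writable ("modify_payload" is hypothetical).
 *
 *	struct sk_buff *nskb = modify_payload ?
 *			       skb_copy(skb, GFP_ATOMIC) :
 *			       pskb_copy(skb, GFP_ATOMIC);
 *
 *	if (!nskb)
 *		return -ENOMEM;
 */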
783 
784 /**
785  *	pskb_expand_head - reallocate header of &sk_buff
786  *	@skb: buffer to reallocate
787  *	@nhead: room to add at head
788  *	@ntail: room to add at tail
789  *	@gfp_mask: allocation priority
790  *
791  *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
792  *	the header of skb. The &sk_buff itself is not changed and MUST have
793  *	a reference count of 1. Returns zero on success, or a negative error
794  *	code if expansion failed; in the latter case the &sk_buff is not changed.
795  *
796  *	All the pointers pointing into skb header may change and must be
797  *	reloaded after call to this function.
798  */
799 
800 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
801 		     gfp_t gfp_mask)
802 {
803 	int i;
804 	u8 *data;
805 #ifdef NET_SKBUFF_DATA_USES_OFFSET
806 	int size = nhead + skb->end + ntail;
807 #else
808 	int size = nhead + (skb->end - skb->head) + ntail;
809 #endif
810 	long off;
811 
812 	BUG_ON(nhead < 0);
813 
814 	if (skb_shared(skb))
815 		BUG();
816 
817 	size = SKB_DATA_ALIGN(size);
818 
819 	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
820 	if (!data)
821 		goto nodata;
822 
823 	/* Copy only real data... and, alas, header. This should be
824 	 * optimized for the case when the header is void. */
825 #ifdef NET_SKBUFF_DATA_USES_OFFSET
826 	memcpy(data + nhead, skb->head, skb->tail);
827 #else
828 	memcpy(data + nhead, skb->head, skb->tail - skb->head);
829 #endif
830 	memcpy(data + size, skb_end_pointer(skb),
831 	       sizeof(struct skb_shared_info));
832 
833 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
834 		get_page(skb_shinfo(skb)->frags[i].page);
835 
836 	if (skb_has_frags(skb))
837 		skb_clone_fraglist(skb);
838 
839 	skb_release_data(skb);
840 
841 	off = (data + nhead) - skb->head;
842 
843 	skb->head     = data;
844 	skb->data    += off;
845 #ifdef NET_SKBUFF_DATA_USES_OFFSET
846 	skb->end      = size;
847 	off           = nhead;
848 #else
849 	skb->end      = skb->head + size;
850 #endif
851 	/* {transport,network,mac}_header and tail are relative to skb->head */
852 	skb->tail	      += off;
853 	skb->transport_header += off;
854 	skb->network_header   += off;
855 	if (skb_mac_header_was_set(skb))
856 		skb->mac_header += off;
857 	skb->csum_start       += nhead;
858 	skb->cloned   = 0;
859 	skb->hdr_len  = 0;
860 	skb->nohdr    = 0;
861 	atomic_set(&skb_shinfo(skb)->dataref, 1);
862 	return 0;
863 
864 nodata:
865 	return -ENOMEM;
866 }
867 EXPORT_SYMBOL(pskb_expand_head);
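
/*
 * Example (sketch): the usual way to get here is via skb_cow_head(),
 * which calls pskb_expand_head() only when the buffer is cloned or is
 * short on headroom, before a new header is pushed ("hdr_len" is
 * hypothetical).
 *
 *	if (skb_cow_head(skb, hdr_len))
 *		goto drop;
 *	skb_push(skb, hdr_len);
 */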
868 
869 /* Make private copy of skb with writable head and some headroom */
870 
871 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
872 {
873 	struct sk_buff *skb2;
874 	int delta = headroom - skb_headroom(skb);
875 
876 	if (delta <= 0)
877 		skb2 = pskb_copy(skb, GFP_ATOMIC);
878 	else {
879 		skb2 = skb_clone(skb, GFP_ATOMIC);
880 		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
881 					     GFP_ATOMIC)) {
882 			kfree_skb(skb2);
883 			skb2 = NULL;
884 		}
885 	}
886 	return skb2;
887 }
888 EXPORT_SYMBOL(skb_realloc_headroom);
889 
890 /**
891  *	skb_copy_expand	-	copy and expand sk_buff
892  *	@skb: buffer to copy
893  *	@newheadroom: new free bytes at head
894  *	@newtailroom: new free bytes at tail
895  *	@gfp_mask: allocation priority
896  *
897  *	Make a copy of both an &sk_buff and its data and while doing so
898  *	allocate additional space.
899  *
900  *	This is used when the caller wishes to modify the data and needs a
901  *	private copy of the data to alter as well as more space for new fields.
902  *	Returns %NULL on failure or the pointer to the buffer
903  *	on success. The returned buffer has a reference count of 1.
904  *
905  *	You must pass %GFP_ATOMIC as the allocation priority if this function
906  *	is called from an interrupt.
907  */
908 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
909 				int newheadroom, int newtailroom,
910 				gfp_t gfp_mask)
911 {
912 	/*
913 	 *	Allocate the copy buffer
914 	 */
915 	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
916 				      gfp_mask);
917 	int oldheadroom = skb_headroom(skb);
918 	int head_copy_len, head_copy_off;
919 	int off;
920 
921 	if (!n)
922 		return NULL;
923 
924 	skb_reserve(n, newheadroom);
925 
926 	/* Set the tail pointer and length */
927 	skb_put(n, skb->len);
928 
929 	head_copy_len = oldheadroom;
930 	head_copy_off = 0;
931 	if (newheadroom <= head_copy_len)
932 		head_copy_len = newheadroom;
933 	else
934 		head_copy_off = newheadroom - head_copy_len;
935 
936 	/* Copy the linear header and data. */
937 	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
938 			  skb->len + head_copy_len))
939 		BUG();
940 
941 	copy_skb_header(n, skb);
942 
943 	off                  = newheadroom - oldheadroom;
944 	n->csum_start       += off;
945 #ifdef NET_SKBUFF_DATA_USES_OFFSET
946 	n->transport_header += off;
947 	n->network_header   += off;
948 	if (skb_mac_header_was_set(skb))
949 		n->mac_header += off;
950 #endif
951 
952 	return n;
953 }
954 EXPORT_SYMBOL(skb_copy_expand);
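
/*
 * Example (sketch): taking a private copy while gaining room for an
 * encapsulation header in front of the data ("ENCAP_HLEN" is
 * hypothetical).
 *
 *	struct sk_buff *nskb = skb_copy_expand(skb, ENCAP_HLEN, 0,
 *					       GFP_ATOMIC);
 *
 *	if (!nskb)
 *		return -ENOMEM;
 *	skb_push(nskb, ENCAP_HLEN);
 */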
955 
956 /**
957  *	skb_pad			-	zero pad the tail of an skb
958  *	@skb: buffer to pad
959  *	@pad: space to pad
960  *
961  *	Ensure that a buffer is followed by a padding area that is zero
962  *	filled. Used by network drivers which may DMA or transfer data
963  *	beyond the buffer end onto the wire.
964  *
965  *	May return error in out of memory cases. The skb is freed on error.
966  */
967 
968 int skb_pad(struct sk_buff *skb, int pad)
969 {
970 	int err;
971 	int ntail;
972 
973 	/* If the skbuff is non-linear, tailroom is always zero. */
974 	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
975 		memset(skb->data+skb->len, 0, pad);
976 		return 0;
977 	}
978 
979 	ntail = skb->data_len + pad - (skb->end - skb->tail);
980 	if (likely(skb_cloned(skb) || ntail > 0)) {
981 		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
982 		if (unlikely(err))
983 			goto free_skb;
984 	}
985 
986 	/* FIXME: The use of this function with non-linear skb's really needs
987 	 * to be audited.
988 	 */
989 	err = skb_linearize(skb);
990 	if (unlikely(err))
991 		goto free_skb;
992 
993 	memset(skb->data + skb->len, 0, pad);
994 	return 0;
995 
996 free_skb:
997 	kfree_skb(skb);
998 	return err;
999 }
1000 EXPORT_SYMBOL(skb_pad);
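
/*
 * Example (sketch): drivers normally pad short ethernet frames through
 * the skb_padto() wrapper around this function. Note that skb->len is
 * not updated, so the transmit length must be clamped by hand ("tx_len"
 * is hypothetical).
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 *	tx_len = max_t(unsigned int, skb->len, ETH_ZLEN);
 */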
1001 
1002 /**
1003  *	skb_put - add data to a buffer
1004  *	@skb: buffer to use
1005  *	@len: amount of data to add
1006  *
1007  *	This function extends the used data area of the buffer. If this would
1008  *	exceed the total buffer size the kernel will panic. A pointer to the
1009  *	first byte of the extra data is returned.
1010  */
1011 unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
1012 {
1013 	unsigned char *tmp = skb_tail_pointer(skb);
1014 	SKB_LINEAR_ASSERT(skb);
1015 	skb->tail += len;
1016 	skb->len  += len;
1017 	if (unlikely(skb->tail > skb->end))
1018 		skb_over_panic(skb, len, __builtin_return_address(0));
1019 	return tmp;
1020 }
1021 EXPORT_SYMBOL(skb_put);
1022 
1023 /**
1024  *	skb_push - add data to the start of a buffer
1025  *	@skb: buffer to use
1026  *	@len: amount of data to add
1027  *
1028  *	This function extends the used data area of the buffer at the buffer
1029  *	start. If this would exceed the total buffer headroom the kernel will
1030  *	panic. A pointer to the first byte of the extra data is returned.
1031  */
1032 unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
1033 {
1034 	skb->data -= len;
1035 	skb->len  += len;
1036 	if (unlikely(skb->data<skb->head))
1037 		skb_under_panic(skb, len, __builtin_return_address(0));
1038 	return skb->data;
1039 }
1040 EXPORT_SYMBOL(skb_push);
1041 
1042 /**
1043  *	skb_pull - remove data from the start of a buffer
1044  *	@skb: buffer to use
1045  *	@len: amount of data to remove
1046  *
1047  *	This function removes data from the start of a buffer, returning
1048  *	the memory to the headroom. A pointer to the next data in the buffer
1049  *	is returned. Once the data has been pulled future pushes will overwrite
1050  *	the old data.
1051  */
1052 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
1053 {
1054 	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
1055 }
1056 EXPORT_SYMBOL(skb_pull);
1057 
1058 /**
1059  *	skb_trim - remove end from a buffer
1060  *	@skb: buffer to alter
1061  *	@len: new length
1062  *
1063  *	Cut the length of a buffer down by removing data from the tail. If
1064  *	the buffer is already under the length specified it is not modified.
1065  *	The skb must be linear.
1066  */
1067 void skb_trim(struct sk_buff *skb, unsigned int len)
1068 {
1069 	if (skb->len > len)
1070 		__skb_trim(skb, len);
1071 }
1072 EXPORT_SYMBOL(skb_trim);
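
/*
 * Example (sketch): how the data-pointer operations above compose when
 * building a packet and later consuming it ("HDR_LEN", "DATA_LEN",
 * "PAD_LEN", "hdr" and "data" are hypothetical). Reserve headroom,
 * append the payload, prepend the header; on the receive side, pull the
 * header and trim any tail padding.
 *
 *	skb_reserve(skb, HDR_LEN);
 *	memcpy(skb_put(skb, DATA_LEN), data, DATA_LEN);
 *	memcpy(skb_push(skb, HDR_LEN), hdr, HDR_LEN);
 *
 *	skb_pull(skb, HDR_LEN);
 *	skb_trim(skb, DATA_LEN - PAD_LEN);
 */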
1073 
1074 /* Trims skb to length len. It can change skb pointers.
1075  */
1076 
1077 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1078 {
1079 	struct sk_buff **fragp;
1080 	struct sk_buff *frag;
1081 	int offset = skb_headlen(skb);
1082 	int nfrags = skb_shinfo(skb)->nr_frags;
1083 	int i;
1084 	int err;
1085 
1086 	if (skb_cloned(skb) &&
1087 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1088 		return err;
1089 
1090 	i = 0;
1091 	if (offset >= len)
1092 		goto drop_pages;
1093 
1094 	for (; i < nfrags; i++) {
1095 		int end = offset + skb_shinfo(skb)->frags[i].size;
1096 
1097 		if (end < len) {
1098 			offset = end;
1099 			continue;
1100 		}
1101 
1102 		skb_shinfo(skb)->frags[i++].size = len - offset;
1103 
1104 drop_pages:
1105 		skb_shinfo(skb)->nr_frags = i;
1106 
1107 		for (; i < nfrags; i++)
1108 			put_page(skb_shinfo(skb)->frags[i].page);
1109 
1110 		if (skb_has_frags(skb))
1111 			skb_drop_fraglist(skb);
1112 		goto done;
1113 	}
1114 
1115 	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1116 	     fragp = &frag->next) {
1117 		int end = offset + frag->len;
1118 
1119 		if (skb_shared(frag)) {
1120 			struct sk_buff *nfrag;
1121 
1122 			nfrag = skb_clone(frag, GFP_ATOMIC);
1123 			if (unlikely(!nfrag))
1124 				return -ENOMEM;
1125 
1126 			nfrag->next = frag->next;
1127 			kfree_skb(frag);
1128 			frag = nfrag;
1129 			*fragp = frag;
1130 		}
1131 
1132 		if (end < len) {
1133 			offset = end;
1134 			continue;
1135 		}
1136 
1137 		if (end > len &&
1138 		    unlikely((err = pskb_trim(frag, len - offset))))
1139 			return err;
1140 
1141 		if (frag->next)
1142 			skb_drop_list(&frag->next);
1143 		break;
1144 	}
1145 
1146 done:
1147 	if (len > skb_headlen(skb)) {
1148 		skb->data_len -= skb->len - len;
1149 		skb->len       = len;
1150 	} else {
1151 		skb->len       = len;
1152 		skb->data_len  = 0;
1153 		skb_set_tail_pointer(skb, len);
1154 	}
1155 
1156 	return 0;
1157 }
1158 EXPORT_SYMBOL(___pskb_trim);
1159 
1160 /**
1161  *	__pskb_pull_tail - advance tail of skb header
1162  *	@skb: buffer to reallocate
1163  *	@delta: number of bytes to advance tail
1164  *
1165  *	The function only makes sense on a fragmented &sk_buff: it expands
1166  *	the header, moving its tail forward and copying the necessary data
1167  *	from the fragmented part.
1168  *
1169  *	The &sk_buff MUST have a reference count of 1.
1170  *
1171  *	Returns %NULL (and the &sk_buff is unchanged) if the pull failed,
1172  *	or the new tail of the skb on success.
1173  *
1174  *	All the pointers pointing into skb header may change and must be
1175  *	reloaded after call to this function.
1176  */
1177 
1178 /* Moves the tail of the skb head forward, copying data from the
1179  * fragmented part when necessary.
1180  * 1. It may fail due to malloc failure.
1181  * 2. It may change skb pointers.
1182  *
1183  * It is pretty complicated. Luckily, it is called only in exceptional cases.
1184  */
1185 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1186 {
1187 	/* If the skb does not have enough free space at the tail, get a new
1188 	 * one plus 128 bytes for future expansions. If we have enough
1189 	 * room at the tail, reallocate without expansion only if the skb is cloned.
1190 	 */
1191 	int i, k, eat = (skb->tail + delta) - skb->end;
1192 
1193 	if (eat > 0 || skb_cloned(skb)) {
1194 		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1195 				     GFP_ATOMIC))
1196 			return NULL;
1197 	}
1198 
1199 	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
1200 		BUG();
1201 
1202 	/* Optimization: no fragments, no reason to pre-estimate the
1203 	 * size of the pulled pages. Superb.
1204 	 */
1205 	if (!skb_has_frags(skb))
1206 		goto pull_pages;
1207 
1208 	/* Estimate size of pulled pages. */
1209 	eat = delta;
1210 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1211 		if (skb_shinfo(skb)->frags[i].size >= eat)
1212 			goto pull_pages;
1213 		eat -= skb_shinfo(skb)->frags[i].size;
1214 	}
1215 
1216 	/* If we need to update the frag list, we are in trouble.
1217 	 * Certainly, it is possible to add an offset to the skb data,
1218 	 * but taking into account that pulling is expected to
1219 	 * be a very rare operation, it is worth fighting against
1220 	 * further bloating of the skb head and crucifying ourselves here instead.
1221 	 * Pure masochism, indeed. 8)8)
1222 	 */
1223 	if (eat) {
1224 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1225 		struct sk_buff *clone = NULL;
1226 		struct sk_buff *insp = NULL;
1227 
1228 		do {
1229 			BUG_ON(!list);
1230 
1231 			if (list->len <= eat) {
1232 				/* Eaten as whole. */
1233 				eat -= list->len;
1234 				list = list->next;
1235 				insp = list;
1236 			} else {
1237 				/* Eaten partially. */
1238 
1239 				if (skb_shared(list)) {
1240 					/* Sucks! We need to fork the list. :-( */
1241 					clone = skb_clone(list, GFP_ATOMIC);
1242 					if (!clone)
1243 						return NULL;
1244 					insp = list->next;
1245 					list = clone;
1246 				} else {
1247 					/* This may be pulled without
1248 					 * problems. */
1249 					insp = list;
1250 				}
1251 				if (!pskb_pull(list, eat)) {
1252 					kfree_skb(clone);
1253 					return NULL;
1254 				}
1255 				break;
1256 			}
1257 		} while (eat);
1258 
1259 		/* Free pulled out fragments. */
1260 		while ((list = skb_shinfo(skb)->frag_list) != insp) {
1261 			skb_shinfo(skb)->frag_list = list->next;
1262 			kfree_skb(list);
1263 		}
1264 		/* And insert new clone at head. */
1265 		if (clone) {
1266 			clone->next = list;
1267 			skb_shinfo(skb)->frag_list = clone;
1268 		}
1269 	}
1270 	/* Success! Now we may commit changes to skb data. */
1271 
1272 pull_pages:
1273 	eat = delta;
1274 	k = 0;
1275 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1276 		if (skb_shinfo(skb)->frags[i].size <= eat) {
1277 			put_page(skb_shinfo(skb)->frags[i].page);
1278 			eat -= skb_shinfo(skb)->frags[i].size;
1279 		} else {
1280 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1281 			if (eat) {
1282 				skb_shinfo(skb)->frags[k].page_offset += eat;
1283 				skb_shinfo(skb)->frags[k].size -= eat;
1284 				eat = 0;
1285 			}
1286 			k++;
1287 		}
1288 	}
1289 	skb_shinfo(skb)->nr_frags = k;
1290 
1291 	skb->tail     += delta;
1292 	skb->data_len -= delta;
1293 
1294 	return skb_tail_pointer(skb);
1295 }
1296 EXPORT_SYMBOL(__pskb_pull_tail);
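
/*
 * Example (sketch): protocol input paths normally reach this function
 * through pskb_may_pull(), which only calls __pskb_pull_tail() when the
 * bytes being examined are not already in the linear header.
 *
 *	const struct iphdr *iph;
 *
 *	if (!pskb_may_pull(skb, sizeof(*iph)))
 *		goto drop;
 *	iph = ip_hdr(skb);
 */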
1297 
1298 /* Copy some data bits from skb to kernel buffer. */
1299 
1300 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1301 {
1302 	int start = skb_headlen(skb);
1303 	struct sk_buff *frag_iter;
1304 	int i, copy;
1305 
1306 	if (offset > (int)skb->len - len)
1307 		goto fault;
1308 
1309 	/* Copy header. */
1310 	if ((copy = start - offset) > 0) {
1311 		if (copy > len)
1312 			copy = len;
1313 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
1314 		if ((len -= copy) == 0)
1315 			return 0;
1316 		offset += copy;
1317 		to     += copy;
1318 	}
1319 
1320 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1321 		int end;
1322 
1323 		WARN_ON(start > offset + len);
1324 
1325 		end = start + skb_shinfo(skb)->frags[i].size;
1326 		if ((copy = end - offset) > 0) {
1327 			u8 *vaddr;
1328 
1329 			if (copy > len)
1330 				copy = len;
1331 
1332 			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
1333 			memcpy(to,
1334 			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
1335 			       offset - start, copy);
1336 			kunmap_skb_frag(vaddr);
1337 
1338 			if ((len -= copy) == 0)
1339 				return 0;
1340 			offset += copy;
1341 			to     += copy;
1342 		}
1343 		start = end;
1344 	}
1345 
1346 	skb_walk_frags(skb, frag_iter) {
1347 		int end;
1348 
1349 		WARN_ON(start > offset + len);
1350 
1351 		end = start + frag_iter->len;
1352 		if ((copy = end - offset) > 0) {
1353 			if (copy > len)
1354 				copy = len;
1355 			if (skb_copy_bits(frag_iter, offset - start, to, copy))
1356 				goto fault;
1357 			if ((len -= copy) == 0)
1358 				return 0;
1359 			offset += copy;
1360 			to     += copy;
1361 		}
1362 		start = end;
1363 	}
1364 	if (!len)
1365 		return 0;
1366 
1367 fault:
1368 	return -EFAULT;
1369 }
1370 EXPORT_SYMBOL(skb_copy_bits);
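
/*
 * Example (sketch): copying a header that may be scattered across
 * fragments into a local buffer; skb_header_pointer() uses this same
 * call when the requested bytes are not contiguous in the linear area
 * ("offset" is hypothetical).
 *
 *	struct udphdr uh;
 *
 *	if (skb_copy_bits(skb, offset, &uh, sizeof(uh)) < 0)
 *		goto drop;
 */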
1371 
1372 /*
1373  * Callback from splice_to_pipe(), if we need to release some pages
1374  * at the end of the spd in case we errored out while filling the pipe.
1375  */
1376 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1377 {
1378 	put_page(spd->pages[i]);
1379 }
1380 
1381 static inline struct page *linear_to_page(struct page *page, unsigned int *len,
1382 					  unsigned int *offset,
1383 					  struct sk_buff *skb, struct sock *sk)
1384 {
1385 	struct page *p = sk->sk_sndmsg_page;
1386 	unsigned int off;
1387 
1388 	if (!p) {
1389 new_page:
1390 		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
1391 		if (!p)
1392 			return NULL;
1393 
1394 		off = sk->sk_sndmsg_off = 0;
1395 		/* hold one ref to this page until it's full */
1396 	} else {
1397 		unsigned int mlen;
1398 
1399 		off = sk->sk_sndmsg_off;
1400 		mlen = PAGE_SIZE - off;
1401 		if (mlen < 64 && mlen < *len) {
1402 			put_page(p);
1403 			goto new_page;
1404 		}
1405 
1406 		*len = min_t(unsigned int, *len, mlen);
1407 	}
1408 
1409 	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
1410 	sk->sk_sndmsg_off += *len;
1411 	*offset = off;
1412 	get_page(p);
1413 
1414 	return p;
1415 }
1416 
1417 /*
1418  * Fill page/offset/length into spd, if it can hold more pages.
1419  */
1420 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
1421 				unsigned int *len, unsigned int offset,
1422 				struct sk_buff *skb, int linear,
1423 				struct sock *sk)
1424 {
1425 	if (unlikely(spd->nr_pages == PIPE_BUFFERS))
1426 		return 1;
1427 
1428 	if (linear) {
1429 		page = linear_to_page(page, len, &offset, skb, sk);
1430 		if (!page)
1431 			return 1;
1432 	} else
1433 		get_page(page);
1434 
1435 	spd->pages[spd->nr_pages] = page;
1436 	spd->partial[spd->nr_pages].len = *len;
1437 	spd->partial[spd->nr_pages].offset = offset;
1438 	spd->nr_pages++;
1439 
1440 	return 0;
1441 }
1442 
1443 static inline void __segment_seek(struct page **page, unsigned int *poff,
1444 				  unsigned int *plen, unsigned int off)
1445 {
1446 	unsigned long n;
1447 
1448 	*poff += off;
1449 	n = *poff / PAGE_SIZE;
1450 	if (n)
1451 		*page = nth_page(*page, n);
1452 
1453 	*poff = *poff % PAGE_SIZE;
1454 	*plen -= off;
1455 }
1456 
1457 static inline int __splice_segment(struct page *page, unsigned int poff,
1458 				   unsigned int plen, unsigned int *off,
1459 				   unsigned int *len, struct sk_buff *skb,
1460 				   struct splice_pipe_desc *spd, int linear,
1461 				   struct sock *sk)
1462 {
1463 	if (!*len)
1464 		return 1;
1465 
1466 	/* skip this segment if already processed */
1467 	if (*off >= plen) {
1468 		*off -= plen;
1469 		return 0;
1470 	}
1471 
1472 	/* ignore any bits we already processed */
1473 	if (*off) {
1474 		__segment_seek(&page, &poff, &plen, *off);
1475 		*off = 0;
1476 	}
1477 
1478 	do {
1479 		unsigned int flen = min(*len, plen);
1480 
1481 		/* the linear region may spread across several pages  */
1482 		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
1483 
1484 		if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk))
1485 			return 1;
1486 
1487 		__segment_seek(&page, &poff, &plen, flen);
1488 		*len -= flen;
1489 
1490 	} while (*len && plen);
1491 
1492 	return 0;
1493 }
1494 
1495 /*
1496  * Map linear and fragment data from the skb to spd. It reports failure if the
1497  * pipe is full or if we already spliced the requested length.
1498  */
1499 static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
1500 			     unsigned int *len, struct splice_pipe_desc *spd,
1501 			     struct sock *sk)
1502 {
1503 	int seg;
1504 
1505 	/*
1506 	 * map the linear part
1507 	 */
1508 	if (__splice_segment(virt_to_page(skb->data),
1509 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
1510 			     skb_headlen(skb),
1511 			     offset, len, skb, spd, 1, sk))
1512 		return 1;
1513 
1514 	/*
1515 	 * then map the fragments
1516 	 */
1517 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1518 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1519 
1520 		if (__splice_segment(f->page, f->page_offset, f->size,
1521 				     offset, len, skb, spd, 0, sk))
1522 			return 1;
1523 	}
1524 
1525 	return 0;
1526 }
1527 
1528 /*
1529  * Map data from the skb to a pipe. Handles the linear part, the
1530  * fragments, and the frag list. It does NOT handle frag lists within
1531  * the frag list, if such a thing exists. We'd probably need to recurse to
1532  * handle that cleanly.
1533  */
1534 int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1535 		    struct pipe_inode_info *pipe, unsigned int tlen,
1536 		    unsigned int flags)
1537 {
1538 	struct partial_page partial[PIPE_BUFFERS];
1539 	struct page *pages[PIPE_BUFFERS];
1540 	struct splice_pipe_desc spd = {
1541 		.pages = pages,
1542 		.partial = partial,
1543 		.flags = flags,
1544 		.ops = &sock_pipe_buf_ops,
1545 		.spd_release = sock_spd_release,
1546 	};
1547 	struct sk_buff *frag_iter;
1548 	struct sock *sk = skb->sk;
1549 
1550 	/*
1551 	 * __skb_splice_bits() only fails if the output has no room left,
1552 	 * so no point in going over the frag_list for the error case.
1553 	 */
1554 	if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk))
1555 		goto done;
1556 	else if (!tlen)
1557 		goto done;
1558 
1559 	/*
1560 	 * now see if we have a frag_list to map
1561 	 */
1562 	skb_walk_frags(skb, frag_iter) {
1563 		if (!tlen)
1564 			break;
1565 		if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk))
1566 			break;
1567 	}
1568 
1569 done:
1570 	if (spd.nr_pages) {
1571 		int ret;
1572 
1573 		/*
1574 		 * Drop the socket lock, otherwise we have reverse
1575 		 * locking dependencies between sk_lock and i_mutex
1576 		 * here as compared to sendfile(). We enter here
1577 		 * with the socket lock held, and splice_to_pipe() will
1578 		 * grab the pipe inode lock. For sendfile() emulation,
1579 		 * we call into ->sendpage() with the i_mutex lock held
1580 		 * and networking will grab the socket lock.
1581 		 */
1582 		release_sock(sk);
1583 		ret = splice_to_pipe(pipe, &spd);
1584 		lock_sock(sk);
1585 		return ret;
1586 	}
1587 
1588 	return 0;
1589 }
1590 
1591 /**
1592  *	skb_store_bits - store bits from kernel buffer to skb
1593  *	@skb: destination buffer
1594  *	@offset: offset in destination
1595  *	@from: source buffer
1596  *	@len: number of bytes to copy
1597  *
1598  *	Copy the specified number of bytes from the source buffer to the
1599  *	destination skb.  This function handles all the messy bits of
1600  *	traversing fragment lists and such.
1601  */
1602 
1603 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1604 {
1605 	int start = skb_headlen(skb);
1606 	struct sk_buff *frag_iter;
1607 	int i, copy;
1608 
1609 	if (offset > (int)skb->len - len)
1610 		goto fault;
1611 
1612 	if ((copy = start - offset) > 0) {
1613 		if (copy > len)
1614 			copy = len;
1615 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
1616 		if ((len -= copy) == 0)
1617 			return 0;
1618 		offset += copy;
1619 		from += copy;
1620 	}
1621 
1622 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1623 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1624 		int end;
1625 
1626 		WARN_ON(start > offset + len);
1627 
1628 		end = start + frag->size;
1629 		if ((copy = end - offset) > 0) {
1630 			u8 *vaddr;
1631 
1632 			if (copy > len)
1633 				copy = len;
1634 
1635 			vaddr = kmap_skb_frag(frag);
1636 			memcpy(vaddr + frag->page_offset + offset - start,
1637 			       from, copy);
1638 			kunmap_skb_frag(vaddr);
1639 
1640 			if ((len -= copy) == 0)
1641 				return 0;
1642 			offset += copy;
1643 			from += copy;
1644 		}
1645 		start = end;
1646 	}
1647 
1648 	skb_walk_frags(skb, frag_iter) {
1649 		int end;
1650 
1651 		WARN_ON(start > offset + len);
1652 
1653 		end = start + frag_iter->len;
1654 		if ((copy = end - offset) > 0) {
1655 			if (copy > len)
1656 				copy = len;
1657 			if (skb_store_bits(frag_iter, offset - start,
1658 					   from, copy))
1659 				goto fault;
1660 			if ((len -= copy) == 0)
1661 				return 0;
1662 			offset += copy;
1663 			from += copy;
1664 		}
1665 		start = end;
1666 	}
1667 	if (!len)
1668 		return 0;
1669 
1670 fault:
1671 	return -EFAULT;
1672 }
1673 EXPORT_SYMBOL(skb_store_bits);
1674 
1675 /* Checksum skb data. */
1676 
1677 __wsum skb_checksum(const struct sk_buff *skb, int offset,
1678 			  int len, __wsum csum)
1679 {
1680 	int start = skb_headlen(skb);
1681 	int i, copy = start - offset;
1682 	struct sk_buff *frag_iter;
1683 	int pos = 0;
1684 
1685 	/* Checksum header. */
1686 	if (copy > 0) {
1687 		if (copy > len)
1688 			copy = len;
1689 		csum = csum_partial(skb->data + offset, copy, csum);
1690 		if ((len -= copy) == 0)
1691 			return csum;
1692 		offset += copy;
1693 		pos	= copy;
1694 	}
1695 
1696 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1697 		int end;
1698 
1699 		WARN_ON(start > offset + len);
1700 
1701 		end = start + skb_shinfo(skb)->frags[i].size;
1702 		if ((copy = end - offset) > 0) {
1703 			__wsum csum2;
1704 			u8 *vaddr;
1705 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1706 
1707 			if (copy > len)
1708 				copy = len;
1709 			vaddr = kmap_skb_frag(frag);
1710 			csum2 = csum_partial(vaddr + frag->page_offset +
1711 					     offset - start, copy, 0);
1712 			kunmap_skb_frag(vaddr);
1713 			csum = csum_block_add(csum, csum2, pos);
1714 			if (!(len -= copy))
1715 				return csum;
1716 			offset += copy;
1717 			pos    += copy;
1718 		}
1719 		start = end;
1720 	}
1721 
1722 	skb_walk_frags(skb, frag_iter) {
1723 		int end;
1724 
1725 		WARN_ON(start > offset + len);
1726 
1727 		end = start + frag_iter->len;
1728 		if ((copy = end - offset) > 0) {
1729 			__wsum csum2;
1730 			if (copy > len)
1731 				copy = len;
1732 			csum2 = skb_checksum(frag_iter, offset - start,
1733 					     copy, 0);
1734 			csum = csum_block_add(csum, csum2, pos);
1735 			if ((len -= copy) == 0)
1736 				return csum;
1737 			offset += copy;
1738 			pos    += copy;
1739 		}
1740 		start = end;
1741 	}
1742 	BUG_ON(len);
1743 
1744 	return csum;
1745 }
1746 EXPORT_SYMBOL(skb_checksum);
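
/*
 * Example (sketch): verifying a received transport checksum in software,
 * roughly what the UDP path does when no hardware checksum is available.
 * "pseudo_csum" stands for the precomputed pseudo-header sum and is
 * hypothetical here; a folded result of zero means the packet is good.
 *
 *	if (csum_fold(skb_checksum(skb, 0, skb->len, pseudo_csum)))
 *		goto csum_error;
 */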
1747 
1748 /* Both of above in one bottle. */
1749 
1750 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1751 				    u8 *to, int len, __wsum csum)
1752 {
1753 	int start = skb_headlen(skb);
1754 	int i, copy = start - offset;
1755 	struct sk_buff *frag_iter;
1756 	int pos = 0;
1757 
1758 	/* Copy header. */
1759 	if (copy > 0) {
1760 		if (copy > len)
1761 			copy = len;
1762 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
1763 						 copy, csum);
1764 		if ((len -= copy) == 0)
1765 			return csum;
1766 		offset += copy;
1767 		to     += copy;
1768 		pos	= copy;
1769 	}
1770 
1771 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1772 		int end;
1773 
1774 		WARN_ON(start > offset + len);
1775 
1776 		end = start + skb_shinfo(skb)->frags[i].size;
1777 		if ((copy = end - offset) > 0) {
1778 			__wsum csum2;
1779 			u8 *vaddr;
1780 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1781 
1782 			if (copy > len)
1783 				copy = len;
1784 			vaddr = kmap_skb_frag(frag);
1785 			csum2 = csum_partial_copy_nocheck(vaddr +
1786 							  frag->page_offset +
1787 							  offset - start, to,
1788 							  copy, 0);
1789 			kunmap_skb_frag(vaddr);
1790 			csum = csum_block_add(csum, csum2, pos);
1791 			if (!(len -= copy))
1792 				return csum;
1793 			offset += copy;
1794 			to     += copy;
1795 			pos    += copy;
1796 		}
1797 		start = end;
1798 	}
1799 
1800 	skb_walk_frags(skb, frag_iter) {
1801 		__wsum csum2;
1802 		int end;
1803 
1804 		WARN_ON(start > offset + len);
1805 
1806 		end = start + frag_iter->len;
1807 		if ((copy = end - offset) > 0) {
1808 			if (copy > len)
1809 				copy = len;
1810 			csum2 = skb_copy_and_csum_bits(frag_iter,
1811 						       offset - start,
1812 						       to, copy, 0);
1813 			csum = csum_block_add(csum, csum2, pos);
1814 			if ((len -= copy) == 0)
1815 				return csum;
1816 			offset += copy;
1817 			to     += copy;
1818 			pos    += copy;
1819 		}
1820 		start = end;
1821 	}
1822 	BUG_ON(len);
1823 	return csum;
1824 }
1825 EXPORT_SYMBOL(skb_copy_and_csum_bits);
1826 
1827 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
1828 {
1829 	__wsum csum;
1830 	long csstart;
1831 
1832 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1833 		csstart = skb->csum_start - skb_headroom(skb);
1834 	else
1835 		csstart = skb_headlen(skb);
1836 
1837 	BUG_ON(csstart > skb_headlen(skb));
1838 
1839 	skb_copy_from_linear_data(skb, to, csstart);
1840 
1841 	csum = 0;
1842 	if (csstart != skb->len)
1843 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
1844 					      skb->len - csstart, 0);
1845 
1846 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1847 		long csstuff = csstart + skb->csum_offset;
1848 
1849 		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
1850 	}
1851 }
1852 EXPORT_SYMBOL(skb_copy_and_csum_dev);
1853 
1854 /**
1855  *	skb_dequeue - remove from the head of the queue
1856  *	@list: list to dequeue from
1857  *
1858  *	Remove the head of the list. The list lock is taken so the function
1859  *	may be used safely with other locking list functions. The head item is
1860  *	returned or %NULL if the list is empty.
1861  */
1862 
1863 struct sk_buff *skb_dequeue(struct sk_buff_head *list)
1864 {
1865 	unsigned long flags;
1866 	struct sk_buff *result;
1867 
1868 	spin_lock_irqsave(&list->lock, flags);
1869 	result = __skb_dequeue(list);
1870 	spin_unlock_irqrestore(&list->lock, flags);
1871 	return result;
1872 }
1873 EXPORT_SYMBOL(skb_dequeue);
1874 
1875 /**
1876  *	skb_dequeue_tail - remove from the tail of the queue
1877  *	@list: list to dequeue from
1878  *
1879  *	Remove the tail of the list. The list lock is taken so the function
1880  *	may be used safely with other locking list functions. The tail item is
1881  *	returned or %NULL if the list is empty.
1882  */
1883 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
1884 {
1885 	unsigned long flags;
1886 	struct sk_buff *result;
1887 
1888 	spin_lock_irqsave(&list->lock, flags);
1889 	result = __skb_dequeue_tail(list);
1890 	spin_unlock_irqrestore(&list->lock, flags);
1891 	return result;
1892 }
1893 EXPORT_SYMBOL(skb_dequeue_tail);
1894 
1895 /**
1896  *	skb_queue_purge - empty a list
1897  *	@list: list to empty
1898  *
1899  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
1900  *	the list and one reference dropped. This function takes the list
1901  *	lock and is atomic with respect to other list locking functions.
1902  */
1903 void skb_queue_purge(struct sk_buff_head *list)
1904 {
1905 	struct sk_buff *skb;
1906 	while ((skb = skb_dequeue(list)) != NULL)
1907 		kfree_skb(skb);
1908 }
1909 EXPORT_SYMBOL(skb_queue_purge);
1910 
1911 /**
1912  *	skb_queue_head - queue a buffer at the list head
1913  *	@list: list to use
1914  *	@newsk: buffer to queue
1915  *
1916  *	Queue a buffer at the start of the list. This function takes the
1917  *	list lock and can be used safely with other locking &sk_buff
1918  *	functions.
1919  *
1920  *	A buffer cannot be placed on two lists at the same time.
1921  */
1922 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
1923 {
1924 	unsigned long flags;
1925 
1926 	spin_lock_irqsave(&list->lock, flags);
1927 	__skb_queue_head(list, newsk);
1928 	spin_unlock_irqrestore(&list->lock, flags);
1929 }
1930 EXPORT_SYMBOL(skb_queue_head);
1931 
1932 /**
1933  *	skb_queue_tail - queue a buffer at the list tail
1934  *	@list: list to use
1935  *	@newsk: buffer to queue
1936  *
1937  *	Queue a buffer at the tail of the list. This function takes the
1938  *	list lock and can be used safely with other locking &sk_buff
1939  *	functions.
1940  *
1941  *	A buffer cannot be placed on two lists at the same time.
1942  */
1943 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
1944 {
1945 	unsigned long flags;
1946 
1947 	spin_lock_irqsave(&list->lock, flags);
1948 	__skb_queue_tail(list, newsk);
1949 	spin_unlock_irqrestore(&list->lock, flags);
1950 }
1951 EXPORT_SYMBOL(skb_queue_tail);
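
/*
 * Example (sketch): the classic producer/consumer pairing for these
 * queue helpers, usable from any context since the list lock is taken
 * with IRQs disabled ("rxq" is a hypothetical &sk_buff_head initialised
 * with skb_queue_head_init(); "process" is hypothetical too).
 *
 *	skb_queue_tail(&rxq, skb);
 *
 *	while ((skb = skb_dequeue(&rxq)) != NULL)
 *		process(skb);
 */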
1952 
1953 /**
1954  *	skb_unlink	-	remove a buffer from a list
1955  *	@skb: buffer to remove
1956  *	@list: list to use
1957  *
1958  *	Remove a packet from a list. The list locks are taken and this
1959  *	function is atomic with respect to other list locked calls.
1960  *
1961  *	You must know what list the SKB is on.
1962  */
1963 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1964 {
1965 	unsigned long flags;
1966 
1967 	spin_lock_irqsave(&list->lock, flags);
1968 	__skb_unlink(skb, list);
1969 	spin_unlock_irqrestore(&list->lock, flags);
1970 }
1971 EXPORT_SYMBOL(skb_unlink);
1972 
1973 /**
1974  *	skb_append	-	append a buffer
1975  *	@old: buffer to insert after
1976  *	@newsk: buffer to insert
1977  *	@list: list to use
1978  *
1979  *	Place a packet after a given packet in a list. The list locks are taken
1980  *	and this function is atomic with respect to other list locked calls.
1981  *	A buffer cannot be placed on two lists at the same time.
1982  */
1983 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
1984 {
1985 	unsigned long flags;
1986 
1987 	spin_lock_irqsave(&list->lock, flags);
1988 	__skb_queue_after(list, old, newsk);
1989 	spin_unlock_irqrestore(&list->lock, flags);
1990 }
1991 EXPORT_SYMBOL(skb_append);
1992 
1993 /**
1994  *	skb_insert	-	insert a buffer
1995  *	@old: buffer to insert before
1996  *	@newsk: buffer to insert
1997  *	@list: list to use
1998  *
1999  *	Place a packet before a given packet in a list. The list locks are
2000  * 	taken and this function is atomic with respect to other list locked
2001  *	calls.
2002  *
2003  *	A buffer cannot be placed on two lists at the same time.
2004  */
2005 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2006 {
2007 	unsigned long flags;
2008 
2009 	spin_lock_irqsave(&list->lock, flags);
2010 	__skb_insert(newsk, old->prev, old, list);
2011 	spin_unlock_irqrestore(&list->lock, flags);
2012 }
2013 EXPORT_SYMBOL(skb_insert);
2014 
2015 static inline void skb_split_inside_header(struct sk_buff *skb,
2016 					   struct sk_buff* skb1,
2017 					   const u32 len, const int pos)
2018 {
2019 	int i;
2020 
2021 	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2022 					 pos - len);
2023 	/* And move data appendix as is. */
2024 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2025 		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2026 
2027 	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2028 	skb_shinfo(skb)->nr_frags  = 0;
2029 	skb1->data_len		   = skb->data_len;
2030 	skb1->len		   += skb1->data_len;
2031 	skb->data_len		   = 0;
2032 	skb->len		   = len;
2033 	skb_set_tail_pointer(skb, len);
2034 }
2035 
2036 static inline void skb_split_no_header(struct sk_buff *skb,
2037 				       struct sk_buff* skb1,
2038 				       const u32 len, int pos)
2039 {
2040 	int i, k = 0;
2041 	const int nfrags = skb_shinfo(skb)->nr_frags;
2042 
2043 	skb_shinfo(skb)->nr_frags = 0;
2044 	skb1->len		  = skb1->data_len = skb->len - len;
2045 	skb->len		  = len;
2046 	skb->data_len		  = len - pos;
2047 
2048 	for (i = 0; i < nfrags; i++) {
2049 		int size = skb_shinfo(skb)->frags[i].size;
2050 
2051 		if (pos + size > len) {
2052 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2053 
2054 			if (pos < len) {
2055 				/* Split the frag.
2056 				 * Two variants are possible here:
2057 				 * 1. Move the whole frag to the second
2058 				 *    part, if possible. E.g. this
2059 				 *    approach is mandatory for TUX,
2060 				 *    where splitting is expensive.
2061 				 * 2. Split accurately. That is what we do.
2062 				 */
2063 				get_page(skb_shinfo(skb)->frags[i].page);
2064 				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2065 				skb_shinfo(skb1)->frags[0].size -= len - pos;
2066 				skb_shinfo(skb)->frags[i].size	= len - pos;
2067 				skb_shinfo(skb)->nr_frags++;
2068 			}
2069 			k++;
2070 		} else
2071 			skb_shinfo(skb)->nr_frags++;
2072 		pos += size;
2073 	}
2074 	skb_shinfo(skb1)->nr_frags = k;
2075 }
2076 
2077 /**
2078  * skb_split - Split fragmented skb to two parts at length len.
2079  * skb_split - Split a fragmented skb into two parts at length len.
2080  * @skb1: the buffer to receive the second part
2081  * @len: new length for skb
2082  */
2083 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2084 {
2085 	int pos = skb_headlen(skb);
2086 
2087 	if (len < pos)	/* Split line is inside header. */
2088 		skb_split_inside_header(skb, skb1, len, pos);
2089 	else		/* Second chunk has no header, nothing to copy. */
2090 		skb_split_no_header(skb, skb1, len, pos);
2091 }
2092 EXPORT_SYMBOL(skb_split);
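
/*
 * Editorial example (assumption, not from this file): splitting a buffer at
 * its linear/paged boundary, roughly as TCP does when fragmenting a packet
 * for retransmission. The freshly allocated skb1 receives the second part;
 * were the split point inside the header, skb1 would also need enough
 * tailroom for the copied header bytes.
 *
 *	struct sk_buff *skb1 = alloc_skb(0, GFP_ATOMIC);
 *
 *	if (skb1)
 *		skb_split(skb, skb1, skb_headlen(skb));
 *
 * After the call, skb carries the first skb_headlen(skb) bytes and skb1 the
 * paged remainder.
 */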
2093 
2094 /* Shifting from/to a cloned skb is a no-go.
2095  *
2096  * Caller cannot keep skb_shinfo related pointers past calling here!
2097  */
2098 static int skb_prepare_for_shift(struct sk_buff *skb)
2099 {
2100 	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2101 }
2102 
2103 /**
2104  * skb_shift - Shifts paged data partially from skb to another
2105  * skb_shift - Shift paged data partially from one skb to another
2106  * @tgt: buffer into which tail data gets added
2107  * @skb: buffer from which the paged data comes
2108  * @shiftlen: shift up to this many bytes
2109  *
2110  * Attempts to shift up to shiftlen worth of bytes, which may be less than
2111  * the length of the skb, from @skb to @tgt. Returns the number of bytes
2112  * shifted. It is up to the caller to free @skb if everything was shifted.
2113  * If @tgt runs out of frags, the whole operation is aborted.
2114  *
2115  * @skb may contain nothing but paged data, while @tgt is also allowed
2116  * to contain non-paged (linear) data.
2117  *
2118  * TODO: a full-sized shift could be optimized, but that would need a
2119  * specialized skb freeing routine to handle frags without an up-to-date nr_frags.
2120  */
2121 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2122 {
2123 	int from, to, merge, todo;
2124 	struct skb_frag_struct *fragfrom, *fragto;
2125 
2126 	BUG_ON(shiftlen > skb->len);
2127 	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */
2128 
2129 	todo = shiftlen;
2130 	from = 0;
2131 	to = skb_shinfo(tgt)->nr_frags;
2132 	fragfrom = &skb_shinfo(skb)->frags[from];
2133 
2134 	/* Actual merge is delayed until the point when we know we can
2135 	 * commit all, so that we don't have to undo partial changes
2136 	 */
2137 	if (!to ||
2138 	    !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
2139 		merge = -1;
2140 	} else {
2141 		merge = to - 1;
2142 
2143 		todo -= fragfrom->size;
2144 		if (todo < 0) {
2145 			if (skb_prepare_for_shift(skb) ||
2146 			    skb_prepare_for_shift(tgt))
2147 				return 0;
2148 
2149 			/* All previous frag pointers might be stale! */
2150 			fragfrom = &skb_shinfo(skb)->frags[from];
2151 			fragto = &skb_shinfo(tgt)->frags[merge];
2152 
2153 			fragto->size += shiftlen;
2154 			fragfrom->size -= shiftlen;
2155 			fragfrom->page_offset += shiftlen;
2156 
2157 			goto onlymerged;
2158 		}
2159 
2160 		from++;
2161 	}
2162 
2163 	/* Skip full, not-fitting skb to avoid expensive operations */
2164 	if ((shiftlen == skb->len) &&
2165 	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2166 		return 0;
2167 
2168 	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2169 		return 0;
2170 
2171 	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2172 		if (to == MAX_SKB_FRAGS)
2173 			return 0;
2174 
2175 		fragfrom = &skb_shinfo(skb)->frags[from];
2176 		fragto = &skb_shinfo(tgt)->frags[to];
2177 
2178 		if (todo >= fragfrom->size) {
2179 			*fragto = *fragfrom;
2180 			todo -= fragfrom->size;
2181 			from++;
2182 			to++;
2183 
2184 		} else {
2185 			get_page(fragfrom->page);
2186 			fragto->page = fragfrom->page;
2187 			fragto->page_offset = fragfrom->page_offset;
2188 			fragto->size = todo;
2189 
2190 			fragfrom->page_offset += todo;
2191 			fragfrom->size -= todo;
2192 			todo = 0;
2193 
2194 			to++;
2195 			break;
2196 		}
2197 	}
2198 
2199 	/* Ready to "commit" this state change to tgt */
2200 	skb_shinfo(tgt)->nr_frags = to;
2201 
2202 	if (merge >= 0) {
2203 		fragfrom = &skb_shinfo(skb)->frags[0];
2204 		fragto = &skb_shinfo(tgt)->frags[merge];
2205 
2206 		fragto->size += fragfrom->size;
2207 		put_page(fragfrom->page);
2208 	}
2209 
2210 	/* Reposition in the original skb */
2211 	to = 0;
2212 	while (from < skb_shinfo(skb)->nr_frags)
2213 		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2214 	skb_shinfo(skb)->nr_frags = to;
2215 
2216 	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2217 
2218 onlymerged:
2219 	/* Most likely the tgt won't ever need its checksum anymore, skb on
2220 	 * the other hand might need it if it needs to be resent
2221 	 */
2222 	tgt->ip_summed = CHECKSUM_PARTIAL;
2223 	skb->ip_summed = CHECKSUM_PARTIAL;
2224 
2225 	/* Update the byte accounting on both skbs; a helper would be nice. */
2226 	skb->len -= shiftlen;
2227 	skb->data_len -= shiftlen;
2228 	skb->truesize -= shiftlen;
2229 	tgt->len += shiftlen;
2230 	tgt->data_len += shiftlen;
2231 	tgt->truesize += shiftlen;
2232 
2233 	return shiftlen;
2234 }
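
/*
 * Editorial example (assumption): how a caller such as the TCP SACK code
 * might collapse one buffer into its predecessor. prev and list are
 * assumptions: both buffers sit on the same queue, and skb must carry no
 * linear data (see the BUG_ON above).
 *
 *	int want = skb->len;
 *	int shifted = skb_shift(prev, skb, want);
 *
 *	if (shifted == want) {
 *		__skb_unlink(skb, list);
 *		kfree_skb(skb);
 *	}
 *
 * When everything was shifted, skb->len has dropped to zero and the caller
 * is responsible for freeing the now-empty skb.
 */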
2235 
2236 /**
2237  * skb_prepare_seq_read - Prepare a sequential read of skb data
2238  * @skb: the buffer to read
2239  * @from: lower offset of data to be read
2240  * @to: upper offset of data to be read
2241  * @st: state variable
2242  *
2243  * Initializes the specified state variable. Must be called before
2244  * invoking skb_seq_read() for the first time.
2245  */
2246 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2247 			  unsigned int to, struct skb_seq_state *st)
2248 {
2249 	st->lower_offset = from;
2250 	st->upper_offset = to;
2251 	st->root_skb = st->cur_skb = skb;
2252 	st->frag_idx = st->stepped_offset = 0;
2253 	st->frag_data = NULL;
2254 }
2255 EXPORT_SYMBOL(skb_prepare_seq_read);
2256 
2257 /**
2258  * skb_seq_read - Sequentially read skb data
2259  * @consumed: number of bytes consumed by the caller so far
2260  * @data: destination pointer for data to be returned
2261  * @st: state variable
2262  *
2263  * Reads a block of skb data at @consumed relative to the
2264  * lower offset specified to skb_prepare_seq_read(). Assigns
2265  * the head of the data block to @data and returns the length
2266  * of the block or 0 if the end of the skb data or the upper
2267  * offset has been reached.
2268  *
2269  * The caller is not required to consume all of the data
2270  * returned, i.e. @consumed is typically set to the number
2271  * of bytes already consumed and the next call to
2272  * skb_seq_read() will return the remaining part of the block.
2273  *
2274  * Note 1: The size of each block of data returned can be arbitrary;
2275  *       this limitation is the cost of zerocopy sequential
2276  *       reads of potentially non-linear data.
2277  *
2278  * Note 2: Fragment lists within fragments are not implemented
2279  *       at the moment, state->root_skb could be replaced with
2280  *       a stack for this purpose.
2281  */
2282 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2283 			  struct skb_seq_state *st)
2284 {
2285 	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2286 	skb_frag_t *frag;
2287 
2288 	if (unlikely(abs_offset >= st->upper_offset))
2289 		return 0;
2290 
2291 next_skb:
2292 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2293 
2294 	if (abs_offset < block_limit && !st->frag_data) {
2295 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2296 		return block_limit - abs_offset;
2297 	}
2298 
2299 	if (st->frag_idx == 0 && !st->frag_data)
2300 		st->stepped_offset += skb_headlen(st->cur_skb);
2301 
2302 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2303 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2304 		block_limit = frag->size + st->stepped_offset;
2305 
2306 		if (abs_offset < block_limit) {
2307 			if (!st->frag_data)
2308 				st->frag_data = kmap_skb_frag(frag);
2309 
2310 			*data = (u8 *) st->frag_data + frag->page_offset +
2311 				(abs_offset - st->stepped_offset);
2312 
2313 			return block_limit - abs_offset;
2314 		}
2315 
2316 		if (st->frag_data) {
2317 			kunmap_skb_frag(st->frag_data);
2318 			st->frag_data = NULL;
2319 		}
2320 
2321 		st->frag_idx++;
2322 		st->stepped_offset += frag->size;
2323 	}
2324 
2325 	if (st->frag_data) {
2326 		kunmap_skb_frag(st->frag_data);
2327 		st->frag_data = NULL;
2328 	}
2329 
2330 	if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) {
2331 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2332 		st->frag_idx = 0;
2333 		goto next_skb;
2334 	} else if (st->cur_skb->next) {
2335 		st->cur_skb = st->cur_skb->next;
2336 		st->frag_idx = 0;
2337 		goto next_skb;
2338 	}
2339 
2340 	return 0;
2341 }
2342 EXPORT_SYMBOL(skb_seq_read);
2343 
2344 /**
2345  * skb_abort_seq_read - Abort a sequential read of skb data
2346  * @st: state variable
2347  *
2348  * Must be called if the read was abandoned before skb_seq_read()
2349  * returned 0, so that any mapped fragment is released.
2350  */
2351 void skb_abort_seq_read(struct skb_seq_state *st)
2352 {
2353 	if (st->frag_data)
2354 		kunmap_skb_frag(st->frag_data);
2355 }
2356 EXPORT_SYMBOL(skb_abort_seq_read);
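
/*
 * Editorial example (assumption): walking all of an skb's data without
 * linearizing it, using the sequential-read API above.
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0)
 *		consumed += len;
 *
 * Had the loop been left before skb_seq_read() returned 0, the caller would
 * have to call skb_abort_seq_read(&st) to release any mapped fragment.
 */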
2357 
2358 #define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
2359 
2360 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2361 					  struct ts_config *conf,
2362 					  struct ts_state *state)
2363 {
2364 	return skb_seq_read(offset, text, TS_SKB_CB(state));
2365 }
2366 
2367 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2368 {
2369 	skb_abort_seq_read(TS_SKB_CB(state));
2370 }
2371 
2372 /**
2373  * skb_find_text - Find a text pattern in skb data
2374  * @skb: the buffer to look in
2375  * @from: search offset
2376  * @to: search limit
2377  * @config: textsearch configuration
2378  * @state: uninitialized textsearch state variable
2379  *
2380  * Finds a pattern in the skb data according to the specified
2381  * textsearch configuration. Use textsearch_next() to retrieve
2382  * subsequent occurrences of the pattern. Returns the offset
2383  * to the first occurrence or UINT_MAX if no match was found.
2384  */
2385 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2386 			   unsigned int to, struct ts_config *config,
2387 			   struct ts_state *state)
2388 {
2389 	unsigned int ret;
2390 
2391 	config->get_next_block = skb_ts_get_next_block;
2392 	config->finish = skb_ts_finish;
2393 
2394 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2395 
2396 	ret = textsearch_find(config, state);
2397 	return (ret <= to - from ? ret : UINT_MAX);
2398 }
2399 EXPORT_SYMBOL(skb_find_text);
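
/*
 * Editorial example (assumption): searching a buffer for a literal pattern
 * with the "kmp" textsearch algorithm; the pattern and the use of
 * pr_debug() are made up for the illustration.
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "GET ", 4, GFP_KERNEL, TS_AUTOLOAD);
 *	if (!IS_ERR(conf)) {
 *		pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *		if (pos != UINT_MAX)
 *			pr_debug("match at offset %u\n", pos);
 *		textsearch_destroy(conf);
 *	}
 */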
2400 
2401 /**
2402  * skb_append_datato_frags - append user data to an skb
2403  * @sk: sock structure
2404  * @skb: skb structure to which the user data is appended
2405  * @getfrag: callback function used to fetch the user data
2406  * @from: pointer to the user message iov
2407  * @length: length of the iov message
2408  *
2409  * Description: This procedure appends the user data to the fragment part
2410  * of the skb. If any page allocation fails, it returns -ENOMEM.
2411  */
2412 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2413 			int (*getfrag)(void *from, char *to, int offset,
2414 					int len, int odd, struct sk_buff *skb),
2415 			void *from, int length)
2416 {
2417 	int frg_cnt = 0;
2418 	skb_frag_t *frag = NULL;
2419 	struct page *page = NULL;
2420 	int copy, left;
2421 	int offset = 0;
2422 	int ret;
2423 
2424 	do {
2425 		/* Return error if we don't have space for new frag */
2426 		frg_cnt = skb_shinfo(skb)->nr_frags;
2427 		if (frg_cnt >= MAX_SKB_FRAGS)
2428 			return -EFAULT;
2429 
2430 		/* allocate a new page for next frag */
2431 		page = alloc_pages(sk->sk_allocation, 0);
2432 
2433 		/* If alloc_pages() fails, just return failure; the caller
2434 		 * will free the previously allocated pages via kfree_skb().
2435 		 */
2436 		if (page == NULL)
2437 			return -ENOMEM;
2438 
2439 		/* initialize the next frag */
2440 		sk->sk_sndmsg_page = page;
2441 		sk->sk_sndmsg_off = 0;
2442 		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2443 		skb->truesize += PAGE_SIZE;
2444 		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
2445 
2446 		/* get the new initialized frag */
2447 		frg_cnt = skb_shinfo(skb)->nr_frags;
2448 		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
2449 
2450 		/* copy the user data to page */
2451 		left = PAGE_SIZE - frag->page_offset;
2452 		copy = (length > left)? left : length;
2453 
2454 		ret = getfrag(from, (page_address(frag->page) +
2455 			    frag->page_offset + frag->size),
2456 			    offset, copy, 0, skb);
2457 		if (ret < 0)
2458 			return -EFAULT;
2459 
2460 		/* copy was successful so update the size parameters */
2461 		sk->sk_sndmsg_off += copy;
2462 		frag->size += copy;
2463 		skb->len += copy;
2464 		skb->data_len += copy;
2465 		offset += copy;
2466 		length -= copy;
2467 
2468 	} while (length > 0);
2469 
2470 	return 0;
2471 }
2472 EXPORT_SYMBOL(skb_append_datato_frags);
2473 
2474 /**
2475  *	skb_pull_rcsum - pull skb and update receive checksum
2476  *	@skb: buffer to update
2477  *	@len: length of data pulled
2478  *
2479  *	This function performs an skb_pull on the packet and updates
2480  *	the CHECKSUM_COMPLETE checksum.  It should be used on
2481  *	receive path processing instead of skb_pull unless you know
2482  *	that the checksum difference is zero (e.g., a valid IP header)
2483  *	or you are setting ip_summed to CHECKSUM_NONE.
2484  */
2485 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2486 {
2487 	BUG_ON(len > skb->len);
2488 	skb->len -= len;
2489 	BUG_ON(skb->len < skb->data_len);
2490 	skb_postpull_rcsum(skb, skb->data, len);
2491 	return skb->data += len;
2492 }
2493 
2494 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
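
/*
 * Editorial example (assumption): stripping a 4-byte encapsulation header
 * on the receive path while keeping a CHECKSUM_COMPLETE value valid, in the
 * style of the VLAN input code; the header length is illustrative.
 *
 *	if (unlikely(!pskb_may_pull(skb, 4)))
 *		goto drop;
 *	skb_pull_rcsum(skb, 4);
 */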
2495 
2496 /**
2497  *	skb_segment - Perform protocol segmentation on skb.
2498  *	@skb: buffer to segment
2499  *	@features: features for the output path (see dev->features)
2500  *
2501  *	This function performs segmentation on the given skb.  It returns
2502  *	a pointer to the first in a list of new skbs for the segments.
2503  *	In case of error it returns ERR_PTR(err).
2504  */
2505 struct sk_buff *skb_segment(struct sk_buff *skb, int features)
2506 {
2507 	struct sk_buff *segs = NULL;
2508 	struct sk_buff *tail = NULL;
2509 	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
2510 	unsigned int mss = skb_shinfo(skb)->gso_size;
2511 	unsigned int doffset = skb->data - skb_mac_header(skb);
2512 	unsigned int offset = doffset;
2513 	unsigned int headroom;
2514 	unsigned int len;
2515 	int sg = features & NETIF_F_SG;
2516 	int nfrags = skb_shinfo(skb)->nr_frags;
2517 	int err = -ENOMEM;
2518 	int i = 0;
2519 	int pos;
2520 
2521 	__skb_push(skb, doffset);
2522 	headroom = skb_headroom(skb);
2523 	pos = skb_headlen(skb);
2524 
2525 	do {
2526 		struct sk_buff *nskb;
2527 		skb_frag_t *frag;
2528 		int hsize;
2529 		int size;
2530 
2531 		len = skb->len - offset;
2532 		if (len > mss)
2533 			len = mss;
2534 
2535 		hsize = skb_headlen(skb) - offset;
2536 		if (hsize < 0)
2537 			hsize = 0;
2538 		if (hsize > len || !sg)
2539 			hsize = len;
2540 
2541 		if (!hsize && i >= nfrags) {
2542 			BUG_ON(fskb->len != len);
2543 
2544 			pos += len;
2545 			nskb = skb_clone(fskb, GFP_ATOMIC);
2546 			fskb = fskb->next;
2547 
2548 			if (unlikely(!nskb))
2549 				goto err;
2550 
2551 			hsize = skb_end_pointer(nskb) - nskb->head;
2552 			if (skb_cow_head(nskb, doffset + headroom)) {
2553 				kfree_skb(nskb);
2554 				goto err;
2555 			}
2556 
2557 			nskb->truesize += skb_end_pointer(nskb) - nskb->head -
2558 					  hsize;
2559 			skb_release_head_state(nskb);
2560 			__skb_push(nskb, doffset);
2561 		} else {
2562 			nskb = alloc_skb(hsize + doffset + headroom,
2563 					 GFP_ATOMIC);
2564 
2565 			if (unlikely(!nskb))
2566 				goto err;
2567 
2568 			skb_reserve(nskb, headroom);
2569 			__skb_put(nskb, doffset);
2570 		}
2571 
2572 		if (segs)
2573 			tail->next = nskb;
2574 		else
2575 			segs = nskb;
2576 		tail = nskb;
2577 
2578 		__copy_skb_header(nskb, skb);
2579 		nskb->mac_len = skb->mac_len;
2580 
2581 		skb_reset_mac_header(nskb);
2582 		skb_set_network_header(nskb, skb->mac_len);
2583 		nskb->transport_header = (nskb->network_header +
2584 					  skb_network_header_len(skb));
2585 		skb_copy_from_linear_data(skb, nskb->data, doffset);
2586 
2587 		if (fskb != skb_shinfo(skb)->frag_list)
2588 			continue;
2589 
2590 		if (!sg) {
2591 			nskb->ip_summed = CHECKSUM_NONE;
2592 			nskb->csum = skb_copy_and_csum_bits(skb, offset,
2593 							    skb_put(nskb, len),
2594 							    len, 0);
2595 			continue;
2596 		}
2597 
2598 		frag = skb_shinfo(nskb)->frags;
2599 
2600 		skb_copy_from_linear_data_offset(skb, offset,
2601 						 skb_put(nskb, hsize), hsize);
2602 
2603 		while (pos < offset + len && i < nfrags) {
2604 			*frag = skb_shinfo(skb)->frags[i];
2605 			get_page(frag->page);
2606 			size = frag->size;
2607 
2608 			if (pos < offset) {
2609 				frag->page_offset += offset - pos;
2610 				frag->size -= offset - pos;
2611 			}
2612 
2613 			skb_shinfo(nskb)->nr_frags++;
2614 
2615 			if (pos + size <= offset + len) {
2616 				i++;
2617 				pos += size;
2618 			} else {
2619 				frag->size -= pos + size - (offset + len);
2620 				goto skip_fraglist;
2621 			}
2622 
2623 			frag++;
2624 		}
2625 
2626 		if (pos < offset + len) {
2627 			struct sk_buff *fskb2 = fskb;
2628 
2629 			BUG_ON(pos + fskb->len != offset + len);
2630 
2631 			pos += fskb->len;
2632 			fskb = fskb->next;
2633 
2634 			if (fskb2->next) {
2635 				fskb2 = skb_clone(fskb2, GFP_ATOMIC);
2636 				if (!fskb2)
2637 					goto err;
2638 			} else
2639 				skb_get(fskb2);
2640 
2641 			SKB_FRAG_ASSERT(nskb);
2642 			skb_shinfo(nskb)->frag_list = fskb2;
2643 		}
2644 
2645 skip_fraglist:
2646 		nskb->data_len = len - hsize;
2647 		nskb->len += nskb->data_len;
2648 		nskb->truesize += nskb->data_len;
2649 	} while ((offset += len) < skb->len);
2650 
2651 	return segs;
2652 
2653 err:
2654 	while ((skb = segs)) {
2655 		segs = skb->next;
2656 		kfree_skb(skb);
2657 	}
2658 	return ERR_PTR(err);
2659 }
2660 EXPORT_SYMBOL_GPL(skb_segment);
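
/*
 * Editorial example (assumption): how the output path typically walks a
 * segment list such as the one returned here (compare dev_gso_segment()).
 * skb, dev and xmit_one() are assumptions made for the sketch.
 *
 *	struct sk_buff *segs, *next;
 *
 *	segs = skb_segment(skb, dev->features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *
 *	do {
 *		next = segs->next;
 *		segs->next = NULL;
 *		xmit_one(segs);
 *	} while ((segs = next) != NULL);
 */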
2661 
2662 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2663 {
2664 	struct sk_buff *p = *head;
2665 	struct sk_buff *nskb;
2666 	struct skb_shared_info *skbinfo = skb_shinfo(skb);
2667 	struct skb_shared_info *pinfo = skb_shinfo(p);
2668 	unsigned int headroom;
2669 	unsigned int len = skb_gro_len(skb);
2670 	unsigned int offset = skb_gro_offset(skb);
2671 	unsigned int headlen = skb_headlen(skb);
2672 
2673 	if (p->len + len >= 65536)
2674 		return -E2BIG;
2675 
2676 	if (pinfo->frag_list)
2677 		goto merge;
2678 	else if (headlen <= offset) {
2679 		skb_frag_t *frag;
2680 		skb_frag_t *frag2;
2681 		int i = skbinfo->nr_frags;
2682 		int nr_frags = pinfo->nr_frags + i;
2683 
2684 		offset -= headlen;
2685 
2686 		if (nr_frags > MAX_SKB_FRAGS)
2687 			return -E2BIG;
2688 
2689 		pinfo->nr_frags = nr_frags;
2690 		skbinfo->nr_frags = 0;
2691 
2692 		frag = pinfo->frags + nr_frags;
2693 		frag2 = skbinfo->frags + i;
2694 		do {
2695 			*--frag = *--frag2;
2696 		} while (--i);
2697 
2698 		frag->page_offset += offset;
2699 		frag->size -= offset;
2700 
2701 		skb->truesize -= skb->data_len;
2702 		skb->len -= skb->data_len;
2703 		skb->data_len = 0;
2704 
2705 		NAPI_GRO_CB(skb)->free = 1;
2706 		goto done;
2707 	} else if (skb_gro_len(p) != pinfo->gso_size)
2708 		return -E2BIG;
2709 
2710 	headroom = skb_headroom(p);
2711 	nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
2712 	if (unlikely(!nskb))
2713 		return -ENOMEM;
2714 
2715 	__copy_skb_header(nskb, p);
2716 	nskb->mac_len = p->mac_len;
2717 
2718 	skb_reserve(nskb, headroom);
2719 	__skb_put(nskb, skb_gro_offset(p));
2720 
2721 	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
2722 	skb_set_network_header(nskb, skb_network_offset(p));
2723 	skb_set_transport_header(nskb, skb_transport_offset(p));
2724 
2725 	__skb_pull(p, skb_gro_offset(p));
2726 	memcpy(skb_mac_header(nskb), skb_mac_header(p),
2727 	       p->data - skb_mac_header(p));
2728 
2729 	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
2730 	skb_shinfo(nskb)->frag_list = p;
2731 	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
2732 	skb_header_release(p);
2733 	nskb->prev = p;
2734 
2735 	nskb->data_len += p->len;
2736 	nskb->truesize += p->len;
2737 	nskb->len += p->len;
2738 
2739 	*head = nskb;
2740 	nskb->next = p->next;
2741 	p->next = NULL;
2742 
2743 	p = nskb;
2744 
2745 merge:
2746 	if (offset > headlen) {
2747 		skbinfo->frags[0].page_offset += offset - headlen;
2748 		skbinfo->frags[0].size -= offset - headlen;
2749 		offset = headlen;
2750 	}
2751 
2752 	__skb_pull(skb, offset);
2753 
2754 	p->prev->next = skb;
2755 	p->prev = skb;
2756 	skb_header_release(skb);
2757 
2758 done:
2759 	NAPI_GRO_CB(p)->count++;
2760 	p->data_len += len;
2761 	p->truesize += len;
2762 	p->len += len;
2763 
2764 	NAPI_GRO_CB(skb)->same_flow = 1;
2765 	return 0;
2766 }
2767 EXPORT_SYMBOL_GPL(skb_gro_receive);
2768 
2769 void __init skb_init(void)
2770 {
2771 	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
2772 					      sizeof(struct sk_buff),
2773 					      0,
2774 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2775 					      NULL);
2776 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
2777 						(2*sizeof(struct sk_buff)) +
2778 						sizeof(atomic_t),
2779 						0,
2780 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2781 						NULL);
2782 }
2783 
2784 /**
2785  *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
2786  *	@skb: Socket buffer containing the buffers to be mapped
2787  *	@sg: The scatter-gather list to map into
2788  *	@offset: The offset into the buffer's contents to start mapping
2789  *	@len: Length of buffer space to be mapped
2790  *
2791  *	Fill the specified scatter-gather list with mappings/pointers into a
2792  *	region of the buffer space attached to a socket buffer.
2793  */
2794 static int
2795 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2796 {
2797 	int start = skb_headlen(skb);
2798 	int i, copy = start - offset;
2799 	struct sk_buff *frag_iter;
2800 	int elt = 0;
2801 
2802 	if (copy > 0) {
2803 		if (copy > len)
2804 			copy = len;
2805 		sg_set_buf(sg, skb->data + offset, copy);
2806 		elt++;
2807 		if ((len -= copy) == 0)
2808 			return elt;
2809 		offset += copy;
2810 	}
2811 
2812 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2813 		int end;
2814 
2815 		WARN_ON(start > offset + len);
2816 
2817 		end = start + skb_shinfo(skb)->frags[i].size;
2818 		if ((copy = end - offset) > 0) {
2819 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2820 
2821 			if (copy > len)
2822 				copy = len;
2823 			sg_set_page(&sg[elt], frag->page, copy,
2824 					frag->page_offset+offset-start);
2825 			elt++;
2826 			if (!(len -= copy))
2827 				return elt;
2828 			offset += copy;
2829 		}
2830 		start = end;
2831 	}
2832 
2833 	skb_walk_frags(skb, frag_iter) {
2834 		int end;
2835 
2836 		WARN_ON(start > offset + len);
2837 
2838 		end = start + frag_iter->len;
2839 		if ((copy = end - offset) > 0) {
2840 			if (copy > len)
2841 				copy = len;
2842 			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
2843 					      copy);
2844 			if ((len -= copy) == 0)
2845 				return elt;
2846 			offset += copy;
2847 		}
2848 		start = end;
2849 	}
2850 	BUG_ON(len);
2851 	return elt;
2852 }
2853 
2854 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2855 {
2856 	int nsg = __skb_to_sgvec(skb, sg, offset, len);
2857 
2858 	sg_mark_end(&sg[nsg - 1]);
2859 
2860 	return nsg;
2861 }
2862 EXPORT_SYMBOL_GPL(skb_to_sgvec);
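
/*
 * Editorial example (assumption): mapping a whole skb for a crypto
 * operation, IPsec-style, assuming the data fits in the linear head plus
 * page frags (no frag_list; otherwise size the table from skb_cow_data()).
 *
 *	struct scatterlist sg[MAX_SKB_FRAGS + 1];
 *	int nsg;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 *
 * On return, sg[0..nsg-1] describe the data and the last used entry has
 * been marked with sg_mark_end().
 */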
2863 
2864 /**
2865  *	skb_cow_data - Check that a socket buffer's data buffers are writable
2866  *	@skb: The socket buffer to check.
2867  *	@tailbits: Amount of trailing space to be added
2868  *	@trailer: Returned pointer to the skb where the @tailbits space begins
2869  *
2870  *	Make sure that the data buffers attached to a socket buffer are
2871  *	writable. If they are not, private copies are made of the data buffers
2872  *	and the socket buffer is set to use these instead.
2873  *
2874  *	If @tailbits is given, make sure that there is space to write @tailbits
2875  *	bytes of data beyond current end of socket buffer.  @trailer will be
2876  *	set to point to the skb in which this space begins.
2877  *
2878  *	The number of scatterlist elements required to completely map the
2879  *	COW'd and extended socket buffer will be returned.
2880  */
2881 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2882 {
2883 	int copyflag;
2884 	int elt;
2885 	struct sk_buff *skb1, **skb_p;
2886 
2887 	/* If skb is cloned or its head is paged, reallocate
2888 	 * head pulling out all the pages (pages are considered not writable
2889 	 * at the moment even if they are anonymous).
2890 	 */
2891 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
2892 	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
2893 		return -ENOMEM;
2894 
2895 	/* Easy case. Most packets will go this way. */
2896 	if (!skb_has_frags(skb)) {
2897 		/* A little trouble: not enough space for the trailer.
2898 		 * This should not happen when the stack is tuned to generate
2899 		 * good frames. On a miss we reallocate and reserve even more
2900 		 * space; 128 bytes is fair. */
2901 
2902 		if (skb_tailroom(skb) < tailbits &&
2903 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
2904 			return -ENOMEM;
2905 
2906 		/* Voila! */
2907 		*trailer = skb;
2908 		return 1;
2909 	}
2910 
2911 	/* Misery. We are in trouble; time to mince the fragments... */
2912 
2913 	elt = 1;
2914 	skb_p = &skb_shinfo(skb)->frag_list;
2915 	copyflag = 0;
2916 
2917 	while ((skb1 = *skb_p) != NULL) {
2918 		int ntail = 0;
2919 
2920 		/* The fragment was partially pulled by someone;
2921 		 * this can happen on input. Copy it and everything
2922 		 * after it. */
2923 
2924 		if (skb_shared(skb1))
2925 			copyflag = 1;
2926 
2927 		/* If the skb is the last, worry about trailer. */
2928 
2929 		if (skb1->next == NULL && tailbits) {
2930 			if (skb_shinfo(skb1)->nr_frags ||
2931 			    skb_has_frags(skb1) ||
2932 			    skb_tailroom(skb1) < tailbits)
2933 				ntail = tailbits + 128;
2934 		}
2935 
2936 		if (copyflag ||
2937 		    skb_cloned(skb1) ||
2938 		    ntail ||
2939 		    skb_shinfo(skb1)->nr_frags ||
2940 		    skb_has_frags(skb1)) {
2941 			struct sk_buff *skb2;
2942 
2943 			/* No way around it: we have to copy. */
2944 			if (ntail == 0)
2945 				skb2 = skb_copy(skb1, GFP_ATOMIC);
2946 			else
2947 				skb2 = skb_copy_expand(skb1,
2948 						       skb_headroom(skb1),
2949 						       ntail,
2950 						       GFP_ATOMIC);
2951 			if (unlikely(skb2 == NULL))
2952 				return -ENOMEM;
2953 
2954 			if (skb1->sk)
2955 				skb_set_owner_w(skb2, skb1->sk);
2956 
2957 			/* Still alive? OK, link the new skb in
2958 			 * and drop the old one. */
2959 
2960 			skb2->next = skb1->next;
2961 			*skb_p = skb2;
2962 			kfree_skb(skb1);
2963 			skb1 = skb2;
2964 		}
2965 		elt++;
2966 		*trailer = skb1;
2967 		skb_p = &skb1->next;
2968 	}
2969 
2970 	return elt;
2971 }
2972 EXPORT_SYMBOL_GPL(skb_cow_data);
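
/*
 * Editorial example (assumption): ESP-style use, making the buffer writable
 * and reserving room for trailer bytes before mapping it; padlen is a
 * made-up amount of trailer space.
 *
 *	struct sk_buff *trailer;
 *	int nsg = skb_cow_data(skb, padlen, &trailer);
 *
 *	if (nsg < 0)
 *		return nsg;
 *
 * On success the padlen bytes of tail space start at
 * skb_tail_pointer(trailer), and nsg is the number of scatterlist entries
 * needed to map the whole buffer with skb_to_sgvec().
 */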
2973 
2974 void skb_tstamp_tx(struct sk_buff *orig_skb,
2975 		struct skb_shared_hwtstamps *hwtstamps)
2976 {
2977 	struct sock *sk = orig_skb->sk;
2978 	struct sock_exterr_skb *serr;
2979 	struct sk_buff *skb;
2980 	int err;
2981 
2982 	if (!sk)
2983 		return;
2984 
2985 	skb = skb_clone(orig_skb, GFP_ATOMIC);
2986 	if (!skb)
2987 		return;
2988 
2989 	if (hwtstamps) {
2990 		*skb_hwtstamps(skb) =
2991 			*hwtstamps;
2992 	} else {
2993 		/*
2994 		 * no hardware time stamps available,
2995 		 * so keep the skb_shared_tx and only
2996 		 * store software time stamp
2997 		 */
2998 		skb->tstamp = ktime_get_real();
2999 	}
3000 
3001 	serr = SKB_EXT_ERR(skb);
3002 	memset(serr, 0, sizeof(*serr));
3003 	serr->ee.ee_errno = ENOMSG;
3004 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3005 	err = sock_queue_err_skb(sk, skb);
3006 	if (err)
3007 		kfree_skb(skb);
3008 }
3009 EXPORT_SYMBOL_GPL(skb_tstamp_tx);
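
/*
 * Editorial example (assumption): a driver's TX-completion path reporting a
 * hardware timestamp read from device registers; ns is a made-up nanosecond
 * value.
 *
 *	struct skb_shared_hwtstamps shhwtstamps;
 *
 *	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 *	shhwtstamps.hwtstamp = ns_to_ktime(ns);
 *	skb_tstamp_tx(skb, &shhwtstamps);
 */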
3010 
3011 
3012 /**
3013  * skb_partial_csum_set - set up and verify partial csum values for packet
3014  * @skb: the skb to set
3015  * @start: the number of bytes after skb->data to start checksumming.
3016  * @off: the offset from start to place the checksum.
3017  *
3018  * For untrusted partially-checksummed packets, we need to make sure the values
3019  * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3020  *
3021  * This function checks and sets those values and skb->ip_summed: if this
3022  * returns false you should drop the packet.
3023  */
3024 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3025 {
3026 	if (unlikely(start > skb_headlen(skb)) ||
3027 	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
3028 		if (net_ratelimit())
3029 			printk(KERN_WARNING
3030 			       "bad partial csum: csum=%u/%u len=%u\n",
3031 			       start, off, skb_headlen(skb));
3032 		return false;
3033 	}
3034 	skb->ip_summed = CHECKSUM_PARTIAL;
3035 	skb->csum_start = skb_headroom(skb) + start;
3036 	skb->csum_offset = off;
3037 	return true;
3038 }
3039 EXPORT_SYMBOL_GPL(skb_partial_csum_set);
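
/*
 * Editorial example (assumption): validating checksum metadata received
 * from an untrusted source, e.g. a virtio_net_hdr, before handing the skb
 * to the stack; hdr is a hypothetical header pointer.
 *
 *	if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 */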
3040 
3041 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3042 {
3043 	if (net_ratelimit())
3044 		pr_warning("%s: received packets cannot be forwarded"
3045 			   " while LRO is enabled\n", skb->dev->name);
3046 }
3047 EXPORT_SYMBOL(__skb_warn_lro_forwarding);
3048