/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <iiitac@pyr.swan.ac.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Version:	$Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct sk_buff *skb = (struct sk_buff *) buf->private;

	kfree_skb(skb);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct sk_buff *skb = (struct sk_buff *) buf->private;

	skb_get(skb);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	- 	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	- 	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

void skb_truesize_bug(struct sk_buff *skb)
{
	printk(KERN_ERR "SKB BUG: Invalid truesize (%u) "
	       "len=%u, sizeof(sk_buff)=%Zd\n",
	       skb->truesize, skb->len, sizeof(struct sk_buff));
}
EXPORT_SYMBOL(skb_truesize_bug);

/* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of @size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	size = SKB_DATA_ALIGN(size);
	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
			gfp_mask, node);
	if (!data)
		goto nodata;

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	atomic_set(&shinfo->dataref, 1);
	shinfo->nr_frags  = 0;
	shinfo->gso_size = 0;
	shinfo->gso_segs = 0;
	shinfo->gso_type = 0;
	shinfo->ip6_frag_id = 0;
	shinfo->frag_list = NULL;

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
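
/*
 * Usage sketch (illustrative only, not part of the original file): most
 * callers reach this through the alloc_skb() wrapper rather than calling
 * __alloc_skb() directly.  With "header_room" standing in for whatever
 * headroom the caller needs:
 *
 *	struct sk_buff *skb = alloc_skb(header_room + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return NULL;
 *	skb_reserve(skb, header_room);
 */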

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask)
{
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
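
/*
 * Receive-path sketch (illustrative only): a driver would typically size
 * the buffer for the received frame plus alignment slack, then hand it up
 * the stack.  "pkt_len" and the fill step are hypothetical:
 *
 *	skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *	if (skb) {
 *		skb_reserve(skb, NET_IP_ALIGN);
 *		memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 */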

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	/*
	 * There is more code here than it seems:
	 * __dev_alloc_skb is an inline
	 */
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free the sk_buff's memory without cleaning its state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: Is this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	__kfree_skb(skb);
}
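
/*
 * Reference counting in brief (illustrative only): skb_get() takes an
 * extra reference and each kfree_skb() drops one; the buffer is only
 * truly freed once skb->users reaches zero:
 *
 *	skb_get(skb);		users: 2
 *	kfree_skb(skb);		users: 1, buffer still alive
 *	kfree_skb(skb);		users: 0, buffer is freed
 */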

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp		= old->tstamp;
	new->dev		= old->dev;
	new->transport_header	= old->transport_header;
	new->network_header	= old->network_header;
	new->mac_header		= old->mac_header;
	new->dst		= dst_clone(old->dst);
#ifdef CONFIG_INET
	new->sp			= secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum_start		= old->csum_start;
	new->csum_offset	= old->csum_offset;
	new->local_df		= old->local_df;
	new->pkt_type		= old->pkt_type;
	new->ip_summed		= old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority		= old->priority;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	new->ipvs_property	= old->ipvs_property;
#endif
	new->protocol		= old->protocol;
	new->mark		= old->mark;
	__nf_copy(new, old);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	new->nf_trace		= old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index		= old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd		= old->tc_verd;
#endif
#endif
	skb_copy_secmark(new, old);
}

static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(iif);
	C(tail);
	C(end);
	C(head);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
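
/*
 * Clone vs. copy, in brief (illustrative only): skb_clone() produces a
 * second &sk_buff that shares the same packet data (cheap, but the data
 * must be treated as read-only), whereas skb_copy() below duplicates the
 * data as well:
 *
 *	struct sk_buff *ro = skb_clone(skb, GFP_ATOMIC);   shared data
 *	struct sk_buff *rw = skb_copy(skb, GFP_ATOMIC);    private data
 */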

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header   += offset;
	new->mac_header	      += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff into a
 *	linear one, so the &sk_buff becomes completely private and the
 *	caller may modify all of the returned buffer's data. This means the
 *	function is not recommended when only the header is going to be
 *	modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb->data - skb->head;
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n = alloc_skb(skb->end + skb->data_len, gfp_mask);
#else
	n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
#endif
	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}


/**
 *	pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in its header. Fragmented data remains shared. This is used when
 *	the caller wishes to modify only the header of an &sk_buff and
 *	needs a private copy of the header to alter. Returns %NULL on
 *	failure or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n = alloc_skb(skb->end, gfp_mask);
#else
	n = alloc_skb(skb->end - skb->head, gfp_mask);
#endif
	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, skb->data - skb->head);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len  = skb->data_len;
	n->len	     = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_shinfo(skb)->frag_list) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates an identical copy, if @nhead and @ntail are
 *	zero) the header of @skb. The &sk_buff itself is not changed and
 *	MUST have a reference count of 1. Returns zero on success or a
 *	negative error code if expansion failed; in the latter case the
 *	&sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	int size = nhead + skb->end + ntail;
#else
	int size = nhead + (skb->end - skb->head) + ntail;
#endif
	long off;

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void. */
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	memcpy(data + nhead, skb->head, skb->tail);
#else
	memcpy(data + nhead, skb->head, skb->tail - skb->head);
#endif
	memcpy(data + size, skb_end_pointer(skb),
	       sizeof(struct skb_shared_info));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		get_page(skb_shinfo(skb)->frags[i].page);

	if (skb_shinfo(skb)->frag_list)
		skb_clone_fraglist(skb);

	skb_release_data(skb);

	off = (data + nhead) - skb->head;

	skb->head     = data;
	skb->data    += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end      = size;
	off           = nhead;
#else
	skb->end      = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail	      += off;
	skb->transport_header += off;
	skb->network_header   += off;
	skb->mac_header	      += off;
	skb->csum_start       += nhead;
	skb->cloned   = 0;
	skb->hdr_len  = 0;
	skb->nohdr    = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nodata:
	return -ENOMEM;
}
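
/*
 * Usage sketch (illustrative only): a caller that owns the buffer and
 * needs more headroom can expand in place, remembering that every pointer
 * into the old header may move.  "needed" is a hypothetical value:
 *
 *	if (skb_headroom(skb) < needed &&
 *	    pskb_expand_head(skb, needed - skb_headroom(skb), 0, GFP_ATOMIC))
 *		goto drop;
 */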

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}


/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off                  = newheadroom - oldheadroom;
	n->csum_start       += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header   += off;
	n->mac_header	    += off;
#endif

	return n;
}

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, tailroom is always zero. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data+skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);
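
/*
 * The helpers above in one picture (illustrative only): building a packet
 * usually reserves headroom first, fills the payload with skb_put(), then
 * prepends headers with skb_push(); the receive side consumes headers with
 * skb_pull().  "hdr_len", "data_len" and "data" are hypothetical:
 *
 *	skb_reserve(skb, hdr_len);			  reserve headroom
 *	memcpy(skb_put(skb, data_len), data, data_len);	  tail grows
 *	skb_push(skb, hdr_len);				  header before data
 *	skb_pull(skb, hdr_len);				  consume it on rx
 */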

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;

		if (end < len) {
			offset = end;
			continue;
		}

		skb_shinfo(skb)->frags[i++].size = len - offset;

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			put_page(skb_shinfo(skb)->frags[i].page);

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			kfree_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len       = len;
	} else {
		skb->len       = len;
		skb->data_len  = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff;
 *	it expands the header, moving its tail forward and copying the
 *	necessary data from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and the &sk_buff does not change) if the pull failed,
 *	or the value of the new tail of the skb on success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_shinfo(skb)->frag_list)
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					if (clone)
						kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail     += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}

/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to     += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset +
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_bits(list, offset - start,
						  to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to     += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
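
/*
 * Usage sketch (illustrative only): skb_copy_bits() is the safe way to
 * read bytes that may live in fragments rather than in the linear area,
 * e.g. pulling a fixed-size header out at a known offset:
 *
 *	struct udphdr uh;
 *	if (skb_copy_bits(skb, offset, &uh, sizeof(uh)) < 0)
 *		goto bad_packet;
 */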

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct sk_buff *skb = (struct sk_buff *) spd->partial[i].private;

	kfree_skb(skb);
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
				unsigned int len, unsigned int offset,
				struct sk_buff *skb)
{
	if (unlikely(spd->nr_pages == PIPE_BUFFERS))
		return 1;

	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->partial[spd->nr_pages].private = (unsigned long) skb_get(skb);
	spd->nr_pages++;
	return 0;
}

/*
 * Map linear and fragment data from the skb to spd. Returns 0 if some
 * pages were mapped, 1 otherwise.
 */
static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
			     unsigned int *total_len,
			     struct splice_pipe_desc *spd)
{
	unsigned int nr_pages = spd->nr_pages;
	unsigned int poff, plen, len, toff, tlen;
	int headlen, seg;

	toff = *offset;
	tlen = *total_len;
	if (!tlen)
		goto err;

	/*
	 * if the offset is greater than the linear part, go directly to
	 * the fragments.
	 */
	headlen = skb_headlen(skb);
	if (toff >= headlen) {
		toff -= headlen;
		goto map_frag;
	}

	/*
	 * first map the linear region into the pages/partial map, skipping
	 * any potential initial offset.
	 */
	len = 0;
	while (len < headlen) {
		void *p = skb->data + len;

		poff = (unsigned long) p & (PAGE_SIZE - 1);
		plen = min_t(unsigned int, headlen - len, PAGE_SIZE - poff);
		len += plen;

		if (toff) {
			if (plen <= toff) {
				toff -= plen;
				continue;
			}
			plen -= toff;
			poff += toff;
			toff = 0;
		}

		plen = min(plen, tlen);
		if (!plen)
			break;

		/*
		 * just jump directly to update and return, no point
		 * in going over fragments when the output is full.
		 */
		if (spd_fill_page(spd, virt_to_page(p), plen, poff, skb))
			goto done;

		tlen -= plen;
	}

	/*
	 * then map the fragments
	 */
map_frag:
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		plen = f->size;
		poff = f->page_offset;

		if (toff) {
			if (plen <= toff) {
				toff -= plen;
				continue;
			}
			plen -= toff;
			poff += toff;
			toff = 0;
		}

		plen = min(plen, tlen);
		if (!plen)
			break;

		if (spd_fill_page(spd, f->page, plen, poff, skb))
			break;

		tlen -= plen;
	}

done:
	if (spd->nr_pages - nr_pages) {
		*offset = 0;
		*total_len = tlen;
		return 0;
	}
err:
	return 1;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
 */
int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[PIPE_BUFFERS];
	struct page *pages[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &sock_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	struct sk_buff *skb;

	/*
	 * I'd love to avoid the clone here, but tcp_read_sock()
	 * ignores reference counts and unconditionally kills the sk_buff
	 * on return from the actor.
	 */
	skb = skb_clone(__skb, GFP_KERNEL);
	if (unlikely(!skb))
		return -ENOMEM;

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, &offset, &tlen, &spd))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list && tlen; list = list->next) {
			if (__skb_splice_bits(list, &offset, &tlen, &spd))
				break;
		}
	}

done:
	/*
	 * drop our reference to the clone, the pipe consumption will
	 * drop the rest.
	 */
	kfree_skb(skb);

	if (spd.nr_pages) {
		int ret;
		struct sock *sk = __skb->sk;

		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(sk);
		return ret;
	}

	return 0;
}

/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb.  This function handles all the messy bits of
 *	traversing fragment lists and such.
 */

int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_store_bits(list, offset - start,
						   from, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				from += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
			  int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos	= copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos    += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				__wsum csum2;
				if (copy > len)
					copy = len;
				csum2 = skb_checksum(list, offset - start,
						     copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				pos    += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);

	return csum;
}

/* Both of the above in one bottle. */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
				    u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to     += copy;
		pos	= copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to     += copy;
			pos    += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			__wsum csum2;
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				csum2 = skb_copy_and_csum_bits(list,
							       offset - start,
							       to, copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				to     += copy;
				pos    += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return csum;
}

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb->csum_start - skb_headroom(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
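
/*
 * Producer/consumer sketch (illustrative only): the locked queue helpers
 * pair naturally, e.g. an interrupt handler feeding a worker, with the
 * queue initialised once via skb_queue_head_init().  "process" is a
 * hypothetical handler:
 *
 *	producer:	skb_queue_tail(&queue, skb);
 *	consumer:	while ((skb = skb_dequeue(&queue)) != NULL)
 *				process(skb);
 */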

/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}


/**
 *	skb_insert	-	insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet before a given packet in a list. The list locks are
 *	taken and this function is atomic with respect to other list locked
 *	calls.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff* skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags  = 0;
	skb1->data_len		   = skb->data_len;
	skb1->len		   += skb1->data_len;
	skb->data_len		   = 0;
	skb->len		   = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff* skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len		  = skb1->data_len = skb->len - len;
	skb->len		  = len;
	skb->data_len		  = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. E.g.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split accurately. This is what we
				 *    do here.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size	= len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}
1953 
1954 /**
1955  * skb_split - Split fragmented skb to two parts at length len.
1956  * @skb: the buffer to split
1957  * @skb1: the buffer to receive the second part
1958  * @len: new length for skb
1959  */
1960 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
1961 {
1962 	int pos = skb_headlen(skb);
1963 
1964 	if (len < pos)	/* Split line is inside header. */
1965 		skb_split_inside_header(skb, skb1, len, pos);
1966 	else		/* Second chunk has no header, nothing to copy. */
1967 		skb_split_no_header(skb, skb1, len, pos);
1968 }
1969 
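/*
 * Illustrative sketch of a caller, loosely modeled on the TCP output
 * path (example_carve() is hypothetical): carve the first "cut" bytes
 * off an over-long buffer.  The second buffer needs linear room for any
 * header-area bytes that move over; protocol header fixups are left to
 * the caller.
 */
static struct sk_buff *example_carve(struct sk_buff *skb, u32 cut)
{
	unsigned int nsize = skb_headlen(skb) > cut ?
			     skb_headlen(skb) - cut : 0;
	struct sk_buff *rest = alloc_skb(nsize, GFP_ATOMIC);

	if (!rest)
		return NULL;
	/* skb keeps bytes [0, cut); "rest" receives the remainder */
	skb_split(skb, rest, cut);
	return rest;
}
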
1970 /**
1971  * skb_prepare_seq_read - Prepare a sequential read of skb data
1972  * @skb: the buffer to read
1973  * @from: lower offset of data to be read
1974  * @to: upper offset of data to be read
1975  * @st: state variable
1976  *
1977  * Initializes the specified state variable. Must be called before
1978  * invoking skb_seq_read() for the first time.
1979  */
1980 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1981 			  unsigned int to, struct skb_seq_state *st)
1982 {
1983 	st->lower_offset = from;
1984 	st->upper_offset = to;
1985 	st->root_skb = st->cur_skb = skb;
1986 	st->frag_idx = st->stepped_offset = 0;
1987 	st->frag_data = NULL;
1988 }
1989 
1990 /**
1991  * skb_seq_read - Sequentially read skb data
1992  * @consumed: number of bytes consumed by the caller so far
1993  * @data: destination pointer for data to be returned
1994  * @st: state variable
1995  *
1996  * Reads a block of skb data at @consumed relative to the
1997  * lower offset specified to skb_prepare_seq_read(). Assigns
1998  * the head of the data block to @data and returns the length
1999  * of the block or 0 if the end of the skb data or the upper
2000  * offset has been reached.
2001  *
2002  * The caller is not required to consume all of the data
2003  * returned, i.e. @consumed is typically set to the number
2004  * of bytes already consumed and the next call to
2005  * skb_seq_read() will return the remaining part of the block.
2006  *
2007  * Note 1: The size of each block of data returned can be arbitrary;
2008  *       this is the cost of zero-copy sequential reads of
2009  *       potentially non-linear data.
2010  *
2011  * Note 2: Fragment lists within fragments are not implemented
2012  *       at the moment; state->root_skb could be replaced with
2013  *       a stack for this purpose.
2014  */
2015 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2016 			  struct skb_seq_state *st)
2017 {
2018 	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2019 	skb_frag_t *frag;
2020 
2021 	if (unlikely(abs_offset >= st->upper_offset))
2022 		return 0;
2023 
2024 next_skb:
2025 	block_limit = skb_headlen(st->cur_skb);
2026 
2027 	if (abs_offset < block_limit) {
2028 		*data = st->cur_skb->data + abs_offset;
2029 		return block_limit - abs_offset;
2030 	}
2031 
2032 	if (st->frag_idx == 0 && !st->frag_data)
2033 		st->stepped_offset += skb_headlen(st->cur_skb);
2034 
2035 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2036 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2037 		block_limit = frag->size + st->stepped_offset;
2038 
2039 		if (abs_offset < block_limit) {
2040 			if (!st->frag_data)
2041 				st->frag_data = kmap_skb_frag(frag);
2042 
2043 			*data = (u8 *) st->frag_data + frag->page_offset +
2044 				(abs_offset - st->stepped_offset);
2045 
2046 			return block_limit - abs_offset;
2047 		}
2048 
2049 		if (st->frag_data) {
2050 			kunmap_skb_frag(st->frag_data);
2051 			st->frag_data = NULL;
2052 		}
2053 
2054 		st->frag_idx++;
2055 		st->stepped_offset += frag->size;
2056 	}
2057 
2058 	if (st->frag_data) {
2059 		kunmap_skb_frag(st->frag_data);
2060 		st->frag_data = NULL;
2061 	}
2062 
2063 	if (st->cur_skb->next) {
2064 		st->cur_skb = st->cur_skb->next;
2065 		st->frag_idx = 0;
2066 		goto next_skb;
2067 	} else if (st->root_skb == st->cur_skb &&
2068 		   skb_shinfo(st->root_skb)->frag_list) {
2069 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2070 		goto next_skb;
2071 	}
2072 
2073 	return 0;
2074 }
2075 
2076 /**
2077  * skb_abort_seq_read - Abort a sequential read of skb data
2078  * @st: state variable
2079  *
2080  * Must be called if the read is aborted before skb_seq_read()
2081  * has returned 0, so that any still-mapped fragment is unmapped.
2082  */
2083 void skb_abort_seq_read(struct skb_seq_state *st)
2084 {
2085 	if (st->frag_data)
2086 		kunmap_skb_frag(st->frag_data);
2087 }
2088 
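/*
 * Illustrative usage of the sequential reader above (example_walk() and
 * its process callback are hypothetical): visit a possibly non-linear
 * buffer block by block without copying it.
 */
static void example_walk(struct sk_buff *skb,
			 void (*process)(const u8 *data, unsigned int len))
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0, len;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		process(data, len);
		consumed += len;
	}
	/* skb_seq_read() returned 0 here; call skb_abort_seq_read()
	 * instead if you stop before that happens. */
}
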
2089 #define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
2090 
2091 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2092 					  struct ts_config *conf,
2093 					  struct ts_state *state)
2094 {
2095 	return skb_seq_read(offset, text, TS_SKB_CB(state));
2096 }
2097 
2098 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2099 {
2100 	skb_abort_seq_read(TS_SKB_CB(state));
2101 }
2102 
2103 /**
2104  * skb_find_text - Find a text pattern in skb data
2105  * @skb: the buffer to look in
2106  * @from: search offset
2107  * @to: search limit
2108  * @config: textsearch configuration
2109  * @state: uninitialized textsearch state variable
2110  *
2111  * Finds a pattern in the skb data according to the specified
2112  * textsearch configuration. Use textsearch_next() to retrieve
2113  * subsequent occurrences of the pattern. Returns the offset
2114  * to the first occurrence or UINT_MAX if no match was found.
2115  */
2116 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2117 			   unsigned int to, struct ts_config *config,
2118 			   struct ts_state *state)
2119 {
2120 	unsigned int ret;
2121 
2122 	config->get_next_block = skb_ts_get_next_block;
2123 	config->finish = skb_ts_finish;
2124 
2125 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2126 
2127 	ret = textsearch_find(config, state);
2128 	return (ret <= to - from ? ret : UINT_MAX);
2129 }
2130 
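/*
 * Illustrative sketch (example_find() is hypothetical; assumes
 * <linux/textsearch.h> and an available algorithm such as "kmp"):
 * search the whole buffer for a fixed byte pattern.
 */
static unsigned int example_find(struct sk_buff *skb,
				 const void *pattern, unsigned int patlen)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", pattern, patlen,
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;
	pos = skb_find_text(skb, 0, skb->len, conf, &state);
	textsearch_destroy(conf);
	return pos;
}
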
2131 /**
2132  * skb_append_datato_frags - append user data to an skb
2133  * @sk: sock structure
2134  * @skb: skb to which the user data is appended
2135  * @getfrag: callback function used to fetch the user data
2136  * @from: pointer to the user message iov
2137  * @length: length of the iov message
2138  *
2139  * Description: This procedure appends the user data to the fragment part
2140  * of the skb. If any page allocation fails, it returns -ENOMEM.
2141  */
2142 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2143 			int (*getfrag)(void *from, char *to, int offset,
2144 					int len, int odd, struct sk_buff *skb),
2145 			void *from, int length)
2146 {
2147 	int frg_cnt = 0;
2148 	skb_frag_t *frag = NULL;
2149 	struct page *page = NULL;
2150 	int copy, left;
2151 	int offset = 0;
2152 	int ret;
2153 
2154 	do {
2155 		/* Return error if we don't have space for new frag */
2156 		frg_cnt = skb_shinfo(skb)->nr_frags;
2157 		if (frg_cnt >= MAX_SKB_FRAGS)
2158 			return -EFAULT;
2159 
2160 		/* allocate a new page for next frag */
2161 		page = alloc_pages(sk->sk_allocation, 0);
2162 
2163 		/* If alloc_pages() fails, just return failure; the caller
2164 		 * will free the previously allocated pages via kfree_skb().
2165 		 */
2166 		if (page == NULL)
2167 			return -ENOMEM;
2168 
2169 		/* initialize the next frag */
2170 		sk->sk_sndmsg_page = page;
2171 		sk->sk_sndmsg_off = 0;
2172 		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2173 		skb->truesize += PAGE_SIZE;
2174 		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
2175 
2176 		/* get the new initialized frag */
2177 		frg_cnt = skb_shinfo(skb)->nr_frags;
2178 		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
2179 
2180 		/* copy the user data to page */
2181 		left = PAGE_SIZE - frag->page_offset;
2182 		copy = (length > left)? left : length;
2183 
2184 		ret = getfrag(from, (page_address(frag->page) +
2185 			    frag->page_offset + frag->size),
2186 			    offset, copy, 0, skb);
2187 		if (ret < 0)
2188 			return -EFAULT;
2189 
2190 		/* copy was successful so update the size parameters */
2191 		sk->sk_sndmsg_off += copy;
2192 		frag->size += copy;
2193 		skb->len += copy;
2194 		skb->data_len += copy;
2195 		offset += copy;
2196 		length -= copy;
2197 
2198 	} while (length > 0);
2199 
2200 	return 0;
2201 }
2202 
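/*
 * Illustrative getfrag callback (example_getfrag() is hypothetical):
 * here "from" is assumed to be a plain kernel buffer, so each call just
 * copies the next slice.  Real callers, e.g. the UDP path, pass an
 * iovec-aware helper such as ip_generic_getfrag() instead.
 */
static int example_getfrag(void *from, char *to, int offset, int len,
			   int odd, struct sk_buff *skb)
{
	memcpy(to, (char *)from + offset, len);
	return 0;
}
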
2203 /**
2204  *	skb_pull_rcsum - pull skb and update receive checksum
2205  *	@skb: buffer to update
2206  *	@len: length of data pulled
2207  *
2208  *	This function performs an skb_pull on the packet and updates
2209  *	the CHECKSUM_COMPLETE checksum.  It should be used on
2210  *	the CHECKSUM_COMPLETE checksum.  It should be used in
2211  *	receive-path processing instead of skb_pull unless you know
2212  *	or you are setting ip_summed to CHECKSUM_NONE.
2213  */
2214 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2215 {
2216 	BUG_ON(len > skb->len);
2217 	skb->len -= len;
2218 	BUG_ON(skb->len < skb->data_len);
2219 	skb_postpull_rcsum(skb, skb->data, len);
2220 	return skb->data += len;
2221 }
2222 
2223 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
2224 
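/*
 * Illustrative sketch (EXAMPLE_HLEN and example_decap() are
 * hypothetical): strip a fixed-size encapsulation header on the receive
 * path without invalidating a CHECKSUM_COMPLETE value.
 */
#define EXAMPLE_HLEN 4
static int example_decap(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, EXAMPLE_HLEN))
		return -EINVAL;
	skb_pull_rcsum(skb, EXAMPLE_HLEN);
	return 0;
}
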
2225 /**
2226  *	skb_segment - Perform protocol segmentation on skb.
2227  *	@skb: buffer to segment
2228  *	@features: features for the output path (see dev->features)
2229  *
2230  *	This function performs segmentation on the given skb.  It returns
2231  *	a pointer to the first in a list of new skbs for the segments.
2232  *	In case of error it returns ERR_PTR(err).
2233  */
2234 struct sk_buff *skb_segment(struct sk_buff *skb, int features)
2235 {
2236 	struct sk_buff *segs = NULL;
2237 	struct sk_buff *tail = NULL;
2238 	unsigned int mss = skb_shinfo(skb)->gso_size;
2239 	unsigned int doffset = skb->data - skb_mac_header(skb);
2240 	unsigned int offset = doffset;
2241 	unsigned int headroom;
2242 	unsigned int len;
2243 	int sg = features & NETIF_F_SG;
2244 	int nfrags = skb_shinfo(skb)->nr_frags;
2245 	int err = -ENOMEM;
2246 	int i = 0;
2247 	int pos;
2248 
2249 	__skb_push(skb, doffset);
2250 	headroom = skb_headroom(skb);
2251 	pos = skb_headlen(skb);
2252 
2253 	do {
2254 		struct sk_buff *nskb;
2255 		skb_frag_t *frag;
2256 		int hsize;
2257 		int k;
2258 		int size;
2259 
2260 		len = skb->len - offset;
2261 		if (len > mss)
2262 			len = mss;
2263 
2264 		hsize = skb_headlen(skb) - offset;
2265 		if (hsize < 0)
2266 			hsize = 0;
2267 		if (hsize > len || !sg)
2268 			hsize = len;
2269 
2270 		nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
2271 		if (unlikely(!nskb))
2272 			goto err;
2273 
2274 		if (segs)
2275 			tail->next = nskb;
2276 		else
2277 			segs = nskb;
2278 		tail = nskb;
2279 
2280 		nskb->dev = skb->dev;
2281 		skb_copy_queue_mapping(nskb, skb);
2282 		nskb->priority = skb->priority;
2283 		nskb->protocol = skb->protocol;
2284 		nskb->dst = dst_clone(skb->dst);
2285 		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
2286 		nskb->pkt_type = skb->pkt_type;
2287 		nskb->mac_len = skb->mac_len;
2288 
2289 		skb_reserve(nskb, headroom);
2290 		skb_reset_mac_header(nskb);
2291 		skb_set_network_header(nskb, skb->mac_len);
2292 		nskb->transport_header = (nskb->network_header +
2293 					  skb_network_header_len(skb));
2294 		skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
2295 					  doffset);
2296 		if (!sg) {
2297 			nskb->csum = skb_copy_and_csum_bits(skb, offset,
2298 							    skb_put(nskb, len),
2299 							    len, 0);
2300 			continue;
2301 		}
2302 
2303 		frag = skb_shinfo(nskb)->frags;
2304 		k = 0;
2305 
2306 		nskb->ip_summed = CHECKSUM_PARTIAL;
2307 		nskb->csum = skb->csum;
2308 		skb_copy_from_linear_data_offset(skb, offset,
2309 						 skb_put(nskb, hsize), hsize);
2310 
2311 		while (pos < offset + len) {
2312 			BUG_ON(i >= nfrags);
2313 
2314 			*frag = skb_shinfo(skb)->frags[i];
2315 			get_page(frag->page);
2316 			size = frag->size;
2317 
2318 			if (pos < offset) {
2319 				frag->page_offset += offset - pos;
2320 				frag->size -= offset - pos;
2321 			}
2322 
2323 			k++;
2324 
2325 			if (pos + size <= offset + len) {
2326 				i++;
2327 				pos += size;
2328 			} else {
2329 				frag->size -= pos + size - (offset + len);
2330 				break;
2331 			}
2332 
2333 			frag++;
2334 		}
2335 
2336 		skb_shinfo(nskb)->nr_frags = k;
2337 		nskb->data_len = len - hsize;
2338 		nskb->len += nskb->data_len;
2339 		nskb->truesize += nskb->data_len;
2340 	} while ((offset += len) < skb->len);
2341 
2342 	return segs;
2343 
2344 err:
2345 	while ((skb = segs)) {
2346 		segs = skb->next;
2347 		kfree_skb(skb);
2348 	}
2349 	return ERR_PTR(err);
2350 }
2351 
2352 EXPORT_SYMBOL_GPL(skb_segment);
2353 
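/*
 * Illustrative sketch (example_segment_xmit() and its xmit callback are
 * hypothetical): split a GSO buffer and transmit each segment.  Protocol
 * code normally reaches skb_segment() via skb_gso_segment(), which also
 * fixes up the protocol headers of every segment.
 */
static int example_segment_xmit(struct sk_buff *skb, int features,
				int (*xmit)(struct sk_buff *))
{
	struct sk_buff *segs = skb_segment(skb, features);

	if (IS_ERR(segs))
		return PTR_ERR(segs);
	while (segs) {
		struct sk_buff *nskb = segs;

		segs = segs->next;
		nskb->next = NULL;
		xmit(nskb);
	}
	kfree_skb(skb);		/* the segments replace the original */
	return 0;
}
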
2354 void __init skb_init(void)
2355 {
2356 	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
2357 					      sizeof(struct sk_buff),
2358 					      0,
2359 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2360 					      NULL);
2361 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
2362 						(2*sizeof(struct sk_buff)) +
2363 						sizeof(atomic_t),
2364 						0,
2365 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2366 						NULL);
2367 }
2368 
2369 /**
2370  *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
2371  *	@skb: Socket buffer containing the buffers to be mapped
2372  *	@sg: The scatter-gather list to map into
2373  *	@offset: The offset into the buffer's contents to start mapping
2374  *	@len: Length of buffer space to be mapped
2375  *
2376  *	Fill the specified scatter-gather list with mappings/pointers into a
2377  *	region of the buffer space attached to a socket buffer.
2378  */
2379 static int
2380 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2381 {
2382 	int start = skb_headlen(skb);
2383 	int i, copy = start - offset;
2384 	int elt = 0;
2385 
2386 	if (copy > 0) {
2387 		if (copy > len)
2388 			copy = len;
2389 		sg_set_buf(sg, skb->data + offset, copy);
2390 		elt++;
2391 		if ((len -= copy) == 0)
2392 			return elt;
2393 		offset += copy;
2394 	}
2395 
2396 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2397 		int end;
2398 
2399 		BUG_TRAP(start <= offset + len);
2400 
2401 		end = start + skb_shinfo(skb)->frags[i].size;
2402 		if ((copy = end - offset) > 0) {
2403 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2404 
2405 			if (copy > len)
2406 				copy = len;
2407 			sg_set_page(&sg[elt], frag->page, copy,
2408 					frag->page_offset+offset-start);
2409 			elt++;
2410 			if (!(len -= copy))
2411 				return elt;
2412 			offset += copy;
2413 		}
2414 		start = end;
2415 	}
2416 
2417 	if (skb_shinfo(skb)->frag_list) {
2418 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
2419 
2420 		for (; list; list = list->next) {
2421 			int end;
2422 
2423 			BUG_TRAP(start <= offset + len);
2424 
2425 			end = start + list->len;
2426 			if ((copy = end - offset) > 0) {
2427 				if (copy > len)
2428 					copy = len;
2429 				elt += __skb_to_sgvec(list, sg+elt, offset - start,
2430 						      copy);
2431 				if ((len -= copy) == 0)
2432 					return elt;
2433 				offset += copy;
2434 			}
2435 			start = end;
2436 		}
2437 	}
2438 	BUG_ON(len);
2439 	return elt;
2440 }
2441 
2442 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2443 {
2444 	int nsg = __skb_to_sgvec(skb, sg, offset, len);
2445 
2446 	sg_mark_end(&sg[nsg - 1]);
2447 
2448 	return nsg;
2449 }
2450 
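/*
 * Illustrative sketch (EXAMPLE_MAX_SG and example_map() are
 * hypothetical): map a buffer's payload into a caller-provided
 * scatterlist, e.g. for a crypto operation.  Real users size the table
 * from skb_cow_data()'s return value instead of a fixed bound.
 */
#define EXAMPLE_MAX_SG 16
static int example_map(struct sk_buff *skb, struct scatterlist *sg)
{
	sg_init_table(sg, EXAMPLE_MAX_SG);
	return skb_to_sgvec(skb, sg, 0, skb->len);
}
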
2451 /**
2452  *	skb_cow_data - Check that a socket buffer's data buffers are writable
2453  *	@skb: The socket buffer to check.
2454  *	@tailbits: Amount of trailing space to be added
2455  *	@trailer: Returned pointer to the skb where the @tailbits space begins
2456  *
2457  *	Make sure that the data buffers attached to a socket buffer are
2458  *	writable. If they are not, private copies are made of the data buffers
2459  *	and the socket buffer is set to use these instead.
2460  *
2461  *	If @tailbits is given, make sure that there is space to write @tailbits
2462  *	bytes of data beyond the current end of the socket buffer.  @trailer
2463  *	will be set to point to the skb in which this space begins.
2464  *
2465  *	The number of scatterlist elements required to completely map the
2466  *	COW'd and extended socket buffer will be returned.
2467  */
2468 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2469 {
2470 	int copyflag;
2471 	int elt;
2472 	struct sk_buff *skb1, **skb_p;
2473 
2474 	/* If skb is cloned or its head is paged, reallocate the head,
2475 	 * pulling all the pages out (pages are currently considered not
2476 	 * writable even when they are anonymous).
2477 	 */
2478 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
2479 	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
2480 		return -ENOMEM;
2481 
2482 	/* Easy case. Most of packets will go this way. */
2483 	if (!skb_shinfo(skb)->frag_list) {
2484 		/* A bit of trouble: not enough space for the trailer.
2485 		 * This should not happen when the stack is tuned to
2486 		 * generate good frames, so on a miss we reallocate and
2487 		 * reserve even more space; 128 bytes is fair. */
2488 
2489 		if (skb_tailroom(skb) < tailbits &&
2490 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
2491 			return -ENOMEM;
2492 
2493 		/* Voila! */
2494 		*trailer = skb;
2495 		return 1;
2496 	}
2497 
2498 	/* Misery: we are in trouble and must mince the fragments... */
2499 
2500 	elt = 1;
2501 	skb_p = &skb_shinfo(skb)->frag_list;
2502 	copyflag = 0;
2503 
2504 	while ((skb1 = *skb_p) != NULL) {
2505 		int ntail = 0;
2506 
2507 		/* The fragment has been partially pulled by someone;
2508 		 * this can happen on input. Copy it and everything
2509 		 * after it. */
2510 
2511 		if (skb_shared(skb1))
2512 			copyflag = 1;
2513 
2514 		/* If the skb is the last, worry about trailer. */
2515 
2516 		if (skb1->next == NULL && tailbits) {
2517 			if (skb_shinfo(skb1)->nr_frags ||
2518 			    skb_shinfo(skb1)->frag_list ||
2519 			    skb_tailroom(skb1) < tailbits)
2520 				ntail = tailbits + 128;
2521 		}
2522 
2523 		if (copyflag ||
2524 		    skb_cloned(skb1) ||
2525 		    ntail ||
2526 		    skb_shinfo(skb1)->nr_frags ||
2527 		    skb_shinfo(skb1)->frag_list) {
2528 			struct sk_buff *skb2;
2529 
2530 			/* No way around it: we must make a copy. */
2531 			if (ntail == 0)
2532 				skb2 = skb_copy(skb1, GFP_ATOMIC);
2533 			else
2534 				skb2 = skb_copy_expand(skb1,
2535 						       skb_headroom(skb1),
2536 						       ntail,
2537 						       GFP_ATOMIC);
2538 			if (unlikely(skb2 == NULL))
2539 				return -ENOMEM;
2540 
2541 			if (skb1->sk)
2542 				skb_set_owner_w(skb2, skb1->sk);
2543 
2544 			/* The copy succeeded: link in the new
2545 			 * skb and drop the old one. */
2546 
2547 			skb2->next = skb1->next;
2548 			*skb_p = skb2;
2549 			kfree_skb(skb1);
2550 			skb1 = skb2;
2551 		}
2552 		elt++;
2553 		*trailer = skb1;
2554 		skb_p = &skb1->next;
2555 	}
2556 
2557 	return elt;
2558 }
2559 
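/*
 * Illustrative pairing of skb_cow_data() with skb_to_sgvec(), in the
 * style of the IPsec transforms (example_cow_and_map() is
 * hypothetical): make the buffer writable plus "tailbits" of trailer
 * room, then build a scatterlist sized from the returned element count.
 */
static int example_cow_and_map(struct sk_buff *skb, int tailbits,
			       struct scatterlist *sg, int max_elts)
{
	struct sk_buff *trailer;
	int nsg = skb_cow_data(skb, tailbits, &trailer);

	if (nsg < 0)
		return nsg;
	if (nsg > max_elts)
		return -EMSGSIZE;
	sg_init_table(sg, nsg);
	return skb_to_sgvec(skb, sg, 0, skb->len);
}
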
2560 /**
2561  * skb_partial_csum_set - set up and verify partial csum values for packet
2562  * @skb: the skb to set
2563  * @start: the number of bytes after skb->data to start checksumming.
2564  * @off: the offset from start to place the checksum.
2565  *
2566  * For untrusted partially-checksummed packets, we need to make sure the values
2567  * for skb->csum_start and skb->csum_offset are valid so we don't oops.
2568  *
2569  * This function checks and sets those values and skb->ip_summed: if this
2570  * returns false you should drop the packet.
2571  */
2572 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
2573 {
2574 	if (unlikely(start > skb->len - 2) ||
2575 	    unlikely((int)start + off > skb->len - 2)) {
2576 		if (net_ratelimit())
2577 			printk(KERN_WARNING
2578 			       "bad partial csum: csum=%u/%u len=%u\n",
2579 			       start, off, skb->len);
2580 		return false;
2581 	}
2582 	skb->ip_summed = CHECKSUM_PARTIAL;
2583 	skb->csum_start = skb_headroom(skb) + start;
2584 	skb->csum_offset = off;
2585 	return true;
2586 }
2587 
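/*
 * Illustrative sketch (example_check_csum_meta() is hypothetical):
 * validate checksum offsets supplied by an untrusted source, the way a
 * virtio-style driver treats guest-supplied metadata.  A false return
 * means the packet must be dropped.
 */
static int example_check_csum_meta(struct sk_buff *skb,
				   u16 csum_start, u16 csum_offset)
{
	if (!skb_partial_csum_set(skb, csum_start, csum_offset))
		return -EINVAL;
	return 0;
}
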
2588 EXPORT_SYMBOL(___pskb_trim);
2589 EXPORT_SYMBOL(__kfree_skb);
2590 EXPORT_SYMBOL(kfree_skb);
2591 EXPORT_SYMBOL(__pskb_pull_tail);
2592 EXPORT_SYMBOL(__alloc_skb);
2593 EXPORT_SYMBOL(__netdev_alloc_skb);
2594 EXPORT_SYMBOL(pskb_copy);
2595 EXPORT_SYMBOL(pskb_expand_head);
2596 EXPORT_SYMBOL(skb_checksum);
2597 EXPORT_SYMBOL(skb_clone);
2598 EXPORT_SYMBOL(skb_copy);
2599 EXPORT_SYMBOL(skb_copy_and_csum_bits);
2600 EXPORT_SYMBOL(skb_copy_and_csum_dev);
2601 EXPORT_SYMBOL(skb_copy_bits);
2602 EXPORT_SYMBOL(skb_copy_expand);
2603 EXPORT_SYMBOL(skb_over_panic);
2604 EXPORT_SYMBOL(skb_pad);
2605 EXPORT_SYMBOL(skb_realloc_headroom);
2606 EXPORT_SYMBOL(skb_under_panic);
2607 EXPORT_SYMBOL(skb_dequeue);
2608 EXPORT_SYMBOL(skb_dequeue_tail);
2609 EXPORT_SYMBOL(skb_insert);
2610 EXPORT_SYMBOL(skb_queue_purge);
2611 EXPORT_SYMBOL(skb_queue_head);
2612 EXPORT_SYMBOL(skb_queue_tail);
2613 EXPORT_SYMBOL(skb_unlink);
2614 EXPORT_SYMBOL(skb_append);
2615 EXPORT_SYMBOL(skb_split);
2616 EXPORT_SYMBOL(skb_prepare_seq_read);
2617 EXPORT_SYMBOL(skb_seq_read);
2618 EXPORT_SYMBOL(skb_abort_seq_read);
2619 EXPORT_SYMBOL(skb_find_text);
2620 EXPORT_SYMBOL(skb_append_datato_frags);
2621 
2622 EXPORT_SYMBOL_GPL(skb_to_sgvec);
2623 EXPORT_SYMBOL_GPL(skb_cow_data);
2624 EXPORT_SYMBOL_GPL(skb_partial_csum_set);
2625