xref: /openbmc/linux/net/core/skbuff.c (revision d5cb9783536a41df9f9cba5b0a1d78047ed787f7)
1 /*
2  *	Routines having to do with the 'struct sk_buff' memory handlers.
3  *
4  *	Authors:	Alan Cox <iiitac@pyr.swan.ac.uk>
5  *			Florian La Roche <rzsfl@rz.uni-sb.de>
6  *
7  *	Version:	$Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
8  *
9  *	Fixes:
10  *		Alan Cox	:	Fixed the worst of the load
11  *					balancer bugs.
12  *		Dave Platt	:	Interrupt stacking fix.
13  *	Richard Kooijman	:	Timestamp fixes.
14  *		Alan Cox	:	Changed buffer format.
15  *		Alan Cox	:	destructor hook for AF_UNIX etc.
16  *		Linus Torvalds	:	Better skb_clone.
17  *		Alan Cox	:	Added skb_copy.
18  *		Alan Cox	:	Added all the changed routines Linus
19  *					only put in the headers
20  *		Ray VanTassle	:	Fixed --skb->lock in free
21  *		Alan Cox	:	skb_copy copy arp field
22  *		Andi Kleen	:	slabified it.
23  *		Robert Olsson	:	Removed skb_head_pool
24  *
25  *	NOTE:
26  *		The __skb_ routines should be called with interrupts
27  *	disabled, or you better be *real* sure that the operation is atomic
28  *	with respect to whatever list is being frobbed (e.g. via lock_sock()
29  *	or via disabling bottom half handlers, etc).
30  *
31  *	This program is free software; you can redistribute it and/or
32  *	modify it under the terms of the GNU General Public License
33  *	as published by the Free Software Foundation; either version
34  *	2 of the License, or (at your option) any later version.
35  */
36 
37 /*
38  *	The functions in this file will not compile correctly with gcc 2.4.x
39  */
40 
41 #include <linux/config.h>
42 #include <linux/module.h>
43 #include <linux/types.h>
44 #include <linux/kernel.h>
45 #include <linux/sched.h>
46 #include <linux/mm.h>
47 #include <linux/interrupt.h>
48 #include <linux/in.h>
49 #include <linux/inet.h>
50 #include <linux/slab.h>
51 #include <linux/netdevice.h>
52 #ifdef CONFIG_NET_CLS_ACT
53 #include <net/pkt_sched.h>
54 #endif
55 #include <linux/string.h>
56 #include <linux/skbuff.h>
57 #include <linux/cache.h>
58 #include <linux/rtnetlink.h>
59 #include <linux/init.h>
60 #include <linux/highmem.h>
61 
62 #include <net/protocol.h>
63 #include <net/dst.h>
64 #include <net/sock.h>
65 #include <net/checksum.h>
66 #include <net/xfrm.h>
67 
68 #include <asm/uaccess.h>
69 #include <asm/system.h>
70 
71 static kmem_cache_t *skbuff_head_cache __read_mostly;
72 static kmem_cache_t *skbuff_fclone_cache __read_mostly;
73 
74 /*
75  *	Keep out-of-line to prevent kernel bloat.
76  *	__builtin_return_address is not used because it is not always
77  *	reliable.
78  */
79 
80 /**
81  *	skb_over_panic	- 	private function
82  *	@skb: buffer
83  *	@sz: size
84  *	@here: address
85  *
86  *	Out of line support code for skb_put(). Not user callable.
87  */
88 void skb_over_panic(struct sk_buff *skb, int sz, void *here)
89 {
90 	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
91 	                  "data:%p tail:%p end:%p dev:%s\n",
92 	       here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
93 	       skb->dev ? skb->dev->name : "<NULL>");
94 	BUG();
95 }
96 
97 /**
98  *	skb_under_panic	- 	private function
99  *	@skb: buffer
100  *	@sz: size
101  *	@here: address
102  *
103  *	Out of line support code for skb_push(). Not user callable.
104  */
105 
106 void skb_under_panic(struct sk_buff *skb, int sz, void *here)
107 {
108 	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
109 	                  "data:%p tail:%p end:%p dev:%s\n",
110 	       here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
111 	       skb->dev ? skb->dev->name : "<NULL>");
112 	BUG();
113 }
114 
115 /* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
116  *	'private' fields and also do memory statistics to find all the
117  *	[BEEP] leaks.
118  *
119  */
120 
121 /**
122  *	__alloc_skb	-	allocate a network buffer
123  *	@size: size to allocate
124  *	@gfp_mask: allocation mask
125  *	@fclone: allocate from fclone cache instead of head cache
126  *		and allocate a cloned (child) skb
127  *
128  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
129  *	tail room of @size bytes. The object has a reference count of one.
130  *	Returns the buffer on success, or %NULL on failure.
131  *
132  *	Buffers may only be allocated from interrupts using a @gfp_mask of
133  *	%GFP_ATOMIC.
134  */
135 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
136 			    int fclone)
137 {
138 	struct sk_buff *skb;
139 	u8 *data;
140 
141 	/* Get the HEAD */
142 	if (fclone)
143 		skb = kmem_cache_alloc(skbuff_fclone_cache,
144 				       gfp_mask & ~__GFP_DMA);
145 	else
146 		skb = kmem_cache_alloc(skbuff_head_cache,
147 				       gfp_mask & ~__GFP_DMA);
148 
149 	if (!skb)
150 		goto out;
151 
152 	/* Get the DATA. Size must match skb_add_mtu(). */
153 	size = SKB_DATA_ALIGN(size);
154 	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
155 	if (!data)
156 		goto nodata;
157 
158 	memset(skb, 0, offsetof(struct sk_buff, truesize));
159 	skb->truesize = size + sizeof(struct sk_buff);
160 	atomic_set(&skb->users, 1);
161 	skb->head = data;
162 	skb->data = data;
163 	skb->tail = data;
164 	skb->end  = data + size;
165 	if (fclone) {
166 		struct sk_buff *child = skb + 1;
167 		atomic_t *fclone_ref = (atomic_t *) (child + 1);
168 
169 		skb->fclone = SKB_FCLONE_ORIG;
170 		atomic_set(fclone_ref, 1);
171 
172 		child->fclone = SKB_FCLONE_UNAVAILABLE;
173 	}
174 	atomic_set(&(skb_shinfo(skb)->dataref), 1);
175 	skb_shinfo(skb)->nr_frags  = 0;
176 	skb_shinfo(skb)->tso_size = 0;
177 	skb_shinfo(skb)->tso_segs = 0;
178 	skb_shinfo(skb)->frag_list = NULL;
179 	skb_shinfo(skb)->ufo_size = 0;
180 	skb_shinfo(skb)->ip6_frag_id = 0;
181 out:
182 	return skb;
183 nodata:
184 	kmem_cache_free(skbuff_head_cache, skb);
185 	skb = NULL;
186 	goto out;
187 }
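
/*
 * Usage sketch (illustrative only, not called anywhere in this file): a
 * typical caller goes through alloc_skb()/skb_reserve()/skb_put() rather
 * than calling __alloc_skb() directly.  The helper name and sizes below
 * are hypothetical; the sketch assumes the includes already present in
 * this file.
 */
static struct sk_buff *example_build_skb(unsigned int payload_len, gfp_t gfp)
{
	struct sk_buff *skb;

	/* Room for link-layer headers in front plus the payload behind. */
	skb = alloc_skb(16 + payload_len, gfp);
	if (!skb)
		return NULL;

	/* Move skb->data forward so a later skb_push() has headroom. */
	skb_reserve(skb, 16);

	/* Claim payload_len bytes of tailroom and zero them. */
	memset(skb_put(skb, payload_len), 0, payload_len);

	return skb;
}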
188 
189 /**
190  *	alloc_skb_from_cache	-	allocate a network buffer
191  *	@cp: kmem_cache from which to allocate the data area
192  *           (object size must be big enough for @size bytes + skb overheads)
193  *	@size: size to allocate
194  *	@gfp_mask: allocation mask
195  *
196  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
197  *	tail room of @size bytes. The object has a reference count of one.
198  *	Returns the buffer on success, or %NULL on failure.
199  *
200  *	Buffers may only be allocated from interrupts using a @gfp_mask of
201  *	%GFP_ATOMIC.
202  */
203 struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
204 				     unsigned int size,
205 				     gfp_t gfp_mask)
206 {
207 	struct sk_buff *skb;
208 	u8 *data;
209 
210 	/* Get the HEAD */
211 	skb = kmem_cache_alloc(skbuff_head_cache,
212 			       gfp_mask & ~__GFP_DMA);
213 	if (!skb)
214 		goto out;
215 
216 	/* Get the DATA. */
217 	size = SKB_DATA_ALIGN(size);
218 	data = kmem_cache_alloc(cp, gfp_mask);
219 	if (!data)
220 		goto nodata;
221 
222 	memset(skb, 0, offsetof(struct sk_buff, truesize));
223 	skb->truesize = size + sizeof(struct sk_buff);
224 	atomic_set(&skb->users, 1);
225 	skb->head = data;
226 	skb->data = data;
227 	skb->tail = data;
228 	skb->end  = data + size;
229 
230 	atomic_set(&(skb_shinfo(skb)->dataref), 1);
231 	skb_shinfo(skb)->nr_frags  = 0;
232 	skb_shinfo(skb)->tso_size = 0;
233 	skb_shinfo(skb)->tso_segs = 0;
234 	skb_shinfo(skb)->frag_list = NULL;
235 out:
236 	return skb;
237 nodata:
238 	kmem_cache_free(skbuff_head_cache, skb);
239 	skb = NULL;
240 	goto out;
241 }
242 
243 
244 static void skb_drop_fraglist(struct sk_buff *skb)
245 {
246 	struct sk_buff *list = skb_shinfo(skb)->frag_list;
247 
248 	skb_shinfo(skb)->frag_list = NULL;
249 
250 	do {
251 		struct sk_buff *this = list;
252 		list = list->next;
253 		kfree_skb(this);
254 	} while (list);
255 }
256 
257 static void skb_clone_fraglist(struct sk_buff *skb)
258 {
259 	struct sk_buff *list;
260 
261 	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
262 		skb_get(list);
263 }
264 
265 void skb_release_data(struct sk_buff *skb)
266 {
267 	if (!skb->cloned ||
268 	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
269 			       &skb_shinfo(skb)->dataref)) {
270 		if (skb_shinfo(skb)->nr_frags) {
271 			int i;
272 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
273 				put_page(skb_shinfo(skb)->frags[i].page);
274 		}
275 
276 		if (skb_shinfo(skb)->frag_list)
277 			skb_drop_fraglist(skb);
278 
279 		kfree(skb->head);
280 	}
281 }
282 
283 /*
284  *	Free the memory used by an skbuff without cleaning its state.
285  */
286 void kfree_skbmem(struct sk_buff *skb)
287 {
288 	struct sk_buff *other;
289 	atomic_t *fclone_ref;
290 
291 	skb_release_data(skb);
292 	switch (skb->fclone) {
293 	case SKB_FCLONE_UNAVAILABLE:
294 		kmem_cache_free(skbuff_head_cache, skb);
295 		break;
296 
297 	case SKB_FCLONE_ORIG:
298 		fclone_ref = (atomic_t *) (skb + 2);
299 		if (atomic_dec_and_test(fclone_ref))
300 			kmem_cache_free(skbuff_fclone_cache, skb);
301 		break;
302 
303 	case SKB_FCLONE_CLONE:
304 		fclone_ref = (atomic_t *) (skb + 1);
305 		other = skb - 1;
306 
307 		/* The clone portion is available for
308 		 * fast-cloning again.
309 		 */
310 		skb->fclone = SKB_FCLONE_UNAVAILABLE;
311 
312 		if (atomic_dec_and_test(fclone_ref))
313 			kmem_cache_free(skbuff_fclone_cache, other);
314 		break;
315 	}
316 }
317 
318 /**
319  *	__kfree_skb - private function
320  *	@skb: buffer
321  *
322  *	Free an sk_buff. Release anything attached to the buffer.
323  *	Clean the state. This is an internal helper function. Users should
324  *	always call kfree_skb
325  */
326 
327 void __kfree_skb(struct sk_buff *skb)
328 {
329 	dst_release(skb->dst);
330 #ifdef CONFIG_XFRM
331 	secpath_put(skb->sp);
332 #endif
333 	if (skb->destructor) {
334 		WARN_ON(in_irq());
335 		skb->destructor(skb);
336 	}
337 #ifdef CONFIG_NETFILTER
338 	nf_conntrack_put(skb->nfct);
339 #ifdef CONFIG_BRIDGE_NETFILTER
340 	nf_bridge_put(skb->nf_bridge);
341 #endif
342 #endif
343 /* XXX: IS this still necessary? - JHS */
344 #ifdef CONFIG_NET_SCHED
345 	skb->tc_index = 0;
346 #ifdef CONFIG_NET_CLS_ACT
347 	skb->tc_verd = 0;
348 #endif
349 #endif
350 
351 	kfree_skbmem(skb);
352 }
353 
354 /**
355  *	skb_clone	-	duplicate an sk_buff
356  *	@skb: buffer to clone
357  *	@gfp_mask: allocation priority
358  *
359  *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
360  *	copies share the same packet data but not structure. The new
361  *	buffer has a reference count of 1. If the allocation fails the
362  *	function returns %NULL otherwise the new buffer is returned.
363  *
364  *	If this function is called from an interrupt, @gfp_mask must be
365  *	%GFP_ATOMIC.
366  */
367 
368 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
369 {
370 	struct sk_buff *n;
371 
372 	n = skb + 1;
373 	if (skb->fclone == SKB_FCLONE_ORIG &&
374 	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
375 		atomic_t *fclone_ref = (atomic_t *) (n + 1);
376 		n->fclone = SKB_FCLONE_CLONE;
377 		atomic_inc(fclone_ref);
378 	} else {
379 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
380 		if (!n)
381 			return NULL;
382 		n->fclone = SKB_FCLONE_UNAVAILABLE;
383 	}
384 
385 #define C(x) n->x = skb->x
386 
387 	n->next = n->prev = NULL;
388 	n->sk = NULL;
389 	C(tstamp);
390 	C(dev);
391 	C(h);
392 	C(nh);
393 	C(mac);
394 	C(dst);
395 	dst_clone(skb->dst);
396 	C(sp);
397 #ifdef CONFIG_INET
398 	secpath_get(skb->sp);
399 #endif
400 	memcpy(n->cb, skb->cb, sizeof(skb->cb));
401 	C(len);
402 	C(data_len);
403 	C(csum);
404 	C(local_df);
405 	n->cloned = 1;
406 	n->nohdr = 0;
407 	C(pkt_type);
408 	C(ip_summed);
409 	C(priority);
410 	C(protocol);
411 	n->destructor = NULL;
412 #ifdef CONFIG_NETFILTER
413 	C(nfmark);
414 	C(nfct);
415 	nf_conntrack_get(skb->nfct);
416 	C(nfctinfo);
417 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
418 	C(ipvs_property);
419 #endif
420 #ifdef CONFIG_BRIDGE_NETFILTER
421 	C(nf_bridge);
422 	nf_bridge_get(skb->nf_bridge);
423 #endif
424 #endif /*CONFIG_NETFILTER*/
425 #ifdef CONFIG_NET_SCHED
426 	C(tc_index);
427 #ifdef CONFIG_NET_CLS_ACT
428 	n->tc_verd = SET_TC_VERD(skb->tc_verd,0);
429 	n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
430 	n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
431 	C(input_dev);
432 #endif
433 
434 #endif
435 	C(truesize);
436 	atomic_set(&n->users, 1);
437 	C(head);
438 	C(data);
439 	C(tail);
440 	C(end);
441 
442 	atomic_inc(&(skb_shinfo(skb)->dataref));
443 	skb->cloned = 1;
444 
445 	return n;
446 }
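
/*
 * Usage sketch (illustrative only): a clone shares the packet data with
 * the original, so it is cheap but must not be used to modify the
 * payload.  The helper name is hypothetical.
 */
static struct sk_buff *example_clone_for_tap(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return NULL;

	/* Both skbs now point at the same data; each has its own
	 * struct sk_buff and each must be freed independently with
	 * kfree_skb().  To write to the data, take a private copy
	 * with skb_copy() or pskb_copy() instead.
	 */
	return clone;
}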
447 
448 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
449 {
450 	/*
451 	 *	Shift between the two data areas in bytes
452 	 */
453 	unsigned long offset = new->data - old->data;
454 
455 	new->sk		= NULL;
456 	new->dev	= old->dev;
457 	new->priority	= old->priority;
458 	new->protocol	= old->protocol;
459 	new->dst	= dst_clone(old->dst);
460 #ifdef CONFIG_INET
461 	new->sp		= secpath_get(old->sp);
462 #endif
463 	new->h.raw	= old->h.raw + offset;
464 	new->nh.raw	= old->nh.raw + offset;
465 	new->mac.raw	= old->mac.raw + offset;
466 	memcpy(new->cb, old->cb, sizeof(old->cb));
467 	new->local_df	= old->local_df;
468 	new->fclone	= SKB_FCLONE_UNAVAILABLE;
469 	new->pkt_type	= old->pkt_type;
470 	new->tstamp	= old->tstamp;
471 	new->destructor = NULL;
472 #ifdef CONFIG_NETFILTER
473 	new->nfmark	= old->nfmark;
474 	new->nfct	= old->nfct;
475 	nf_conntrack_get(old->nfct);
476 	new->nfctinfo	= old->nfctinfo;
477 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
478 	new->ipvs_property = old->ipvs_property;
479 #endif
480 #ifdef CONFIG_BRIDGE_NETFILTER
481 	new->nf_bridge	= old->nf_bridge;
482 	nf_bridge_get(old->nf_bridge);
483 #endif
484 #endif
485 #ifdef CONFIG_NET_SCHED
486 #ifdef CONFIG_NET_CLS_ACT
487 	new->tc_verd = old->tc_verd;
488 #endif
489 	new->tc_index	= old->tc_index;
490 #endif
491 	atomic_set(&new->users, 1);
492 	skb_shinfo(new)->tso_size = skb_shinfo(old)->tso_size;
493 	skb_shinfo(new)->tso_segs = skb_shinfo(old)->tso_segs;
494 }
495 
496 /**
497  *	skb_copy	-	create private copy of an sk_buff
498  *	@skb: buffer to copy
499  *	@gfp_mask: allocation priority
500  *
501  *	Make a copy of both an &sk_buff and its data. This is used when the
502  *	caller wishes to modify the data and needs a private copy of the
503  *	data to alter. Returns %NULL on failure or the pointer to the buffer
504  *	on success. The returned buffer has a reference count of 1.
505  *
506  *	As a by-product, this function converts a non-linear &sk_buff into a
507  *	linear one, so the &sk_buff becomes completely private and the caller
508  *	may modify all the data of the returned buffer. This means the
509  *	function is not recommended when only the header is going to be
510  *	modified. Use pskb_copy() instead.
511  */
512 
513 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
514 {
515 	int headerlen = skb->data - skb->head;
516 	/*
517 	 *	Allocate the copy buffer
518 	 */
519 	struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len,
520 				      gfp_mask);
521 	if (!n)
522 		return NULL;
523 
524 	/* Set the data pointer */
525 	skb_reserve(n, headerlen);
526 	/* Set the tail pointer and length */
527 	skb_put(n, skb->len);
528 	n->csum	     = skb->csum;
529 	n->ip_summed = skb->ip_summed;
530 
531 	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
532 		BUG();
533 
534 	copy_skb_header(n, skb);
535 	return n;
536 }
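
/*
 * Usage sketch (illustrative only): skb_copy() yields a fully private,
 * linear buffer, so the copy may be modified freely.  The helper name
 * and the byte it touches are hypothetical.
 */
static struct sk_buff *example_private_copy(const struct sk_buff *skb)
{
	struct sk_buff *copy = skb_copy(skb, GFP_ATOMIC);

	if (!copy)
		return NULL;

	/* Safe: the copy shares no data with the original. */
	if (copy->len > 0)
		copy->data[0] ^= 0xff;

	return copy;
}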
537 
538 
539 /**
540  *	pskb_copy	-	create copy of an sk_buff with private head.
541  *	@skb: buffer to copy
542  *	@gfp_mask: allocation priority
543  *
544  *	Make a copy of both an &sk_buff and part of its data, located
545  *	in the header. Fragmented data remain shared. This is used when
546  *	the caller wishes to modify only the header of the &sk_buff and
547  *	needs a private copy of the header to alter. Returns %NULL on failure
548  *	or the pointer to the buffer on success.
549  *	The returned buffer has a reference count of 1.
550  */
551 
552 struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
553 {
554 	/*
555 	 *	Allocate the copy buffer
556 	 */
557 	struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask);
558 
559 	if (!n)
560 		goto out;
561 
562 	/* Set the data pointer */
563 	skb_reserve(n, skb->data - skb->head);
564 	/* Set the tail pointer and length */
565 	skb_put(n, skb_headlen(skb));
566 	/* Copy the bytes */
567 	memcpy(n->data, skb->data, n->len);
568 	n->csum	     = skb->csum;
569 	n->ip_summed = skb->ip_summed;
570 
571 	n->data_len  = skb->data_len;
572 	n->len	     = skb->len;
573 
574 	if (skb_shinfo(skb)->nr_frags) {
575 		int i;
576 
577 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
578 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
579 			get_page(skb_shinfo(n)->frags[i].page);
580 		}
581 		skb_shinfo(n)->nr_frags = i;
582 	}
583 
584 	if (skb_shinfo(skb)->frag_list) {
585 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
586 		skb_clone_fraglist(n);
587 	}
588 
589 	copy_skb_header(n, skb);
590 out:
591 	return n;
592 }
593 
594 /**
595  *	pskb_expand_head - reallocate header of &sk_buff
596  *	@skb: buffer to reallocate
597  *	@nhead: room to add at head
598  *	@ntail: room to add at tail
599  *	@gfp_mask: allocation priority
600  *
601  *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
602  *	the header of the skb. The &sk_buff itself is not changed and MUST
603  *	have a reference count of 1. Returns zero on success or a negative
604  *	error code if expansion failed; in that case the &sk_buff is unchanged.
605  *
606  *	All the pointers pointing into skb header may change and must be
607  *	reloaded after call to this function.
608  */
609 
610 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
611 		     gfp_t gfp_mask)
612 {
613 	int i;
614 	u8 *data;
615 	int size = nhead + (skb->end - skb->head) + ntail;
616 	long off;
617 
618 	if (skb_shared(skb))
619 		BUG();
620 
621 	size = SKB_DATA_ALIGN(size);
622 
623 	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
624 	if (!data)
625 		goto nodata;
626 
627 	/* Copy only real data... and, alas, header. This should be
628 	 * optimized for the cases when header is void. */
629 	memcpy(data + nhead, skb->head, skb->tail - skb->head);
630 	memcpy(data + size, skb->end, sizeof(struct skb_shared_info));
631 
632 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
633 		get_page(skb_shinfo(skb)->frags[i].page);
634 
635 	if (skb_shinfo(skb)->frag_list)
636 		skb_clone_fraglist(skb);
637 
638 	skb_release_data(skb);
639 
640 	off = (data + nhead) - skb->head;
641 
642 	skb->head     = data;
643 	skb->end      = data + size;
644 	skb->data    += off;
645 	skb->tail    += off;
646 	skb->mac.raw += off;
647 	skb->h.raw   += off;
648 	skb->nh.raw  += off;
649 	skb->cloned   = 0;
650 	skb->nohdr    = 0;
651 	atomic_set(&skb_shinfo(skb)->dataref, 1);
652 	return 0;
653 
654 nodata:
655 	return -ENOMEM;
656 }
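
/*
 * Usage sketch (illustrative only): grow the headroom before pushing an
 * extra header when the existing headroom is too small.  The helper name
 * and header size are hypothetical; note that pointers into the old head
 * are invalid after a successful pskb_expand_head().
 */
static int example_push_header(struct sk_buff *skb, const void *hdr, int hdrlen)
{
	if (skb_headroom(skb) < hdrlen || skb_cloned(skb)) {
		if (pskb_expand_head(skb, SKB_DATA_ALIGN(hdrlen), 0,
				     GFP_ATOMIC))
			return -ENOMEM;
	}

	memcpy(skb_push(skb, hdrlen), hdr, hdrlen);
	return 0;
}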
657 
658 /* Make private copy of skb with writable head and some headroom */
659 
660 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
661 {
662 	struct sk_buff *skb2;
663 	int delta = headroom - skb_headroom(skb);
664 
665 	if (delta <= 0)
666 		skb2 = pskb_copy(skb, GFP_ATOMIC);
667 	else {
668 		skb2 = skb_clone(skb, GFP_ATOMIC);
669 		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
670 					     GFP_ATOMIC)) {
671 			kfree_skb(skb2);
672 			skb2 = NULL;
673 		}
674 	}
675 	return skb2;
676 }
677 
678 
679 /**
680  *	skb_copy_expand	-	copy and expand sk_buff
681  *	@skb: buffer to copy
682  *	@newheadroom: new free bytes at head
683  *	@newtailroom: new free bytes at tail
684  *	@gfp_mask: allocation priority
685  *
686  *	Make a copy of both an &sk_buff and its data and while doing so
687  *	allocate additional space.
688  *
689  *	This is used when the caller wishes to modify the data and needs a
690  *	private copy of the data to alter as well as more space for new fields.
691  *	Returns %NULL on failure or the pointer to the buffer
692  *	on success. The returned buffer has a reference count of 1.
693  *
694  *	You must pass %GFP_ATOMIC as the allocation priority if this function
695  *	is called from an interrupt.
696  *
697  *	BUG ALERT: ip_summed is not copied. Why does this work? Is it used
698  *	only by netfilter in the cases when checksum is recalculated? --ANK
699  */
700 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
701 				int newheadroom, int newtailroom,
702 				gfp_t gfp_mask)
703 {
704 	/*
705 	 *	Allocate the copy buffer
706 	 */
707 	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
708 				      gfp_mask);
709 	int head_copy_len, head_copy_off;
710 
711 	if (!n)
712 		return NULL;
713 
714 	skb_reserve(n, newheadroom);
715 
716 	/* Set the tail pointer and length */
717 	skb_put(n, skb->len);
718 
719 	head_copy_len = skb_headroom(skb);
720 	head_copy_off = 0;
721 	if (newheadroom <= head_copy_len)
722 		head_copy_len = newheadroom;
723 	else
724 		head_copy_off = newheadroom - head_copy_len;
725 
726 	/* Copy the linear header and data. */
727 	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
728 			  skb->len + head_copy_len))
729 		BUG();
730 
731 	copy_skb_header(n, skb);
732 
733 	return n;
734 }
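
/*
 * Usage sketch (illustrative only): take a copy that also leaves room for
 * a new encapsulation header in front.  The helper name and sizes are
 * hypothetical; as the comment above notes, ip_summed is not copied by
 * skb_copy_expand(), so a caller may need to set it by hand.
 */
static struct sk_buff *example_copy_with_room(const struct sk_buff *skb,
					      int new_hdr_len)
{
	struct sk_buff *n;

	n = skb_copy_expand(skb, skb_headroom(skb) + new_hdr_len,
			    skb_tailroom(skb), GFP_ATOMIC);
	if (n)
		n->ip_summed = skb->ip_summed;
	return n;
}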
735 
736 /**
737  *	skb_pad			-	zero pad the tail of an skb
738  *	@skb: buffer to pad
739  *	@pad: space to pad
740  *
741  *	Ensure that a buffer is followed by a padding area that is zero
742  *	filled. Used by network drivers which may DMA or transfer data
743  *	beyond the buffer end onto the wire.
744  *
745  *	May return NULL in out of memory cases.
746  */
747 
748 struct sk_buff *skb_pad(struct sk_buff *skb, int pad)
749 {
750 	struct sk_buff *nskb;
751 
752 	/* If the skbuff is non-linear, tailroom is always zero. */
753 	if (skb_tailroom(skb) >= pad) {
754 		memset(skb->data+skb->len, 0, pad);
755 		return skb;
756 	}
757 
758 	nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC);
759 	kfree_skb(skb);
760 	if (nskb)
761 		memset(nskb->data+nskb->len, 0, pad);
762 	return nskb;
763 }
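
/*
 * Usage sketch (illustrative only): a driver padding a short Ethernet
 * frame before DMA.  The helper name is hypothetical and ETH_ZLEN comes
 * in via <linux/netdevice.h>.  Because skb_pad() in this version frees
 * the passed skb whenever it has to reallocate, only the returned
 * pointer may be used afterwards.
 */
static struct sk_buff *example_pad_short_frame(struct sk_buff *skb)
{
	if (skb->len >= ETH_ZLEN)
		return skb;

	/* On reallocation the original skb is freed by skb_pad() itself. */
	return skb_pad(skb, ETH_ZLEN - skb->len);
}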
764 
765 /* Trims skb to length len. It can change skb pointers if "realloc" is 1.
766  * If realloc == 0 and trimming is impossible without changing the data,
767  * it is a BUG().
768  */
769 
770 int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc)
771 {
772 	int offset = skb_headlen(skb);
773 	int nfrags = skb_shinfo(skb)->nr_frags;
774 	int i;
775 
776 	for (i = 0; i < nfrags; i++) {
777 		int end = offset + skb_shinfo(skb)->frags[i].size;
778 		if (end > len) {
779 			if (skb_cloned(skb)) {
780 				if (!realloc)
781 					BUG();
782 				if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
783 					return -ENOMEM;
784 			}
785 			if (len <= offset) {
786 				put_page(skb_shinfo(skb)->frags[i].page);
787 				skb_shinfo(skb)->nr_frags--;
788 			} else {
789 				skb_shinfo(skb)->frags[i].size = len - offset;
790 			}
791 		}
792 		offset = end;
793 	}
794 
795 	if (offset < len) {
796 		skb->data_len -= skb->len - len;
797 		skb->len       = len;
798 	} else {
799 		if (len <= skb_headlen(skb)) {
800 			skb->len      = len;
801 			skb->data_len = 0;
802 			skb->tail     = skb->data + len;
803 			if (skb_shinfo(skb)->frag_list && !skb_cloned(skb))
804 				skb_drop_fraglist(skb);
805 		} else {
806 			skb->data_len -= skb->len - len;
807 			skb->len       = len;
808 		}
809 	}
810 
811 	return 0;
812 }
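
/*
 * Usage sketch (illustrative only): callers normally reach this through
 * the pskb_trim() wrapper in <linux/skbuff.h>.  Here a hypothetical
 * receive path trims link-layer padding off a frame once the real
 * datagram length is known.
 */
static int example_trim_padding(struct sk_buff *skb, unsigned int real_len)
{
	if (skb->len > real_len)
		return pskb_trim(skb, real_len);
	return 0;
}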
813 
814 /**
815  *	__pskb_pull_tail - advance tail of skb header
816  *	@skb: buffer to reallocate
817  *	@delta: number of bytes to advance tail
818  *
819  *	The function makes sense only on a fragmented &sk_buff: it expands
820  *	the header, moving its tail forward and copying the necessary data
821  *	from the fragmented part.
822  *
823  *	&sk_buff MUST have reference count of 1.
824  *
825  *	Returns %NULL (and &sk_buff does not change) if pull failed
826  *	or value of new tail of skb in the case of success.
827  *
828  *	All the pointers pointing into skb header may change and must be
829  *	reloaded after call to this function.
830  */
831 
832 /* Moves the tail of the skb head forward, copying data from the
833  * fragmented part when necessary.
834  * 1. It may fail due to malloc failure.
835  * 2. It may change skb pointers.
836  *
837  * It is pretty complicated. Luckily, it is called only in exceptional cases.
838  */
839 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
840 {
841 	/* If the skb does not have enough free space at the tail, get a new
842 	 * one plus 128 bytes for future expansions. If we have enough room
843 	 * at the tail, reallocate without expansion only if the skb is cloned.
844 	 */
845 	int i, k, eat = (skb->tail + delta) - skb->end;
846 
847 	if (eat > 0 || skb_cloned(skb)) {
848 		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
849 				     GFP_ATOMIC))
850 			return NULL;
851 	}
852 
853 	if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta))
854 		BUG();
855 
856 	/* Optimization: no fragments, no reason to pre-estimate the
857 	 * size of the pulled pages. Superb.
858 	 */
859 	if (!skb_shinfo(skb)->frag_list)
860 		goto pull_pages;
861 
862 	/* Estimate size of pulled pages. */
863 	eat = delta;
864 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
865 		if (skb_shinfo(skb)->frags[i].size >= eat)
866 			goto pull_pages;
867 		eat -= skb_shinfo(skb)->frags[i].size;
868 	}
869 
870 	/* If we need to update the frag list, we are in trouble.
871 	 * Certainly, it is possible to add an offset to the skb data,
872 	 * but taking into account that pulling is expected to be a
873 	 * very rare operation, it is worth fighting against further
874 	 * bloating of the skb head and crucifying ourselves here instead.
875 	 * Pure masochism, indeed. 8)8)
876 	 */
877 	if (eat) {
878 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
879 		struct sk_buff *clone = NULL;
880 		struct sk_buff *insp = NULL;
881 
882 		do {
883 			if (!list)
884 				BUG();
885 
886 			if (list->len <= eat) {
887 				/* Eaten as whole. */
888 				eat -= list->len;
889 				list = list->next;
890 				insp = list;
891 			} else {
892 				/* Eaten partially. */
893 
894 				if (skb_shared(list)) {
895 					/* Sucks! We need to fork list. :-( */
896 					clone = skb_clone(list, GFP_ATOMIC);
897 					if (!clone)
898 						return NULL;
899 					insp = list->next;
900 					list = clone;
901 				} else {
902 					/* This may be pulled without
903 					 * problems. */
904 					insp = list;
905 				}
906 				if (!pskb_pull(list, eat)) {
907 					if (clone)
908 						kfree_skb(clone);
909 					return NULL;
910 				}
911 				break;
912 			}
913 		} while (eat);
914 
915 		/* Free pulled out fragments. */
916 		while ((list = skb_shinfo(skb)->frag_list) != insp) {
917 			skb_shinfo(skb)->frag_list = list->next;
918 			kfree_skb(list);
919 		}
920 		/* And insert new clone at head. */
921 		if (clone) {
922 			clone->next = list;
923 			skb_shinfo(skb)->frag_list = clone;
924 		}
925 	}
926 	/* Success! Now we may commit changes to skb data. */
927 
928 pull_pages:
929 	eat = delta;
930 	k = 0;
931 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
932 		if (skb_shinfo(skb)->frags[i].size <= eat) {
933 			put_page(skb_shinfo(skb)->frags[i].page);
934 			eat -= skb_shinfo(skb)->frags[i].size;
935 		} else {
936 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
937 			if (eat) {
938 				skb_shinfo(skb)->frags[k].page_offset += eat;
939 				skb_shinfo(skb)->frags[k].size -= eat;
940 				eat = 0;
941 			}
942 			k++;
943 		}
944 	}
945 	skb_shinfo(skb)->nr_frags = k;
946 
947 	skb->tail     += delta;
948 	skb->data_len -= delta;
949 
950 	return skb->tail;
951 }
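
/*
 * Usage sketch (illustrative only): this function is normally reached
 * through pskb_may_pull(), which a protocol handler calls to make sure a
 * header lies in the linear area before dereferencing it.  The struct
 * and helper below are purely illustrative.
 */
struct example_hdr {
	__u8	type;
	__u8	flags;
	__u16	len;
};

static int example_parse_header(struct sk_buff *skb)
{
	struct example_hdr *hdr;

	if (!pskb_may_pull(skb, sizeof(struct example_hdr)))
		return -EINVAL;	/* packet too short */

	/* The header is now guaranteed to be linear. */
	hdr = (struct example_hdr *)skb->data;
	return hdr->type;
}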
952 
953 /* Copy some data bits from skb to kernel buffer. */
954 
955 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
956 {
957 	int i, copy;
958 	int start = skb_headlen(skb);
959 
960 	if (offset > (int)skb->len - len)
961 		goto fault;
962 
963 	/* Copy header. */
964 	if ((copy = start - offset) > 0) {
965 		if (copy > len)
966 			copy = len;
967 		memcpy(to, skb->data + offset, copy);
968 		if ((len -= copy) == 0)
969 			return 0;
970 		offset += copy;
971 		to     += copy;
972 	}
973 
974 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
975 		int end;
976 
977 		BUG_TRAP(start <= offset + len);
978 
979 		end = start + skb_shinfo(skb)->frags[i].size;
980 		if ((copy = end - offset) > 0) {
981 			u8 *vaddr;
982 
983 			if (copy > len)
984 				copy = len;
985 
986 			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
987 			memcpy(to,
988 			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
989 			       offset - start, copy);
990 			kunmap_skb_frag(vaddr);
991 
992 			if ((len -= copy) == 0)
993 				return 0;
994 			offset += copy;
995 			to     += copy;
996 		}
997 		start = end;
998 	}
999 
1000 	if (skb_shinfo(skb)->frag_list) {
1001 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1002 
1003 		for (; list; list = list->next) {
1004 			int end;
1005 
1006 			BUG_TRAP(start <= offset + len);
1007 
1008 			end = start + list->len;
1009 			if ((copy = end - offset) > 0) {
1010 				if (copy > len)
1011 					copy = len;
1012 				if (skb_copy_bits(list, offset - start,
1013 						  to, copy))
1014 					goto fault;
1015 				if ((len -= copy) == 0)
1016 					return 0;
1017 				offset += copy;
1018 				to     += copy;
1019 			}
1020 			start = end;
1021 		}
1022 	}
1023 	if (!len)
1024 		return 0;
1025 
1026 fault:
1027 	return -EFAULT;
1028 }
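
/*
 * Usage sketch (illustrative only): copy a few bytes out of a possibly
 * non-linear skb into a buffer on the stack.  The offset and length are
 * hypothetical; skb_store_bits() below is the mirror operation for
 * writing into an skb.
 */
static int example_peek_bytes(const struct sk_buff *skb)
{
	u8 buf[8];

	if (skb_copy_bits(skb, 0, buf, sizeof(buf)) < 0)
		return -EFAULT;	/* skb shorter than 8 bytes */

	/* buf[] now holds the first 8 bytes regardless of how the
	 * skb data is fragmented.
	 */
	return buf[0];
}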
1029 
1030 /**
1031  *	skb_store_bits - store bits from kernel buffer to skb
1032  *	@skb: destination buffer
1033  *	@offset: offset in destination
1034  *	@from: source buffer
1035  *	@len: number of bytes to copy
1036  *
1037  *	Copy the specified number of bytes from the source buffer to the
1038  *	destination skb.  This function handles all the messy bits of
1039  *	traversing fragment lists and such.
1040  */
1041 
1042 int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len)
1043 {
1044 	int i, copy;
1045 	int start = skb_headlen(skb);
1046 
1047 	if (offset > (int)skb->len - len)
1048 		goto fault;
1049 
1050 	if ((copy = start - offset) > 0) {
1051 		if (copy > len)
1052 			copy = len;
1053 		memcpy(skb->data + offset, from, copy);
1054 		if ((len -= copy) == 0)
1055 			return 0;
1056 		offset += copy;
1057 		from += copy;
1058 	}
1059 
1060 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1061 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1062 		int end;
1063 
1064 		BUG_TRAP(start <= offset + len);
1065 
1066 		end = start + frag->size;
1067 		if ((copy = end - offset) > 0) {
1068 			u8 *vaddr;
1069 
1070 			if (copy > len)
1071 				copy = len;
1072 
1073 			vaddr = kmap_skb_frag(frag);
1074 			memcpy(vaddr + frag->page_offset + offset - start,
1075 			       from, copy);
1076 			kunmap_skb_frag(vaddr);
1077 
1078 			if ((len -= copy) == 0)
1079 				return 0;
1080 			offset += copy;
1081 			from += copy;
1082 		}
1083 		start = end;
1084 	}
1085 
1086 	if (skb_shinfo(skb)->frag_list) {
1087 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1088 
1089 		for (; list; list = list->next) {
1090 			int end;
1091 
1092 			BUG_TRAP(start <= offset + len);
1093 
1094 			end = start + list->len;
1095 			if ((copy = end - offset) > 0) {
1096 				if (copy > len)
1097 					copy = len;
1098 				if (skb_store_bits(list, offset - start,
1099 						   from, copy))
1100 					goto fault;
1101 				if ((len -= copy) == 0)
1102 					return 0;
1103 				offset += copy;
1104 				from += copy;
1105 			}
1106 			start = end;
1107 		}
1108 	}
1109 	if (!len)
1110 		return 0;
1111 
1112 fault:
1113 	return -EFAULT;
1114 }
1115 
1116 EXPORT_SYMBOL(skb_store_bits);
1117 
1118 /* Checksum skb data. */
1119 
1120 unsigned int skb_checksum(const struct sk_buff *skb, int offset,
1121 			  int len, unsigned int csum)
1122 {
1123 	int start = skb_headlen(skb);
1124 	int i, copy = start - offset;
1125 	int pos = 0;
1126 
1127 	/* Checksum header. */
1128 	if (copy > 0) {
1129 		if (copy > len)
1130 			copy = len;
1131 		csum = csum_partial(skb->data + offset, copy, csum);
1132 		if ((len -= copy) == 0)
1133 			return csum;
1134 		offset += copy;
1135 		pos	= copy;
1136 	}
1137 
1138 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1139 		int end;
1140 
1141 		BUG_TRAP(start <= offset + len);
1142 
1143 		end = start + skb_shinfo(skb)->frags[i].size;
1144 		if ((copy = end - offset) > 0) {
1145 			unsigned int csum2;
1146 			u8 *vaddr;
1147 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1148 
1149 			if (copy > len)
1150 				copy = len;
1151 			vaddr = kmap_skb_frag(frag);
1152 			csum2 = csum_partial(vaddr + frag->page_offset +
1153 					     offset - start, copy, 0);
1154 			kunmap_skb_frag(vaddr);
1155 			csum = csum_block_add(csum, csum2, pos);
1156 			if (!(len -= copy))
1157 				return csum;
1158 			offset += copy;
1159 			pos    += copy;
1160 		}
1161 		start = end;
1162 	}
1163 
1164 	if (skb_shinfo(skb)->frag_list) {
1165 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1166 
1167 		for (; list; list = list->next) {
1168 			int end;
1169 
1170 			BUG_TRAP(start <= offset + len);
1171 
1172 			end = start + list->len;
1173 			if ((copy = end - offset) > 0) {
1174 				unsigned int csum2;
1175 				if (copy > len)
1176 					copy = len;
1177 				csum2 = skb_checksum(list, offset - start,
1178 						     copy, 0);
1179 				csum = csum_block_add(csum, csum2, pos);
1180 				if ((len -= copy) == 0)
1181 					return csum;
1182 				offset += copy;
1183 				pos    += copy;
1184 			}
1185 			start = end;
1186 		}
1187 	}
1188 	if (len)
1189 		BUG();
1190 
1191 	return csum;
1192 }
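
/*
 * Usage sketch (illustrative only): checksum everything after a
 * hypothetical header of hdrlen bytes and fold the 32-bit partial sum
 * into the usual 16-bit Internet checksum, as callers of csum_fold()
 * elsewhere in this file do.
 */
static unsigned short example_payload_csum(const struct sk_buff *skb,
					   unsigned int hdrlen)
{
	unsigned int csum;

	if (skb->len < hdrlen)
		return 0;

	csum = skb_checksum(skb, hdrlen, skb->len - hdrlen, 0);
	return csum_fold(csum);
}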
1193 
1194 /* Both of above in one bottle. */
1195 
1196 unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1197 				    u8 *to, int len, unsigned int csum)
1198 {
1199 	int start = skb_headlen(skb);
1200 	int i, copy = start - offset;
1201 	int pos = 0;
1202 
1203 	/* Copy header. */
1204 	if (copy > 0) {
1205 		if (copy > len)
1206 			copy = len;
1207 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
1208 						 copy, csum);
1209 		if ((len -= copy) == 0)
1210 			return csum;
1211 		offset += copy;
1212 		to     += copy;
1213 		pos	= copy;
1214 	}
1215 
1216 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1217 		int end;
1218 
1219 		BUG_TRAP(start <= offset + len);
1220 
1221 		end = start + skb_shinfo(skb)->frags[i].size;
1222 		if ((copy = end - offset) > 0) {
1223 			unsigned int csum2;
1224 			u8 *vaddr;
1225 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1226 
1227 			if (copy > len)
1228 				copy = len;
1229 			vaddr = kmap_skb_frag(frag);
1230 			csum2 = csum_partial_copy_nocheck(vaddr +
1231 							  frag->page_offset +
1232 							  offset - start, to,
1233 							  copy, 0);
1234 			kunmap_skb_frag(vaddr);
1235 			csum = csum_block_add(csum, csum2, pos);
1236 			if (!(len -= copy))
1237 				return csum;
1238 			offset += copy;
1239 			to     += copy;
1240 			pos    += copy;
1241 		}
1242 		start = end;
1243 	}
1244 
1245 	if (skb_shinfo(skb)->frag_list) {
1246 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1247 
1248 		for (; list; list = list->next) {
1249 			unsigned int csum2;
1250 			int end;
1251 
1252 			BUG_TRAP(start <= offset + len);
1253 
1254 			end = start + list->len;
1255 			if ((copy = end - offset) > 0) {
1256 				if (copy > len)
1257 					copy = len;
1258 				csum2 = skb_copy_and_csum_bits(list,
1259 							       offset - start,
1260 							       to, copy, 0);
1261 				csum = csum_block_add(csum, csum2, pos);
1262 				if ((len -= copy) == 0)
1263 					return csum;
1264 				offset += copy;
1265 				to     += copy;
1266 				pos    += copy;
1267 			}
1268 			start = end;
1269 		}
1270 	}
1271 	if (len)
1272 		BUG();
1273 	return csum;
1274 }
1275 
1276 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
1277 {
1278 	unsigned int csum;
1279 	long csstart;
1280 
1281 	if (skb->ip_summed == CHECKSUM_HW)
1282 		csstart = skb->h.raw - skb->data;
1283 	else
1284 		csstart = skb_headlen(skb);
1285 
1286 	if (csstart > skb_headlen(skb))
1287 		BUG();
1288 
1289 	memcpy(to, skb->data, csstart);
1290 
1291 	csum = 0;
1292 	if (csstart != skb->len)
1293 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
1294 					      skb->len - csstart, 0);
1295 
1296 	if (skb->ip_summed == CHECKSUM_HW) {
1297 		long csstuff = csstart + skb->csum;
1298 
1299 		*((unsigned short *)(to + csstuff)) = csum_fold(csum);
1300 	}
1301 }
1302 
1303 /**
1304  *	skb_dequeue - remove from the head of the queue
1305  *	@list: list to dequeue from
1306  *
1307  *	Remove the head of the list. The list lock is taken so the function
1308  *	may be used safely with other locking list functions. The head item is
1309  *	returned or %NULL if the list is empty.
1310  */
1311 
1312 struct sk_buff *skb_dequeue(struct sk_buff_head *list)
1313 {
1314 	unsigned long flags;
1315 	struct sk_buff *result;
1316 
1317 	spin_lock_irqsave(&list->lock, flags);
1318 	result = __skb_dequeue(list);
1319 	spin_unlock_irqrestore(&list->lock, flags);
1320 	return result;
1321 }
1322 
1323 /**
1324  *	skb_dequeue_tail - remove from the tail of the queue
1325  *	@list: list to dequeue from
1326  *
1327  *	Remove the tail of the list. The list lock is taken so the function
1328  *	may be used safely with other locking list functions. The tail item is
1329  *	returned or %NULL if the list is empty.
1330  */
1331 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
1332 {
1333 	unsigned long flags;
1334 	struct sk_buff *result;
1335 
1336 	spin_lock_irqsave(&list->lock, flags);
1337 	result = __skb_dequeue_tail(list);
1338 	spin_unlock_irqrestore(&list->lock, flags);
1339 	return result;
1340 }
1341 
1342 /**
1343  *	skb_queue_purge - empty a list
1344  *	@list: list to empty
1345  *
1346  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
1347  *	the list and one reference dropped. This function takes the list
1348  *	lock and is atomic with respect to other list locking functions.
1349  */
1350 void skb_queue_purge(struct sk_buff_head *list)
1351 {
1352 	struct sk_buff *skb;
1353 	while ((skb = skb_dequeue(list)) != NULL)
1354 		kfree_skb(skb);
1355 }
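
/*
 * Usage sketch (illustrative only): the locked queue helpers in this
 * file together implement a simple FIFO of buffers.  The queue and
 * helper names below are hypothetical.
 */
static struct sk_buff_head example_rxq;

static void example_queue_init(void)
{
	skb_queue_head_init(&example_rxq);
}

static void example_enqueue(struct sk_buff *skb)
{
	skb_queue_tail(&example_rxq, skb);	/* producer side */
}

static void example_drain(void)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&example_rxq)) != NULL)
		kfree_skb(skb);			/* consumer side */
	/* Or simply: skb_queue_purge(&example_rxq); */
}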
1356 
1357 /**
1358  *	skb_queue_head - queue a buffer at the list head
1359  *	@list: list to use
1360  *	@newsk: buffer to queue
1361  *
1362  *	Queue a buffer at the start of the list. This function takes the
1363  *	list lock and can be used safely with other locking &sk_buff
1364  *	functions.
1365  *
1366  *	A buffer cannot be placed on two lists at the same time.
1367  */
1368 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
1369 {
1370 	unsigned long flags;
1371 
1372 	spin_lock_irqsave(&list->lock, flags);
1373 	__skb_queue_head(list, newsk);
1374 	spin_unlock_irqrestore(&list->lock, flags);
1375 }
1376 
1377 /**
1378  *	skb_queue_tail - queue a buffer at the list tail
1379  *	@list: list to use
1380  *	@newsk: buffer to queue
1381  *
1382  *	Queue a buffer at the tail of the list. This function takes the
1383  *	list lock and can be used safely with other locking &sk_buff
1384  *	functions.
1385  *
1386  *	A buffer cannot be placed on two lists at the same time.
1387  */
1388 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
1389 {
1390 	unsigned long flags;
1391 
1392 	spin_lock_irqsave(&list->lock, flags);
1393 	__skb_queue_tail(list, newsk);
1394 	spin_unlock_irqrestore(&list->lock, flags);
1395 }
1396 
1397 /**
1398  *	skb_unlink	-	remove a buffer from a list
1399  *	@skb: buffer to remove
1400  *	@list: list to use
1401  *
1402  *	Remove a packet from a list. The list locks are taken and this
1403  *	function is atomic with respect to other list locked calls.
1404  *
1405  *	You must know what list the SKB is on.
1406  */
1407 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1408 {
1409 	unsigned long flags;
1410 
1411 	spin_lock_irqsave(&list->lock, flags);
1412 	__skb_unlink(skb, list);
1413 	spin_unlock_irqrestore(&list->lock, flags);
1414 }
1415 
1416 /**
1417  *	skb_append	-	append a buffer
1418  *	@old: buffer to insert after
1419  *	@newsk: buffer to insert
1420  *	@list: list to use
1421  *
1422  *	Place a packet after a given packet in a list. The list locks are taken
1423  *	and this function is atomic with respect to other list locked calls.
1424  *	A buffer cannot be placed on two lists at the same time.
1425  */
1426 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
1427 {
1428 	unsigned long flags;
1429 
1430 	spin_lock_irqsave(&list->lock, flags);
1431 	__skb_append(old, newsk, list);
1432 	spin_unlock_irqrestore(&list->lock, flags);
1433 }
1434 
1435 
1436 /**
1437  *	skb_insert	-	insert a buffer
1438  *	@old: buffer to insert before
1439  *	@newsk: buffer to insert
1440  *	@list: list to use
1441  *
1442  *	Place a packet before a given packet in a list. The list locks are
1443  * 	taken and this function is atomic with respect to other list locked
1444  *	calls.
1445  *
1446  *	A buffer cannot be placed on two lists at the same time.
1447  */
1448 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
1449 {
1450 	unsigned long flags;
1451 
1452 	spin_lock_irqsave(&list->lock, flags);
1453 	__skb_insert(newsk, old->prev, old, list);
1454 	spin_unlock_irqrestore(&list->lock, flags);
1455 }
1456 
1457 #if 0
1458 /*
1459  * 	Tune the memory allocator for a new MTU size.
1460  */
1461 void skb_add_mtu(int mtu)
1462 {
1463 	/* Must match allocation in alloc_skb */
1464 	mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info);
1465 
1466 	kmem_add_cache_size(mtu);
1467 }
1468 #endif
1469 
1470 static inline void skb_split_inside_header(struct sk_buff *skb,
1471 					   struct sk_buff* skb1,
1472 					   const u32 len, const int pos)
1473 {
1474 	int i;
1475 
1476 	memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len);
1477 
1478 	/* And move data appendix as is. */
1479 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1480 		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
1481 
1482 	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
1483 	skb_shinfo(skb)->nr_frags  = 0;
1484 	skb1->data_len		   = skb->data_len;
1485 	skb1->len		   += skb1->data_len;
1486 	skb->data_len		   = 0;
1487 	skb->len		   = len;
1488 	skb->tail		   = skb->data + len;
1489 }
1490 
1491 static inline void skb_split_no_header(struct sk_buff *skb,
1492 				       struct sk_buff* skb1,
1493 				       const u32 len, int pos)
1494 {
1495 	int i, k = 0;
1496 	const int nfrags = skb_shinfo(skb)->nr_frags;
1497 
1498 	skb_shinfo(skb)->nr_frags = 0;
1499 	skb1->len		  = skb1->data_len = skb->len - len;
1500 	skb->len		  = len;
1501 	skb->data_len		  = len - pos;
1502 
1503 	for (i = 0; i < nfrags; i++) {
1504 		int size = skb_shinfo(skb)->frags[i].size;
1505 
1506 		if (pos + size > len) {
1507 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
1508 
1509 			if (pos < len) {
1510 				/* Split frag.
1511 				 * We have two variants in this case:
1512 				 * 1. Move all the frag to the second
1513 				 *    part, if it is possible. F.e.
1514 				 *    this approach is mandatory for TUX,
1515 				 *    where splitting is expensive.
1516 				 * 2. Split it accurately. This is what we do here.
1517 				 */
1518 				get_page(skb_shinfo(skb)->frags[i].page);
1519 				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
1520 				skb_shinfo(skb1)->frags[0].size -= len - pos;
1521 				skb_shinfo(skb)->frags[i].size	= len - pos;
1522 				skb_shinfo(skb)->nr_frags++;
1523 			}
1524 			k++;
1525 		} else
1526 			skb_shinfo(skb)->nr_frags++;
1527 		pos += size;
1528 	}
1529 	skb_shinfo(skb1)->nr_frags = k;
1530 }
1531 
1532 /**
1533  * skb_split - Split a fragmented skb into two parts at length len.
1534  * @skb: the buffer to split
1535  * @skb1: the buffer to receive the second part
1536  * @len: new length for skb
1537  */
1538 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
1539 {
1540 	int pos = skb_headlen(skb);
1541 
1542 	if (len < pos)	/* Split line is inside header. */
1543 		skb_split_inside_header(skb, skb1, len, pos);
1544 	else		/* Second chunk has no header, nothing to copy. */
1545 		skb_split_no_header(skb, skb1, len, pos);
1546 }
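
/*
 * Usage sketch (illustrative only): split off everything beyond len bytes
 * into a second, freshly allocated buffer.  When the split point falls
 * inside the linear header, skb_split_inside_header() copies into skb1
 * with skb_put(), so skb1 must provide enough tailroom; allocating
 * skb_headlen(skb) bytes is a safe upper bound for this sketch.
 */
static struct sk_buff *example_split_at(struct sk_buff *skb, u32 len)
{
	struct sk_buff *skb1;

	if (len >= skb->len)
		return NULL;

	skb1 = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
	if (!skb1)
		return NULL;

	skb_split(skb, skb1, len);	/* skb keeps [0, len), skb1 gets the rest */
	return skb1;
}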
1547 
1548 /**
1549  * skb_prepare_seq_read - Prepare a sequential read of skb data
1550  * @skb: the buffer to read
1551  * @from: lower offset of data to be read
1552  * @to: upper offset of data to be read
1553  * @st: state variable
1554  *
1555  * Initializes the specified state variable. Must be called before
1556  * invoking skb_seq_read() for the first time.
1557  */
1558 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1559 			  unsigned int to, struct skb_seq_state *st)
1560 {
1561 	st->lower_offset = from;
1562 	st->upper_offset = to;
1563 	st->root_skb = st->cur_skb = skb;
1564 	st->frag_idx = st->stepped_offset = 0;
1565 	st->frag_data = NULL;
1566 }
1567 
1568 /**
1569  * skb_seq_read - Sequentially read skb data
1570  * @consumed: number of bytes consumed by the caller so far
1571  * @data: destination pointer for data to be returned
1572  * @st: state variable
1573  *
1574  * Reads a block of skb data at @consumed relative to the
1575  * lower offset specified to skb_prepare_seq_read(). Assigns
1576  * the head of the data block to @data and returns the length
1577  * of the block or 0 if the end of the skb data or the upper
1578  * offset has been reached.
1579  *
1580  * The caller is not required to consume all of the data
1581  * returned, i.e. @consumed is typically set to the number
1582  * of bytes already consumed and the next call to
1583  * skb_seq_read() will return the remaining part of the block.
1584  *
1585  * Note: The size of each block of data returned can be arbitrary;
1586  *       this limitation is the cost of zerocopy sequential
1587  *       reads of potentially non-linear data.
1588  *
1589  * Note: Fragment lists within fragments are not implemented
1590  *       at the moment, state->root_skb could be replaced with
1591  *       a stack for this purpose.
1592  */
1593 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
1594 			  struct skb_seq_state *st)
1595 {
1596 	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
1597 	skb_frag_t *frag;
1598 
1599 	if (unlikely(abs_offset >= st->upper_offset))
1600 		return 0;
1601 
1602 next_skb:
1603 	block_limit = skb_headlen(st->cur_skb);
1604 
1605 	if (abs_offset < block_limit) {
1606 		*data = st->cur_skb->data + abs_offset;
1607 		return block_limit - abs_offset;
1608 	}
1609 
1610 	if (st->frag_idx == 0 && !st->frag_data)
1611 		st->stepped_offset += skb_headlen(st->cur_skb);
1612 
1613 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
1614 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
1615 		block_limit = frag->size + st->stepped_offset;
1616 
1617 		if (abs_offset < block_limit) {
1618 			if (!st->frag_data)
1619 				st->frag_data = kmap_skb_frag(frag);
1620 
1621 			*data = (u8 *) st->frag_data + frag->page_offset +
1622 				(abs_offset - st->stepped_offset);
1623 
1624 			return block_limit - abs_offset;
1625 		}
1626 
1627 		if (st->frag_data) {
1628 			kunmap_skb_frag(st->frag_data);
1629 			st->frag_data = NULL;
1630 		}
1631 
1632 		st->frag_idx++;
1633 		st->stepped_offset += frag->size;
1634 	}
1635 
1636 	if (st->cur_skb->next) {
1637 		st->cur_skb = st->cur_skb->next;
1638 		st->frag_idx = 0;
1639 		goto next_skb;
1640 	} else if (st->root_skb == st->cur_skb &&
1641 		   skb_shinfo(st->root_skb)->frag_list) {
1642 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
1643 		goto next_skb;
1644 	}
1645 
1646 	return 0;
1647 }
1648 
1649 /**
1650  * skb_abort_seq_read - Abort a sequential read of skb data
1651  * @st: state variable
1652  *
1653  * Must be called if the sequential read is abandoned before
1654  * skb_seq_read() has returned 0.
1655  */
1656 void skb_abort_seq_read(struct skb_seq_state *st)
1657 {
1658 	if (st->frag_data)
1659 		kunmap_skb_frag(st->frag_data);
1660 }
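
/*
 * Usage sketch (illustrative only): the canonical sequential-read loop
 * over a possibly non-linear skb.  It stops early when a NUL byte is
 * found, which is why skb_abort_seq_read() is needed on that path; the
 * helper name is hypothetical.
 */
static int example_scan_for_nul(struct sk_buff *skb)
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0, len, i;

	skb_prepare_seq_read(skb, 0, skb->len, &st);

	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		for (i = 0; i < len; i++) {
			if (data[i] == 0) {
				skb_abort_seq_read(&st);
				return consumed + i;
			}
		}
		consumed += len;
	}
	return -1;	/* not found; the read ran to completion */
}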
1661 
1662 #define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
1663 
1664 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
1665 					  struct ts_config *conf,
1666 					  struct ts_state *state)
1667 {
1668 	return skb_seq_read(offset, text, TS_SKB_CB(state));
1669 }
1670 
1671 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
1672 {
1673 	skb_abort_seq_read(TS_SKB_CB(state));
1674 }
1675 
1676 /**
1677  * skb_find_text - Find a text pattern in skb data
1678  * @skb: the buffer to look in
1679  * @from: search offset
1680  * @to: search limit
1681  * @config: textsearch configuration
1682  * @state: uninitialized textsearch state variable
1683  *
1684  * Finds a pattern in the skb data according to the specified
1685  * textsearch configuration. Use textsearch_next() to retrieve
1686  * subsequent occurrences of the pattern. Returns the offset
1687  * to the first occurrence or UINT_MAX if no match was found.
1688  */
1689 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
1690 			   unsigned int to, struct ts_config *config,
1691 			   struct ts_state *state)
1692 {
1693 	config->get_next_block = skb_ts_get_next_block;
1694 	config->finish = skb_ts_finish;
1695 
1696 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
1697 
1698 	return textsearch_find(config, state);
1699 }
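
/*
 * Usage sketch (illustrative only): search the whole skb for a literal
 * pattern with the "kmp" textsearch algorithm.  The pattern is
 * hypothetical; the sketch assumes <linux/textsearch.h> definitions are
 * visible via <linux/skbuff.h> and that the KMP algorithm is available
 * (TS_AUTOLOAD).  textsearch_prepare() may return an ERR_PTR, which must
 * be checked before use.
 */
static unsigned int example_find_pattern(struct sk_buff *skb)
{
	static const char pattern[] = "HTTP/1.";
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", pattern, sizeof(pattern) - 1,
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf, &state);
	textsearch_destroy(conf);

	return pos;	/* UINT_MAX if the pattern was not found */
}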
1700 
1701 /**
1702  * skb_append_datato_frags - append user data to an skb
1703  * @sk: sock structure
1704  * @skb: skb structure to which the user data is appended.
1705  * @getfrag: callback function to be used for getting the user data
1706  * @from: pointer to user message iov
1707  * @length: length of the iov message
1708  *
1709  * Description: This procedure appends the user data to the fragment part
1710  * of the skb. If any page allocation fails, this procedure returns -ENOMEM.
1711  */
1712 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
1713 			int getfrag(void *from, char *to, int offset,
1714 					int len, int odd, struct sk_buff *skb),
1715 			void *from, int length)
1716 {
1717 	int frg_cnt = 0;
1718 	skb_frag_t *frag = NULL;
1719 	struct page *page = NULL;
1720 	int copy, left;
1721 	int offset = 0;
1722 	int ret;
1723 
1724 	do {
1725 		/* Return error if we don't have space for new frag */
1726 		frg_cnt = skb_shinfo(skb)->nr_frags;
1727 		if (frg_cnt >= MAX_SKB_FRAGS)
1728 			return -EFAULT;
1729 
1730 		/* allocate a new page for next frag */
1731 		page = alloc_pages(sk->sk_allocation, 0);
1732 
1733 		/* If alloc_pages() fails, just return failure and the caller
1734 		 * will free the previously allocated pages by doing kfree_skb()
1735 		 */
1736 		if (page == NULL)
1737 			return -ENOMEM;
1738 
1739 		/* initialize the next frag */
1740 		sk->sk_sndmsg_page = page;
1741 		sk->sk_sndmsg_off = 0;
1742 		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
1743 		skb->truesize += PAGE_SIZE;
1744 		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
1745 
1746 		/* get the new initialized frag */
1747 		frg_cnt = skb_shinfo(skb)->nr_frags;
1748 		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
1749 
1750 		/* copy the user data to page */
1751 		left = PAGE_SIZE - frag->page_offset;
1752 		copy = (length > left)? left : length;
1753 
1754 		ret = getfrag(from, (page_address(frag->page) +
1755 			    frag->page_offset + frag->size),
1756 			    offset, copy, 0, skb);
1757 		if (ret < 0)
1758 			return -EFAULT;
1759 
1760 		/* copy was successful so update the size parameters */
1761 		sk->sk_sndmsg_off += copy;
1762 		frag->size += copy;
1763 		skb->len += copy;
1764 		skb->data_len += copy;
1765 		offset += copy;
1766 		length -= copy;
1767 
1768 	} while (length > 0);
1769 
1770 	return 0;
1771 }
1772 
1773 void __init skb_init(void)
1774 {
1775 	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
1776 					      sizeof(struct sk_buff),
1777 					      0,
1778 					      SLAB_HWCACHE_ALIGN,
1779 					      NULL, NULL);
1780 	if (!skbuff_head_cache)
1781 		panic("cannot create skbuff cache");
1782 
1783 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
1784 						(2*sizeof(struct sk_buff)) +
1785 						sizeof(atomic_t),
1786 						0,
1787 						SLAB_HWCACHE_ALIGN,
1788 						NULL, NULL);
1789 	if (!skbuff_fclone_cache)
1790 		panic("cannot create skbuff cache");
1791 }
1792 
1793 EXPORT_SYMBOL(___pskb_trim);
1794 EXPORT_SYMBOL(__kfree_skb);
1795 EXPORT_SYMBOL(__pskb_pull_tail);
1796 EXPORT_SYMBOL(__alloc_skb);
1797 EXPORT_SYMBOL(pskb_copy);
1798 EXPORT_SYMBOL(pskb_expand_head);
1799 EXPORT_SYMBOL(skb_checksum);
1800 EXPORT_SYMBOL(skb_clone);
1801 EXPORT_SYMBOL(skb_clone_fraglist);
1802 EXPORT_SYMBOL(skb_copy);
1803 EXPORT_SYMBOL(skb_copy_and_csum_bits);
1804 EXPORT_SYMBOL(skb_copy_and_csum_dev);
1805 EXPORT_SYMBOL(skb_copy_bits);
1806 EXPORT_SYMBOL(skb_copy_expand);
1807 EXPORT_SYMBOL(skb_over_panic);
1808 EXPORT_SYMBOL(skb_pad);
1809 EXPORT_SYMBOL(skb_realloc_headroom);
1810 EXPORT_SYMBOL(skb_under_panic);
1811 EXPORT_SYMBOL(skb_dequeue);
1812 EXPORT_SYMBOL(skb_dequeue_tail);
1813 EXPORT_SYMBOL(skb_insert);
1814 EXPORT_SYMBOL(skb_queue_purge);
1815 EXPORT_SYMBOL(skb_queue_head);
1816 EXPORT_SYMBOL(skb_queue_tail);
1817 EXPORT_SYMBOL(skb_unlink);
1818 EXPORT_SYMBOL(skb_append);
1819 EXPORT_SYMBOL(skb_split);
1820 EXPORT_SYMBOL(skb_prepare_seq_read);
1821 EXPORT_SYMBOL(skb_seq_read);
1822 EXPORT_SYMBOL(skb_abort_seq_read);
1823 EXPORT_SYMBOL(skb_find_text);
1824 EXPORT_SYMBOL(skb_append_datato_frags);
1825