/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <iiitac@pyr.swan.ac.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Version:	$Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}
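/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the NOTE above distinguishes the unlocked __skb_ variants from their
 * locking counterparts. A minimal sketch of the intended discipline for a
 * caller-owned &sk_buff_head; the helper name example_requeue_head() is
 * hypothetical.
 */
#if 0
static void example_requeue_head(struct sk_buff_head *q, struct sk_buff *skb)
{
	unsigned long flags;

	/* Unlocked variant: only safe with the queue lock held and
	 * interrupts disabled, exactly as the NOTE above demands. */
	spin_lock_irqsave(&q->lock, flags);
	__skb_queue_head(q, skb);
	spin_unlock_irqrestore(&q->lock, flags);

	/* Equivalent one-liner: skb_queue_head() takes the lock itself. */
}
#endif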
void skb_truesize_bug(struct sk_buff *skb)
{
	printk(KERN_ERR "SKB BUG: Invalid truesize (%u) "
	       "len=%u, sizeof(sk_buff)=%Zd\n",
	       skb->truesize, skb->len, sizeof(struct sk_buff));
}
EXPORT_SYMBOL(skb_truesize_bug);

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of @size bytes. The object has a reference count of one.
 *	Returns the buffer, or %NULL on failure.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	size = SKB_DATA_ALIGN(size);
	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
			gfp_mask, node);
	if (!data)
		goto nodata;

	/*
	 * See comment in sk_buff definition, just before the 'tail' member
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	atomic_set(&shinfo->dataref, 1);
	shinfo->nr_frags  = 0;
	shinfo->gso_size = 0;
	shinfo->gso_segs = 0;
	shinfo->gso_type = 0;
	shinfo->ip6_frag_id = 0;
	shinfo->frag_list = NULL;

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask)
{
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
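/*
 * Illustrative sketch (editor's addition): the canonical way to build a
 * packet on top of alloc_skb() is to reserve headroom first and then
 * skb_put() the payload; skb_put() is what skb_over_panic() above guards.
 * The 128-byte headroom figure and the helper name are hypothetical.
 */
#if 0
static struct sk_buff *example_build_packet(const void *payload, int len)
{
	struct sk_buff *skb = alloc_skb(128 + len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, 128);			 /* headroom for lower layers */
	memcpy(skb_put(skb, len), payload, len); /* extend tail, then fill   */
	return skb;
}
#endif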
static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free the memory of an skbuff without cleaning its state.
 */
void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	skb_release_data(skb);
	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif

	kfree_skbmem(skb);
}

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	__kfree_skb(skb);
}
/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL, otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	C(tstamp);
	C(dev);
	C(transport_header);
	C(network_header);
	C(mac_header);
	C(dst);
	dst_clone(skb->dst);
	C(sp);
#ifdef CONFIG_INET
	secpath_get(skb->sp);
#endif
	memcpy(n->cb, skb->cb, sizeof(skb->cb));
	C(len);
	C(data_len);
	C(mac_len);
	C(csum);
	C(local_df);
	n->cloned = 1;
	n->nohdr = 0;
	C(pkt_type);
	C(ip_summed);
	C(priority);
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	C(ipvs_property);
#endif
	C(protocol);
	n->destructor = NULL;
	C(mark);
	__nf_copy(n, skb);
#ifdef CONFIG_NET_SCHED
	C(tc_index);
#ifdef CONFIG_NET_CLS_ACT
	n->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
	n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
	n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	C(iif);
#endif
#endif
	skb_copy_secmark(n, skb);
	C(truesize);
	atomic_set(&n->users, 1);
	C(head);
	C(data);
	C(tail);
	C(end);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
}
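/*
 * Illustrative sketch (editor's addition): a clone shares the data area
 * with the original, so it is cheap but must be treated as read-only
 * unless made private first. The helper name example_tap_packet() and the
 * deliver() callback are hypothetical.
 */
#if 0
static void example_tap_packet(struct sk_buff *skb,
			       void (*deliver)(struct sk_buff *))
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (clone)
		deliver(clone);	/* consumer must not modify the shared data */
}
#endif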
static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif
	new->sk		= NULL;
	new->dev	= old->dev;
	new->priority	= old->priority;
	new->protocol	= old->protocol;
	new->dst	= dst_clone(old->dst);
#ifdef CONFIG_INET
	new->sp		= secpath_get(old->sp);
#endif
	new->transport_header = old->transport_header;
	new->network_header   = old->network_header;
	new->mac_header	      = old->mac_header;
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header   += offset;
	new->mac_header	      += offset;
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->local_df	= old->local_df;
	new->fclone	= SKB_FCLONE_UNAVAILABLE;
	new->pkt_type	= old->pkt_type;
	new->tstamp	= old->tstamp;
	new->destructor = NULL;
	new->mark	= old->mark;
	__nf_copy(new, old);
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	new->ipvs_property = old->ipvs_property;
#endif
#ifdef CONFIG_NET_SCHED
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
	new->tc_index	= old->tc_index;
#endif
	skb_copy_secmark(new, old);
	atomic_set(&new->users, 1);
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended when only the header is
 *	going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb->data - skb->head;
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n = alloc_skb(skb->end + skb->data_len, gfp_mask);
#else
	n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
#endif
	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);
	n->csum	     = skb->csum;
	n->ip_summed = skb->ip_summed;

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}


/**
 *	pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in its header. Fragmented data remains shared. This is used when
 *	the caller wishes to modify only the header of an &sk_buff and
 *	needs a private copy of the header to alter. Returns %NULL on
 *	failure or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n = alloc_skb(skb->end, gfp_mask);
#else
	n = alloc_skb(skb->end - skb->head, gfp_mask);
#endif
	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, skb->data - skb->head);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);
	n->csum	     = skb->csum;
	n->ip_summed = skb->ip_summed;

	n->truesize += skb->data_len;
	n->data_len  = skb->data_len;
	n->len	     = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_shinfo(skb)->frag_list) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
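/*
 * Illustrative sketch (editor's addition): the usual way callers use
 * pskb_copy() when they only need to rewrite the header of a possibly
 * shared buffer. The helper name is hypothetical.
 */
#if 0
static struct sk_buff *example_make_header_writable(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (!skb_cloned(skb))
		return skb;			/* header already private */

	nskb = pskb_copy(skb, GFP_ATOMIC);	/* frags stay shared */
	kfree_skb(skb);				/* drop ref to the old skb */
	return nskb;		/* may be NULL on allocation failure */
}
#endif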
/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates an identical copy, if @nhead and @ntail are
 *	zero) the header of @skb. The &sk_buff itself is not changed. The
 *	&sk_buff MUST have a reference count of 1. Returns zero on success
 *	or a negative error code if expansion failed, in which case the
 *	&sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	int size = nhead + skb->end + ntail;
#else
	int size = nhead + (skb->end - skb->head) + ntail;
#endif
	long off;

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void. */
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	memcpy(data + nhead, skb->head, skb->tail);
#else
	memcpy(data + nhead, skb->head, skb->tail - skb->head);
#endif
	memcpy(data + size, skb_end_pointer(skb),
	       sizeof(struct skb_shared_info));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		get_page(skb_shinfo(skb)->frags[i].page);

	if (skb_shinfo(skb)->frag_list)
		skb_clone_fraglist(skb);

	skb_release_data(skb);

	off = (data + nhead) - skb->head;

	skb->head     = data;
	skb->data    += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end      = size;
	off           = nhead;
#else
	skb->end      = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail	      += off;
	skb->transport_header += off;
	skb->network_header   += off;
	skb->mac_header	      += off;
	skb->cloned   = 0;
	skb->nohdr    = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nodata:
	return -ENOMEM;
}

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
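/*
 * Illustrative sketch (editor's addition): a typical transmit-path use of
 * skb_realloc_headroom(), growing headroom before pushing an extra
 * encapsulation header. The 16-byte figure and the helper name are
 * hypothetical.
 */
#if 0
static struct sk_buff *example_ensure_headroom(struct sk_buff *skb)
{
	if (skb_headroom(skb) < 16 || skb_cloned(skb)) {
		struct sk_buff *nskb = skb_realloc_headroom(skb, 16);

		kfree_skb(skb);		/* drop our reference to the old skb */
		skb = nskb;		/* NULL if reallocation failed */
	}
	return skb;	/* caller may now skb_push(skb, 16) safely */
}
#endif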
 *	--ANK
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off = 0;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

#ifdef NET_SKBUFF_DATA_USES_OFFSET
	off		      = newheadroom - oldheadroom;
#endif
	n->transport_header   += off;
	n->network_header     += off;
	n->mac_header	      += off;

	return n;
}

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data+skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}

/* Trims skb to length len. It can change skb pointers.
 */
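/*
 * Illustrative sketch (editor's addition): the classic driver-side use of
 * skb_pad(), stretching short Ethernet frames to the 60-byte minimum
 * (ETH_ZLEN) before DMA. Note the skb is already freed when skb_pad()
 * fails. The helper name and tx_len out-parameter are hypothetical.
 */
#if 0
static int example_pad_for_tx(struct sk_buff *skb, unsigned int *tx_len)
{
	if (skb->len < ETH_ZLEN) {
		if (skb_pad(skb, ETH_ZLEN - skb->len))
			return -ENOMEM;	/* skb was consumed by skb_pad() */
	}
	/* skb->len is not updated by skb_pad(); transmit the padded size. */
	*tx_len = max_t(unsigned int, skb->len, ETH_ZLEN);
	return 0;
}
#endif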
int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;

		if (end < len) {
			offset = end;
			continue;
		}

		skb_shinfo(skb)->frags[i++].size = len - offset;

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			put_page(skb_shinfo(skb)->frags[i].page);

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			kfree_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len       = len;
	} else {
		skb->len       = len;
		skb->data_len  = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
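/*
 * Illustrative sketch (editor's addition): callers normally reach
 * ___pskb_trim() through the pskb_trim() inline, e.g. to chop link-layer
 * padding off a received frame once the true datagram length is known.
 * The helper name is hypothetical.
 */
#if 0
static int example_trim_padding(struct sk_buff *skb, unsigned int datagram_len)
{
	if (skb->len > datagram_len)
		return pskb_trim(skb, datagram_len);	/* 0 or -errno */
	return 0;
}
#endif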
/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function only makes sense on a fragmented &sk_buff;
 *	it expands the header, moving its tail forward and copying the
 *	necessary data from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb does not have enough free space at the tail, get a new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at the tail, reallocate without expansion only if skb is
	 * cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_shinfo(skb)->frag_list)
		goto pull_pages;

	/* Estimate size of pulled pages.
	 */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					if (clone)
						kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail     += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
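/*
 * Illustrative sketch (editor's addition): protocol code rarely calls
 * __pskb_pull_tail() directly; the usual entry point is the
 * pskb_may_pull() inline, which linearizes just enough bytes for a header
 * peek. The struct and helper names are hypothetical.
 */
#if 0
struct example_hdr {
	__be16	type;
	__be16	len;
};

static int example_parse(struct sk_buff *skb)
{
	const struct example_hdr *hdr;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		return -EINVAL;		/* too short, or the pull failed */

	/* skb->data may have been reallocated; reload pointers now. */
	hdr = (const struct example_hdr *)skb->data;
	return ntohs(hdr->type);
}
#endif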
/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to     += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_bits(list, offset - start,
						  to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to     += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb.  This function handles all the messy bits of
 *	traversing fragment lists and such.
 */

int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_store_bits(list, offset - start,
						   from, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				from += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

EXPORT_SYMBOL(skb_store_bits);
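/*
 * Illustrative sketch (editor's addition): skb_copy_bits() is the safe way
 * to peek at data that may live in fragments, e.g. pulling a few bytes
 * into a stack buffer without linearizing the skb. The helper name is
 * hypothetical.
 */
#if 0
static int example_peek_bytes(const struct sk_buff *skb, int offset)
{
	u8 buf[8];

	/* Works regardless of how skb data is split across pages. */
	if (skb_copy_bits(skb, offset, buf, sizeof(buf)))
		return -EFAULT;		/* offset/length out of range */
	return buf[0];
}
#endif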
/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos	= copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos    += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				__wsum csum2;
				if (copy > len)
					copy = len;
				csum2 = skb_checksum(list, offset - start,
						     copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				pos    += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);

	return csum;
}
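/*
 * Illustrative sketch (editor's addition): verifying a packet with
 * skb_checksum(), folding the 32-bit partial sum into the final 16-bit
 * Internet checksum. The helper name is hypothetical.
 */
#if 0
static int example_verify_csum(struct sk_buff *skb)
{
	__wsum csum = skb_checksum(skb, 0, skb->len, 0);

	/* A valid ones-complement sum folds to zero. */
	return csum_fold(csum) ? -EINVAL : 0;
}
#endif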
/* Both of the above in one bottle. */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
				    u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to     += copy;
		pos	= copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to     += copy;
			pos    += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			__wsum csum2;
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				csum2 = skb_copy_and_csum_bits(list,
							       offset - start,
							       to, copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				to     += copy;
				pos    += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return csum;
}

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb->csum_start - skb_headroom(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff
 *	functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_append(old, newsk, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
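/*
 * Illustrative sketch (editor's addition): a minimal producer/consumer
 * pairing of the locked queue primitives above. The queue would normally
 * be embedded in a driver or socket structure; all names are hypothetical.
 */
#if 0
static struct sk_buff_head example_rxq;

static void example_setup(void)
{
	skb_queue_head_init(&example_rxq);	/* initialize lock and list */
}

static void example_producer(struct sk_buff *skb)
{
	skb_queue_tail(&example_rxq, skb);	/* takes the queue lock */
}

static void example_consumer(void)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&example_rxq)) != NULL)
		kfree_skb(skb);			/* drain, dropping each ref */
}
#endif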
/**
 *	skb_insert	-	insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet before a given packet in a list. The list locks are
 *	taken and this function is atomic with respect to other list locked
 *	calls.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff* skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags  = 0;
	skb1->data_len		   = skb->data_len;
	skb1->len		   += skb1->data_len;
	skb->data_len		   = 0;
	skb->len		   = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff* skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len		  = skb1->data_len = skb->len - len;
	skb->len		  = len;
	skb->data_len		  = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. E.g.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split accurately. This is what we
				 *    do here.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size	= len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
}

/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at @consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
 * the head of the data block to @data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
 *
 * The caller is not required to consume all of the data
 * returned, i.e. @consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
 * Note: The size of each block of data returned can be arbitrary;
 *       this is the cost of zero-copy sequential reads of
 *       potentially non-linear data.
 *
 * Note: Fragment lists within fragments are not implemented
 *       at the moment, state->root_skb could be replaced with
 *       a stack for this purpose.
 */
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st)
{
	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
	skb_frag_t *frag;

	if (unlikely(abs_offset >= st->upper_offset))
		return 0;

next_skb:
	block_limit = skb_headlen(st->cur_skb);

	if (abs_offset < block_limit) {
		*data = st->cur_skb->data + abs_offset;
		return block_limit - abs_offset;
	}

	if (st->frag_idx == 0 && !st->frag_data)
		st->stepped_offset += skb_headlen(st->cur_skb);

	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
		block_limit = frag->size + st->stepped_offset;

		if (abs_offset < block_limit) {
			if (!st->frag_data)
				st->frag_data = kmap_skb_frag(frag);

			*data = (u8 *) st->frag_data + frag->page_offset +
				(abs_offset - st->stepped_offset);

			return block_limit - abs_offset;
		}

		if (st->frag_data) {
			kunmap_skb_frag(st->frag_data);
			st->frag_data = NULL;
		}

		st->frag_idx++;
		st->stepped_offset += frag->size;
	}

	if (st->frag_data) {
		kunmap_skb_frag(st->frag_data);
		st->frag_data = NULL;
	}

	if (st->cur_skb->next) {
		st->cur_skb = st->cur_skb->next;
		st->frag_idx = 0;
		goto next_skb;
	} else if (st->root_skb == st->cur_skb &&
		   skb_shinfo(st->root_skb)->frag_list) {
		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
		goto next_skb;
	}

	return 0;
}
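/*
 * Illustrative sketch (editor's addition): the intended read loop for the
 * sequential-read API above; each iteration gets a pointer into the next
 * contiguous block without copying. The helper name is hypothetical.
 */
#if 0
static unsigned int example_count_zero_bytes(struct sk_buff *skb)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, zeros = 0, len, i;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		for (i = 0; i < len; i++)
			if (!data[i])
				zeros++;
		consumed += len;
	}
	/* Loop ran until skb_seq_read() returned 0, so no abort needed. */
	return zeros;
}
#endif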
/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if the sequential read is aborted early, i.e. if
 * skb_seq_read() was not called until it returned 0.
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_skb_frag(st->frag_data);
}

#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}

/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 * @state: uninitialized textsearch state variable
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config,
			   struct ts_state *state)
{
	unsigned int ret;

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));

	ret = textsearch_find(config, state);
	return (ret <= to - from ? ret : UINT_MAX);
}
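/*
 * Illustrative sketch (editor's addition): pairing skb_find_text() with a
 * textsearch configuration from lib/textsearch, here the "kmp"
 * (Knuth-Morris-Pratt) algorithm. The helper name is hypothetical.
 */
#if 0
static unsigned int example_find_pattern(struct sk_buff *skb,
					 const char *pattern)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", pattern, strlen(pattern),
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf, &state);
	textsearch_destroy(conf);
	return pos;	/* offset of first match, or UINT_MAX */
}
#endif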
/**
 *	skb_append_datato_frags - append the user data to a skb
 *	@sk: sock structure
 *	@skb: skb structure to which the user data is appended
 *	@getfrag: callback function used for getting the user data
 *	@from: pointer to user message iov
 *	@length: length of the iov message
 *
 *	Description: This procedure appends the user data to the fragment
 *	part of the skb. If any page allocation fails, this procedure
 *	returns -ENOMEM.
 */
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int (*getfrag)(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			void *from, int length)
{
	int frg_cnt = 0;
	skb_frag_t *frag = NULL;
	struct page *page = NULL;
	int copy, left;
	int offset = 0;
	int ret;

	do {
		/* Return error if we don't have space for new frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		if (frg_cnt >= MAX_SKB_FRAGS)
			return -EFAULT;

		/* allocate a new page for next frag */
		page = alloc_pages(sk->sk_allocation, 0);

		/* If alloc_page fails just return failure and caller will
		 * free previous allocated pages by doing kfree_skb()
		 */
		if (page == NULL)
			return -ENOMEM;

		/* initialize the next frag */
		sk->sk_sndmsg_page = page;
		sk->sk_sndmsg_off = 0;
		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
		skb->truesize += PAGE_SIZE;
		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);

		/* get the new initialized frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];

		/* copy the user data to page */
		left = PAGE_SIZE - frag->page_offset;
		copy = (length > left) ? left : length;

		ret = getfrag(from, (page_address(frag->page) +
			    frag->page_offset + frag->size),
			    offset, copy, 0, skb);
		if (ret < 0)
			return -EFAULT;

		/* copy was successful so update the size parameters */
		sk->sk_sndmsg_off += copy;
		frag->size += copy;
		skb->len += copy;
		skb->data_len += copy;
		offset += copy;
		length -= copy;

	} while (length > 0);

	return 0;
}

/**
 *	skb_pull_rcsum - pull skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pulled
 *
 *	This function performs an skb_pull on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_pull unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	BUG_ON(len > skb->len);
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	skb_postpull_rcsum(skb, skb->data, len);
	return skb->data += len;
}

EXPORT_SYMBOL_GPL(skb_pull_rcsum);
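/*
 * Illustrative sketch (editor's addition): stripping an encapsulation
 * header on the receive path with skb_pull_rcsum(), so a
 * CHECKSUM_COMPLETE value stays valid for the inner packet. The header
 * length constant and helper name are hypothetical.
 */
#if 0
#define EXAMPLE_HDR_LEN	4

static int example_decap(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, EXAMPLE_HDR_LEN))
		return -EINVAL;

	/* A plain skb_pull() here would leave skb->csum stale. */
	skb_pull_rcsum(skb, EXAMPLE_HDR_LEN);
	return 0;
}
#endif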
/**
 *	skb_segment - Perform protocol segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function performs segmentation on the given skb.  It returns
 *	the segment at the given position.  It returns NULL if there are
 *	no more segments to generate, or when an error is encountered.
 */
struct sk_buff *skb_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = NULL;
	struct sk_buff *tail = NULL;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int doffset = skb->data - skb_mac_header(skb);
	unsigned int offset = doffset;
	unsigned int headroom;
	unsigned int len;
	int sg = features & NETIF_F_SG;
	int nfrags = skb_shinfo(skb)->nr_frags;
	int err = -ENOMEM;
	int i = 0;
	int pos;

	__skb_push(skb, doffset);
	headroom = skb_headroom(skb);
	pos = skb_headlen(skb);

	do {
		struct sk_buff *nskb;
		skb_frag_t *frag;
		int hsize;
		int k;
		int size;

		len = skb->len - offset;
		if (len > mss)
			len = mss;

		hsize = skb_headlen(skb) - offset;
		if (hsize < 0)
			hsize = 0;
		if (hsize > len || !sg)
			hsize = len;

		nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
		if (unlikely(!nskb))
			goto err;

		if (segs)
			tail->next = nskb;
		else
			segs = nskb;
		tail = nskb;

		nskb->dev = skb->dev;
		nskb->priority = skb->priority;
		nskb->protocol = skb->protocol;
		nskb->dst = dst_clone(skb->dst);
		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
		nskb->pkt_type = skb->pkt_type;
		nskb->mac_len = skb->mac_len;

		skb_reserve(nskb, headroom);
		skb_reset_mac_header(nskb);
		skb_set_network_header(nskb, skb->mac_len);
		nskb->transport_header = (nskb->network_header +
					  skb_network_header_len(skb));
		skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
					  doffset);
		if (!sg) {
			nskb->csum = skb_copy_and_csum_bits(skb, offset,
							    skb_put(nskb, len),
							    len, 0);
			continue;
		}

		frag = skb_shinfo(nskb)->frags;
		k = 0;

		nskb->ip_summed = CHECKSUM_PARTIAL;
		nskb->csum = skb->csum;
		skb_copy_from_linear_data_offset(skb, offset,
						 skb_put(nskb, hsize), hsize);

		while (pos < offset + len) {
			BUG_ON(i >= nfrags);

			*frag = skb_shinfo(skb)->frags[i];
			get_page(frag->page);
			size = frag->size;

			if (pos < offset) {
				frag->page_offset += offset - pos;
				frag->size -= offset - pos;
			}

			k++;

			if (pos + size <= offset + len) {
				i++;
				pos += size;
			} else {
				frag->size -= pos + size - (offset + len);
				break;
			}

			frag++;
		}

		skb_shinfo(nskb)->nr_frags = k;
		nskb->data_len = len - hsize;
		nskb->len += nskb->data_len;
		nskb->truesize += nskb->data_len;
	} while ((offset += len) < skb->len);

	return segs;

err:
	while ((skb = segs)) {
		segs = skb->next;
		kfree_skb(skb);
	}
	return ERR_PTR(err);
}

EXPORT_SYMBOL_GPL(skb_segment);

void __init skb_init(void)
{
	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					      NULL, NULL);
	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
						(2*sizeof(struct sk_buff)) +
						sizeof(atomic_t),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						NULL, NULL);
}

/**
 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
 * @skb: Socket buffer containing the buffers to be mapped
 * @sg: The scatter-gather list to map into
 * @offset: The offset into the buffer's contents to start mapping
 * @len: Length of buffer space to be mapped
 *
 * Fill the specified scatter-gather list with mappings/pointers into a
 * region of the buffer space attached to a socket buffer.
 */
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset+offset-start;
			sg[elt].length = copy;
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}
/**
 *	skb_cow_data - Check that a socket buffer's data buffers are writable
 *	@skb: The socket buffer to check.
 *	@tailbits: Amount of trailing space to be added
 *	@trailer: Returned pointer to the skb where the @tailbits space begins
 *
 *	Make sure that the data buffers attached to a socket buffer are
 *	writable. If they are not, private copies are made of the data buffers
 *	and the socket buffer is set to use these instead.
 *
 *	If @tailbits is given, make sure that there is space to write @tailbits
 *	bytes of data beyond current end of socket buffer.  @trailer will be
 *	set to point to the skb in which this space begins.
 *
 *	The number of scatterlist elements required to completely map the
 *	COW'd and extended socket buffer will be returned.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most of packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames. OK, on miss we reallocate and reserve even more
		 * space, 128 bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery. We are in trouble, going to mince the fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* Fuck, we are miserable poor guys... */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
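/*
 * Illustrative sketch (editor's addition): the skb_cow_data() /
 * skb_to_sgvec() pairing as used by IPsec-style transforms: make the data
 * writable, then map it for in-place crypto. Names are hypothetical and,
 * for brevity, the scatterlist is assumed to fit in a fixed on-stack
 * array; real callers size it from the skb_cow_data() return value.
 */
#if 0
static int example_map_for_crypto(struct sk_buff *skb)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 1];
	struct sk_buff *trailer;
	int nsg;

	nsg = skb_cow_data(skb, 0, &trailer);	/* #sg elements, or -errno */
	if (nsg < 0 || nsg > MAX_SKB_FRAGS + 1)
		return nsg < 0 ? nsg : -EMSGSIZE;

	skb_to_sgvec(skb, sg, 0, skb->len);
	/* ... hand sg/nsg to a crypto API that takes scatterlists ... */
	return 0;
}
#endif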
EXPORT_SYMBOL(___pskb_trim);
EXPORT_SYMBOL(__kfree_skb);
EXPORT_SYMBOL(kfree_skb);
EXPORT_SYMBOL(__pskb_pull_tail);
EXPORT_SYMBOL(__alloc_skb);
EXPORT_SYMBOL(__netdev_alloc_skb);
EXPORT_SYMBOL(pskb_copy);
EXPORT_SYMBOL(pskb_expand_head);
EXPORT_SYMBOL(skb_checksum);
EXPORT_SYMBOL(skb_clone);
EXPORT_SYMBOL(skb_copy);
EXPORT_SYMBOL(skb_copy_and_csum_bits);
EXPORT_SYMBOL(skb_copy_and_csum_dev);
EXPORT_SYMBOL(skb_copy_bits);
EXPORT_SYMBOL(skb_copy_expand);
EXPORT_SYMBOL(skb_over_panic);
EXPORT_SYMBOL(skb_pad);
EXPORT_SYMBOL(skb_realloc_headroom);
EXPORT_SYMBOL(skb_under_panic);
EXPORT_SYMBOL(skb_dequeue);
EXPORT_SYMBOL(skb_dequeue_tail);
EXPORT_SYMBOL(skb_insert);
EXPORT_SYMBOL(skb_queue_purge);
EXPORT_SYMBOL(skb_queue_head);
EXPORT_SYMBOL(skb_queue_tail);
EXPORT_SYMBOL(skb_unlink);
EXPORT_SYMBOL(skb_append);
EXPORT_SYMBOL(skb_split);
EXPORT_SYMBOL(skb_prepare_seq_read);
EXPORT_SYMBOL(skb_seq_read);
EXPORT_SYMBOL(skb_abort_seq_read);
EXPORT_SYMBOL(skb_find_text);
EXPORT_SYMBOL(skb_append_datato_frags);

EXPORT_SYMBOL_GPL(skb_to_sgvec);
EXPORT_SYMBOL_GPL(skb_cow_data);