/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_node_track_caller(size, gfp_mask, node);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);

/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, the driver allocates only the data buffer where the NIC
 *  puts the incoming frame.
 *  The driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 *  After IO, the driver calls build_skb() to allocate the sk_buff and
 *  populate it before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *build_skb(void *data)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}
EXPORT_SYMBOL(build_skb);

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
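 *
 *	Illustrative usage sketch (not part of the original documentation;
 *	the rx_buf buffer and rx_len length are assumptions):
 *
 *		skb = __netdev_alloc_skb(dev, rx_len, GFP_ATOMIC);
 *		if (skb) {
 *			memcpy(skb_put(skb, rx_len), rx_buf, rx_len);
 *			skb->protocol = eth_type_trans(skb, dev);
 *			netif_rx(skb);
 *		}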
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	/*
	 * There is more code here than it seems:
	 * __dev_alloc_skb is an inline
	 */
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				skb_frag_unref(skb, i);
		}

		/*
		 * If the skb buffer came from userspace, we need to notify
		 * the caller that the lower device's DMA is done.
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			struct ubuf_info *uarg;

			uarg = skb_shinfo(skb)->destructor_arg;
			if (uarg->callback)
				uarg->callback(uarg);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb(), but kfree_skb() assumes that
 *	the frame is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);

/**
 *	skb_recycle - clean up an skb for reuse
 *	@skb: buffer
 *
 *	Recycles the skb to be reused as a receive buffer. This
 *	function does any necessary reference count dropping, and
 *	cleans up the skbuff as if it just came from __alloc_skb().
 */
void skb_recycle(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;

	skb_release_head_state(skb);

	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);
}
EXPORT_SYMBOL(skb_recycle);

/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, and
 *	that it is linear and its head portion is at least as large as
 *	skb_size so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	if (!skb_is_recycleable(skb, skb_size))
		return false;

	skb_recycle(skb);

	return true;
}
EXPORT_SYMBOL(skb_recycle_check);

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	skb_dst_copy(new, old);
	new->rxhash = old->rxhash;
	new->ooo_okay = old->ooo_okay;
	new->l4_rxhash = old->l4_rxhash;
	new->no_fcs = old->no_fcs;
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum = old->csum;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
#if IS_ENABLED(CONFIG_IP_VS)
	new->ipvs_property = old->ipvs_property;
#endif
	new->protocol = old->protocol;
	new->mark = old->mark;
	new->skb_iif = old->skb_iif;
	__nf_copy(new, old);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	new->vlan_tci = old->vlan_tci;

	skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/*	skb_copy_ubufs	-	copy userspace skb frag buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on an SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel memory and drop the reference
 *	to the userspace pages.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
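 *
 *	Typical call pattern (as used by skb_clone() later in this file),
 *	so that the userspace pages are never shared between two skbs:
 *
 *		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 *			if (skb_copy_ubufs(skb, gfp_mask))
 *				return NULL;
 *		}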
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)head->private;
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_skb_frag(vaddr);
		page->private = (unsigned long)head;
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg);

	/* skb frags point to kernel buffers */
	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
		__skb_fill_page_desc(skb, i - 1, head, 0,
				     skb_shinfo(skb)->frags[i - 1].size);
		head = (struct page *)head->private;
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}


/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, gfp_mask))
			return NULL;
	}

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header += offset;
	if (skb_mac_header_was_set(new))
		new->mac_header += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 *	__pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */
struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
{
	unsigned int size = skb_headlen(skb) + headroom;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			if (skb_copy_ubufs(skb, gfp_mask)) {
				kfree_skb(n);
				n = NULL;
				goto out;
			}
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy);

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
 *	the header of the skb. The &sk_buff itself is not changed. The
 *	&sk_buff MUST have a reference count of 1. Returns zero on success
 *	or a negative error code if expansion failed; in the latter case the
 *	&sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
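 *
 *	Minimal usage sketch (illustrative only; the 16-byte extra headroom
 *	value is an assumption):
 *
 *		if (skb_headroom(skb) < 16 &&
 *		    pskb_expand_head(skb, 16 - skb_headroom(skb), 0, GFP_ATOMIC))
 *			goto drop;
 *
 *	Any cached pointers into the header must be recomputed afterwards.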
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
	long off;
	bool fastpath;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	/* Check if we can avoid taking references on fragments if we own
	 * the last reference on skb->head. (see skb_release_data())
	 */
	if (!skb->cloned)
		fastpath = true;
	else {
		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
	}

	if (fastpath &&
	    size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
		memmove(skb->head + size, skb_shinfo(skb),
			offsetof(struct skb_shared_info,
				 frags[skb_shinfo(skb)->nr_frags]));
		memmove(skb->head + nhead, skb->head,
			skb_tail_pointer(skb) - skb->head);
		off = nhead;
		goto adjust_others;
	}

	data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
		       gfp_mask);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	if (fastpath) {
		kfree(skb->head);
	} else {
		/* copy the zerocopy skb frags */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			if (skb_copy_ubufs(skb, gfp_mask))
				goto nofrags;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
adjust_others:
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail += off;
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += nhead;
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
	if (n->ip_summed == CHECKSUM_PARTIAL)
		n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header += off;
	if (skb_mac_header_was_set(skb))
		n->mac_header += off;
#endif

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return an error in out-of-memory cases. The skb is freed on error.
 */
int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, tailroom is always zero. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
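 *
 *	Illustrative sketch (the payload buffer and its length are
 *	assumptions, not part of the original documentation):
 *
 *		memcpy(skb_put(skb, payload_len), payload, payload_len);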
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			kfree_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff;
 *	it expands the header, moving its tail forward and copying the
 *	necessary data from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and the &sk_buff does not change) if the pull failed,
 *	or the value of the new tail of the skb on success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If the skb does not have enough free space at the tail, get a new
	 * one plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size >= eat)
			goto pull_pages;
		eat -= size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as a whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

/**
 *	skb_copy_bits - copy bits from skb to kernel buffer
 *	@skb: source skb
 *	@offset: offset in source
 *	@to: destination buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source skb to the
 *	destination buffer.
 *
 *	CAUTION ! :
 *		If its prototype is ever changed,
 *		check arch/{*}/net/{*}.S files,
 *		since it is called from BPF assembly code.
 */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset +
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static inline struct page *linear_to_page(struct page *page, unsigned int *len,
					  unsigned int *offset,
					  struct sk_buff *skb, struct sock *sk)
{
	struct page *p = sk->sk_sndmsg_page;
	unsigned int off;

	if (!p) {
new_page:
		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
		if (!p)
			return NULL;

		off = sk->sk_sndmsg_off = 0;
		/* hold one ref to this page until it's full */
	} else {
		unsigned int mlen;

		off = sk->sk_sndmsg_off;
		mlen = PAGE_SIZE - off;
		if (mlen < 64 && mlen < *len) {
			put_page(p);
			goto new_page;
		}

		*len = min_t(unsigned int, *len, mlen);
	}

	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
	sk->sk_sndmsg_off += *len;
	*offset = off;
	get_page(p);

	return p;
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
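 *
 * Summarising the code below: returns 1 (so callers stop) when the pipe
 * already holds pipe->buffers pages, or when a linear chunk cannot be
 * copied into a fresh page; returns 0 once the page has been recorded.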
 */
static inline int spd_fill_page(struct splice_pipe_desc *spd,
				struct pipe_inode_info *pipe, struct page *page,
				unsigned int *len, unsigned int offset,
				struct sk_buff *skb, int linear,
				struct sock *sk)
{
	if (unlikely(spd->nr_pages == pipe->buffers))
		return 1;

	if (linear) {
		page = linear_to_page(page, len, &offset, skb, sk);
		if (!page)
			return 1;
	} else
		get_page(page);

	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return 0;
}

static inline void __segment_seek(struct page **page, unsigned int *poff,
				  unsigned int *plen, unsigned int off)
{
	unsigned long n;

	*poff += off;
	n = *poff / PAGE_SIZE;
	if (n)
		*page = nth_page(*page, n);

	*poff = *poff % PAGE_SIZE;
	*plen -= off;
}

static inline int __splice_segment(struct page *page, unsigned int poff,
				   unsigned int plen, unsigned int *off,
				   unsigned int *len, struct sk_buff *skb,
				   struct splice_pipe_desc *spd, int linear,
				   struct sock *sk,
				   struct pipe_inode_info *pipe)
{
	if (!*len)
		return 1;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return 0;
	}

	/* ignore any bits we already processed */
	if (*off) {
		__segment_seek(&page, &poff, &plen, *off);
		*off = 0;
	}

	do {
		unsigned int flen = min(*len, plen);

		/* the linear region may spread across several pages */
		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);

		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
			return 1;

		__segment_seek(&page, &poff, &plen, flen);
		*len -= flen;

	} while (*len && plen);

	return 0;
}

/*
 * Map linear and fragment data from the skb to spd. It reports failure if the
 * pipe is full or if we already spliced the requested length.
 */
static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
			     unsigned int *offset, unsigned int *len,
			     struct splice_pipe_desc *spd, struct sock *sk)
{
	int seg;

	/*
	 * map the linear part
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, skb, spd, 1, sk, pipe))
		return 1;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(skb_frag_page(f),
				     f->page_offset, skb_frag_size(f),
				     offset, len, skb, spd, 0, sk, pipe))
			return 1;
	}

	return 0;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
 */
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *pages[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &sock_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	struct sk_buff *frag_iter;
	struct sock *sk = skb->sk;
	int ret = 0;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	skb_walk_frags(skb, frag_iter) {
		if (!tlen)
			break;
		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
			break;
	}

done:
	if (spd.nr_pages) {
		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(sk);
	}

	splice_shrink_spd(pipe, &spd);
	return ret;
}

/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb.  This function handles all the messy bits of
 *	traversing fragment lists and such.
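 *
 *	Illustrative sketch (the hdr buffer is an assumption): overwriting
 *	the first bytes of a possibly non-linear skb could look like
 *
 *		if (skb_store_bits(skb, 0, hdr, sizeof(hdr)))
 *			goto drop;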
 */

int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_store_bits(frag_iter, offset - start,
					   from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			if (copy > len)
				copy = len;
			csum2 = skb_checksum(frag_iter, offset - start,
					     copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(skb_checksum);

/* Both of above in one bottle. */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
			      u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		__wsum csum2;
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			csum2 = skb_copy_and_csum_bits(frag_iter,
						       offset - start,
						       to, copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return csum;
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb_checksum_start_offset(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue);

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
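 *
 *	Illustrative sketch (assuming a socket's sk_write_queue as the list):
 *
 *		while ((skb = skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
 *			kfree_skb(skb);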
2056 */
2057 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
2058 {
2059 unsigned long flags;
2060 struct sk_buff *result;
2061
2062 spin_lock_irqsave(&list->lock, flags);
2063 result = __skb_dequeue_tail(list);
2064 spin_unlock_irqrestore(&list->lock, flags);
2065 return result;
2066 }
2067 EXPORT_SYMBOL(skb_dequeue_tail);
2068
2069 /**
2070 * skb_queue_purge - empty a list
2071 * @list: list to empty
2072 *
2073 * Delete all buffers on an &sk_buff list. Each buffer is removed from
2074 * the list and one reference dropped. This function takes the list
2075 * lock and is atomic with respect to other list locking functions.
2076 */
2077 void skb_queue_purge(struct sk_buff_head *list)
2078 {
2079 struct sk_buff *skb;
2080 while ((skb = skb_dequeue(list)) != NULL)
2081 kfree_skb(skb);
2082 }
2083 EXPORT_SYMBOL(skb_queue_purge);
2084
2085 /**
2086 * skb_queue_head - queue a buffer at the list head
2087 * @list: list to use
2088 * @newsk: buffer to queue
2089 *
2090 * Queue a buffer at the start of the list. This function takes the
2091 * list lock and can be used safely with other locking &sk_buff
2092 * functions.
2093 *
2094 * A buffer cannot be placed on two lists at the same time.
2095 */
2096 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
2097 {
2098 unsigned long flags;
2099
2100 spin_lock_irqsave(&list->lock, flags);
2101 __skb_queue_head(list, newsk);
2102 spin_unlock_irqrestore(&list->lock, flags);
2103 }
2104 EXPORT_SYMBOL(skb_queue_head);
2105
2106 /**
2107 * skb_queue_tail - queue a buffer at the list tail
2108 * @list: list to use
2109 * @newsk: buffer to queue
2110 *
2111 * Queue a buffer at the tail of the list. This function takes the
2112 * list lock and can be used safely with other locking &sk_buff
2113 * functions.
2114 *
2115 * A buffer cannot be placed on two lists at the same time.
2116 */
2117 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
2118 {
2119 unsigned long flags;
2120
2121 spin_lock_irqsave(&list->lock, flags);
2122 __skb_queue_tail(list, newsk);
2123 spin_unlock_irqrestore(&list->lock, flags);
2124 }
2125 EXPORT_SYMBOL(skb_queue_tail);
2126
2127 /**
2128 * skb_unlink - remove a buffer from a list
2129 * @skb: buffer to remove
2130 * @list: list to use
2131 *
2132 * Remove a packet from a list. The list locks are taken and this
2133 * function is atomic with respect to other list locking calls.
2134 *
2135 * You must know what list the SKB is on.
2136 */
2137 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2138 {
2139 unsigned long flags;
2140
2141 spin_lock_irqsave(&list->lock, flags);
2142 __skb_unlink(skb, list);
2143 spin_unlock_irqrestore(&list->lock, flags);
2144 }
2145 EXPORT_SYMBOL(skb_unlink);
2146
2147 /**
2148 * skb_append - append a buffer
2149 * @old: buffer to insert after
2150 * @newsk: buffer to insert
2151 * @list: list to use
2152 *
2153 * Place a packet after a given packet in a list. The list locks are taken
2154 * and this function is atomic with respect to other list locking calls.
2155 * A buffer cannot be placed on two lists at the same time.
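 *
 * A minimal sketch (illustrative; "q", "old" and "reply" are hypothetical):
 *
 *	skb_append(old, reply, &q);
 *
 * after which reply directly follows old in q.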
2156 */ 2157 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 2158 { 2159 unsigned long flags; 2160 2161 spin_lock_irqsave(&list->lock, flags); 2162 __skb_queue_after(list, old, newsk); 2163 spin_unlock_irqrestore(&list->lock, flags); 2164 } 2165 EXPORT_SYMBOL(skb_append); 2166 2167 /** 2168 * skb_insert - insert a buffer 2169 * @old: buffer to insert before 2170 * @newsk: buffer to insert 2171 * @list: list to use 2172 * 2173 * Place a packet before a given packet in a list. The list locks are 2174 * taken and this function is atomic with respect to other list locked 2175 * calls. 2176 * 2177 * A buffer cannot be placed on two lists at the same time. 2178 */ 2179 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 2180 { 2181 unsigned long flags; 2182 2183 spin_lock_irqsave(&list->lock, flags); 2184 __skb_insert(newsk, old->prev, old, list); 2185 spin_unlock_irqrestore(&list->lock, flags); 2186 } 2187 EXPORT_SYMBOL(skb_insert); 2188 2189 static inline void skb_split_inside_header(struct sk_buff *skb, 2190 struct sk_buff* skb1, 2191 const u32 len, const int pos) 2192 { 2193 int i; 2194 2195 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 2196 pos - len); 2197 /* And move data appendix as is. */ 2198 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2199 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 2200 2201 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 2202 skb_shinfo(skb)->nr_frags = 0; 2203 skb1->data_len = skb->data_len; 2204 skb1->len += skb1->data_len; 2205 skb->data_len = 0; 2206 skb->len = len; 2207 skb_set_tail_pointer(skb, len); 2208 } 2209 2210 static inline void skb_split_no_header(struct sk_buff *skb, 2211 struct sk_buff* skb1, 2212 const u32 len, int pos) 2213 { 2214 int i, k = 0; 2215 const int nfrags = skb_shinfo(skb)->nr_frags; 2216 2217 skb_shinfo(skb)->nr_frags = 0; 2218 skb1->len = skb1->data_len = skb->len - len; 2219 skb->len = len; 2220 skb->data_len = len - pos; 2221 2222 for (i = 0; i < nfrags; i++) { 2223 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2224 2225 if (pos + size > len) { 2226 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 2227 2228 if (pos < len) { 2229 /* Split frag. 2230 * We have two variants in this case: 2231 * 1. Move all the frag to the second 2232 * part, if it is possible. F.e. 2233 * this approach is mandatory for TUX, 2234 * where splitting is expensive. 2235 * 2. Split is accurately. We make this. 2236 */ 2237 skb_frag_ref(skb, i); 2238 skb_shinfo(skb1)->frags[0].page_offset += len - pos; 2239 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 2240 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 2241 skb_shinfo(skb)->nr_frags++; 2242 } 2243 k++; 2244 } else 2245 skb_shinfo(skb)->nr_frags++; 2246 pos += size; 2247 } 2248 skb_shinfo(skb1)->nr_frags = k; 2249 } 2250 2251 /** 2252 * skb_split - Split fragmented skb to two parts at length len. 2253 * @skb: the buffer to split 2254 * @skb1: the buffer to receive the second part 2255 * @len: new length for skb 2256 */ 2257 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 2258 { 2259 int pos = skb_headlen(skb); 2260 2261 if (len < pos) /* Split line is inside header. */ 2262 skb_split_inside_header(skb, skb1, len, pos); 2263 else /* Second chunk has no header, nothing to copy. */ 2264 skb_split_no_header(skb, skb1, len, pos); 2265 } 2266 EXPORT_SYMBOL(skb_split); 2267 2268 /* Shifting from/to a cloned skb is a no-go. 
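 * pskb_expand_head() is therefore used to obtain a private copy first;
 * skb_shift() below runs the helper on both buffers before touching any
 * frags, e.g. (illustrative):
 *
 *	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
 *		return 0;
 *
 * i.e. the shift is abandoned if a private copy cannot be obtained.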
2269 *
2270 * Caller cannot keep skb_shinfo related pointers past calling here!
2271 */
2272 static int skb_prepare_for_shift(struct sk_buff *skb)
2273 {
2274 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2275 }
2276
2277 /**
2278 * skb_shift - Shifts paged data partially from skb to another
2279 * @tgt: buffer into which tail data gets added
2280 * @skb: buffer from which the paged data comes
2281 * @shiftlen: shift up to this many bytes
2282 *
2283 * Attempts to shift up to @shiftlen worth of bytes, which may be less
2284 * than the length of the skb, from @skb to @tgt. Returns the number of
2285 * bytes shifted. It is up to the caller to free @skb if everything was shifted.
2286 *
2287 * If @tgt runs out of frags, the whole operation is aborted.
2288 *
2289 * @skb may not contain anything other than paged data, while @tgt is
2290 * allowed to contain non-paged data as well.
2291 *
2292 * TODO: a full-sized shift could be optimized, but that would need a
2293 * specialized skb freeing routine to handle frags without an up-to-date nr_frags.
2294 */
2295 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2296 {
2297 int from, to, merge, todo;
2298 struct skb_frag_struct *fragfrom, *fragto;
2299
2300 BUG_ON(shiftlen > skb->len);
2301 BUG_ON(skb_headlen(skb)); /* Would corrupt stream */
2302
2303 todo = shiftlen;
2304 from = 0;
2305 to = skb_shinfo(tgt)->nr_frags;
2306 fragfrom = &skb_shinfo(skb)->frags[from];
2307
2308 /* Actual merge is delayed until the point when we know we can
2309 * commit all, so that we don't have to undo partial changes.
2310 */
2311 if (!to ||
2312 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
2313 fragfrom->page_offset)) {
2314 merge = -1;
2315 } else {
2316 merge = to - 1;
2317
2318 todo -= skb_frag_size(fragfrom);
2319 if (todo < 0) {
2320 if (skb_prepare_for_shift(skb) ||
2321 skb_prepare_for_shift(tgt))
2322 return 0;
2323
2324 /* All previous frag pointers might be stale!
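 * (pskb_expand_head() may have reallocated the head and the
 *  shared info, so reload fragfrom and fragto below before use.)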
*/ 2325 fragfrom = &skb_shinfo(skb)->frags[from]; 2326 fragto = &skb_shinfo(tgt)->frags[merge]; 2327 2328 skb_frag_size_add(fragto, shiftlen); 2329 skb_frag_size_sub(fragfrom, shiftlen); 2330 fragfrom->page_offset += shiftlen; 2331 2332 goto onlymerged; 2333 } 2334 2335 from++; 2336 } 2337 2338 /* Skip full, not-fitting skb to avoid expensive operations */ 2339 if ((shiftlen == skb->len) && 2340 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2341 return 0; 2342 2343 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2344 return 0; 2345 2346 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2347 if (to == MAX_SKB_FRAGS) 2348 return 0; 2349 2350 fragfrom = &skb_shinfo(skb)->frags[from]; 2351 fragto = &skb_shinfo(tgt)->frags[to]; 2352 2353 if (todo >= skb_frag_size(fragfrom)) { 2354 *fragto = *fragfrom; 2355 todo -= skb_frag_size(fragfrom); 2356 from++; 2357 to++; 2358 2359 } else { 2360 __skb_frag_ref(fragfrom); 2361 fragto->page = fragfrom->page; 2362 fragto->page_offset = fragfrom->page_offset; 2363 skb_frag_size_set(fragto, todo); 2364 2365 fragfrom->page_offset += todo; 2366 skb_frag_size_sub(fragfrom, todo); 2367 todo = 0; 2368 2369 to++; 2370 break; 2371 } 2372 } 2373 2374 /* Ready to "commit" this state change to tgt */ 2375 skb_shinfo(tgt)->nr_frags = to; 2376 2377 if (merge >= 0) { 2378 fragfrom = &skb_shinfo(skb)->frags[0]; 2379 fragto = &skb_shinfo(tgt)->frags[merge]; 2380 2381 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 2382 __skb_frag_unref(fragfrom); 2383 } 2384 2385 /* Reposition in the original skb */ 2386 to = 0; 2387 while (from < skb_shinfo(skb)->nr_frags) 2388 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2389 skb_shinfo(skb)->nr_frags = to; 2390 2391 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2392 2393 onlymerged: 2394 /* Most likely the tgt won't ever need its checksum anymore, skb on 2395 * the other hand might need it if it needs to be resent 2396 */ 2397 tgt->ip_summed = CHECKSUM_PARTIAL; 2398 skb->ip_summed = CHECKSUM_PARTIAL; 2399 2400 /* Yak, is it really working this way? Some helper please? */ 2401 skb->len -= shiftlen; 2402 skb->data_len -= shiftlen; 2403 skb->truesize -= shiftlen; 2404 tgt->len += shiftlen; 2405 tgt->data_len += shiftlen; 2406 tgt->truesize += shiftlen; 2407 2408 return shiftlen; 2409 } 2410 2411 /** 2412 * skb_prepare_seq_read - Prepare a sequential read of skb data 2413 * @skb: the buffer to read 2414 * @from: lower offset of data to be read 2415 * @to: upper offset of data to be read 2416 * @st: state variable 2417 * 2418 * Initializes the specified state variable. Must be called before 2419 * invoking skb_seq_read() for the first time. 2420 */ 2421 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 2422 unsigned int to, struct skb_seq_state *st) 2423 { 2424 st->lower_offset = from; 2425 st->upper_offset = to; 2426 st->root_skb = st->cur_skb = skb; 2427 st->frag_idx = st->stepped_offset = 0; 2428 st->frag_data = NULL; 2429 } 2430 EXPORT_SYMBOL(skb_prepare_seq_read); 2431 2432 /** 2433 * skb_seq_read - Sequentially read skb data 2434 * @consumed: number of bytes consumed by the caller so far 2435 * @data: destination pointer for data to be returned 2436 * @st: state variable 2437 * 2438 * Reads a block of skb data at &consumed relative to the 2439 * lower offset specified to skb_prepare_seq_read(). Assigns 2440 * the head of the data block to &data and returns the length 2441 * of the block or 0 if the end of the skb data or the upper 2442 * offset has been reached. 
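 *
 * A minimal usage sketch (illustrative; what is done with each returned
 * block is up to the caller):
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int consumed = 0, len;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0)
 *		consumed += len;
 *
 * Each iteration sees one contiguous block at "data" of length "len".
 * skb_abort_seq_read() must be used instead when the walk is stopped
 * before skb_seq_read() has returned 0.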
2443 *
2444 * The caller is not required to consume all of the data
2445 * returned, i.e. &consumed is typically set to the number
2446 * of bytes already consumed and the next call to
2447 * skb_seq_read() will return the remaining part of the block.
2448 *
2449 * Note 1: The size of each block of data returned can be arbitrary;
2450 * this limitation is the cost of zero-copy sequential
2451 * reads of potentially non-linear data.
2452 *
2453 * Note 2: Fragment lists within fragments are not implemented
2454 * at the moment, state->root_skb could be replaced with
2455 * a stack for this purpose.
2456 */
2457 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2458 struct skb_seq_state *st)
2459 {
2460 unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2461 skb_frag_t *frag;
2462
2463 if (unlikely(abs_offset >= st->upper_offset))
2464 return 0;
2465
2466 next_skb:
2467 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2468
2469 if (abs_offset < block_limit && !st->frag_data) {
2470 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2471 return block_limit - abs_offset;
2472 }
2473
2474 if (st->frag_idx == 0 && !st->frag_data)
2475 st->stepped_offset += skb_headlen(st->cur_skb);
2476
2477 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2478 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2479 block_limit = skb_frag_size(frag) + st->stepped_offset;
2480
2481 if (abs_offset < block_limit) {
2482 if (!st->frag_data)
2483 st->frag_data = kmap_skb_frag(frag);
2484
2485 *data = (u8 *) st->frag_data + frag->page_offset +
2486 (abs_offset - st->stepped_offset);
2487
2488 return block_limit - abs_offset;
2489 }
2490
2491 if (st->frag_data) {
2492 kunmap_skb_frag(st->frag_data);
2493 st->frag_data = NULL;
2494 }
2495
2496 st->frag_idx++;
2497 st->stepped_offset += skb_frag_size(frag);
2498 }
2499
2500 if (st->frag_data) {
2501 kunmap_skb_frag(st->frag_data);
2502 st->frag_data = NULL;
2503 }
2504
2505 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2506 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2507 st->frag_idx = 0;
2508 goto next_skb;
2509 } else if (st->cur_skb->next) {
2510 st->cur_skb = st->cur_skb->next;
2511 st->frag_idx = 0;
2512 goto next_skb;
2513 }
2514
2515 return 0;
2516 }
2517 EXPORT_SYMBOL(skb_seq_read);
2518
2519 /**
2520 * skb_abort_seq_read - Abort a sequential read of skb data
2521 * @st: state variable
2522 *
2523 * Must be called if the sequential read is aborted, i.e. if
2524 * skb_seq_read() has not been called until it returned 0.
2525 */
2526 void skb_abort_seq_read(struct skb_seq_state *st)
2527 {
2528 if (st->frag_data)
2529 kunmap_skb_frag(st->frag_data);
2530 }
2531 EXPORT_SYMBOL(skb_abort_seq_read);
2532
2533 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
2534
2535 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2536 struct ts_config *conf,
2537 struct ts_state *state)
2538 {
2539 return skb_seq_read(offset, text, TS_SKB_CB(state));
2540 }
2541
2542 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2543 {
2544 skb_abort_seq_read(TS_SKB_CB(state));
2545 }
2546
2547 /**
2548 * skb_find_text - Find a text pattern in skb data
2549 * @skb: the buffer to look in
2550 * @from: search offset
2551 * @to: search limit
2552 * @config: textsearch configuration
2553 * @state: uninitialized textsearch state variable
2554 *
2555 * Finds a pattern in the skb data according to the specified
2556 * textsearch configuration.
Use textsearch_next() to retrieve
2557 * subsequent occurrences of the pattern. Returns the offset
2558 * to the first occurrence or UINT_MAX if no match was found.
2559 */
2560 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2561 unsigned int to, struct ts_config *config,
2562 struct ts_state *state)
2563 {
2564 unsigned int ret;
2565
2566 config->get_next_block = skb_ts_get_next_block;
2567 config->finish = skb_ts_finish;
2568
2569 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2570
2571 ret = textsearch_find(config, state);
2572 return (ret <= to - from ? ret : UINT_MAX);
2573 }
2574 EXPORT_SYMBOL(skb_find_text);
2575
2576 /**
2577 * skb_append_datato_frags - append the user data to a skb
2578 * @sk: sock structure
2579 * @skb: skb structure to which the user data will be appended
2580 * @getfrag: callback function used to fetch the user data
2581 * @from: pointer to user message iov
2582 * @length: length of the iov message
2583 *
2584 * Description: This procedure appends the user data to the fragment part
2585 * of the skb. If any page allocation fails, it returns -ENOMEM.
2586 */
2587 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2588 int (*getfrag)(void *from, char *to, int offset,
2589 int len, int odd, struct sk_buff *skb),
2590 void *from, int length)
2591 {
2592 int frg_cnt = 0;
2593 skb_frag_t *frag = NULL;
2594 struct page *page = NULL;
2595 int copy, left;
2596 int offset = 0;
2597 int ret;
2598
2599 do {
2600 /* Return error if we don't have space for new frag */
2601 frg_cnt = skb_shinfo(skb)->nr_frags;
2602 if (frg_cnt >= MAX_SKB_FRAGS)
2603 return -EFAULT;
2604
2605 /* allocate a new page for next frag */
2606 page = alloc_pages(sk->sk_allocation, 0);
2607
2608 /* If alloc_page fails just return failure and caller will
2609 * free previously allocated pages by doing kfree_skb()
2610 */
2611 if (page == NULL)
2612 return -ENOMEM;
2613
2614 /* initialize the next frag */
2615 skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2616 skb->truesize += PAGE_SIZE;
2617 atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
2618
2619 /* get the new initialized frag */
2620 frg_cnt = skb_shinfo(skb)->nr_frags;
2621 frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
2622
2623 /* copy the user data to page */
2624 left = PAGE_SIZE - frag->page_offset;
2625 copy = (length > left)? left : length;
2626
2627 ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag),
2628 offset, copy, 0, skb);
2629 if (ret < 0)
2630 return -EFAULT;
2631
2632 /* copy was successful so update the size parameters */
2633 skb_frag_size_add(frag, copy);
2634 skb->len += copy;
2635 skb->data_len += copy;
2636 offset += copy;
2637 length -= copy;
2638
2639 } while (length > 0);
2640
2641 return 0;
2642 }
2643 EXPORT_SYMBOL(skb_append_datato_frags);
2644
2645 /**
2646 * skb_pull_rcsum - pull skb and update receive checksum
2647 * @skb: buffer to update
2648 * @len: length of data pulled
2649 *
2650 * This function performs an skb_pull on the packet and updates
2651 * the CHECKSUM_COMPLETE checksum. It should be used on
2652 * receive path processing instead of skb_pull unless you know
2653 * that the checksum difference is zero (e.g., a valid IP header)
2654 * or you are setting ip_summed to CHECKSUM_NONE.
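 *
 * A minimal sketch (illustrative; pulling a hypothetical 4-byte tag off
 * the front of the packet):
 *
 *	if (!pskb_may_pull(skb, 4))
 *		goto drop;
 *	skb_pull_rcsum(skb, 4);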
2655 */ 2656 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 2657 { 2658 BUG_ON(len > skb->len); 2659 skb->len -= len; 2660 BUG_ON(skb->len < skb->data_len); 2661 skb_postpull_rcsum(skb, skb->data, len); 2662 return skb->data += len; 2663 } 2664 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2665 2666 /** 2667 * skb_segment - Perform protocol segmentation on skb. 2668 * @skb: buffer to segment 2669 * @features: features for the output path (see dev->features) 2670 * 2671 * This function performs segmentation on the given skb. It returns 2672 * a pointer to the first in a list of new skbs for the segments. 2673 * In case of error it returns ERR_PTR(err). 2674 */ 2675 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) 2676 { 2677 struct sk_buff *segs = NULL; 2678 struct sk_buff *tail = NULL; 2679 struct sk_buff *fskb = skb_shinfo(skb)->frag_list; 2680 unsigned int mss = skb_shinfo(skb)->gso_size; 2681 unsigned int doffset = skb->data - skb_mac_header(skb); 2682 unsigned int offset = doffset; 2683 unsigned int headroom; 2684 unsigned int len; 2685 int sg = !!(features & NETIF_F_SG); 2686 int nfrags = skb_shinfo(skb)->nr_frags; 2687 int err = -ENOMEM; 2688 int i = 0; 2689 int pos; 2690 2691 __skb_push(skb, doffset); 2692 headroom = skb_headroom(skb); 2693 pos = skb_headlen(skb); 2694 2695 do { 2696 struct sk_buff *nskb; 2697 skb_frag_t *frag; 2698 int hsize; 2699 int size; 2700 2701 len = skb->len - offset; 2702 if (len > mss) 2703 len = mss; 2704 2705 hsize = skb_headlen(skb) - offset; 2706 if (hsize < 0) 2707 hsize = 0; 2708 if (hsize > len || !sg) 2709 hsize = len; 2710 2711 if (!hsize && i >= nfrags) { 2712 BUG_ON(fskb->len != len); 2713 2714 pos += len; 2715 nskb = skb_clone(fskb, GFP_ATOMIC); 2716 fskb = fskb->next; 2717 2718 if (unlikely(!nskb)) 2719 goto err; 2720 2721 hsize = skb_end_pointer(nskb) - nskb->head; 2722 if (skb_cow_head(nskb, doffset + headroom)) { 2723 kfree_skb(nskb); 2724 goto err; 2725 } 2726 2727 nskb->truesize += skb_end_pointer(nskb) - nskb->head - 2728 hsize; 2729 skb_release_head_state(nskb); 2730 __skb_push(nskb, doffset); 2731 } else { 2732 nskb = alloc_skb(hsize + doffset + headroom, 2733 GFP_ATOMIC); 2734 2735 if (unlikely(!nskb)) 2736 goto err; 2737 2738 skb_reserve(nskb, headroom); 2739 __skb_put(nskb, doffset); 2740 } 2741 2742 if (segs) 2743 tail->next = nskb; 2744 else 2745 segs = nskb; 2746 tail = nskb; 2747 2748 __copy_skb_header(nskb, skb); 2749 nskb->mac_len = skb->mac_len; 2750 2751 /* nskb and skb might have different headroom */ 2752 if (nskb->ip_summed == CHECKSUM_PARTIAL) 2753 nskb->csum_start += skb_headroom(nskb) - headroom; 2754 2755 skb_reset_mac_header(nskb); 2756 skb_set_network_header(nskb, skb->mac_len); 2757 nskb->transport_header = (nskb->network_header + 2758 skb_network_header_len(skb)); 2759 skb_copy_from_linear_data(skb, nskb->data, doffset); 2760 2761 if (fskb != skb_shinfo(skb)->frag_list) 2762 continue; 2763 2764 if (!sg) { 2765 nskb->ip_summed = CHECKSUM_NONE; 2766 nskb->csum = skb_copy_and_csum_bits(skb, offset, 2767 skb_put(nskb, len), 2768 len, 0); 2769 continue; 2770 } 2771 2772 frag = skb_shinfo(nskb)->frags; 2773 2774 skb_copy_from_linear_data_offset(skb, offset, 2775 skb_put(nskb, hsize), hsize); 2776 2777 while (pos < offset + len && i < nfrags) { 2778 *frag = skb_shinfo(skb)->frags[i]; 2779 __skb_frag_ref(frag); 2780 size = skb_frag_size(frag); 2781 2782 if (pos < offset) { 2783 frag->page_offset += offset - pos; 2784 skb_frag_size_sub(frag, offset - pos); 2785 } 2786 2787 
skb_shinfo(nskb)->nr_frags++; 2788 2789 if (pos + size <= offset + len) { 2790 i++; 2791 pos += size; 2792 } else { 2793 skb_frag_size_sub(frag, pos + size - (offset + len)); 2794 goto skip_fraglist; 2795 } 2796 2797 frag++; 2798 } 2799 2800 if (pos < offset + len) { 2801 struct sk_buff *fskb2 = fskb; 2802 2803 BUG_ON(pos + fskb->len != offset + len); 2804 2805 pos += fskb->len; 2806 fskb = fskb->next; 2807 2808 if (fskb2->next) { 2809 fskb2 = skb_clone(fskb2, GFP_ATOMIC); 2810 if (!fskb2) 2811 goto err; 2812 } else 2813 skb_get(fskb2); 2814 2815 SKB_FRAG_ASSERT(nskb); 2816 skb_shinfo(nskb)->frag_list = fskb2; 2817 } 2818 2819 skip_fraglist: 2820 nskb->data_len = len - hsize; 2821 nskb->len += nskb->data_len; 2822 nskb->truesize += nskb->data_len; 2823 } while ((offset += len) < skb->len); 2824 2825 return segs; 2826 2827 err: 2828 while ((skb = segs)) { 2829 segs = skb->next; 2830 kfree_skb(skb); 2831 } 2832 return ERR_PTR(err); 2833 } 2834 EXPORT_SYMBOL_GPL(skb_segment); 2835 2836 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 2837 { 2838 struct sk_buff *p = *head; 2839 struct sk_buff *nskb; 2840 struct skb_shared_info *skbinfo = skb_shinfo(skb); 2841 struct skb_shared_info *pinfo = skb_shinfo(p); 2842 unsigned int headroom; 2843 unsigned int len = skb_gro_len(skb); 2844 unsigned int offset = skb_gro_offset(skb); 2845 unsigned int headlen = skb_headlen(skb); 2846 2847 if (p->len + len >= 65536) 2848 return -E2BIG; 2849 2850 if (pinfo->frag_list) 2851 goto merge; 2852 else if (headlen <= offset) { 2853 skb_frag_t *frag; 2854 skb_frag_t *frag2; 2855 int i = skbinfo->nr_frags; 2856 int nr_frags = pinfo->nr_frags + i; 2857 2858 offset -= headlen; 2859 2860 if (nr_frags > MAX_SKB_FRAGS) 2861 return -E2BIG; 2862 2863 pinfo->nr_frags = nr_frags; 2864 skbinfo->nr_frags = 0; 2865 2866 frag = pinfo->frags + nr_frags; 2867 frag2 = skbinfo->frags + i; 2868 do { 2869 *--frag = *--frag2; 2870 } while (--i); 2871 2872 frag->page_offset += offset; 2873 skb_frag_size_sub(frag, offset); 2874 2875 skb->truesize -= skb->data_len; 2876 skb->len -= skb->data_len; 2877 skb->data_len = 0; 2878 2879 NAPI_GRO_CB(skb)->free = 1; 2880 goto done; 2881 } else if (skb_gro_len(p) != pinfo->gso_size) 2882 return -E2BIG; 2883 2884 headroom = skb_headroom(p); 2885 nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); 2886 if (unlikely(!nskb)) 2887 return -ENOMEM; 2888 2889 __copy_skb_header(nskb, p); 2890 nskb->mac_len = p->mac_len; 2891 2892 skb_reserve(nskb, headroom); 2893 __skb_put(nskb, skb_gro_offset(p)); 2894 2895 skb_set_mac_header(nskb, skb_mac_header(p) - p->data); 2896 skb_set_network_header(nskb, skb_network_offset(p)); 2897 skb_set_transport_header(nskb, skb_transport_offset(p)); 2898 2899 __skb_pull(p, skb_gro_offset(p)); 2900 memcpy(skb_mac_header(nskb), skb_mac_header(p), 2901 p->data - skb_mac_header(p)); 2902 2903 *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); 2904 skb_shinfo(nskb)->frag_list = p; 2905 skb_shinfo(nskb)->gso_size = pinfo->gso_size; 2906 pinfo->gso_size = 0; 2907 skb_header_release(p); 2908 nskb->prev = p; 2909 2910 nskb->data_len += p->len; 2911 nskb->truesize += p->truesize; 2912 nskb->len += p->len; 2913 2914 *head = nskb; 2915 nskb->next = p->next; 2916 p->next = NULL; 2917 2918 p = nskb; 2919 2920 merge: 2921 p->truesize += skb->truesize - len; 2922 if (offset > headlen) { 2923 unsigned int eat = offset - headlen; 2924 2925 skbinfo->frags[0].page_offset += eat; 2926 skb_frag_size_sub(&skbinfo->frags[0], eat); 2927 skb->data_len -= eat; 2928 skb->len -= eat; 2929 offset = 
headlen; 2930 } 2931 2932 __skb_pull(skb, offset); 2933 2934 p->prev->next = skb; 2935 p->prev = skb; 2936 skb_header_release(skb); 2937 2938 done: 2939 NAPI_GRO_CB(p)->count++; 2940 p->data_len += len; 2941 p->truesize += len; 2942 p->len += len; 2943 2944 NAPI_GRO_CB(skb)->same_flow = 1; 2945 return 0; 2946 } 2947 EXPORT_SYMBOL_GPL(skb_gro_receive); 2948 2949 void __init skb_init(void) 2950 { 2951 skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 2952 sizeof(struct sk_buff), 2953 0, 2954 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2955 NULL); 2956 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 2957 (2*sizeof(struct sk_buff)) + 2958 sizeof(atomic_t), 2959 0, 2960 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2961 NULL); 2962 } 2963 2964 /** 2965 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 2966 * @skb: Socket buffer containing the buffers to be mapped 2967 * @sg: The scatter-gather list to map into 2968 * @offset: The offset into the buffer's contents to start mapping 2969 * @len: Length of buffer space to be mapped 2970 * 2971 * Fill the specified scatter-gather list with mappings/pointers into a 2972 * region of the buffer space attached to a socket buffer. 2973 */ 2974 static int 2975 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 2976 { 2977 int start = skb_headlen(skb); 2978 int i, copy = start - offset; 2979 struct sk_buff *frag_iter; 2980 int elt = 0; 2981 2982 if (copy > 0) { 2983 if (copy > len) 2984 copy = len; 2985 sg_set_buf(sg, skb->data + offset, copy); 2986 elt++; 2987 if ((len -= copy) == 0) 2988 return elt; 2989 offset += copy; 2990 } 2991 2992 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2993 int end; 2994 2995 WARN_ON(start > offset + len); 2996 2997 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2998 if ((copy = end - offset) > 0) { 2999 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3000 3001 if (copy > len) 3002 copy = len; 3003 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 3004 frag->page_offset+offset-start); 3005 elt++; 3006 if (!(len -= copy)) 3007 return elt; 3008 offset += copy; 3009 } 3010 start = end; 3011 } 3012 3013 skb_walk_frags(skb, frag_iter) { 3014 int end; 3015 3016 WARN_ON(start > offset + len); 3017 3018 end = start + frag_iter->len; 3019 if ((copy = end - offset) > 0) { 3020 if (copy > len) 3021 copy = len; 3022 elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, 3023 copy); 3024 if ((len -= copy) == 0) 3025 return elt; 3026 offset += copy; 3027 } 3028 start = end; 3029 } 3030 BUG_ON(len); 3031 return elt; 3032 } 3033 3034 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3035 { 3036 int nsg = __skb_to_sgvec(skb, sg, offset, len); 3037 3038 sg_mark_end(&sg[nsg - 1]); 3039 3040 return nsg; 3041 } 3042 EXPORT_SYMBOL_GPL(skb_to_sgvec); 3043 3044 /** 3045 * skb_cow_data - Check that a socket buffer's data buffers are writable 3046 * @skb: The socket buffer to check. 3047 * @tailbits: Amount of trailing space to be added 3048 * @trailer: Returned pointer to the skb where the @tailbits space begins 3049 * 3050 * Make sure that the data buffers attached to a socket buffer are 3051 * writable. If they are not, private copies are made of the data buffers 3052 * and the socket buffer is set to use these instead. 3053 * 3054 * If @tailbits is given, make sure that there is space to write @tailbits 3055 * bytes of data beyond current end of socket buffer. @trailer will be 3056 * set to point to the skb in which this space begins. 
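 *
 * A typical caller-side sketch (illustrative; sizing of the "sg" array
 * and all error handling are the caller's responsibility):
 *
 *	struct sk_buff *trailer;
 *	int nfrags = skb_cow_data(skb, 0, &trailer);
 *
 *	if (nfrags < 0)
 *		return nfrags;
 *	skb_to_sgvec(skb, sg, 0, skb->len);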
3057 *
3058 * The number of scatterlist elements required to completely map the
3059 * COW'd and extended socket buffer will be returned.
3060 */
3061 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
3062 {
3063 int copyflag;
3064 int elt;
3065 struct sk_buff *skb1, **skb_p;
3066
3067 /* If skb is cloned or its head is paged, reallocate
3068 * the head, pulling out all the pages (pages are considered not
3069 * writable at the moment even if they are anonymous).
3070 */
3071 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
3072 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
3073 return -ENOMEM;
3074
3075 /* Easy case. Most packets will go this way. */
3076 if (!skb_has_frag_list(skb)) {
3077 /* A little trouble: not enough space for the trailer.
3078 * This should not happen when the stack is tuned to generate
3079 * good frames. On a miss we reallocate and reserve even more
3080 * space; 128 bytes is fair. */
3081
3082 if (skb_tailroom(skb) < tailbits &&
3083 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
3084 return -ENOMEM;
3085
3086 /* Voila! */
3087 *trailer = skb;
3088 return 1;
3089 }
3090
3091 /* Misery: we are in trouble and are going to mince the fragments... */
3092
3093 elt = 1;
3094 skb_p = &skb_shinfo(skb)->frag_list;
3095 copyflag = 0;
3096
3097 while ((skb1 = *skb_p) != NULL) {
3098 int ntail = 0;
3099
3100 /* The fragment is partially pulled by someone,
3101 * this can happen on input. Copy it and everything
3102 * after it. */
3103
3104 if (skb_shared(skb1))
3105 copyflag = 1;
3106
3107 /* If the skb is the last, worry about trailer. */
3108
3109 if (skb1->next == NULL && tailbits) {
3110 if (skb_shinfo(skb1)->nr_frags ||
3111 skb_has_frag_list(skb1) ||
3112 skb_tailroom(skb1) < tailbits)
3113 ntail = tailbits + 128;
3114 }
3115
3116 if (copyflag ||
3117 skb_cloned(skb1) ||
3118 ntail ||
3119 skb_shinfo(skb1)->nr_frags ||
3120 skb_has_frag_list(skb1)) {
3121 struct sk_buff *skb2;
3122
3123 /* Slow path: we have to make a private copy. */
3124 if (ntail == 0)
3125 skb2 = skb_copy(skb1, GFP_ATOMIC);
3126 else
3127 skb2 = skb_copy_expand(skb1,
3128 skb_headroom(skb1),
3129 ntail,
3130 GFP_ATOMIC);
3131 if (unlikely(skb2 == NULL))
3132 return -ENOMEM;
3133
3134 if (skb1->sk)
3135 skb_set_owner_w(skb2, skb1->sk);
3136
3137 /* Looking around. Are we still alive?
3138 * OK, link new skb, drop old one */ 3139 3140 skb2->next = skb1->next; 3141 *skb_p = skb2; 3142 kfree_skb(skb1); 3143 skb1 = skb2; 3144 } 3145 elt++; 3146 *trailer = skb1; 3147 skb_p = &skb1->next; 3148 } 3149 3150 return elt; 3151 } 3152 EXPORT_SYMBOL_GPL(skb_cow_data); 3153 3154 static void sock_rmem_free(struct sk_buff *skb) 3155 { 3156 struct sock *sk = skb->sk; 3157 3158 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3159 } 3160 3161 /* 3162 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 3163 */ 3164 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3165 { 3166 int len = skb->len; 3167 3168 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 3169 (unsigned)sk->sk_rcvbuf) 3170 return -ENOMEM; 3171 3172 skb_orphan(skb); 3173 skb->sk = sk; 3174 skb->destructor = sock_rmem_free; 3175 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3176 3177 /* before exiting rcu section, make sure dst is refcounted */ 3178 skb_dst_force(skb); 3179 3180 skb_queue_tail(&sk->sk_error_queue, skb); 3181 if (!sock_flag(sk, SOCK_DEAD)) 3182 sk->sk_data_ready(sk, len); 3183 return 0; 3184 } 3185 EXPORT_SYMBOL(sock_queue_err_skb); 3186 3187 void skb_tstamp_tx(struct sk_buff *orig_skb, 3188 struct skb_shared_hwtstamps *hwtstamps) 3189 { 3190 struct sock *sk = orig_skb->sk; 3191 struct sock_exterr_skb *serr; 3192 struct sk_buff *skb; 3193 int err; 3194 3195 if (!sk) 3196 return; 3197 3198 skb = skb_clone(orig_skb, GFP_ATOMIC); 3199 if (!skb) 3200 return; 3201 3202 if (hwtstamps) { 3203 *skb_hwtstamps(skb) = 3204 *hwtstamps; 3205 } else { 3206 /* 3207 * no hardware time stamps available, 3208 * so keep the shared tx_flags and only 3209 * store software time stamp 3210 */ 3211 skb->tstamp = ktime_get_real(); 3212 } 3213 3214 serr = SKB_EXT_ERR(skb); 3215 memset(serr, 0, sizeof(*serr)); 3216 serr->ee.ee_errno = ENOMSG; 3217 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 3218 3219 err = sock_queue_err_skb(sk, skb); 3220 3221 if (err) 3222 kfree_skb(skb); 3223 } 3224 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 3225 3226 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 3227 { 3228 struct sock *sk = skb->sk; 3229 struct sock_exterr_skb *serr; 3230 int err; 3231 3232 skb->wifi_acked_valid = 1; 3233 skb->wifi_acked = acked; 3234 3235 serr = SKB_EXT_ERR(skb); 3236 memset(serr, 0, sizeof(*serr)); 3237 serr->ee.ee_errno = ENOMSG; 3238 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 3239 3240 err = sock_queue_err_skb(sk, skb); 3241 if (err) 3242 kfree_skb(skb); 3243 } 3244 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 3245 3246 3247 /** 3248 * skb_partial_csum_set - set up and verify partial csum values for packet 3249 * @skb: the skb to set 3250 * @start: the number of bytes after skb->data to start checksumming. 3251 * @off: the offset from start to place the checksum. 3252 * 3253 * For untrusted partially-checksummed packets, we need to make sure the values 3254 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 3255 * 3256 * This function checks and sets those values and skb->ip_summed: if this 3257 * returns false you should drop the packet. 
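 *
 * A minimal sketch (illustrative; "start" and "off" would typically come
 * from an untrusted descriptor handed in by a guest or userspace):
 *
 *	if (!skb_partial_csum_set(skb, start, off)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}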
3258 */ 3259 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 3260 { 3261 if (unlikely(start > skb_headlen(skb)) || 3262 unlikely((int)start + off > skb_headlen(skb) - 2)) { 3263 if (net_ratelimit()) 3264 printk(KERN_WARNING 3265 "bad partial csum: csum=%u/%u len=%u\n", 3266 start, off, skb_headlen(skb)); 3267 return false; 3268 } 3269 skb->ip_summed = CHECKSUM_PARTIAL; 3270 skb->csum_start = skb_headroom(skb) + start; 3271 skb->csum_offset = off; 3272 return true; 3273 } 3274 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 3275 3276 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 3277 { 3278 if (net_ratelimit()) 3279 pr_warning("%s: received packets cannot be forwarded" 3280 " while LRO is enabled\n", skb->dev->name); 3281 } 3282 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 3283