/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <trace/events/skb.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	size = SKB_DATA_ALIGN(size);
	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
					 gfp_mask, node);
	if (!data)
		goto nodata;
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
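
/*
 * Usage sketch (illustrative only, not part of this file; "hlen", "dlen"
 * and "payload" are hypothetical): callers normally go through the
 * alloc_skb() wrapper rather than __alloc_skb() directly, reserving
 * headroom before filling the data area:
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);
 *	memcpy(skb_put(skb, dlen), payload, dlen);
 */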

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		int size)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
}
EXPORT_SYMBOL(skb_add_rx_frag);
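
/*
 * Usage sketch (illustrative only; "page", "offset" and "rx_len" are
 * hypothetical): a driver receiving packet data directly into pages can
 * attach them with skb_add_rx_frag() instead of copying into the linear
 * area:
 *
 *	skb = netdev_alloc_skb(dev, hdr_len);
 *	if (skb)
 *		skb_add_rx_frag(skb, 0, page, offset, rx_len);
 */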

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	/*
	 * There is more code here than it seems:
	 * __dev_alloc_skb is an inline
	 */
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		/*
		 * If the skb data is from userspace, we need to notify the
		 * caller that the lower device DMA has completed;
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			struct ubuf_info *uarg;

			uarg = skb_shinfo(skb)->destructor_arg;
			if (uarg->callback)
				uarg->callback(uarg);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb(), but kfree_skb() assumes that
 *	the frame is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
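
/*
 * Usage note (illustrative, not from this file): the two free paths differ
 * only in their tracing semantics, so call sites should signal intent;
 * kfree_skb() for drops, consume_skb() for normal end-of-life:
 *
 *	if (unlikely(err))
 *		kfree_skb(skb);
 *	else
 *		consume_skb(skb);
 */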

/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, and
 *	that it is linear and that its head portion is at least as large
 *	as @skb_size so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	struct skb_shared_info *shinfo;

	if (irqs_disabled())
		return false;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
		return false;

	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return false;

	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_pointer(skb) - skb->head < skb_size)
		return false;

	if (skb_shared(skb) || skb_cloned(skb))
		return false;

	skb_release_head_state(skb);

	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);

	return true;
}
EXPORT_SYMBOL(skb_recycle_check);
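
/*
 * Usage sketch (illustrative only; my_refill_rx_ring() and rx_buf_size are
 * hypothetical): a driver tx-completion handler may try to recycle a sent
 * skb into its receive ring instead of freeing it:
 *
 *	if (skb_recycle_check(skb, rx_buf_size))
 *		my_refill_rx_ring(priv, skb);
 *	else
 *		dev_kfree_skb(skb);
 */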

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	skb_dst_copy(new, old);
	new->rxhash = old->rxhash;
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum = old->csum;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	new->ipvs_property = old->ipvs_property;
#endif
	new->protocol = old->protocol;
	new->mark = old->mark;
	new->skb_iif = old->skb_iif;
	__nf_copy(new, old);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	new->vlan_tci = old->vlan_tci;

	skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/*	skb frags copy userspace buffers to kernel */
static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)head->private;
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
		memcpy(page_address(page),
		       vaddr + f->page_offset, f->size);
		kunmap_skb_frag(vaddr);
		page->private = (unsigned long)head;
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		put_page(skb_shinfo(skb)->frags[i].page);

	uarg->callback(uarg);

	/* skb frags point to kernel buffers */
	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
		skb_shinfo(skb)->frags[i - 1].page_offset = 0;
		skb_shinfo(skb)->frags[i - 1].page = head;
		head = (struct page *)head->private;
	}
	return 0;
}


/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, gfp_mask))
			return NULL;
		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	}

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
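
/*
 * Usage sketch (illustrative only): cloning is the cheap way to hand the
 * same payload to a second consumer. Clones share the data area, so use
 * pskb_copy()/skb_copy() if either side needs to modify it:
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *	if (nskb)
 *		netif_rx(nskb);
 */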

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header += offset;
	if (skb_mac_header_was_set(new))
		new->mac_header += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 *	pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
	unsigned int size = skb_end_pointer(skb) - skb->head;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, skb_headroom(skb));
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			if (skb_copy_ubufs(skb, gfp_mask)) {
				kfree_skb(n);
				n = NULL;
				goto out;
			}
			skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(pskb_copy);
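
/*
 * Rule of thumb (illustrative, not from this file) for picking a copy
 * variant: pskb_copy() when only headers will be rewritten, skb_copy()
 * when any byte of the payload may be touched:
 *
 *	nskb = pskb_copy(skb, GFP_ATOMIC);	(headers private, frags shared)
 *	nskb = skb_copy(skb, GFP_ATOMIC);	(fully private, linearized)
 */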

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero on success or an error code if
 *	expansion failed; in the latter case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
	long off;
	bool fastpath;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	/* Check if we can avoid taking references on fragments if we own
	 * the last reference on skb->head. (see skb_release_data())
	 */
	if (!skb->cloned)
		fastpath = true;
	else {
		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
	}

	if (fastpath &&
	    size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
		memmove(skb->head + size, skb_shinfo(skb),
			offsetof(struct skb_shared_info,
				 frags[skb_shinfo(skb)->nr_frags]));
		memmove(skb->head + nhead, skb->head,
			skb_tail_pointer(skb) - skb->head);
		off = nhead;
		goto adjust_others;
	}

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	if (fastpath) {
		kfree(skb->head);
	} else {
		/* copy this zero copy skb frags */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			if (skb_copy_ubufs(skb, gfp_mask))
				goto nofrags;
			skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			get_page(skb_shinfo(skb)->frags[i].page);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
adjust_others:
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail += off;
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += nhead;
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
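
/*
 * Usage sketch (illustrative only; "needed" is a hypothetical byte count):
 * a caller that must prepend a header expands the head only when the
 * existing headroom is too small, and must reload any pointers into the
 * old head afterwards:
 *
 *	if (skb_headroom(skb) < needed &&
 *	    pskb_expand_head(skb, SKB_DATA_ALIGN(needed), 0, GFP_ATOMIC))
 *		goto drop;
 *	hdr = skb_push(skb, needed);
 */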

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
	if (n->ip_summed == CHECKSUM_PARTIAL)
		n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header += off;
	if (skb_mac_header_was_set(skb))
		n->mac_header += off;
#endif

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);
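
/*
 * Usage sketch (illustrative only; the sizes are hypothetical): when both a
 * private copy and extra room are needed, skb_copy_expand() does it in a
 * single allocation:
 *
 *	nskb = skb_copy_expand(skb, new_headroom, new_tailroom, GFP_ATOMIC);
 *	if (!nskb)
 *		goto drop;
 */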

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return an error in out-of-memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non linear tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);
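
/*
 * Pointer-discipline sketch (illustrative only; "eth", "tail" and "pad_len"
 * are hypothetical): skb_push()/skb_pull() move skb->data over the
 * headroom, skb_put() grows the tail:
 *
 *	eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
 *	skb_pull(skb, ETH_HLEN);
 *	tail = skb_put(skb, pad_len);
 */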

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;

		if (end < len) {
			offset = end;
			continue;
		}

		skb_shinfo(skb)->frags[i++].size = len - offset;

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			put_page(skb_shinfo(skb)->frags[i].page);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			kfree_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);
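
/*
 * Usage sketch (illustrative only; "fcs_present" is a hypothetical flag):
 * trimming is how drivers strip a trailing FCS that hardware left in
 * place:
 *
 *	if (unlikely(fcs_present))
 *		pskb_trim(skb, skb->len - ETH_FCS_LEN);
 */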

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff;
 *	it expands the header, moving its tail forward and copying the
 *	necessary data from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If the skb does not have enough free space at the tail, get a new
	 * one plus 128 bytes for future expansions. If we have enough room
	 * at the tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

/**
 *	skb_copy_bits - copy bits from skb to kernel buffer
 *	@skb: source skb
 *	@offset: offset in source
 *	@to: destination buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source skb to the
 *	destination buffer.
 *
 *	CAUTION ! :
 *		If its prototype is ever changed,
 *		check arch/{*}/net/{*}.S files,
 *		since it is called from BPF assembly code.
 */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset +
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);
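
/*
 * Usage sketch (illustrative only): skb_copy_bits() is the safe way to
 * read bytes that may live in fragments rather than in the linear area,
 * e.g. pulling a header at a known offset into a stack buffer:
 *
 *	struct udphdr uh;
 *	if (skb_copy_bits(skb, offset, &uh, sizeof(uh)) < 0)
 *		goto drop;
 */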

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static inline struct page *linear_to_page(struct page *page, unsigned int *len,
					  unsigned int *offset,
					  struct sk_buff *skb, struct sock *sk)
{
	struct page *p = sk->sk_sndmsg_page;
	unsigned int off;

	if (!p) {
new_page:
		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
		if (!p)
			return NULL;

		off = sk->sk_sndmsg_off = 0;
		/* hold one ref to this page until it's full */
	} else {
		unsigned int mlen;

		off = sk->sk_sndmsg_off;
		mlen = PAGE_SIZE - off;
		if (mlen < 64 && mlen < *len) {
			put_page(p);
			goto new_page;
		}

		*len = min_t(unsigned int, *len, mlen);
	}

	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
	sk->sk_sndmsg_off += *len;
	*offset = off;
	get_page(p);

	return p;
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static inline int spd_fill_page(struct splice_pipe_desc *spd,
				struct pipe_inode_info *pipe, struct page *page,
				unsigned int *len, unsigned int offset,
				struct sk_buff *skb, int linear,
				struct sock *sk)
{
	if (unlikely(spd->nr_pages == pipe->buffers))
		return 1;

	if (linear) {
		page = linear_to_page(page, len, &offset, skb, sk);
		if (!page)
			return 1;
	} else
		get_page(page);

	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return 0;
}

static inline void __segment_seek(struct page **page, unsigned int *poff,
				  unsigned int *plen, unsigned int off)
{
	unsigned long n;

	*poff += off;
	n = *poff / PAGE_SIZE;
	if (n)
		*page = nth_page(*page, n);

	*poff = *poff % PAGE_SIZE;
	*plen -= off;
}

static inline int __splice_segment(struct page *page, unsigned int poff,
				   unsigned int plen, unsigned int *off,
				   unsigned int *len, struct sk_buff *skb,
				   struct splice_pipe_desc *spd, int linear,
				   struct sock *sk,
				   struct pipe_inode_info *pipe)
{
	if (!*len)
		return 1;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return 0;
	}

	/* ignore any bits we already processed */
	if (*off) {
		__segment_seek(&page, &poff, &plen, *off);
		*off = 0;
	}

	do {
		unsigned int flen = min(*len, plen);

		/* the linear region may spread across several pages  */
		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);

		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
			return 1;

		__segment_seek(&page, &poff, &plen, flen);
		*len -= flen;

	} while (*len && plen);

	return 0;
}

/*
 * Map linear and fragment data from the skb to spd. It reports failure if the
 * pipe is full or if we already spliced the requested length.
 */
static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
			     unsigned int *offset, unsigned int *len,
			     struct splice_pipe_desc *spd, struct sock *sk)
{
	int seg;

	/*
	 * map the linear part
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, skb, spd, 1, sk, pipe))
		return 1;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(f->page, f->page_offset, f->size,
				     offset, len, skb, spd, 0, sk, pipe))
			return 1;
	}

	return 0;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
 */
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *pages[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &sock_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	struct sk_buff *frag_iter;
	struct sock *sk = skb->sk;
	int ret = 0;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	skb_walk_frags(skb, frag_iter) {
		if (!tlen)
			break;
		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
			break;
	}

done:
	if (spd.nr_pages) {
		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(sk);
	}

	splice_shrink_spd(pipe, &spd);
	return ret;
}

/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb. This function handles all the messy bits of
 *	traversing fragment lists and such.
 */

int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_store_bits(frag_iter, offset - start,
					   from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);
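
/*
 * Usage sketch (illustrative only; "new_field" is hypothetical):
 * skb_store_bits() is the write-side counterpart, patching bytes at an
 * offset that may span fragments:
 *
 *	if (skb_store_bits(skb, offset, &new_field, sizeof(new_field)))
 *		goto drop;
 */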

/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			if (copy > len)
				copy = len;
			csum2 = skb_checksum(frag_iter, offset - start,
					     copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(skb_checksum);

/* Both of above in one bottle. */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
			      u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		__wsum csum2;
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			csum2 = skb_copy_and_csum_bits(frag_iter,
						       offset - start,
						       to, copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return csum;
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb_checksum_start_offset(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);
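
/*
 * Usage sketch (illustrative only): a full software checksum over the
 * packet, folded down to the final 16-bit form:
 *
 *	__wsum csum = skb_checksum(skb, 0, skb->len, 0);
 *	__sum16 folded = csum_fold(csum);
 */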

/**
 * skb_append - append a buffer
 * @old: buffer to insert after
 * @newsk: buffer to insert
 * @list: list to use
 *
 * Place a packet after a given packet in a list. The list locks are taken
 * and this function is atomic with respect to other list locked calls.
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_append);

/**
 * skb_insert - insert a buffer
 * @old: buffer to insert before
 * @newsk: buffer to insert
 * @list: list to use
 *
 * Place a packet before a given packet in a list. The list locks are
 * taken and this function is atomic with respect to other list locked
 * calls.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_insert);

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff* skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags  = 0;
	skb1->data_len		   = skb->data_len;
	skb1->len		   += skb1->data_len;
	skb->data_len		   = 0;
	skb->len		   = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff* skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len		  = skb1->data_len = skb->len - len;
	skb->len		  = len;
	skb->data_len		  = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. E.g.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split the frag accurately, which is
				 *    what we do here.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size	= len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);
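
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * splitting a buffer at an MSS boundary, roughly as a TCP-style sender
 * might. "buff" must be freshly allocated with enough tailroom for any
 * linear data that lands in the second half; "mss" is hypothetical.
 *
 *	struct sk_buff *buff = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
 *
 *	if (buff) {
 *		skb_split(skb, buff, mss);	// skb keeps [0, mss)
 *		...				// buff holds the rest
 *	}
 */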

/* Shifting from/to a cloned skb is a no-go.
 *
 * Caller cannot keep skb_shinfo related pointers past calling here!
 */
static int skb_prepare_for_shift(struct sk_buff *skb)
{
	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

/**
 * skb_shift - Shifts paged data partially from skb to another
 * @tgt: buffer into which tail data gets added
 * @skb: buffer from which the paged data comes from
 * @shiftlen: shift up to this many bytes
 *
 * Attempts to shift up to shiftlen worth of bytes, which may be less than
 * the length of the skb, from @skb to @tgt. Returns the number of bytes
 * shifted. It's up to the caller to free @skb if everything was shifted.
 *
 * If @tgt runs out of frags, the whole operation is aborted.
 *
 * @skb cannot include anything other than paged data, while @tgt is
 * allowed to have non-paged data as well.
 *
 * TODO: full sized shift could be optimized but that would need a
 * specialized skb free'er to handle frags without up-to-date nr_frags.
 */
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
{
	int from, to, merge, todo;
	struct skb_frag_struct *fragfrom, *fragto;

	BUG_ON(shiftlen > skb->len);
	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */

	todo = shiftlen;
	from = 0;
	to = skb_shinfo(tgt)->nr_frags;
	fragfrom = &skb_shinfo(skb)->frags[from];

	/* Actual merge is delayed until the point when we know we can
	 * commit all, so that we don't have to undo partial changes
	 */
	if (!to ||
	    !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
		merge = -1;
	} else {
		merge = to - 1;

		todo -= fragfrom->size;
		if (todo < 0) {
			if (skb_prepare_for_shift(skb) ||
			    skb_prepare_for_shift(tgt))
				return 0;

			/* All previous frag pointers might be stale! */
			fragfrom = &skb_shinfo(skb)->frags[from];
			fragto = &skb_shinfo(tgt)->frags[merge];

			fragto->size += shiftlen;
			fragfrom->size -= shiftlen;
			fragfrom->page_offset += shiftlen;

			goto onlymerged;
		}

		from++;
	}

	/* Skip full, not-fitting skb to avoid expensive operations */
	if ((shiftlen == skb->len) &&
	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
		return 0;

	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
		return 0;

	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
		if (to == MAX_SKB_FRAGS)
			return 0;

		fragfrom = &skb_shinfo(skb)->frags[from];
		fragto = &skb_shinfo(tgt)->frags[to];

		if (todo >= fragfrom->size) {
			*fragto = *fragfrom;
			todo -= fragfrom->size;
			from++;
			to++;

		} else {
			get_page(fragfrom->page);
			fragto->page = fragfrom->page;
			fragto->page_offset = fragfrom->page_offset;
			fragto->size = todo;

			fragfrom->page_offset += todo;
			fragfrom->size -= todo;
			todo = 0;

			to++;
			break;
		}
	}

	/* Ready to "commit" this state change to tgt */
	skb_shinfo(tgt)->nr_frags = to;

	if (merge >= 0) {
		fragfrom = &skb_shinfo(skb)->frags[0];
		fragto = &skb_shinfo(tgt)->frags[merge];

		fragto->size += fragfrom->size;
		put_page(fragfrom->page);
	}

	/* Reposition in the original skb */
	to = 0;
	while (from < skb_shinfo(skb)->nr_frags)
		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
	skb_shinfo(skb)->nr_frags = to;

	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);

onlymerged:
	/* Most likely the tgt won't ever need its checksum anymore, skb on
	 * the other hand might need it if it needs to be resent
	 */
	tgt->ip_summed = CHECKSUM_PARTIAL;
	skb->ip_summed = CHECKSUM_PARTIAL;

	/* Yak, is it really working this way? Some helper please? */
	skb->len -= shiftlen;
	skb->data_len -= shiftlen;
	skb->truesize -= shiftlen;
	tgt->len += shiftlen;
	tgt->data_len += shiftlen;
	tgt->truesize += shiftlen;

	return shiftlen;
}
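
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * coalescing retransmit-queue data in the style of TCP SACK processing.
 * Both buffers are assumed unshared, and "skb" purely paged (per the
 * BUG_ON above). Note skb->len shrinks as data moves, so the original
 * length must be saved before the call.
 *
 *	unsigned int len = skb->len;
 *
 *	if (skb_shift(tgt, skb, len) == len) {
 *		// everything moved: unlink skb and kfree_skb() it
 *	}
 */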

/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
}
EXPORT_SYMBOL(skb_prepare_seq_read);

/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at @consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
 * the head of the data block to @data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
 *
 * The caller is not required to consume all of the data
 * returned, i.e. @consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
 * Note 1: The size of each block of data returned can be arbitrary,
 *         this limitation is the cost for zerocopy sequential
 *         reads of potentially non-linear data.
 *
 * Note 2: Fragment lists within fragments are not implemented
 *         at the moment, state->root_skb could be replaced with
 *         a stack for this purpose.
 */
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st)
{
	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
	skb_frag_t *frag;

	if (unlikely(abs_offset >= st->upper_offset))
		return 0;

next_skb:
	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;

	if (abs_offset < block_limit && !st->frag_data) {
		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
		return block_limit - abs_offset;
	}

	if (st->frag_idx == 0 && !st->frag_data)
		st->stepped_offset += skb_headlen(st->cur_skb);

	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
		block_limit = frag->size + st->stepped_offset;

		if (abs_offset < block_limit) {
			if (!st->frag_data)
				st->frag_data = kmap_skb_frag(frag);

			*data = (u8 *) st->frag_data + frag->page_offset +
				(abs_offset - st->stepped_offset);

			return block_limit - abs_offset;
		}

		if (st->frag_data) {
			kunmap_skb_frag(st->frag_data);
			st->frag_data = NULL;
		}

		st->frag_idx++;
		st->stepped_offset += frag->size;
	}

	if (st->frag_data) {
		kunmap_skb_frag(st->frag_data);
		st->frag_data = NULL;
	}

	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
		st->frag_idx = 0;
		goto next_skb;
	} else if (st->cur_skb->next) {
		st->cur_skb = st->cur_skb->next;
		st->frag_idx = 0;
		goto next_skb;
	}

	return 0;
}
EXPORT_SYMBOL(skb_seq_read);

/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if skb_seq_read() was not called until it
 * returned 0.
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_skb_frag(st->frag_data);
}
EXPORT_SYMBOL(skb_abort_seq_read);
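
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * zero-copy iteration over an skb's data. The consumer "consume_block()"
 * is hypothetical; skb_abort_seq_read() is only required when the loop
 * stops before skb_seq_read() has returned 0.
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int consumed = 0, len;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		consume_block(data, len);	// hypothetical
 *		consumed += len;
 *	}
 */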
#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}

/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 * @state: uninitialized textsearch state variable
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config,
			   struct ts_state *state)
{
	unsigned int ret;

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));

	ret = textsearch_find(config, state);
	return (ret <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);

/**
 * skb_append_datato_frags - append the user data to a skb
 * @sk: sock structure
 * @skb: skb structure to be appended with user data
 * @getfrag: callback function to be used for getting the user data
 * @from: pointer to user message iov
 * @length: length of the iov message
 *
 * Description: This procedure appends the user data to the fragment part
 * of the skb. If any page allocation fails, it returns -ENOMEM.
 */
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			    int (*getfrag)(void *from, char *to, int offset,
					   int len, int odd, struct sk_buff *skb),
			    void *from, int length)
{
	int frg_cnt = 0;
	skb_frag_t *frag = NULL;
	struct page *page = NULL;
	int copy, left;
	int offset = 0;
	int ret;

	do {
		/* Return error if we don't have space for new frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		if (frg_cnt >= MAX_SKB_FRAGS)
			return -EFAULT;

		/* allocate a new page for next frag */
		page = alloc_pages(sk->sk_allocation, 0);

		/* If alloc_pages() fails, just return failure and the caller
		 * will free previously allocated pages by doing kfree_skb()
		 */
		if (page == NULL)
			return -ENOMEM;

		/* initialize the next frag */
		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
		skb->truesize += PAGE_SIZE;
		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);

		/* get the new initialized frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];

		/* copy the user data to page */
		left = PAGE_SIZE - frag->page_offset;
		copy = (length > left) ? left : length;

		ret = getfrag(from, (page_address(frag->page) +
				     frag->page_offset + frag->size),
			      offset, copy, 0, skb);
		if (ret < 0)
			return -EFAULT;

		/* copy was successful so update the size parameters */
		frag->size += copy;
		skb->len += copy;
		skb->data_len += copy;
		offset += copy;
		length -= copy;
	} while (length > 0);

	return 0;
}
EXPORT_SYMBOL(skb_append_datato_frags);
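
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * searching a packet for a fixed pattern via the textsearch API. The
 * algorithm name and pattern are arbitrary, and textsearch_prepare()
 * would normally be done once at setup time, not per packet.
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "GET ", 4, GFP_KERNEL,
 *				  TS_AUTOLOAD);
 *	if (!IS_ERR(conf)) {
 *		pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *		if (pos != UINT_MAX)
 *			...	// pattern found at offset pos
 *		textsearch_destroy(conf);
 *	}
 */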

/**
 * skb_pull_rcsum - pull skb and update receive checksum
 * @skb: buffer to update
 * @len: length of data pulled
 *
 * This function performs an skb_pull on the packet and updates
 * the CHECKSUM_COMPLETE checksum. It should be used on
 * receive path processing instead of skb_pull unless you know
 * that the checksum difference is zero (e.g., a valid IP header)
 * or you are setting ip_summed to CHECKSUM_NONE.
 */
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	BUG_ON(len > skb->len);
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	skb_postpull_rcsum(skb, skb->data, len);
	return skb->data += len;
}
EXPORT_SYMBOL_GPL(skb_pull_rcsum);
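
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * stripping an encapsulation header on receive while keeping a
 * CHECKSUM_COMPLETE value consistent, e.g. in a tunnel-style handler.
 * "HDR_LEN" is hypothetical.
 *
 *	if (!pskb_may_pull(skb, HDR_LEN))
 *		goto drop;
 *	skb_pull_rcsum(skb, HDR_LEN);
 *	// skb->data now points past the header and skb->csum,
 *	// if complete, still covers exactly the remaining data
 */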

/**
 * skb_segment - Perform protocol segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 *
 * This function performs segmentation on the given skb. It returns
 * a pointer to the first in a list of new skbs for the segments.
 * In case of error it returns ERR_PTR(err).
 */
struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
{
	struct sk_buff *segs = NULL;
	struct sk_buff *tail = NULL;
	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int doffset = skb->data - skb_mac_header(skb);
	unsigned int offset = doffset;
	unsigned int headroom;
	unsigned int len;
	int sg = !!(features & NETIF_F_SG);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int err = -ENOMEM;
	int i = 0;
	int pos;

	__skb_push(skb, doffset);
	headroom = skb_headroom(skb);
	pos = skb_headlen(skb);

	do {
		struct sk_buff *nskb;
		skb_frag_t *frag;
		int hsize;
		int size;

		len = skb->len - offset;
		if (len > mss)
			len = mss;

		hsize = skb_headlen(skb) - offset;
		if (hsize < 0)
			hsize = 0;
		if (hsize > len || !sg)
			hsize = len;

		if (!hsize && i >= nfrags) {
			BUG_ON(fskb->len != len);

			pos += len;
			nskb = skb_clone(fskb, GFP_ATOMIC);
			fskb = fskb->next;

			if (unlikely(!nskb))
				goto err;

			hsize = skb_end_pointer(nskb) - nskb->head;
			if (skb_cow_head(nskb, doffset + headroom)) {
				kfree_skb(nskb);
				goto err;
			}

			nskb->truesize += skb_end_pointer(nskb) - nskb->head -
					  hsize;
			skb_release_head_state(nskb);
			__skb_push(nskb, doffset);
		} else {
			nskb = alloc_skb(hsize + doffset + headroom,
					 GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err;

			skb_reserve(nskb, headroom);
			__skb_put(nskb, doffset);
		}

		if (segs)
			tail->next = nskb;
		else
			segs = nskb;
		tail = nskb;

		__copy_skb_header(nskb, skb);
		nskb->mac_len = skb->mac_len;

		/* nskb and skb might have different headroom */
		if (nskb->ip_summed == CHECKSUM_PARTIAL)
			nskb->csum_start += skb_headroom(nskb) - headroom;

		skb_reset_mac_header(nskb);
		skb_set_network_header(nskb, skb->mac_len);
		nskb->transport_header = (nskb->network_header +
					  skb_network_header_len(skb));
		skb_copy_from_linear_data(skb, nskb->data, doffset);

		if (fskb != skb_shinfo(skb)->frag_list)
			continue;

		if (!sg) {
			nskb->ip_summed = CHECKSUM_NONE;
			nskb->csum = skb_copy_and_csum_bits(skb, offset,
							    skb_put(nskb, len),
							    len, 0);
			continue;
		}

		frag = skb_shinfo(nskb)->frags;

		skb_copy_from_linear_data_offset(skb, offset,
						 skb_put(nskb, hsize), hsize);

		while (pos < offset + len && i < nfrags) {
			*frag = skb_shinfo(skb)->frags[i];
			get_page(frag->page);
			size = frag->size;

			if (pos < offset) {
				frag->page_offset += offset - pos;
				frag->size -= offset - pos;
			}

			skb_shinfo(nskb)->nr_frags++;

			if (pos + size <= offset + len) {
				i++;
				pos += size;
			} else {
				frag->size -= pos + size - (offset + len);
				goto skip_fraglist;
			}

			frag++;
		}

		if (pos < offset + len) {
			struct sk_buff *fskb2 = fskb;

			BUG_ON(pos + fskb->len != offset + len);

			pos += fskb->len;
			fskb = fskb->next;

			if (fskb2->next) {
				fskb2 = skb_clone(fskb2, GFP_ATOMIC);
				if (!fskb2)
					goto err;
			} else
				skb_get(fskb2);

			SKB_FRAG_ASSERT(nskb);
			skb_shinfo(nskb)->frag_list = fskb2;
		}

skip_fraglist:
		nskb->data_len = len - hsize;
		nskb->len += nskb->data_len;
		nskb->truesize += nskb->data_len;
	} while ((offset += len) < skb->len);

	return segs;

err:
	while ((skb = segs)) {
		segs = skb->next;
		kfree_skb(skb);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skb_segment);
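
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * how a GSO fallback path might chop one oversized skb (gso_size must
 * be set) into segments and hand each to a transmit routine. Error
 * handling is abbreviated; "xmit_one()" is hypothetical, and the
 * original skb is assumed to belong to the caller.
 *
 *	struct sk_buff *segs = skb_segment(skb, features);
 *
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	while (segs) {
 *		struct sk_buff *next = segs->next;
 *
 *		segs->next = NULL;
 *		xmit_one(segs);		// hypothetical
 *		segs = next;
 *	}
 *	kfree_skb(skb);			// original no longer needed
 */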

int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff *p = *head;
	struct sk_buff *nskb;
	struct skb_shared_info *skbinfo = skb_shinfo(skb);
	struct skb_shared_info *pinfo = skb_shinfo(p);
	unsigned int headroom;
	unsigned int len = skb_gro_len(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);

	if (p->len + len >= 65536)
		return -E2BIG;

	if (pinfo->frag_list)
		goto merge;
	else if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		offset -= headlen;

		if (nr_frags > MAX_SKB_FRAGS)
			return -E2BIG;

		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		frag->page_offset += offset;
		frag->size -= offset;

		skb->truesize -= skb->data_len;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = 1;
		goto done;
	} else if (skb_gro_len(p) != pinfo->gso_size)
		return -E2BIG;

	headroom = skb_headroom(p);
	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
	if (unlikely(!nskb))
		return -ENOMEM;

	__copy_skb_header(nskb, p);
	nskb->mac_len = p->mac_len;

	skb_reserve(nskb, headroom);
	__skb_put(nskb, skb_gro_offset(p));

	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
	skb_set_network_header(nskb, skb_network_offset(p));
	skb_set_transport_header(nskb, skb_transport_offset(p));

	__skb_pull(p, skb_gro_offset(p));
	memcpy(skb_mac_header(nskb), skb_mac_header(p),
	       p->data - skb_mac_header(p));

	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
	skb_shinfo(nskb)->frag_list = p;
	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
	pinfo->gso_size = 0;
	skb_header_release(p);
	nskb->prev = p;

	nskb->data_len += p->len;
	nskb->truesize += p->len;
	nskb->len += p->len;

	*head = nskb;
	nskb->next = p->next;
	p->next = NULL;

	p = nskb;

merge:
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skbinfo->frags[0].page_offset += eat;
		skbinfo->frags[0].size -= eat;
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	p->prev->next = skb;
	p->prev = skb;
	skb_header_release(skb);

done:
	NAPI_GRO_CB(p)->count++;
	p->data_len += len;
	p->truesize += len;
	p->len += len;

	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_gro_receive);

void __init skb_init(void)
{
	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					      NULL);
	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
						(2*sizeof(struct sk_buff)) +
						sizeof(atomic_t),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						NULL);
}

/**
 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
 * @skb: Socket buffer containing the buffers to be mapped
 * @sg: The scatter-gather list to map into
 * @offset: The offset into the buffer's contents to start mapping
 * @len: Length of buffer space to be mapped
 *
 * Fill the specified scatter-gather list with mappings/pointers into a
 * region of the buffer space attached to a socket buffer.
 */
static int
__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg_set_buf(sg, skb->data + offset, copy);
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg_set_page(&sg[elt], frag->page, copy,
				    frag->page_offset + offset - start);
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			elt += __skb_to_sgvec(frag_iter, sg + elt, offset - start,
					      copy);
			if ((len -= copy) == 0)
				return elt;
			offset += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return elt;
}

int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int nsg = __skb_to_sgvec(skb, sg, offset, len);

	sg_mark_end(&sg[nsg - 1]);

	return nsg;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
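
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * mapping an skb for a crypto or DMA operation. MAX_SKB_FRAGS + 1
 * entries suffice only when the skb carries no fragment list (e.g.
 * after skb_cow_data(), below, has been used); sizing the table is
 * the caller's responsibility.
 *
 *	struct scatterlist sg[MAX_SKB_FRAGS + 1];
 *	int nents;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nents = skb_to_sgvec(skb, sg, 0, skb->len);
 *	// sg[0..nents-1] now describe the skb's data
 */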

/**
 * skb_cow_data - Check that a socket buffer's data buffers are writable
 * @skb: The socket buffer to check.
 * @tailbits: Amount of trailing space to be added
 * @trailer: Returned pointer to the skb where the @tailbits space begins
 *
 * Make sure that the data buffers attached to a socket buffer are
 * writable. If they are not, private copies are made of the data buffers
 * and the socket buffer is set to use these instead.
 *
 * If @tailbits is given, make sure that there is space to write @tailbits
 * bytes of data beyond current end of socket buffer. @trailer will be
 * set to point to the skb in which this space begins.
 *
 * The number of scatterlist elements required to completely map the
 * COW'd and extended socket buffer will be returned.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most of packets will go this way. */
	if (!skb_has_frag_list(skb)) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames. OK, on a miss we reallocate and reserve even
		 * more space; 128 bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery. We are in trouble, going to mince fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone;
		 * this can happen on input. Copy it and everything
		 * after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_has_frag_list(skb1) ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_has_frag_list(skb1)) {
			struct sk_buff *skb2;

			/* Fuck, we are miserable poor guys... */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);

static void sock_rmem_free(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}

/*
 * Note: We don't mem charge error packets (no sk_forward_alloc changes)
 */
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rmem_free;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);

	/* before exiting rcu section, make sure dst is refcounted */
	skb_dst_force(skb);

	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);

void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps)
{
	struct sock *sk = orig_skb->sk;
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	int err;

	if (!sk)
		return;

	skb = skb_clone(orig_skb, GFP_ATOMIC);
	if (!skb)
		return;

	if (hwtstamps) {
		*skb_hwtstamps(skb) = *hwtstamps;
	} else {
		/*
		 * no hardware time stamps available,
		 * so keep the shared tx_flags and only
		 * store software time stamp
		 */
		skb->tstamp = ktime_get_real();
	}

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;

	err = sock_queue_err_skb(sk, skb);

	if (err)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_tstamp_tx);


/**
 * skb_partial_csum_set - set up and verify partial csum values for packet
 * @skb: the skb to set
 * @start: the number of bytes after skb->data to start checksumming.
 * @off: the offset from start to place the checksum.
 *
 * For untrusted partially-checksummed packets, we need to make sure the values
 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
 *
 * This function checks and sets those values and skb->ip_summed: if this
 * returns false you should drop the packet.
 */
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
	if (unlikely(start > skb_headlen(skb)) ||
	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
		if (net_ratelimit())
			printk(KERN_WARNING
			       "bad partial csum: csum=%u/%u len=%u\n",
			       start, off, skb_headlen(skb));
		return false;
	}
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_headroom(skb) + start;
	skb->csum_offset = off;
	return true;
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	if (net_ratelimit())
		pr_warning("%s: received packets cannot be forwarded"
			   " while LRO is enabled\n", skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);
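
/*
 * Usage sketch for skb_partial_csum_set() above (editorial illustration,
 * not part of the original file): validating checksum metadata taken
 * from an untrusted source such as a virtio-style guest header. The
 * "hdr" fields and NEEDS_CSUM flag are hypothetical.
 *
 *	if ((hdr->flags & NEEDS_CSUM) &&
 *	    !skb_partial_csum_set(skb, hdr->csum_start,
 *				  hdr->csum_offset)) {
 *		kfree_skb(skb);		// metadata would oops us later
 *		return -EINVAL;
 *	}
 */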