/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <trace/events/skb.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic - private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
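 *
 *	For example (illustrative): calling skb_put(skb, 4) on a buffer with
 *	fewer than 4 bytes of tailroom advances skb->tail past skb->end, and
 *	skb_put() then ends up here and panics.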
 */
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic - private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 */

/**
 *	__alloc_skb - allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of @size bytes. The object has a reference count of one.
 *	Returns the buffer on success or %NULL on failure.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	size = SKB_DATA_ALIGN(size);
	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
					 gfp_mask, node);
	if (!data)
		goto nodata;
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
}
EXPORT_SYMBOL(skb_add_rx_frag);

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
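 *
 *	Example (illustrative sketch; pkt_len and rx_buf are hypothetical
 *	names): a classic driver RX path might do
 *
 *		skb = dev_alloc_skb(pkt_len + 2);
 *		if (!skb)
 *			return;				(drop under memory pressure)
 *		skb_reserve(skb, 2);			(align the IP header)
 *		memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);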
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	/*
	 * There is more code here than it seems:
	 * __dev_alloc_skb is an inline
	 */
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free the skbuff memory itself, without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
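 *
 *	Example (illustrative): a caller that took an extra reference with
 *	skb_get(skb) must balance it with its own kfree_skb(skb); only the
 *	call that drops the last reference actually frees the buffer.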
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb(), but kfree_skb() assumes that the
 *	frame is being dropped after a failure and notes that in its tracepoint.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);

/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, that it is
 *	linear, and that its head portion is at least as large as @skb_size
 *	so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	struct skb_shared_info *shinfo;

	if (irqs_disabled())
		return false;

	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return false;

	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_pointer(skb) - skb->head < skb_size)
		return false;

	if (skb_shared(skb) || skb_cloned(skb))
		return false;

	skb_release_head_state(skb);

	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);

	return true;
}
EXPORT_SYMBOL(skb_recycle_check);

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	skb_dst_copy(new, old);
	new->rxhash = old->rxhash;
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum = old->csum;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
	new->deliver_no_wcard = old->deliver_no_wcard;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	new->ipvs_property = old->ipvs_property;
#endif
	new->protocol = old->protocol;
	new->mark = old->mark;
	new->skb_iif = old->skb_iif;
	__nf_copy(new, old);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	new->vlan_tci = old->vlan_tci;

	skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph - morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_clone - duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header += offset;
	if (skb_mac_header_was_set(new))
		new->mac_header += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *	skb_copy - create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
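 *
 *	Example (illustrative): to rewrite bytes in a buffer that may be
 *	shared, take a private copy first:
 *
 *		nskb = skb_copy(skb, GFP_ATOMIC);
 *		if (nskb)
 *			memset(nskb->data, 0, 4);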
 *
 *	As a by-product, this function converts a non-linear &sk_buff into a
 *	linear one, so the &sk_buff becomes completely private and the caller
 *	may modify all of the returned buffer's data. This means the function
 *	is not recommended where only the header is going to be modified.
 *	Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 *	pskb_copy - create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in its header. Fragmented data remains shared. This is used when
 *	the caller wishes to modify only the header of the &sk_buff and
 *	needs a private copy of the header to alter. Returns %NULL on
 *	failure or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
	unsigned int size = skb_end_pointer(skb) - skb->head;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, skb_headroom(skb));
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(pskb_copy);

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
 *	the header of @skb. The &sk_buff itself is not changed and MUST have
 *	a reference count of 1. Returns zero on success or a negative error
 *	code if expansion failed; in the latter case the &sk_buff is left
 *	unchanged.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
	long off;
	bool fastpath;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	/* Check if we can avoid taking references on fragments if we own
	 * the last reference on skb->head.
	 * (see skb_release_data())
	 */
	if (!skb->cloned)
		fastpath = true;
	else {
		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;

		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
	}

	if (fastpath &&
	    size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
		memmove(skb->head + size, skb_shinfo(skb),
			offsetof(struct skb_shared_info,
				 frags[skb_shinfo(skb)->nr_frags]));
		memmove(skb->head + nhead, skb->head,
			skb_tail_pointer(skb) - skb->head);
		off = nhead;
		goto adjust_others;
	}

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	if (fastpath) {
		kfree(skb->head);
	} else {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			get_page(skb_shinfo(skb)->frags[i].page);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
adjust_others:
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail += off;
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += nhead;
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

/**
 *	skb_copy_expand - copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
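 *
 *	Example (illustrative sketch; tunnel_hlen is a hypothetical header
 *	size): an encapsulating protocol that must prepend a header larger
 *	than the available headroom could use
 *
 *		nskb = skb_copy_expand(skb, tunnel_hlen, 0, GFP_ATOMIC);
 *
 *	and then skb_push() the new header onto nskb.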
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
	if (n->ip_summed == CHECKSUM_PARTIAL)
		n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header += off;
	if (skb_mac_header_was_set(skb))
		n->mac_header += off;
#endif

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	skb_pad - zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return an error in out-of-memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data+skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
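 *
 *	Example (illustrative): prepending a 14-byte Ethernet header after
 *	having reserved headroom for it:
 *
 *		skb_reserve(skb, ETH_HLEN);
 *		...fill the payload with skb_put()...
 *		eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);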
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;

		if (end < len) {
			offset = end;
			continue;
		}

		skb_shinfo(skb)->frags[i++].size = len - offset;

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			put_page(skb_shinfo(skb)->frags[i].page);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			kfree_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	This function only makes sense on a fragmented &sk_buff; it expands
 *	the header, moving its tail forward and copying the necessary data
 *	from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If the skb does not have enough free space at the tail, get a new
	 * one plus 128 bytes for future expansions. If we have enough room
	 * at the tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reason to pre-estimate the
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to the skb data,
	 * but, taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork the list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data.
	 */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset +
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static inline struct page *linear_to_page(struct page *page, unsigned int *len,
					  unsigned int *offset,
					  struct sk_buff *skb, struct sock *sk)
{
	struct page *p = sk->sk_sndmsg_page;
	unsigned int off;

	if (!p) {
new_page:
		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
		if (!p)
			return NULL;

		off = sk->sk_sndmsg_off = 0;
		/* hold one ref to this page until it's full */
	} else {
		unsigned int mlen;

		off = sk->sk_sndmsg_off;
		mlen = PAGE_SIZE - off;
		if (mlen < 64 && mlen < *len) {
			put_page(p);
			goto new_page;
		}

		*len = min_t(unsigned int, *len, mlen);
	}

	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
	sk->sk_sndmsg_off += *len;
	*offset = off;
	get_page(p);

	return p;
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
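 * Returns 0 on success, or 1 when the pipe is already full (or, for the
 * linear area, when no staging page could be obtained).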
 */
static inline int spd_fill_page(struct splice_pipe_desc *spd,
				struct pipe_inode_info *pipe, struct page *page,
				unsigned int *len, unsigned int offset,
				struct sk_buff *skb, int linear,
				struct sock *sk)
{
	if (unlikely(spd->nr_pages == pipe->buffers))
		return 1;

	if (linear) {
		page = linear_to_page(page, len, &offset, skb, sk);
		if (!page)
			return 1;
	} else
		get_page(page);

	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return 0;
}

static inline void __segment_seek(struct page **page, unsigned int *poff,
				  unsigned int *plen, unsigned int off)
{
	unsigned long n;

	*poff += off;
	n = *poff / PAGE_SIZE;
	if (n)
		*page = nth_page(*page, n);

	*poff = *poff % PAGE_SIZE;
	*plen -= off;
}

static inline int __splice_segment(struct page *page, unsigned int poff,
				   unsigned int plen, unsigned int *off,
				   unsigned int *len, struct sk_buff *skb,
				   struct splice_pipe_desc *spd, int linear,
				   struct sock *sk,
				   struct pipe_inode_info *pipe)
{
	if (!*len)
		return 1;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return 0;
	}

	/* ignore any bits we already processed */
	if (*off) {
		__segment_seek(&page, &poff, &plen, *off);
		*off = 0;
	}

	do {
		unsigned int flen = min(*len, plen);

		/* the linear region may spread across several pages */
		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);

		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
			return 1;

		__segment_seek(&page, &poff, &plen, flen);
		*len -= flen;

	} while (*len && plen);

	return 0;
}

/*
 * Map linear and fragment data from the skb to spd. It reports failure if the
 * pipe is full or if we already spliced the requested length.
 */
static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
			     unsigned int *offset, unsigned int *len,
			     struct splice_pipe_desc *spd, struct sock *sk)
{
	int seg;

	/*
	 * map the linear part
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, skb, spd, 1, sk, pipe))
		return 1;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(f->page, f->page_offset, f->size,
				     offset, len, skb, spd, 0, sk, pipe))
			return 1;
	}

	return 0;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
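 *
 * Example (illustrative sketch): a protocol's splice_read implementation
 * would typically call this per skb while walking its receive queue:
 *
 *	copied = skb_splice_bits(skb, offset, pipe, len, flags);
 *
 * with offset and len tracking how much of the requested range is left.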
 */
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *pages[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &sock_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	struct sk_buff *frag_iter;
	struct sock *sk = skb->sk;
	int ret = 0;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	skb_walk_frags(skb, frag_iter) {
		if (!tlen)
			break;
		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
			break;
	}

done:
	if (spd.nr_pages) {
		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(sk);
	}

	splice_shrink_spd(pipe, &spd);
	return ret;
}

/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb. This function handles all the messy bits of
 *	traversing fragment lists and such.
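 *
 *	Example (illustrative; field_off is a hypothetical offset into the
 *	packet): overwriting a two-byte field regardless of whether it lands
 *	in the linear area or in a fragment:
 *
 *		__be16 val = htons(0x1234);
 *		err = skb_store_bits(skb, field_off, &val, sizeof(val));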
 */

int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_store_bits(frag_iter, offset - start,
					   from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			if (copy > len)
				copy = len;
			csum2 = skb_checksum(frag_iter, offset - start,
					     copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(skb_checksum);

/* Both of above in one bottle.
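 *
 * (i.e. skb_copy_bits() and skb_checksum() in a single pass: copy len
 * bytes starting at offset into "to" while accumulating the checksum.)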
 */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
			      u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		__wsum csum2;
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			csum2 = skb_copy_and_csum_bits(frag_iter,
						       offset - start,
						       to, copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return csum;
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb_checksum_start_offset(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue);

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
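 *
 *	Example (illustrative): draining a private queue from its tail:
 *
 *		while ((skb = skb_dequeue_tail(&queue)) != NULL)
 *			kfree_skb(skb);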
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL(skb_queue_purge);

/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);

/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);

/**
 *	skb_unlink - remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);

/**
 *	skb_append - append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_append);

/**
 *	skb_insert - insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet before a given packet in a list. The list locks are
 *	taken and this function is atomic with respect to other list locked
 *	calls.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_insert);

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff* skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags = 0;
	skb1->data_len = skb->data_len;
	skb1->len += skb1->data_len;
	skb->data_len = 0;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff* skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len = skb1->data_len = skb->len - len;
	skb->len = len;
	skb->data_len = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move the whole frag to the second part
				 *    if possible (e.g. this approach is
				 *    mandatory for TUX, where splitting is
				 *    expensive).
				 * 2. Split accurately. This is what we do
				 *    here.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size = len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);

/* Shifting from/to a cloned skb is a no-go.
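 * A clone shares its frag descriptors with its sibling, so shifting them
 * in place would corrupt the other copy; skb_prepare_for_shift() below
 * therefore unclones via pskb_expand_head() first.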

/* Shifting from/to a cloned skb is a no-go.
 *
 * Caller cannot keep skb_shinfo related pointers past calling here!
 */
static int skb_prepare_for_shift(struct sk_buff *skb)
{
	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

/**
 * skb_shift - Shifts paged data partially from skb to another
 * @tgt: buffer into which tail data gets added
 * @skb: buffer from which the paged data comes from
 * @shiftlen: shift up to this many bytes
 *
 * Attempts to shift up to @shiftlen worth of bytes, which may be less than
 * the length of the skb, from @skb to @tgt. Returns the number of bytes
 * shifted. It's up to the caller to free @skb if everything was shifted.
 *
 * If @tgt runs out of frags, the whole operation is aborted.
 *
 * @skb cannot contain anything other than paged data, while @tgt is
 * allowed to have non-paged data as well.
 *
 * TODO: full sized shift could be optimized but that would need
 * specialized skb free'er to handle frags without up-to-date nr_frags.
 */
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
{
	int from, to, merge, todo;
	struct skb_frag_struct *fragfrom, *fragto;

	BUG_ON(shiftlen > skb->len);
	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */

	todo = shiftlen;
	from = 0;
	to = skb_shinfo(tgt)->nr_frags;
	fragfrom = &skb_shinfo(skb)->frags[from];

	/* Actual merge is delayed until the point when we know we can
	 * commit all, so that we don't have to undo partial changes
	 */
	if (!to ||
	    !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
		merge = -1;
	} else {
		merge = to - 1;

		todo -= fragfrom->size;
		if (todo < 0) {
			if (skb_prepare_for_shift(skb) ||
			    skb_prepare_for_shift(tgt))
				return 0;

			/* All previous frag pointers might be stale! */
			fragfrom = &skb_shinfo(skb)->frags[from];
			fragto = &skb_shinfo(tgt)->frags[merge];

			fragto->size += shiftlen;
			fragfrom->size -= shiftlen;
			fragfrom->page_offset += shiftlen;

			goto onlymerged;
		}

		from++;
	}

	/* Skip full, not-fitting skb to avoid expensive operations */
	if ((shiftlen == skb->len) &&
	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
		return 0;

	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
		return 0;

	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
		if (to == MAX_SKB_FRAGS)
			return 0;

		fragfrom = &skb_shinfo(skb)->frags[from];
		fragto = &skb_shinfo(tgt)->frags[to];

		if (todo >= fragfrom->size) {
			*fragto = *fragfrom;
			todo -= fragfrom->size;
			from++;
			to++;

		} else {
			get_page(fragfrom->page);
			fragto->page = fragfrom->page;
			fragto->page_offset = fragfrom->page_offset;
			fragto->size = todo;

			fragfrom->page_offset += todo;
			fragfrom->size -= todo;
			todo = 0;

			to++;
			break;
		}
	}

	/* Ready to "commit" this state change to tgt */
	skb_shinfo(tgt)->nr_frags = to;

	if (merge >= 0) {
		fragfrom = &skb_shinfo(skb)->frags[0];
		fragto = &skb_shinfo(tgt)->frags[merge];

		fragto->size += fragfrom->size;
		put_page(fragfrom->page);
	}

	/* Reposition in the original skb */
	to = 0;
	while (from < skb_shinfo(skb)->nr_frags)
		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
	skb_shinfo(skb)->nr_frags = to;

	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);

onlymerged:
	/* Most likely the tgt won't ever need its checksum anymore, skb on
	 * the other hand might need it if it needs to be resent
	 */
	tgt->ip_summed = CHECKSUM_PARTIAL;
	skb->ip_summed = CHECKSUM_PARTIAL;

	/* Yuck, is it really working this way? Some helper please? */
	skb->len -= shiftlen;
	skb->data_len -= shiftlen;
	skb->truesize -= shiftlen;
	tgt->len += shiftlen;
	tgt->data_len += shiftlen;
	tgt->truesize += shiftlen;

	return shiftlen;
}
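
/*
 * Usage sketch for skb_shift() (illustrative): the SACK processing code
 * (see tcp_shifted_skb() and its callers in tcp_input.c) collapses a
 * purely paged skb into the previous one:
 *
 *	shifted = skb_shift(prev, skb, shiftlen);
 *	if (shifted == skb->len)
 *		everything moved: the caller unlinks and frees skb;
 *	else if (shifted > 0)
 *		partial move: skb still holds its remaining bytes;
 */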

/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
}
EXPORT_SYMBOL(skb_prepare_seq_read);

/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at @consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
 * the head of the data block to @data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
 *
 * The caller is not required to consume all of the data
 * returned, i.e. @consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
 * Note 1: The size of each block of data returned can be arbitrary,
 *         this limitation is the cost of zero-copy sequential
 *         reads of potentially non-linear data.
 *
 * Note 2: Fragment lists within fragments are not implemented
 *         at the moment, state->root_skb could be replaced with
 *         a stack for this purpose.
 */
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st)
{
	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
	skb_frag_t *frag;

	if (unlikely(abs_offset >= st->upper_offset))
		return 0;

next_skb:
	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;

	if (abs_offset < block_limit && !st->frag_data) {
		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
		return block_limit - abs_offset;
	}

	if (st->frag_idx == 0 && !st->frag_data)
		st->stepped_offset += skb_headlen(st->cur_skb);

	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
		block_limit = frag->size + st->stepped_offset;

		if (abs_offset < block_limit) {
			if (!st->frag_data)
				st->frag_data = kmap_skb_frag(frag);

			*data = (u8 *) st->frag_data + frag->page_offset +
				(abs_offset - st->stepped_offset);

			return block_limit - abs_offset;
		}

		if (st->frag_data) {
			kunmap_skb_frag(st->frag_data);
			st->frag_data = NULL;
		}

		st->frag_idx++;
		st->stepped_offset += frag->size;
	}

	if (st->frag_data) {
		kunmap_skb_frag(st->frag_data);
		st->frag_data = NULL;
	}

	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
		st->frag_idx = 0;
		goto next_skb;
	} else if (st->cur_skb->next) {
		st->cur_skb = st->cur_skb->next;
		st->frag_idx = 0;
		goto next_skb;
	}

	return 0;
}
EXPORT_SYMBOL(skb_seq_read);

/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if the read is aborted before skb_seq_read()
 * has returned 0.
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_skb_frag(st->frag_data);
}
EXPORT_SYMBOL(skb_abort_seq_read);
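
/*
 * Usage sketch for the sequential read API above (illustrative;
 * process() stands in for whatever consumes each block):
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		process(data, len);
 *		consumed += len;
 *	}
 *
 * skb_abort_seq_read() is only required if the loop stops before
 * skb_seq_read() has returned 0.
 */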

#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}

/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 * @state: uninitialized textsearch state variable
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config,
			   struct ts_state *state)
{
	unsigned int ret;

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));

	ret = textsearch_find(config, state);
	return (ret <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);
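
/*
 * Usage sketch for skb_find_text() (illustrative; error handling for
 * textsearch_prepare() is elided):
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("bm", "HTTP", 4, GFP_KERNEL, TS_AUTOLOAD);
 *	pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *	if (pos != UINT_MAX)
 *		the pattern starts pos bytes past the search offset;
 *	textsearch_destroy(conf);
 */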

/**
 * skb_append_datato_frags - append the user data to a skb
 * @sk: sock structure
 * @skb: skb structure to append the user data to
 * @getfrag: call back function to be used for getting the user data
 * @from: pointer to user message iov
 * @length: length of the iov message
 *
 * Description: This procedure appends the user data to the fragment part
 * of the skb. If any page allocation fails, it returns -ENOMEM.
 */
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			    int (*getfrag)(void *from, char *to, int offset,
					   int len, int odd, struct sk_buff *skb),
			    void *from, int length)
{
	int frg_cnt = 0;
	skb_frag_t *frag = NULL;
	struct page *page = NULL;
	int copy, left;
	int offset = 0;
	int ret;

	do {
		/* Return error if we don't have space for new frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		if (frg_cnt >= MAX_SKB_FRAGS)
			return -EFAULT;

		/* allocate a new page for next frag */
		page = alloc_pages(sk->sk_allocation, 0);

		/* If alloc_page fails just return failure and caller will
		 * free previous allocated pages by doing kfree_skb()
		 */
		if (page == NULL)
			return -ENOMEM;

		/* initialize the next frag */
		sk->sk_sndmsg_page = page;
		sk->sk_sndmsg_off = 0;
		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
		skb->truesize += PAGE_SIZE;
		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);

		/* get the new initialized frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];

		/* copy the user data to page */
		left = PAGE_SIZE - frag->page_offset;
		copy = (length > left) ? left : length;

		ret = getfrag(from, (page_address(frag->page) +
				     frag->page_offset + frag->size),
			      offset, copy, 0, skb);
		if (ret < 0)
			return -EFAULT;

		/* copy was successful so update the size parameters */
		sk->sk_sndmsg_off += copy;
		frag->size += copy;
		skb->len += copy;
		skb->data_len += copy;
		offset += copy;
		length -= copy;

	} while (length > 0);

	return 0;
}
EXPORT_SYMBOL(skb_append_datato_frags);

/**
 * skb_pull_rcsum - pull skb and update receive checksum
 * @skb: buffer to update
 * @len: length of data pulled
 *
 * This function performs an skb_pull on the packet and updates
 * the CHECKSUM_COMPLETE checksum. It should be used on
 * receive path processing instead of skb_pull unless you know
 * that the checksum difference is zero (e.g., a valid IP header)
 * or you are setting ip_summed to CHECKSUM_NONE.
 */
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	BUG_ON(len > skb->len);
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	skb_postpull_rcsum(skb, skb->data, len);
	return skb->data += len;
}
EXPORT_SYMBOL_GPL(skb_pull_rcsum);
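
/*
 * Usage sketch for skb_pull_rcsum() (illustrative; MY_HLEN is a
 * hypothetical encapsulation header length): a tunnel receive path
 * strips its header without invalidating CHECKSUM_COMPLETE:
 *
 *	if (!pskb_may_pull(skb, MY_HLEN))
 *		goto drop;
 *	skb_pull_rcsum(skb, MY_HLEN);
 *
 * After the call skb->csum again covers exactly the remaining payload.
 */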

/**
 * skb_segment - Perform protocol segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 *
 * This function performs segmentation on the given skb. It returns
 * a pointer to the first in a list of new skbs for the segments.
 * In case of error it returns ERR_PTR(err).
 */
struct sk_buff *skb_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = NULL;
	struct sk_buff *tail = NULL;
	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int doffset = skb->data - skb_mac_header(skb);
	unsigned int offset = doffset;
	unsigned int headroom;
	unsigned int len;
	int sg = features & NETIF_F_SG;
	int nfrags = skb_shinfo(skb)->nr_frags;
	int err = -ENOMEM;
	int i = 0;
	int pos;

	__skb_push(skb, doffset);
	headroom = skb_headroom(skb);
	pos = skb_headlen(skb);

	do {
		struct sk_buff *nskb;
		skb_frag_t *frag;
		int hsize;
		int size;

		len = skb->len - offset;
		if (len > mss)
			len = mss;

		hsize = skb_headlen(skb) - offset;
		if (hsize < 0)
			hsize = 0;
		if (hsize > len || !sg)
			hsize = len;

		if (!hsize && i >= nfrags) {
			BUG_ON(fskb->len != len);

			pos += len;
			nskb = skb_clone(fskb, GFP_ATOMIC);
			fskb = fskb->next;

			if (unlikely(!nskb))
				goto err;

			hsize = skb_end_pointer(nskb) - nskb->head;
			if (skb_cow_head(nskb, doffset + headroom)) {
				kfree_skb(nskb);
				goto err;
			}

			nskb->truesize += skb_end_pointer(nskb) - nskb->head -
					  hsize;
			skb_release_head_state(nskb);
			__skb_push(nskb, doffset);
		} else {
			nskb = alloc_skb(hsize + doffset + headroom,
					 GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err;

			skb_reserve(nskb, headroom);
			__skb_put(nskb, doffset);
		}

		if (segs)
			tail->next = nskb;
		else
			segs = nskb;
		tail = nskb;

		__copy_skb_header(nskb, skb);
		nskb->mac_len = skb->mac_len;

		/* nskb and skb might have different headroom */
		if (nskb->ip_summed == CHECKSUM_PARTIAL)
			nskb->csum_start += skb_headroom(nskb) - headroom;

		skb_reset_mac_header(nskb);
		skb_set_network_header(nskb, skb->mac_len);
		nskb->transport_header = (nskb->network_header +
					  skb_network_header_len(skb));
		skb_copy_from_linear_data(skb, nskb->data, doffset);

		if (fskb != skb_shinfo(skb)->frag_list)
			continue;

		if (!sg) {
			nskb->ip_summed = CHECKSUM_NONE;
			nskb->csum = skb_copy_and_csum_bits(skb, offset,
							    skb_put(nskb, len),
							    len, 0);
			continue;
		}

		frag = skb_shinfo(nskb)->frags;

		skb_copy_from_linear_data_offset(skb, offset,
						 skb_put(nskb, hsize), hsize);

		while (pos < offset + len && i < nfrags) {
			*frag = skb_shinfo(skb)->frags[i];
			get_page(frag->page);
			size = frag->size;

			if (pos < offset) {
				frag->page_offset += offset - pos;
				frag->size -= offset - pos;
			}

			skb_shinfo(nskb)->nr_frags++;

			if (pos + size <= offset + len) {
				i++;
				pos += size;
			} else {
				frag->size -= pos + size - (offset + len);
				goto skip_fraglist;
			}

			frag++;
		}

		if (pos < offset + len) {
			struct sk_buff *fskb2 = fskb;

			BUG_ON(pos + fskb->len != offset + len);

			pos += fskb->len;
			fskb = fskb->next;

			if (fskb2->next) {
				fskb2 = skb_clone(fskb2, GFP_ATOMIC);
				if (!fskb2)
					goto err;
			} else
				skb_get(fskb2);

			SKB_FRAG_ASSERT(nskb);
			skb_shinfo(nskb)->frag_list = fskb2;
		}

skip_fraglist:
		nskb->data_len = len - hsize;
		nskb->len += nskb->data_len;
		nskb->truesize += nskb->data_len;
	} while ((offset += len) < skb->len);

	return segs;

err:
	while ((skb = segs)) {
		segs = skb->next;
		kfree_skb(skb);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skb_segment);
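
/*
 * Usage sketch for skb_segment() (illustrative): a GSO callback such as
 * tcp_tso_segment() typically drives it like this:
 *
 *	struct sk_buff *segs;
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return segs;	propagate the ERR_PTR
 *	walk the segs->next chain and fix up each segment's headers;
 */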

int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff *p = *head;
	struct sk_buff *nskb;
	struct skb_shared_info *skbinfo = skb_shinfo(skb);
	struct skb_shared_info *pinfo = skb_shinfo(p);
	unsigned int headroom;
	unsigned int len = skb_gro_len(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);

	if (p->len + len >= 65536)
		return -E2BIG;

	if (pinfo->frag_list)
		goto merge;
	else if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		offset -= headlen;

		if (nr_frags > MAX_SKB_FRAGS)
			return -E2BIG;

		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		frag->page_offset += offset;
		frag->size -= offset;

		skb->truesize -= skb->data_len;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = 1;
		goto done;
	} else if (skb_gro_len(p) != pinfo->gso_size)
		return -E2BIG;

	headroom = skb_headroom(p);
	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
	if (unlikely(!nskb))
		return -ENOMEM;

	__copy_skb_header(nskb, p);
	nskb->mac_len = p->mac_len;

	skb_reserve(nskb, headroom);
	__skb_put(nskb, skb_gro_offset(p));

	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
	skb_set_network_header(nskb, skb_network_offset(p));
	skb_set_transport_header(nskb, skb_transport_offset(p));

	__skb_pull(p, skb_gro_offset(p));
	memcpy(skb_mac_header(nskb), skb_mac_header(p),
	       p->data - skb_mac_header(p));

	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
	skb_shinfo(nskb)->frag_list = p;
	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
	pinfo->gso_size = 0;
	skb_header_release(p);
	nskb->prev = p;

	nskb->data_len += p->len;
	nskb->truesize += p->len;
	nskb->len += p->len;

	*head = nskb;
	nskb->next = p->next;
	p->next = NULL;

	p = nskb;

merge:
	if (offset > headlen) {
		skbinfo->frags[0].page_offset += offset - headlen;
		skbinfo->frags[0].size -= offset - headlen;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	p->prev->next = skb;
	p->prev = skb;
	skb_header_release(skb);

done:
	NAPI_GRO_CB(p)->count++;
	p->data_len += len;
	p->truesize += len;
	p->len += len;

	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_gro_receive);
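
/*
 * Usage sketch for skb_gro_receive() (illustrative): a protocol
 * ->gro_receive() handler, e.g. tcp_gro_receive(), merges a same-flow
 * segment into the packet it is holding:
 *
 *	if (skb_gro_receive(head, skb))
 *		the merge failed, flush this flow;
 *	on success skb now lives inside *head (as page frags or on the
 *	frag_list) and NAPI_GRO_CB(skb)->same_flow is set;
 */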

void __init skb_init(void)
{
	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					      NULL);
	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
						(2*sizeof(struct sk_buff)) +
						sizeof(atomic_t),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						NULL);
}

/**
 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
 * @skb: Socket buffer containing the buffers to be mapped
 * @sg: The scatter-gather list to map into
 * @offset: The offset into the buffer's contents to start mapping
 * @len: Length of buffer space to be mapped
 *
 * Fill the specified scatter-gather list with mappings/pointers into a
 * region of the buffer space attached to a socket buffer.
 */
static int
__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg_set_buf(sg, skb->data + offset, copy);
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg_set_page(&sg[elt], frag->page, copy,
				    frag->page_offset + offset - start);
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			elt += __skb_to_sgvec(frag_iter, sg + elt, offset - start,
					      copy);
			if ((len -= copy) == 0)
				return elt;
			offset += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return elt;
}

int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int nsg = __skb_to_sgvec(skb, sg, offset, len);

	sg_mark_end(&sg[nsg - 1]);

	return nsg;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
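
/*
 * Usage sketch for skb_to_sgvec() (illustrative; MAX_SG is a
 * hypothetical upper bound on the number of elements): IPsec uses this
 * pattern to hand skb data to the crypto layer:
 *
 *	struct scatterlist sg[MAX_SG];
 *	int nsg;
 *
 *	sg_init_table(sg, MAX_SG);
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 *
 * sg[0..nsg-1] now maps the skb; sg_mark_end() has already been called
 * on the last entry used.
 */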

/**
 * skb_cow_data - Check that a socket buffer's data buffers are writable
 * @skb: The socket buffer to check.
 * @tailbits: Amount of trailing space to be added
 * @trailer: Returned pointer to the skb where the @tailbits space begins
 *
 * Make sure that the data buffers attached to a socket buffer are
 * writable. If they are not, private copies are made of the data buffers
 * and the socket buffer is set to use these instead.
 *
 * If @tailbits is given, make sure that there is space to write @tailbits
 * bytes of data beyond current end of socket buffer. @trailer will be
 * set to point to the skb in which this space begins.
 *
 * The number of scatterlist elements required to completely map the
 * COW'd and extended socket buffer will be returned.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb) - skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most packets will go this way. */
	if (!skb_has_frag_list(skb)) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames. OK, on miss we reallocate and reserve even more
		 * space, 128 bytes is fair.
		 */
		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits - skb_tailroom(skb) + 128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery. We are in trouble, going to mince the fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it.
		 */
		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */
		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_has_frag_list(skb1) ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_has_frag_list(skb1)) {
			struct sk_buff *skb2;

			/* No other choice: make a private copy. */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link the new skb in, drop the old one.
			 */
			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);
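
/*
 * Usage sketch for skb_cow_data() (illustrative; tailroom_needed is a
 * hypothetical trailer size): IPsec ESP output CoWs the buffer before
 * appending its trailer and building a scatterlist:
 *
 *	struct sk_buff *trailer;
 *	int nfrags;
 *
 *	nfrags = skb_cow_data(skb, tailroom_needed, &trailer);
 *	if (nfrags < 0)
 *		goto error;
 *
 * trailer now has at least tailroom_needed bytes of tailroom, and
 * nfrags bounds the scatterlist size for a following skb_to_sgvec().
 */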

static void sock_rmem_free(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}

/*
 * Note: We don't mem charge error packets (no sk_forward_alloc changes)
 */
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rmem_free;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);

	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);

void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps)
{
	struct sock *sk = orig_skb->sk;
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	int err;

	if (!sk)
		return;

	skb = skb_clone(orig_skb, GFP_ATOMIC);
	if (!skb)
		return;

	if (hwtstamps) {
		*skb_hwtstamps(skb) = *hwtstamps;
	} else {
		/*
		 * No hardware time stamps available,
		 * so keep the shared tx_flags and only
		 * store a software time stamp.
		 */
		skb->tstamp = ktime_get_real();
	}

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;

	err = sock_queue_err_skb(sk, skb);

	if (err)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_tstamp_tx);


/**
 * skb_partial_csum_set - set up and verify partial csum values for packet
 * @skb: the skb to set
 * @start: the number of bytes after skb->data to start checksumming.
 * @off: the offset from start to place the checksum.
 *
 * For untrusted partially-checksummed packets, we need to make sure the values
 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
 *
 * This function checks and sets those values and skb->ip_summed: if this
 * returns false you should drop the packet.
 */
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
	if (unlikely(start > skb_headlen(skb)) ||
	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
		if (net_ratelimit())
			printk(KERN_WARNING
			       "bad partial csum: csum=%u/%u len=%u\n",
			       start, off, skb_headlen(skb));
		return false;
	}
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_headroom(skb) + start;
	skb->csum_offset = off;
	return true;
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	if (net_ratelimit())
		pr_warning("%s: received packets cannot be forwarded"
			   " while LRO is enabled\n", skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);