/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *	skb_panic - private function for out-of-line support
 *	@skb: buffer
 *	@sz: size
 *	@addr: address
 *	@msg: skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If they are
 * and the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC
 * reserves may be used. Otherwise, the packet data may be discarded until
 * enough memory is free.
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation; when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 */

struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
{
	struct sk_buff *skb;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(skbuff_head_cache,
				    gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->head = NULL;
	skb->truesize = sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);

	skb->mac_header = (typeof(skb->mac_header))~0U;
out:
	return skb;
}

/**
 *	__alloc_skb - allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least @size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of the allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
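
/*
 * Example (illustrative, not part of this file): a minimal sketch of how a
 * caller typically uses the alloc_skb() wrapper around __alloc_skb(), then
 * partitions the buffer with skb_reserve()/skb_put(). The sizes and the
 * payload variable are hypothetical.
 *
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb(128 + NET_IP_ALIGN, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_IP_ALIGN);		// headroom for later pushes
 *	memcpy(skb_put(skb, 64), payload, 64);	// claim 64 bytes of tailroom
 */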

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 * Before IO, the driver allocates only the data buffer where the NIC
 * puts the incoming frame.
 * The driver should add room at head (NET_SKB_PAD) and
 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 * After IO, the driver calls build_skb() to allocate the sk_buff and
 * populate it before giving the packet to the stack.
 * RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}

/* build_skb() is a wrapper over __build_skb() that specifically
 * takes care of skb->head_frag and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc().
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
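
/*
 * Example (illustrative): the driver RX flow described above, with a
 * hypothetical receive-buffer size. The buffer is posted to the NIC before
 * IO; after IO completes, build_skb() wraps it and the pad is reserved.
 * RX_BUF_LEN and frame_len are stand-ins for driver-specific values.
 *
 *	// before IO: size the buffer for NET_SKB_PAD + frame + shinfo
 *	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + RX_BUF_LEN) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *buf = netdev_alloc_frag(truesize);
 *
 *	// after IO: turn the filled buffer into an skb
 *	struct sk_buff *skb = build_skb(buf, truesize);
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, frame_len);
 *	}
 */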

#define NAPI_SKB_CACHE_SIZE	64

struct napi_alloc_cache {
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	unsigned long flags;
	void *data;

	local_irq_save(flags);
	nc = this_cpu_ptr(&netdev_alloc_cache);
	data = page_frag_alloc(nc, fragsz, gfp_mask);
	local_irq_restore(flags);
	return data;
}

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(netdev_alloc_frag);

static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
}

void *napi_alloc_frag(unsigned int fragsz)
{
	return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(napi_alloc_frag);
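
/*
 * Example (illustrative): napi_alloc_frag() uses the per-CPU cache without
 * disabling IRQs, so it should only be called from softirq/NAPI context;
 * netdev_alloc_frag() is the variant for everywhere else. A hypothetical
 * poll() handler refilling its RX ring might do (ring helpers are
 * stand-ins):
 *
 *	while (ring_needs_refill(ring)) {
 *		void *buf = napi_alloc_frag(ring->buf_truesize);
 *
 *		if (!buf)
 *			break;		// retry on the next poll
 *		ring_post_buffer(ring, buf);
 *	}
 */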

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	unsigned long flags;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	local_irq_save(flags);

	nc = this_cpu_ptr(&netdev_alloc_cache);
	data = page_frag_alloc(nc, len, gfp_mask);
	pfmemalloc = nc->pfmemalloc;

	local_irq_restore(flags);

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	/* use OR instead of assignment to avoid clearing of bits in mask */
	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
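
/*
 * Example (illustrative): a driver RX path that does not use build_skb()
 * usually goes through the netdev_alloc_skb() wrapper, which supplies
 * GFP_ATOMIC and leaves the NET_SKB_PAD headroom in place. pkt_len is a
 * stand-in for the received frame length.
 *
 *	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *
 *	if (!skb)
 *		return;			// drop; refill the ring later
 *	skb_reserve(skb, NET_IP_ALIGN);	// align the IP header
 *	// ... DMA or memcpy the frame into skb_put(skb, pkt_len) ...
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */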

/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive. This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation. By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	/* use OR instead of assignment to avoid clearing of bits in mask */
	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);
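
/*
 * Example (illustrative): skb_add_rx_frag() as a driver would use it to
 * attach a received page fragment as frag 0. The skb takes over the
 * caller's page reference, so the caller grabs another with get_page() if
 * it keeps using the page. rx_page, offset and frag_len are stand-ins.
 *
 *	skb_add_rx_frag(skb, 0, rx_page, offset, frag_len, PAGE_SIZE);
 *	// skb->len, skb->data_len and skb->truesize were updated for us
 */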

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag)
		skb_free_frag(head);
	else
		kfree(head);
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	/*
	 * If the skb buffer is from userspace, we need to notify the caller
	 * that the lower device's DMA is done.
	 */
	if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = shinfo->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, true);
	}

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_free_head(skb);
}

/*
 *	Free an skbuff's memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before the
		 * original skb. This test would have no chance to be true
		 * for the clone, while here, branch prediction will be good.
		 */
		if (atomic_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!atomic_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */
void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);

/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report an xmit error if a device callback is tracking this skb.
 *	The skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = skb_shinfo(skb)->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, false);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	}
}
EXPORT_SYMBOL(skb_tx_error);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero. Functions identically to kfree_skb(), but kfree_skb()
 *	assumes that the frame is being dropped after a failure and notes
 *	that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
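
/*
 * Example (illustrative): the kfree_skb()/consume_skb() split matters for
 * tracing; "dropwatch"-style tooling keys off trace_kfree_skb(). A sketch
 * of the convention in an imaginary xmit path (validate() is a stand-in):
 *
 *	if (unlikely(validate(skb) < 0)) {
 *		kfree_skb(skb);		// dropped: shows up as a drop event
 *		return NETDEV_TX_OK;
 *	}
 *	// ... hand the frame to the hardware ...
 *	consume_skb(skb);		// consumed: a normal, expected free
 */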

void __kfree_skb_flush(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* flush skb_cache if containing objects */
	if (nc->skb_count) {
		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}

static inline void _kfree_skb_defer(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* drop skb->head and call any destructors for packet */
	skb_release_all(skb);

	/* record skb to CPU local list */
	nc->skb_cache[nc->skb_count++] = skb;

#ifdef CONFIG_SLUB
	/* SLUB writes into objects when freeing */
	prefetchw(skb);
#endif

	/* flush skb_cache if it is filled */
	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}

void __kfree_skb_defer(struct sk_buff *skb)
{
	_kfree_skb_defer(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	if (unlikely(!skb))
		return;

	/* Zero budget indicates a non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb);

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	_kfree_skb_defer(skb);
}
EXPORT_SYMBOL(napi_consume_skb);
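
/*
 * Example (illustrative): a driver's TX completion loop inside its NAPI
 * poll() handler, forwarding the poll budget so that the deferred bulk-free
 * path above is only taken in true NAPI context. The foo_poll name and the
 * tx_ring helpers are stand-ins.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = tx_ring_next_completed(ring)) != NULL)
 *			napi_consume_skb(skb, budget);	// budget == 0 => netpoll
 *		// ... RX processing, napi_complete_done(), etc. ...
 *	}
 */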

/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	/* We do not copy old->sk */
	new->dev = old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	__nf_copy(new, old, false);

	/* Note : this field could be in the headers_start/headers_end section.
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph - morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_copy_ubufs - copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on an SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_atomic(skb_frag_page(f));
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_atomic(vaddr);
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg, false);

	/* skb frags point to kernel buffers */
	for (i = num_frags - 1; i >= 0; i--) {
		__skb_fill_page_desc(skb, i, head, 0,
				     skb_shinfo(skb)->frags[i].size);
		head = (struct page *)page_private(head);
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

/**
 *	skb_clone - duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff_fclones *fclones = container_of(skb,
						       struct sk_buff_fclones,
						       skb1);
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	if (skb->fclone == SKB_FCLONE_ORIG &&
	    atomic_read(&fclones->fclone_ref) == 1) {
		n = &fclones->skb2;
		atomic_set(&fclones->fclone_ref, 2);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

static void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}
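
/*
 * Example (illustrative): cloning is the cheap way to hand one packet to
 * two consumers; the clone shares the packet data, so a consumer that wants
 * to write must first get its own copy (e.g. via skb_cow() or pskb_copy()).
 * The deliver_* helpers are stand-ins.
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (clone)
 *		deliver_to_tap(clone);
 *	deliver_to_stack(skb);		// original continues on its way
 */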

/**
 *	skb_copy - create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 *	__pskb_copy_fclone - create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *	@fclone: if true allocate the copy of the skb from the fclone
 *	cache instead of the head cache; it is recommended to set this
 *	to true for the cases where the copy will likely be cloned
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone)
{
	unsigned int size = skb_headlen(skb) + headroom;
	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy_fclone);
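
/*
 * Example (illustrative): picking a copy primitive. skb_copy() privatizes
 * head and data (linearizing the skb); pskb_copy() privatizes only the
 * header portion and keeps sharing the paged data.
 *
 *	// going to rewrite the payload: need a full private copy
 *	struct sk_buff *w = skb_copy(skb, GFP_ATOMIC);
 *
 *	// only going to edit protocol headers: the cheaper variant
 *	struct sk_buff *h = pskb_copy(skb, GFP_ATOMIC);
 */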

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero on success or a negative error
 *	code if expansion failed; in the latter case, the &sk_buff is not
 *	changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i, osize = skb_end_offset(skb);
	int size = osize + nhead + ntail;
	long off;
	u8 *data;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		/* copy this zero copy skb frags */
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->head_frag = 0;
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	skb->tail += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	/* It is not generally safe to change skb->truesize.
	 * For the moment, we really care of rx path, or
	 * when skb is orphaned (not attached to a socket).
	 */
	if (!skb->sk || skb->destructor == sock_edemux)
		skb->truesize += size - osize;

	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
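
/*
 * Example (illustrative): the classic "make room to push a new header"
 * pattern built on the primitives above. skb_cow_head() (from skbuff.h)
 * ends up in pskb_expand_head() when headroom is short or the head is
 * shared; all pointers into the head must be re-derived afterwards.
 * TUNNEL_HLEN is a stand-in.
 *
 *	if (skb_cow_head(skb, TUNNEL_HLEN))
 *		goto drop;			// on error the skb is unchanged
 *	hdr = skb_push(skb, TUNNEL_HLEN);	// headroom is now guaranteed
 */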

/**
 *	skb_copy_expand - copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
					gfp_mask, skb_alloc_rx_flag(skb),
					NUMA_NO_NODE);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	skb_headers_offset_update(n, newheadroom - oldheadroom);

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	skb_pad - zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return an error in out-of-memory cases. The skb is freed on error.
 */
int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non linear tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);
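
/*
 * Example (illustrative): padding a short Ethernet frame to the 60-byte
 * minimum before handing it to hardware that does not pad on its own.
 * skb_pad() zeroes the area beyond skb->len but does not update skb->len;
 * it also frees the skb on error, so the failure path must not free again.
 *
 *	if (skb->len < ETH_ZLEN) {
 *		unsigned int pad = ETH_ZLEN - skb->len;
 *
 *		if (skb_pad(skb, pad))
 *			return NETDEV_TX_OK;	// skb was already freed
 *		skb_put(skb, pad);		// extend len over the zeroes
 *	}
 */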

/**
 *	pskb_put - add data to the tail of a potentially fragmented buffer
 *	@skb: start of the buffer to use
 *	@tail: tail fragment of the buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the potentially
 *	fragmented buffer. @tail must be the last fragment of @skb -- or
 *	@skb itself. If this would exceed the total buffer size the kernel
 *	will panic. A pointer to the first byte of the extra data is
 *	returned.
 */
unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);
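
/*
 * Example (illustrative): the usual way the primitives above combine when
 * constructing a packet from scratch. hlen, plen and payload are stand-ins.
 *
 *	skb = alloc_skb(hlen + plen, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);				// room for headers
 *	memcpy(skb_put(skb, plen), payload, plen);	// payload at the tail
 *	iph = (struct iphdr *)skb_push(skb, sizeof(*iph));	// prepend header
 *	// ... fill in *iph, then push lower-layer headers the same way ...
 */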

/* Trims skb to length len. It can change skb pointers.
 */
int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			consume_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	if (!skb->sk || skb->destructor == sock_edemux)
		skb_condense(skb);
	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff;
 *	it expands the header, moving its tail forward and copying the
 *	necessary data from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size >= eat)
			goto pull_pages;
		eat -= size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);
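
/*
 * Example (illustrative): callers rarely use __pskb_pull_tail() directly;
 * header parsing goes through pskb_may_pull(), which falls back to it when
 * the requested bytes are not yet linear. Pointers into the head must be
 * re-read afterwards, as the doc above warns.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;		// truncated packet
 *	iph = ip_hdr(skb);		// re-derive after the pull
 */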
1974 */ 1975 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 1976 struct pipe_inode_info *pipe, unsigned int tlen, 1977 unsigned int flags) 1978 { 1979 struct partial_page partial[MAX_SKB_FRAGS]; 1980 struct page *pages[MAX_SKB_FRAGS]; 1981 struct splice_pipe_desc spd = { 1982 .pages = pages, 1983 .partial = partial, 1984 .nr_pages_max = MAX_SKB_FRAGS, 1985 .flags = flags, 1986 .ops = &nosteal_pipe_buf_ops, 1987 .spd_release = sock_spd_release, 1988 }; 1989 int ret = 0; 1990 1991 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); 1992 1993 if (spd.nr_pages) 1994 ret = splice_to_pipe(pipe, &spd); 1995 1996 return ret; 1997 } 1998 EXPORT_SYMBOL_GPL(skb_splice_bits); 1999 2000 /** 2001 * skb_store_bits - store bits from kernel buffer to skb 2002 * @skb: destination buffer 2003 * @offset: offset in destination 2004 * @from: source buffer 2005 * @len: number of bytes to copy 2006 * 2007 * Copy the specified number of bytes from the source buffer to the 2008 * destination skb. This function handles all the messy bits of 2009 * traversing fragment lists and such. 2010 */ 2011 2012 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 2013 { 2014 int start = skb_headlen(skb); 2015 struct sk_buff *frag_iter; 2016 int i, copy; 2017 2018 if (offset > (int)skb->len - len) 2019 goto fault; 2020 2021 if ((copy = start - offset) > 0) { 2022 if (copy > len) 2023 copy = len; 2024 skb_copy_to_linear_data_offset(skb, offset, from, copy); 2025 if ((len -= copy) == 0) 2026 return 0; 2027 offset += copy; 2028 from += copy; 2029 } 2030 2031 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2032 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2033 int end; 2034 2035 WARN_ON(start > offset + len); 2036 2037 end = start + skb_frag_size(frag); 2038 if ((copy = end - offset) > 0) { 2039 u8 *vaddr; 2040 2041 if (copy > len) 2042 copy = len; 2043 2044 vaddr = kmap_atomic(skb_frag_page(frag)); 2045 memcpy(vaddr + frag->page_offset + offset - start, 2046 from, copy); 2047 kunmap_atomic(vaddr); 2048 2049 if ((len -= copy) == 0) 2050 return 0; 2051 offset += copy; 2052 from += copy; 2053 } 2054 start = end; 2055 } 2056 2057 skb_walk_frags(skb, frag_iter) { 2058 int end; 2059 2060 WARN_ON(start > offset + len); 2061 2062 end = start + frag_iter->len; 2063 if ((copy = end - offset) > 0) { 2064 if (copy > len) 2065 copy = len; 2066 if (skb_store_bits(frag_iter, offset - start, 2067 from, copy)) 2068 goto fault; 2069 if ((len -= copy) == 0) 2070 return 0; 2071 offset += copy; 2072 from += copy; 2073 } 2074 start = end; 2075 } 2076 if (!len) 2077 return 0; 2078 2079 fault: 2080 return -EFAULT; 2081 } 2082 EXPORT_SYMBOL(skb_store_bits); 2083 2084 /* Checksum skb data. */ 2085 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 2086 __wsum csum, const struct skb_checksum_ops *ops) 2087 { 2088 int start = skb_headlen(skb); 2089 int i, copy = start - offset; 2090 struct sk_buff *frag_iter; 2091 int pos = 0; 2092 2093 /* Checksum header. 
*/ 2094 if (copy > 0) { 2095 if (copy > len) 2096 copy = len; 2097 csum = ops->update(skb->data + offset, copy, csum); 2098 if ((len -= copy) == 0) 2099 return csum; 2100 offset += copy; 2101 pos = copy; 2102 } 2103 2104 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2105 int end; 2106 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2107 2108 WARN_ON(start > offset + len); 2109 2110 end = start + skb_frag_size(frag); 2111 if ((copy = end - offset) > 0) { 2112 __wsum csum2; 2113 u8 *vaddr; 2114 2115 if (copy > len) 2116 copy = len; 2117 vaddr = kmap_atomic(skb_frag_page(frag)); 2118 csum2 = ops->update(vaddr + frag->page_offset + 2119 offset - start, copy, 0); 2120 kunmap_atomic(vaddr); 2121 csum = ops->combine(csum, csum2, pos, copy); 2122 if (!(len -= copy)) 2123 return csum; 2124 offset += copy; 2125 pos += copy; 2126 } 2127 start = end; 2128 } 2129 2130 skb_walk_frags(skb, frag_iter) { 2131 int end; 2132 2133 WARN_ON(start > offset + len); 2134 2135 end = start + frag_iter->len; 2136 if ((copy = end - offset) > 0) { 2137 __wsum csum2; 2138 if (copy > len) 2139 copy = len; 2140 csum2 = __skb_checksum(frag_iter, offset - start, 2141 copy, 0, ops); 2142 csum = ops->combine(csum, csum2, pos, copy); 2143 if ((len -= copy) == 0) 2144 return csum; 2145 offset += copy; 2146 pos += copy; 2147 } 2148 start = end; 2149 } 2150 BUG_ON(len); 2151 2152 return csum; 2153 } 2154 EXPORT_SYMBOL(__skb_checksum); 2155 2156 __wsum skb_checksum(const struct sk_buff *skb, int offset, 2157 int len, __wsum csum) 2158 { 2159 const struct skb_checksum_ops ops = { 2160 .update = csum_partial_ext, 2161 .combine = csum_block_add_ext, 2162 }; 2163 2164 return __skb_checksum(skb, offset, len, csum, &ops); 2165 } 2166 EXPORT_SYMBOL(skb_checksum); 2167 2168 /* Both of above in one bottle. */ 2169 2170 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 2171 u8 *to, int len, __wsum csum) 2172 { 2173 int start = skb_headlen(skb); 2174 int i, copy = start - offset; 2175 struct sk_buff *frag_iter; 2176 int pos = 0; 2177 2178 /* Copy header. 
*/ 2179 if (copy > 0) { 2180 if (copy > len) 2181 copy = len; 2182 csum = csum_partial_copy_nocheck(skb->data + offset, to, 2183 copy, csum); 2184 if ((len -= copy) == 0) 2185 return csum; 2186 offset += copy; 2187 to += copy; 2188 pos = copy; 2189 } 2190 2191 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2192 int end; 2193 2194 WARN_ON(start > offset + len); 2195 2196 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2197 if ((copy = end - offset) > 0) { 2198 __wsum csum2; 2199 u8 *vaddr; 2200 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2201 2202 if (copy > len) 2203 copy = len; 2204 vaddr = kmap_atomic(skb_frag_page(frag)); 2205 csum2 = csum_partial_copy_nocheck(vaddr + 2206 frag->page_offset + 2207 offset - start, to, 2208 copy, 0); 2209 kunmap_atomic(vaddr); 2210 csum = csum_block_add(csum, csum2, pos); 2211 if (!(len -= copy)) 2212 return csum; 2213 offset += copy; 2214 to += copy; 2215 pos += copy; 2216 } 2217 start = end; 2218 } 2219 2220 skb_walk_frags(skb, frag_iter) { 2221 __wsum csum2; 2222 int end; 2223 2224 WARN_ON(start > offset + len); 2225 2226 end = start + frag_iter->len; 2227 if ((copy = end - offset) > 0) { 2228 if (copy > len) 2229 copy = len; 2230 csum2 = skb_copy_and_csum_bits(frag_iter, 2231 offset - start, 2232 to, copy, 0); 2233 csum = csum_block_add(csum, csum2, pos); 2234 if ((len -= copy) == 0) 2235 return csum; 2236 offset += copy; 2237 to += copy; 2238 pos += copy; 2239 } 2240 start = end; 2241 } 2242 BUG_ON(len); 2243 return csum; 2244 } 2245 EXPORT_SYMBOL(skb_copy_and_csum_bits); 2246 2247 /** 2248 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 2249 * @from: source buffer 2250 * 2251 * Calculates the amount of linear headroom needed in the 'to' skb passed 2252 * into skb_zerocopy(). 2253 */ 2254 unsigned int 2255 skb_zerocopy_headlen(const struct sk_buff *from) 2256 { 2257 unsigned int hlen = 0; 2258 2259 if (!from->head_frag || 2260 skb_headlen(from) < L1_CACHE_BYTES || 2261 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 2262 hlen = skb_headlen(from); 2263 2264 if (skb_has_frag_list(from)) 2265 hlen = from->len; 2266 2267 return hlen; 2268 } 2269 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 2270 2271 /** 2272 * skb_zerocopy - Zero copy skb to skb 2273 * @to: destination buffer 2274 * @from: source buffer 2275 * @len: number of bytes to copy from source buffer 2276 * @hlen: size of linear headroom in destination buffer 2277 * 2278 * Copies up to `len` bytes from `from` to `to` by creating references 2279 * to the frags in the source buffer. 2280 * 2281 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 2282 * headroom in the `to` buffer. 
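 *
 * A typical caller sizes the destination first (sketch only; the
 * openvswitch upcall path is an in-tree example of this pattern):
 *
 *	unsigned int hlen = skb_zerocopy_headlen(from);
 *	struct sk_buff *to = alloc_skb(hlen, GFP_ATOMIC);
 *	int err = to ? skb_zerocopy(to, from, from->len, hlen) : -ENOMEM;
 *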
2283 * 2284 * Return value: 2285 * 0: everything is OK 2286 * -ENOMEM: couldn't orphan frags of @from due to lack of memory 2287 * -EFAULT: skb_copy_bits() found some problem with skb geometry 2288 */ 2289 int 2290 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 2291 { 2292 int i, j = 0; 2293 int plen = 0; /* length of skb->head fragment */ 2294 int ret; 2295 struct page *page; 2296 unsigned int offset; 2297 2298 BUG_ON(!from->head_frag && !hlen); 2299 2300 /* dont bother with small payloads */ 2301 if (len <= skb_tailroom(to)) 2302 return skb_copy_bits(from, 0, skb_put(to, len), len); 2303 2304 if (hlen) { 2305 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 2306 if (unlikely(ret)) 2307 return ret; 2308 len -= hlen; 2309 } else { 2310 plen = min_t(int, skb_headlen(from), len); 2311 if (plen) { 2312 page = virt_to_head_page(from->head); 2313 offset = from->data - (unsigned char *)page_address(page); 2314 __skb_fill_page_desc(to, 0, page, offset, plen); 2315 get_page(page); 2316 j = 1; 2317 len -= plen; 2318 } 2319 } 2320 2321 to->truesize += len + plen; 2322 to->len += len + plen; 2323 to->data_len += len + plen; 2324 2325 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 2326 skb_tx_error(from); 2327 return -ENOMEM; 2328 } 2329 2330 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 2331 if (!len) 2332 break; 2333 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 2334 skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len); 2335 len -= skb_shinfo(to)->frags[j].size; 2336 skb_frag_ref(to, j); 2337 j++; 2338 } 2339 skb_shinfo(to)->nr_frags = j; 2340 2341 return 0; 2342 } 2343 EXPORT_SYMBOL_GPL(skb_zerocopy); 2344 2345 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 2346 { 2347 __wsum csum; 2348 long csstart; 2349 2350 if (skb->ip_summed == CHECKSUM_PARTIAL) 2351 csstart = skb_checksum_start_offset(skb); 2352 else 2353 csstart = skb_headlen(skb); 2354 2355 BUG_ON(csstart > skb_headlen(skb)); 2356 2357 skb_copy_from_linear_data(skb, to, csstart); 2358 2359 csum = 0; 2360 if (csstart != skb->len) 2361 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 2362 skb->len - csstart, 0); 2363 2364 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2365 long csstuff = csstart + skb->csum_offset; 2366 2367 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 2368 } 2369 } 2370 EXPORT_SYMBOL(skb_copy_and_csum_dev); 2371 2372 /** 2373 * skb_dequeue - remove from the head of the queue 2374 * @list: list to dequeue from 2375 * 2376 * Remove the head of the list. The list lock is taken so the function 2377 * may be used safely with other locking list functions. The head item is 2378 * returned or %NULL if the list is empty. 2379 */ 2380 2381 struct sk_buff *skb_dequeue(struct sk_buff_head *list) 2382 { 2383 unsigned long flags; 2384 struct sk_buff *result; 2385 2386 spin_lock_irqsave(&list->lock, flags); 2387 result = __skb_dequeue(list); 2388 spin_unlock_irqrestore(&list->lock, flags); 2389 return result; 2390 } 2391 EXPORT_SYMBOL(skb_dequeue); 2392 2393 /** 2394 * skb_dequeue_tail - remove from the tail of the queue 2395 * @list: list to dequeue from 2396 * 2397 * Remove the tail of the list. The list lock is taken so the function 2398 * may be used safely with other locking list functions. The tail item is 2399 * returned or %NULL if the list is empty. 
*/ 2401 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 2402 { 2403 unsigned long flags; 2404 struct sk_buff *result; 2405 2406 spin_lock_irqsave(&list->lock, flags); 2407 result = __skb_dequeue_tail(list); 2408 spin_unlock_irqrestore(&list->lock, flags); 2409 return result; 2410 } 2411 EXPORT_SYMBOL(skb_dequeue_tail); 2412 2413 /** 2414 * skb_queue_purge - empty a list 2415 * @list: list to empty 2416 * 2417 * Delete all buffers on an &sk_buff list. Each buffer is removed from 2418 * the list and one reference dropped. This function takes the list 2419 * lock and is atomic with respect to other list locking functions. 2420 */ 2421 void skb_queue_purge(struct sk_buff_head *list) 2422 { 2423 struct sk_buff *skb; 2424 while ((skb = skb_dequeue(list)) != NULL) 2425 kfree_skb(skb); 2426 } 2427 EXPORT_SYMBOL(skb_queue_purge); 2428 2429 /** 2430 * skb_rbtree_purge - empty a skb rbtree 2431 * @root: root of the rbtree to empty 2432 * 2433 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from 2434 * the rbtree and one reference dropped. This function does not take 2435 * any lock. Synchronization should be handled by the caller (e.g., the 2436 * TCP out-of-order queue is protected by the socket lock). 2437 */ 2438 void skb_rbtree_purge(struct rb_root *root) 2439 { 2440 struct sk_buff *skb, *next; 2441 2442 rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode) 2443 kfree_skb(skb); 2444 2445 *root = RB_ROOT; 2446 } 2447 2448 /** 2449 * skb_queue_head - queue a buffer at the list head 2450 * @list: list to use 2451 * @newsk: buffer to queue 2452 * 2453 * Queue a buffer at the start of the list. This function takes the 2454 * list lock and can be used safely with other locking &sk_buff 2455 * functions. 2456 * 2457 * A buffer cannot be placed on two lists at the same time. 2458 */ 2459 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 2460 { 2461 unsigned long flags; 2462 2463 spin_lock_irqsave(&list->lock, flags); 2464 __skb_queue_head(list, newsk); 2465 spin_unlock_irqrestore(&list->lock, flags); 2466 } 2467 EXPORT_SYMBOL(skb_queue_head); 2468 2469 /** 2470 * skb_queue_tail - queue a buffer at the list tail 2471 * @list: list to use 2472 * @newsk: buffer to queue 2473 * 2474 * Queue a buffer at the tail of the list. This function takes the 2475 * list lock and can be used safely with other locking &sk_buff 2476 * functions. 2477 * 2478 * A buffer cannot be placed on two lists at the same time. 2479 */ 2480 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 2481 { 2482 unsigned long flags; 2483 2484 spin_lock_irqsave(&list->lock, flags); 2485 __skb_queue_tail(list, newsk); 2486 spin_unlock_irqrestore(&list->lock, flags); 2487 } 2488 EXPORT_SYMBOL(skb_queue_tail); 2489 2490 /** 2491 * skb_unlink - remove a buffer from a list 2492 * @skb: buffer to remove 2493 * @list: list to use 2494 * 2495 * Remove a packet from a list. The list locks are taken and this 2496 * function is atomic with respect to other locked list calls. 2497 * 2498 * You must know what list the SKB is on.
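 *
 * Minimal usage sketch ('pending' is a hypothetical sk_buff_head the
 * buffer was queued on earlier):
 *
 *	skb_unlink(skb, &pending);
 *	kfree_skb(skb);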
*/ 2500 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 2501 { 2502 unsigned long flags; 2503 2504 spin_lock_irqsave(&list->lock, flags); 2505 __skb_unlink(skb, list); 2506 spin_unlock_irqrestore(&list->lock, flags); 2507 } 2508 EXPORT_SYMBOL(skb_unlink); 2509 2510 /** 2511 * skb_append - append a buffer 2512 * @old: buffer to insert after 2513 * @newsk: buffer to insert 2514 * @list: list to use 2515 * 2516 * Place a packet after a given packet in a list. The list locks are taken 2517 * and this function is atomic with respect to other locked list calls. 2518 * A buffer cannot be placed on two lists at the same time. 2519 */ 2520 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 2521 { 2522 unsigned long flags; 2523 2524 spin_lock_irqsave(&list->lock, flags); 2525 __skb_queue_after(list, old, newsk); 2526 spin_unlock_irqrestore(&list->lock, flags); 2527 } 2528 EXPORT_SYMBOL(skb_append); 2529 2530 /** 2531 * skb_insert - insert a buffer 2532 * @old: buffer to insert before 2533 * @newsk: buffer to insert 2534 * @list: list to use 2535 * 2536 * Place a packet before a given packet in a list. The list locks are 2537 * taken and this function is atomic with respect to other locked list 2538 * calls. 2539 * 2540 * A buffer cannot be placed on two lists at the same time. 2541 */ 2542 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 2543 { 2544 unsigned long flags; 2545 2546 spin_lock_irqsave(&list->lock, flags); 2547 __skb_insert(newsk, old->prev, old, list); 2548 spin_unlock_irqrestore(&list->lock, flags); 2549 } 2550 EXPORT_SYMBOL(skb_insert); 2551 2552 static inline void skb_split_inside_header(struct sk_buff *skb, 2553 struct sk_buff *skb1, 2554 const u32 len, const int pos) 2555 { 2556 int i; 2557 2558 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 2559 pos - len); 2560 /* And move the paged data over as is. */ 2561 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2562 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 2563 2564 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 2565 skb_shinfo(skb)->nr_frags = 0; 2566 skb1->data_len = skb->data_len; 2567 skb1->len += skb1->data_len; 2568 skb->data_len = 0; 2569 skb->len = len; 2570 skb_set_tail_pointer(skb, len); 2571 } 2572 2573 static inline void skb_split_no_header(struct sk_buff *skb, 2574 struct sk_buff *skb1, 2575 const u32 len, int pos) 2576 { 2577 int i, k = 0; 2578 const int nfrags = skb_shinfo(skb)->nr_frags; 2579 2580 skb_shinfo(skb)->nr_frags = 0; 2581 skb1->len = skb1->data_len = skb->len - len; 2582 skb->len = len; 2583 skb->data_len = len - pos; 2584 2585 for (i = 0; i < nfrags; i++) { 2586 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2587 2588 if (pos + size > len) { 2589 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 2590 2591 if (pos < len) { 2592 /* Split frag. 2593 * We have two variants in this case: 2594 * 1. Move all the frag to the second 2595 * part, if it is possible. E.g. 2596 * this approach is mandatory for TUX, 2597 * where splitting is expensive. 2598 * 2. Split accurately at @len. This is what we do.
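 * (Concretely: an extra reference is taken on the frag's page; @skb
 * keeps the first len - pos bytes of the frag, while the copy placed
 * in @skb1 advances its page_offset past those bytes, so both skbs
 * reference the same page.)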
*/ 2600 skb_frag_ref(skb, i); 2601 skb_shinfo(skb1)->frags[0].page_offset += len - pos; 2602 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 2603 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 2604 skb_shinfo(skb)->nr_frags++; 2605 } 2606 k++; 2607 } else 2608 skb_shinfo(skb)->nr_frags++; 2609 pos += size; 2610 } 2611 skb_shinfo(skb1)->nr_frags = k; 2612 } 2613 2614 /** 2615 * skb_split - split a fragmented skb into two parts at length @len 2616 * @skb: the buffer to split 2617 * @skb1: the buffer to receive the second part 2618 * @len: new length for skb 2619 */ 2620 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 2621 { 2622 int pos = skb_headlen(skb); 2623 2624 skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; 2625 if (len < pos) /* Split line is inside header. */ 2626 skb_split_inside_header(skb, skb1, len, pos); 2627 else /* Second chunk has no header, nothing to copy. */ 2628 skb_split_no_header(skb, skb1, len, pos); 2629 } 2630 EXPORT_SYMBOL(skb_split); 2631 2632 /* Shifting from/to a cloned skb is a no-go. 2633 * 2634 * Caller cannot keep skb_shinfo related pointers past calling here! 2635 */ 2636 static int skb_prepare_for_shift(struct sk_buff *skb) 2637 { 2638 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2639 } 2640 2641 /** 2642 * skb_shift - Shifts paged data partially from skb to another 2643 * @tgt: buffer into which tail data gets added 2644 * @skb: buffer from which the paged data comes from 2645 * @shiftlen: shift up to this many bytes 2646 * 2647 * Attempts to shift up to @shiftlen worth of bytes, which may be less 2648 * than the length of the skb, from @skb to @tgt. Returns the number of 2649 * bytes shifted. It's up to the caller to free @skb if everything was shifted. 2650 * 2651 * If @tgt runs out of frags, the whole operation is aborted. 2652 * 2653 * @skb may contain nothing but paged data, while @tgt is allowed to 2654 * have non-paged data as well. 2655 * 2656 * TODO: a full sized shift could be optimized, but that would need a 2657 * specialized skb freer to handle frags without up-to-date nr_frags. 2658 */ 2659 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 2660 { 2661 int from, to, merge, todo; 2662 struct skb_frag_struct *fragfrom, *fragto; 2663 2664 BUG_ON(shiftlen > skb->len); 2665 2666 if (skb_headlen(skb)) 2667 return 0; 2668 2669 todo = shiftlen; 2670 from = 0; 2671 to = skb_shinfo(tgt)->nr_frags; 2672 fragfrom = &skb_shinfo(skb)->frags[from]; 2673 2674 /* Actual merge is delayed until the point when we know we can 2675 * commit all, so that we don't have to undo partial changes. 2676 */ 2677 if (!to || 2678 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 2679 fragfrom->page_offset)) { 2680 merge = -1; 2681 } else { 2682 merge = to - 1; 2683 2684 todo -= skb_frag_size(fragfrom); 2685 if (todo < 0) { 2686 if (skb_prepare_for_shift(skb) || 2687 skb_prepare_for_shift(tgt)) 2688 return 0; 2689 2690 /* All previous frag pointers might be stale!
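 * (skb_prepare_for_shift() may call pskb_expand_head(), which
 * reallocates the shared info area, so reload them here.)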
*/ 2691 fragfrom = &skb_shinfo(skb)->frags[from]; 2692 fragto = &skb_shinfo(tgt)->frags[merge]; 2693 2694 skb_frag_size_add(fragto, shiftlen); 2695 skb_frag_size_sub(fragfrom, shiftlen); 2696 fragfrom->page_offset += shiftlen; 2697 2698 goto onlymerged; 2699 } 2700 2701 from++; 2702 } 2703 2704 /* Skip full, not-fitting skb to avoid expensive operations */ 2705 if ((shiftlen == skb->len) && 2706 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2707 return 0; 2708 2709 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2710 return 0; 2711 2712 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2713 if (to == MAX_SKB_FRAGS) 2714 return 0; 2715 2716 fragfrom = &skb_shinfo(skb)->frags[from]; 2717 fragto = &skb_shinfo(tgt)->frags[to]; 2718 2719 if (todo >= skb_frag_size(fragfrom)) { 2720 *fragto = *fragfrom; 2721 todo -= skb_frag_size(fragfrom); 2722 from++; 2723 to++; 2724 2725 } else { 2726 __skb_frag_ref(fragfrom); 2727 fragto->page = fragfrom->page; 2728 fragto->page_offset = fragfrom->page_offset; 2729 skb_frag_size_set(fragto, todo); 2730 2731 fragfrom->page_offset += todo; 2732 skb_frag_size_sub(fragfrom, todo); 2733 todo = 0; 2734 2735 to++; 2736 break; 2737 } 2738 } 2739 2740 /* Ready to "commit" this state change to tgt */ 2741 skb_shinfo(tgt)->nr_frags = to; 2742 2743 if (merge >= 0) { 2744 fragfrom = &skb_shinfo(skb)->frags[0]; 2745 fragto = &skb_shinfo(tgt)->frags[merge]; 2746 2747 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 2748 __skb_frag_unref(fragfrom); 2749 } 2750 2751 /* Reposition in the original skb */ 2752 to = 0; 2753 while (from < skb_shinfo(skb)->nr_frags) 2754 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2755 skb_shinfo(skb)->nr_frags = to; 2756 2757 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2758 2759 onlymerged: 2760 /* Most likely the tgt won't ever need its checksum anymore, skb on 2761 * the other hand might need it if it needs to be resent 2762 */ 2763 tgt->ip_summed = CHECKSUM_PARTIAL; 2764 skb->ip_summed = CHECKSUM_PARTIAL; 2765 2766 /* Yak, is it really working this way? Some helper please? */ 2767 skb->len -= shiftlen; 2768 skb->data_len -= shiftlen; 2769 skb->truesize -= shiftlen; 2770 tgt->len += shiftlen; 2771 tgt->data_len += shiftlen; 2772 tgt->truesize += shiftlen; 2773 2774 return shiftlen; 2775 } 2776 2777 /** 2778 * skb_prepare_seq_read - Prepare a sequential read of skb data 2779 * @skb: the buffer to read 2780 * @from: lower offset of data to be read 2781 * @to: upper offset of data to be read 2782 * @st: state variable 2783 * 2784 * Initializes the specified state variable. Must be called before 2785 * invoking skb_seq_read() for the first time. 2786 */ 2787 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 2788 unsigned int to, struct skb_seq_state *st) 2789 { 2790 st->lower_offset = from; 2791 st->upper_offset = to; 2792 st->root_skb = st->cur_skb = skb; 2793 st->frag_idx = st->stepped_offset = 0; 2794 st->frag_data = NULL; 2795 } 2796 EXPORT_SYMBOL(skb_prepare_seq_read); 2797 2798 /** 2799 * skb_seq_read - Sequentially read skb data 2800 * @consumed: number of bytes consumed by the caller so far 2801 * @data: destination pointer for data to be returned 2802 * @st: state variable 2803 * 2804 * Reads a block of skb data at @consumed relative to the 2805 * lower offset specified to skb_prepare_seq_read(). Assigns 2806 * the head of the data block to @data and returns the length 2807 * of the block or 0 if the end of the skb data or the upper 2808 * offset has been reached. 
2809 * 2810 * The caller is not required to consume all of the data 2811 * returned, i.e. @consumed is typically set to the number 2812 * of bytes already consumed and the next call to 2813 * skb_seq_read() will return the remaining part of the block. 2814 * 2815 * Note 1: The size of each block of data returned can be arbitrary, 2816 * this limitation is the cost for zerocopy sequential 2817 * reads of potentially non linear data. 2818 * 2819 * Note 2: Fragment lists within fragments are not implemented 2820 * at the moment, state->root_skb could be replaced with 2821 * a stack for this purpose. 2822 */ 2823 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 2824 struct skb_seq_state *st) 2825 { 2826 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 2827 skb_frag_t *frag; 2828 2829 if (unlikely(abs_offset >= st->upper_offset)) { 2830 if (st->frag_data) { 2831 kunmap_atomic(st->frag_data); 2832 st->frag_data = NULL; 2833 } 2834 return 0; 2835 } 2836 2837 next_skb: 2838 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 2839 2840 if (abs_offset < block_limit && !st->frag_data) { 2841 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 2842 return block_limit - abs_offset; 2843 } 2844 2845 if (st->frag_idx == 0 && !st->frag_data) 2846 st->stepped_offset += skb_headlen(st->cur_skb); 2847 2848 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2849 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 2850 block_limit = skb_frag_size(frag) + st->stepped_offset; 2851 2852 if (abs_offset < block_limit) { 2853 if (!st->frag_data) 2854 st->frag_data = kmap_atomic(skb_frag_page(frag)); 2855 2856 *data = (u8 *) st->frag_data + frag->page_offset + 2857 (abs_offset - st->stepped_offset); 2858 2859 return block_limit - abs_offset; 2860 } 2861 2862 if (st->frag_data) { 2863 kunmap_atomic(st->frag_data); 2864 st->frag_data = NULL; 2865 } 2866 2867 st->frag_idx++; 2868 st->stepped_offset += skb_frag_size(frag); 2869 } 2870 2871 if (st->frag_data) { 2872 kunmap_atomic(st->frag_data); 2873 st->frag_data = NULL; 2874 } 2875 2876 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 2877 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 2878 st->frag_idx = 0; 2879 goto next_skb; 2880 } else if (st->cur_skb->next) { 2881 st->cur_skb = st->cur_skb->next; 2882 st->frag_idx = 0; 2883 goto next_skb; 2884 } 2885 2886 return 0; 2887 } 2888 EXPORT_SYMBOL(skb_seq_read); 2889 2890 /** 2891 * skb_abort_seq_read - Abort a sequential read of skb data 2892 * @st: state variable 2893 * 2894 * Must be called if skb_seq_read() was not called until it 2895 * returned 0. 
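 *
 * The canonical calling pattern looks like this (sketch; process_block()
 * is hypothetical):
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, avail;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, from, to, &st);
 *	while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
 *		if (process_block(data, avail) < 0) {
 *			skb_abort_seq_read(&st);	/* stopped before 0 */
 *			break;
 *		}
 *		consumed += avail;
 *	}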
*/ 2897 void skb_abort_seq_read(struct skb_seq_state *st) 2898 { 2899 if (st->frag_data) 2900 kunmap_atomic(st->frag_data); 2901 } 2902 EXPORT_SYMBOL(skb_abort_seq_read); 2903 2904 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 2905 2906 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 2907 struct ts_config *conf, 2908 struct ts_state *state) 2909 { 2910 return skb_seq_read(offset, text, TS_SKB_CB(state)); 2911 } 2912 2913 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 2914 { 2915 skb_abort_seq_read(TS_SKB_CB(state)); 2916 } 2917 2918 /** 2919 * skb_find_text - Find a text pattern in skb data 2920 * @skb: the buffer to look in 2921 * @from: search offset 2922 * @to: search limit 2923 * @config: textsearch configuration 2924 * 2925 * Finds a pattern in the skb data according to the specified 2926 * textsearch configuration. Use textsearch_next() to retrieve 2927 * subsequent occurrences of the pattern. Returns the offset 2928 * to the first occurrence or UINT_MAX if no match was found. 2929 */ 2930 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 2931 unsigned int to, struct ts_config *config) 2932 { 2933 struct ts_state state; 2934 unsigned int ret; 2935 2936 config->get_next_block = skb_ts_get_next_block; 2937 config->finish = skb_ts_finish; 2938 2939 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 2940 2941 ret = textsearch_find(config, &state); 2942 return (ret <= to - from ? ret : UINT_MAX); 2943 } 2944 EXPORT_SYMBOL(skb_find_text); 2945 2946 /** 2947 * skb_append_datato_frags - append the user data to a skb 2948 * @sk: sock structure 2949 * @skb: skb structure to append the user data to 2950 * @getfrag: callback function used to obtain the user data 2951 * @from: pointer to user message iov 2952 * @length: length of the iov message 2953 * 2954 * Description: This procedure appends the user data to the fragment part 2955 * of the skb. If any page allocation fails, it returns -ENOMEM 2956 */ 2957 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 2958 int (*getfrag)(void *from, char *to, int offset, 2959 int len, int odd, struct sk_buff *skb), 2960 void *from, int length) 2961 { 2962 int frg_cnt = skb_shinfo(skb)->nr_frags; 2963 int copy; 2964 int offset = 0; 2965 int ret; 2966 struct page_frag *pfrag = &current->task_frag; 2967 2968 do { 2969 /* Return error if we don't have space for new frag */ 2970 if (frg_cnt >= MAX_SKB_FRAGS) 2971 return -EMSGSIZE; 2972 2973 if (!sk_page_frag_refill(sk, pfrag)) 2974 return -ENOMEM; 2975 2976 /* copy the user data to page */ 2977 copy = min_t(int, length, pfrag->size - pfrag->offset); 2978 2979 ret = getfrag(from, page_address(pfrag->page) + pfrag->offset, 2980 offset, copy, 0, skb); 2981 if (ret < 0) 2982 return -EFAULT; 2983 2984 /* copy was successful so update the size parameters */ 2985 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset, 2986 copy); 2987 frg_cnt++; 2988 pfrag->offset += copy; 2989 get_page(pfrag->page); 2990 2991 skb->truesize += copy; 2992 atomic_add(copy, &sk->sk_wmem_alloc); 2993 skb->len += copy; 2994 skb->data_len += copy; 2995 offset += copy; 2996 length -= copy; 2997 2998 } while (length > 0); 2999 3000 return 0; 3001 } 3002 EXPORT_SYMBOL(skb_append_datato_frags); 3003 3004 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 3005 int offset, size_t size) 3006 { 3007 int i = skb_shinfo(skb)->nr_frags; 3008 3009 if (skb_can_coalesce(skb, i, page, offset)) { 3010
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); 3011 } else if (i < MAX_SKB_FRAGS) { 3012 get_page(page); 3013 skb_fill_page_desc(skb, i, page, offset, size); 3014 } else { 3015 return -EMSGSIZE; 3016 } 3017 3018 return 0; 3019 } 3020 EXPORT_SYMBOL_GPL(skb_append_pagefrags); 3021 3022 /** 3023 * skb_pull_rcsum - pull skb and update receive checksum 3024 * @skb: buffer to update 3025 * @len: length of data pulled 3026 * 3027 * This function performs an skb_pull on the packet and updates 3028 * the CHECKSUM_COMPLETE checksum. It should be used on 3029 * receive path processing instead of skb_pull unless you know 3030 * that the checksum difference is zero (e.g., a valid IP header) 3031 * or you are setting ip_summed to CHECKSUM_NONE. 3032 */ 3033 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 3034 { 3035 unsigned char *data = skb->data; 3036 3037 BUG_ON(len > skb->len); 3038 __skb_pull(skb, len); 3039 skb_postpull_rcsum(skb, data, len); 3040 return skb->data; 3041 } 3042 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 3043 3044 /** 3045 * skb_segment - Perform protocol segmentation on skb. 3046 * @head_skb: buffer to segment 3047 * @features: features for the output path (see dev->features) 3048 * 3049 * This function performs segmentation on the given skb. It returns 3050 * a pointer to the first in a list of new skbs for the segments. 3051 * In case of error it returns ERR_PTR(err). 3052 */ 3053 struct sk_buff *skb_segment(struct sk_buff *head_skb, 3054 netdev_features_t features) 3055 { 3056 struct sk_buff *segs = NULL; 3057 struct sk_buff *tail = NULL; 3058 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 3059 skb_frag_t *frag = skb_shinfo(head_skb)->frags; 3060 unsigned int mss = skb_shinfo(head_skb)->gso_size; 3061 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 3062 struct sk_buff *frag_skb = head_skb; 3063 unsigned int offset = doffset; 3064 unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 3065 unsigned int partial_segs = 0; 3066 unsigned int headroom; 3067 unsigned int len = head_skb->len; 3068 __be16 proto; 3069 bool csum, sg; 3070 int nfrags = skb_shinfo(head_skb)->nr_frags; 3071 int err = -ENOMEM; 3072 int i = 0; 3073 int pos; 3074 int dummy; 3075 3076 __skb_push(head_skb, doffset); 3077 proto = skb_network_protocol(head_skb, &dummy); 3078 if (unlikely(!proto)) 3079 return ERR_PTR(-EINVAL); 3080 3081 sg = !!(features & NETIF_F_SG); 3082 csum = !!can_checksum_protocol(features, proto); 3083 3084 if (sg && csum && (mss != GSO_BY_FRAGS)) { 3085 if (!(features & NETIF_F_GSO_PARTIAL)) { 3086 struct sk_buff *iter; 3087 unsigned int frag_len; 3088 3089 if (!list_skb || 3090 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 3091 goto normal; 3092 3093 /* If we get here then all the required 3094 * GSO features except frag_list are supported. 3095 * Try to split the SKB to multiple GSO SKBs 3096 * with no frag_list. 3097 * Currently we can do that only when the buffers don't 3098 * have a linear part and all the buffers except 3099 * the last are of the same length. 3100 */ 3101 frag_len = list_skb->len; 3102 skb_walk_frags(head_skb, iter) { 3103 if (frag_len != iter->len && iter->next) 3104 goto normal; 3105 if (skb_headlen(iter)) 3106 goto normal; 3107 3108 len -= iter->len; 3109 } 3110 3111 if (len != frag_len) 3112 goto normal; 3113 } 3114 3115 /* GSO partial only requires that we trim off any excess that 3116 * doesn't fit into an MSS sized block, so take care of that 3117 * now. 
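 *
 * Illustrative numbers only: with len = 65000 and mss = 1448,
 * partial_segs = 65000 / 1448 = 44, so mss below becomes
 * 44 * 1448 = 63712. One 63712 byte super-segment is built and the
 * remaining 1288 bytes end up in a final, shorter segment.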
3118 */ 3119 partial_segs = len / mss; 3120 if (partial_segs > 1) 3121 mss *= partial_segs; 3122 else 3123 partial_segs = 0; 3124 } 3125 3126 normal: 3127 headroom = skb_headroom(head_skb); 3128 pos = skb_headlen(head_skb); 3129 3130 do { 3131 struct sk_buff *nskb; 3132 skb_frag_t *nskb_frag; 3133 int hsize; 3134 int size; 3135 3136 if (unlikely(mss == GSO_BY_FRAGS)) { 3137 len = list_skb->len; 3138 } else { 3139 len = head_skb->len - offset; 3140 if (len > mss) 3141 len = mss; 3142 } 3143 3144 hsize = skb_headlen(head_skb) - offset; 3145 if (hsize < 0) 3146 hsize = 0; 3147 if (hsize > len || !sg) 3148 hsize = len; 3149 3150 if (!hsize && i >= nfrags && skb_headlen(list_skb) && 3151 (skb_headlen(list_skb) == len || sg)) { 3152 BUG_ON(skb_headlen(list_skb) > len); 3153 3154 i = 0; 3155 nfrags = skb_shinfo(list_skb)->nr_frags; 3156 frag = skb_shinfo(list_skb)->frags; 3157 frag_skb = list_skb; 3158 pos += skb_headlen(list_skb); 3159 3160 while (pos < offset + len) { 3161 BUG_ON(i >= nfrags); 3162 3163 size = skb_frag_size(frag); 3164 if (pos + size > offset + len) 3165 break; 3166 3167 i++; 3168 pos += size; 3169 frag++; 3170 } 3171 3172 nskb = skb_clone(list_skb, GFP_ATOMIC); 3173 list_skb = list_skb->next; 3174 3175 if (unlikely(!nskb)) 3176 goto err; 3177 3178 if (unlikely(pskb_trim(nskb, len))) { 3179 kfree_skb(nskb); 3180 goto err; 3181 } 3182 3183 hsize = skb_end_offset(nskb); 3184 if (skb_cow_head(nskb, doffset + headroom)) { 3185 kfree_skb(nskb); 3186 goto err; 3187 } 3188 3189 nskb->truesize += skb_end_offset(nskb) - hsize; 3190 skb_release_head_state(nskb); 3191 __skb_push(nskb, doffset); 3192 } else { 3193 nskb = __alloc_skb(hsize + doffset + headroom, 3194 GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 3195 NUMA_NO_NODE); 3196 3197 if (unlikely(!nskb)) 3198 goto err; 3199 3200 skb_reserve(nskb, headroom); 3201 __skb_put(nskb, doffset); 3202 } 3203 3204 if (segs) 3205 tail->next = nskb; 3206 else 3207 segs = nskb; 3208 tail = nskb; 3209 3210 __copy_skb_header(nskb, head_skb); 3211 3212 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 3213 skb_reset_mac_len(nskb); 3214 3215 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 3216 nskb->data - tnl_hlen, 3217 doffset + tnl_hlen); 3218 3219 if (nskb->len == len + doffset) 3220 goto perform_csum_check; 3221 3222 if (!sg) { 3223 if (!nskb->remcsum_offload) 3224 nskb->ip_summed = CHECKSUM_NONE; 3225 SKB_GSO_CB(nskb)->csum = 3226 skb_copy_and_csum_bits(head_skb, offset, 3227 skb_put(nskb, len), 3228 len, 0); 3229 SKB_GSO_CB(nskb)->csum_start = 3230 skb_headroom(nskb) + doffset; 3231 continue; 3232 } 3233 3234 nskb_frag = skb_shinfo(nskb)->frags; 3235 3236 skb_copy_from_linear_data_offset(head_skb, offset, 3237 skb_put(nskb, hsize), hsize); 3238 3239 skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags & 3240 SKBTX_SHARED_FRAG; 3241 3242 while (pos < offset + len) { 3243 if (i >= nfrags) { 3244 BUG_ON(skb_headlen(list_skb)); 3245 3246 i = 0; 3247 nfrags = skb_shinfo(list_skb)->nr_frags; 3248 frag = skb_shinfo(list_skb)->frags; 3249 frag_skb = list_skb; 3250 3251 BUG_ON(!nfrags); 3252 3253 list_skb = list_skb->next; 3254 } 3255 3256 if (unlikely(skb_shinfo(nskb)->nr_frags >= 3257 MAX_SKB_FRAGS)) { 3258 net_warn_ratelimited( 3259 "skb_segment: too many frags: %u %u\n", 3260 pos, mss); 3261 goto err; 3262 } 3263 3264 if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC))) 3265 goto err; 3266 3267 *nskb_frag = *frag; 3268 __skb_frag_ref(nskb_frag); 3269 size = skb_frag_size(nskb_frag); 3270 3271 if (pos < offset) { 3272 
nskb_frag->page_offset += offset - pos; 3273 skb_frag_size_sub(nskb_frag, offset - pos); 3274 } 3275 3276 skb_shinfo(nskb)->nr_frags++; 3277 3278 if (pos + size <= offset + len) { 3279 i++; 3280 frag++; 3281 pos += size; 3282 } else { 3283 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); 3284 goto skip_fraglist; 3285 } 3286 3287 nskb_frag++; 3288 } 3289 3290 skip_fraglist: 3291 nskb->data_len = len - hsize; 3292 nskb->len += nskb->data_len; 3293 nskb->truesize += nskb->data_len; 3294 3295 perform_csum_check: 3296 if (!csum) { 3297 if (skb_has_shared_frag(nskb)) { 3298 err = __skb_linearize(nskb); 3299 if (err) 3300 goto err; 3301 } 3302 if (!nskb->remcsum_offload) 3303 nskb->ip_summed = CHECKSUM_NONE; 3304 SKB_GSO_CB(nskb)->csum = 3305 skb_checksum(nskb, doffset, 3306 nskb->len - doffset, 0); 3307 SKB_GSO_CB(nskb)->csum_start = 3308 skb_headroom(nskb) + doffset; 3309 } 3310 } while ((offset += len) < head_skb->len); 3311 3312 /* Some callers want to get the end of the list. 3313 * Put it in segs->prev to avoid walking the list. 3314 * (see validate_xmit_skb_list() for example) 3315 */ 3316 segs->prev = tail; 3317 3318 if (partial_segs) { 3319 struct sk_buff *iter; 3320 int type = skb_shinfo(head_skb)->gso_type; 3321 unsigned short gso_size = skb_shinfo(head_skb)->gso_size; 3322 3323 /* Update type to add partial and then remove dodgy if set */ 3324 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; 3325 type &= ~SKB_GSO_DODGY; 3326 3327 /* Update GSO info and prepare to start updating headers on 3328 * our way back down the stack of protocols. 3329 */ 3330 for (iter = segs; iter; iter = iter->next) { 3331 skb_shinfo(iter)->gso_size = gso_size; 3332 skb_shinfo(iter)->gso_segs = partial_segs; 3333 skb_shinfo(iter)->gso_type = type; 3334 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; 3335 } 3336 3337 if (tail->len - doffset <= gso_size) 3338 skb_shinfo(tail)->gso_size = 0; 3339 else if (tail != segs) 3340 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); 3341 } 3342 3343 /* The following permits correct backpressure for protocols 3344 * using skb_set_owner_w(). 3345 * The idea is to transfer ownership from head_skb to the last segment.
3346 */ 3347 if (head_skb->destructor == sock_wfree) { 3348 swap(tail->truesize, head_skb->truesize); 3349 swap(tail->destructor, head_skb->destructor); 3350 swap(tail->sk, head_skb->sk); 3351 } 3352 return segs; 3353 3354 err: 3355 kfree_skb_list(segs); 3356 return ERR_PTR(err); 3357 } 3358 EXPORT_SYMBOL_GPL(skb_segment); 3359 3360 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 3361 { 3362 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); 3363 unsigned int offset = skb_gro_offset(skb); 3364 unsigned int headlen = skb_headlen(skb); 3365 unsigned int len = skb_gro_len(skb); 3366 struct sk_buff *lp, *p = *head; 3367 unsigned int delta_truesize; 3368 3369 if (unlikely(p->len + len >= 65536)) 3370 return -E2BIG; 3371 3372 lp = NAPI_GRO_CB(p)->last; 3373 pinfo = skb_shinfo(lp); 3374 3375 if (headlen <= offset) { 3376 skb_frag_t *frag; 3377 skb_frag_t *frag2; 3378 int i = skbinfo->nr_frags; 3379 int nr_frags = pinfo->nr_frags + i; 3380 3381 if (nr_frags > MAX_SKB_FRAGS) 3382 goto merge; 3383 3384 offset -= headlen; 3385 pinfo->nr_frags = nr_frags; 3386 skbinfo->nr_frags = 0; 3387 3388 frag = pinfo->frags + nr_frags; 3389 frag2 = skbinfo->frags + i; 3390 do { 3391 *--frag = *--frag2; 3392 } while (--i); 3393 3394 frag->page_offset += offset; 3395 skb_frag_size_sub(frag, offset); 3396 3397 /* all fragments truesize : remove (head size + sk_buff) */ 3398 delta_truesize = skb->truesize - 3399 SKB_TRUESIZE(skb_end_offset(skb)); 3400 3401 skb->truesize -= skb->data_len; 3402 skb->len -= skb->data_len; 3403 skb->data_len = 0; 3404 3405 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 3406 goto done; 3407 } else if (skb->head_frag) { 3408 int nr_frags = pinfo->nr_frags; 3409 skb_frag_t *frag = pinfo->frags + nr_frags; 3410 struct page *page = virt_to_head_page(skb->head); 3411 unsigned int first_size = headlen - offset; 3412 unsigned int first_offset; 3413 3414 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 3415 goto merge; 3416 3417 first_offset = skb->data - 3418 (unsigned char *)page_address(page) + 3419 offset; 3420 3421 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 3422 3423 frag->page.p = page; 3424 frag->page_offset = first_offset; 3425 skb_frag_size_set(frag, first_size); 3426 3427 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 3428 /* We dont need to clear skbinfo->nr_frags here */ 3429 3430 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 3431 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 3432 goto done; 3433 } 3434 3435 merge: 3436 delta_truesize = skb->truesize; 3437 if (offset > headlen) { 3438 unsigned int eat = offset - headlen; 3439 3440 skbinfo->frags[0].page_offset += eat; 3441 skb_frag_size_sub(&skbinfo->frags[0], eat); 3442 skb->data_len -= eat; 3443 skb->len -= eat; 3444 offset = headlen; 3445 } 3446 3447 __skb_pull(skb, offset); 3448 3449 if (NAPI_GRO_CB(p)->last == p) 3450 skb_shinfo(p)->frag_list = skb; 3451 else 3452 NAPI_GRO_CB(p)->last->next = skb; 3453 NAPI_GRO_CB(p)->last = skb; 3454 __skb_header_release(skb); 3455 lp = p; 3456 3457 done: 3458 NAPI_GRO_CB(p)->count++; 3459 p->data_len += len; 3460 p->truesize += delta_truesize; 3461 p->len += len; 3462 if (lp != p) { 3463 lp->data_len += len; 3464 lp->truesize += delta_truesize; 3465 lp->len += len; 3466 } 3467 NAPI_GRO_CB(skb)->same_flow = 1; 3468 return 0; 3469 } 3470 EXPORT_SYMBOL_GPL(skb_gro_receive); 3471 3472 void __init skb_init(void) 3473 { 3474 skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 3475 sizeof(struct sk_buff), 3476 
0, 3477 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 3478 NULL); 3479 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 3480 sizeof(struct sk_buff_fclones), 3481 0, 3482 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 3483 NULL); 3484 } 3485 3486 /** 3487 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 3488 * @skb: Socket buffer containing the buffers to be mapped 3489 * @sg: The scatter-gather list to map into 3490 * @offset: The offset into the buffer's contents to start mapping 3491 * @len: Length of buffer space to be mapped 3492 * 3493 * Fill the specified scatter-gather list with mappings/pointers into a 3494 * region of the buffer space attached to a socket buffer. 3495 */ 3496 static int 3497 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3498 { 3499 int start = skb_headlen(skb); 3500 int i, copy = start - offset; 3501 struct sk_buff *frag_iter; 3502 int elt = 0; 3503 3504 if (copy > 0) { 3505 if (copy > len) 3506 copy = len; 3507 sg_set_buf(sg, skb->data + offset, copy); 3508 elt++; 3509 if ((len -= copy) == 0) 3510 return elt; 3511 offset += copy; 3512 } 3513 3514 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3515 int end; 3516 3517 WARN_ON(start > offset + len); 3518 3519 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3520 if ((copy = end - offset) > 0) { 3521 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3522 3523 if (copy > len) 3524 copy = len; 3525 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 3526 frag->page_offset+offset-start); 3527 elt++; 3528 if (!(len -= copy)) 3529 return elt; 3530 offset += copy; 3531 } 3532 start = end; 3533 } 3534 3535 skb_walk_frags(skb, frag_iter) { 3536 int end; 3537 3538 WARN_ON(start > offset + len); 3539 3540 end = start + frag_iter->len; 3541 if ((copy = end - offset) > 0) { 3542 if (copy > len) 3543 copy = len; 3544 elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, 3545 copy); 3546 if ((len -= copy) == 0) 3547 return elt; 3548 offset += copy; 3549 } 3550 start = end; 3551 } 3552 BUG_ON(len); 3553 return elt; 3554 } 3555 3556 /* Compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the 3557 * given sglist, without marking the sg entry that contains the last skb data 3558 * as the end. So the caller can append new data after the first call 3559 * without calling sg_unmark_end to extend the sg list. 3560 * 3561 * Scenario to use skb_to_sgvec_nomark: 3562 * 1. sg_init_table 3563 * 2. skb_to_sgvec_nomark(payload1) 3564 * 3. skb_to_sgvec_nomark(payload2) 3565 * 3566 * This is equivalent to: 3567 * 1. sg_init_table 3568 * 2. skb_to_sgvec(payload1) 3569 * 3. sg_unmark_end 3570 * 4. skb_to_sgvec(payload2) 3571 * 3572 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark 3573 * is preferable. 3574 */ 3575 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, 3576 int offset, int len) 3577 { 3578 return __skb_to_sgvec(skb, sg, offset, len); 3579 } 3580 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); 3581 3582 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3583 { 3584 int nsg = __skb_to_sgvec(skb, sg, offset, len); 3585 3586 sg_mark_end(&sg[nsg - 1]); 3587 3588 return nsg; 3589 } 3590 EXPORT_SYMBOL_GPL(skb_to_sgvec); 3591 3592 /** 3593 * skb_cow_data - Check that a socket buffer's data buffers are writable 3594 * @skb: The socket buffer to check.
3595 * @tailbits: Amount of trailing space to be added 3596 * @trailer: Returned pointer to the skb where the @tailbits space begins 3597 * 3598 * Make sure that the data buffers attached to a socket buffer are 3599 * writable. If they are not, private copies are made of the data buffers 3600 * and the socket buffer is set to use these instead. 3601 * 3602 * If @tailbits is given, make sure that there is space to write @tailbits 3603 * bytes of data beyond current end of socket buffer. @trailer will be 3604 * set to point to the skb in which this space begins. 3605 * 3606 * The number of scatterlist elements required to completely map the 3607 * COW'd and extended socket buffer will be returned. 3608 */ 3609 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 3610 { 3611 int copyflag; 3612 int elt; 3613 struct sk_buff *skb1, **skb_p; 3614 3615 /* If skb is cloned or its head is paged, reallocate 3616 * head pulling out all the pages (pages are considered not writable 3617 * at the moment even if they are anonymous). 3618 */ 3619 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 3620 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) 3621 return -ENOMEM; 3622 3623 /* Easy case. Most of packets will go this way. */ 3624 if (!skb_has_frag_list(skb)) { 3625 /* A little of trouble, not enough of space for trailer. 3626 * This should not happen, when stack is tuned to generate 3627 * good frames. OK, on miss we reallocate and reserve even more 3628 * space, 128 bytes is fair. */ 3629 3630 if (skb_tailroom(skb) < tailbits && 3631 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 3632 return -ENOMEM; 3633 3634 /* Voila! */ 3635 *trailer = skb; 3636 return 1; 3637 } 3638 3639 /* Misery. We are in troubles, going to mincer fragments... */ 3640 3641 elt = 1; 3642 skb_p = &skb_shinfo(skb)->frag_list; 3643 copyflag = 0; 3644 3645 while ((skb1 = *skb_p) != NULL) { 3646 int ntail = 0; 3647 3648 /* The fragment is partially pulled by someone, 3649 * this can happen on input. Copy it and everything 3650 * after it. */ 3651 3652 if (skb_shared(skb1)) 3653 copyflag = 1; 3654 3655 /* If the skb is the last, worry about trailer. */ 3656 3657 if (skb1->next == NULL && tailbits) { 3658 if (skb_shinfo(skb1)->nr_frags || 3659 skb_has_frag_list(skb1) || 3660 skb_tailroom(skb1) < tailbits) 3661 ntail = tailbits + 128; 3662 } 3663 3664 if (copyflag || 3665 skb_cloned(skb1) || 3666 ntail || 3667 skb_shinfo(skb1)->nr_frags || 3668 skb_has_frag_list(skb1)) { 3669 struct sk_buff *skb2; 3670 3671 /* Fuck, we are miserable poor guys... */ 3672 if (ntail == 0) 3673 skb2 = skb_copy(skb1, GFP_ATOMIC); 3674 else 3675 skb2 = skb_copy_expand(skb1, 3676 skb_headroom(skb1), 3677 ntail, 3678 GFP_ATOMIC); 3679 if (unlikely(skb2 == NULL)) 3680 return -ENOMEM; 3681 3682 if (skb1->sk) 3683 skb_set_owner_w(skb2, skb1->sk); 3684 3685 /* Looking around. Are we still alive? 3686 * OK, link new skb, drop old one */ 3687 3688 skb2->next = skb1->next; 3689 *skb_p = skb2; 3690 kfree_skb(skb1); 3691 skb1 = skb2; 3692 } 3693 elt++; 3694 *trailer = skb1; 3695 skb_p = &skb1->next; 3696 } 3697 3698 return elt; 3699 } 3700 EXPORT_SYMBOL_GPL(skb_cow_data); 3701 3702 static void sock_rmem_free(struct sk_buff *skb) 3703 { 3704 struct sock *sk = skb->sk; 3705 3706 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3707 } 3708 3709 static void skb_set_err_queue(struct sk_buff *skb) 3710 { 3711 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. 
3712 * So, it is safe to (mis)use it to mark skbs on the error queue. 3713 */ 3714 skb->pkt_type = PACKET_OUTGOING; 3715 BUILD_BUG_ON(PACKET_OUTGOING == 0); 3716 } 3717 3718 /* 3719 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 3720 */ 3721 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3722 { 3723 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 3724 (unsigned int)sk->sk_rcvbuf) 3725 return -ENOMEM; 3726 3727 skb_orphan(skb); 3728 skb->sk = sk; 3729 skb->destructor = sock_rmem_free; 3730 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3731 skb_set_err_queue(skb); 3732 3733 /* before exiting rcu section, make sure dst is refcounted */ 3734 skb_dst_force(skb); 3735 3736 skb_queue_tail(&sk->sk_error_queue, skb); 3737 if (!sock_flag(sk, SOCK_DEAD)) 3738 sk->sk_data_ready(sk); 3739 return 0; 3740 } 3741 EXPORT_SYMBOL(sock_queue_err_skb); 3742 3743 static bool is_icmp_err_skb(const struct sk_buff *skb) 3744 { 3745 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || 3746 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); 3747 } 3748 3749 struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 3750 { 3751 struct sk_buff_head *q = &sk->sk_error_queue; 3752 struct sk_buff *skb, *skb_next = NULL; 3753 bool icmp_next = false; 3754 unsigned long flags; 3755 3756 spin_lock_irqsave(&q->lock, flags); 3757 skb = __skb_dequeue(q); 3758 if (skb && (skb_next = skb_peek(q))) 3759 icmp_next = is_icmp_err_skb(skb_next); 3760 spin_unlock_irqrestore(&q->lock, flags); 3761 3762 if (is_icmp_err_skb(skb) && !icmp_next) 3763 sk->sk_err = 0; 3764 3765 if (skb_next) 3766 sk->sk_error_report(sk); 3767 3768 return skb; 3769 } 3770 EXPORT_SYMBOL(sock_dequeue_err_skb); 3771 3772 /** 3773 * skb_clone_sk - create clone of skb, and take reference to socket 3774 * @skb: the skb to clone 3775 * 3776 * This function creates a clone of a buffer that holds a reference on 3777 * sk_refcnt. Buffers created via this function are meant to be 3778 * returned using sock_queue_err_skb, or free via kfree_skb. 3779 * 3780 * When passing buffers allocated with this function to sock_queue_err_skb 3781 * it is necessary to wrap the call with sock_hold/sock_put in order to 3782 * prevent the socket from being released prior to being enqueued on 3783 * the sk_error_queue. 3784 */ 3785 struct sk_buff *skb_clone_sk(struct sk_buff *skb) 3786 { 3787 struct sock *sk = skb->sk; 3788 struct sk_buff *clone; 3789 3790 if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) 3791 return NULL; 3792 3793 clone = skb_clone(skb, GFP_ATOMIC); 3794 if (!clone) { 3795 sock_put(sk); 3796 return NULL; 3797 } 3798 3799 clone->sk = sk; 3800 clone->destructor = sock_efree; 3801 3802 return clone; 3803 } 3804 EXPORT_SYMBOL(skb_clone_sk); 3805 3806 static void __skb_complete_tx_timestamp(struct sk_buff *skb, 3807 struct sock *sk, 3808 int tstype, 3809 bool opt_stats) 3810 { 3811 struct sock_exterr_skb *serr; 3812 int err; 3813 3814 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); 3815 3816 serr = SKB_EXT_ERR(skb); 3817 memset(serr, 0, sizeof(*serr)); 3818 serr->ee.ee_errno = ENOMSG; 3819 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 3820 serr->ee.ee_info = tstype; 3821 serr->opt_stats = opt_stats; 3822 serr->header.h4.iif = skb->dev ? 
skb->dev->ifindex : 0; 3823 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 3824 serr->ee.ee_data = skb_shinfo(skb)->tskey; 3825 if (sk->sk_protocol == IPPROTO_TCP && 3826 sk->sk_type == SOCK_STREAM) 3827 serr->ee.ee_data -= sk->sk_tskey; 3828 } 3829 3830 err = sock_queue_err_skb(sk, skb); 3831 3832 if (err) 3833 kfree_skb(skb); 3834 } 3835 3836 static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 3837 { 3838 bool ret; 3839 3840 if (likely(sysctl_tstamp_allow_data || tsonly)) 3841 return true; 3842 3843 read_lock_bh(&sk->sk_callback_lock); 3844 ret = sk->sk_socket && sk->sk_socket->file && 3845 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 3846 read_unlock_bh(&sk->sk_callback_lock); 3847 return ret; 3848 } 3849 3850 void skb_complete_tx_timestamp(struct sk_buff *skb, 3851 struct skb_shared_hwtstamps *hwtstamps) 3852 { 3853 struct sock *sk = skb->sk; 3854 3855 if (!skb_may_tx_timestamp(sk, false)) 3856 return; 3857 3858 /* Take a reference to prevent skb_orphan() from freeing the socket, 3859 * but only if the socket refcount is not zero. 3860 */ 3861 if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { 3862 *skb_hwtstamps(skb) = *hwtstamps; 3863 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 3864 sock_put(sk); 3865 } 3866 } 3867 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 3868 3869 void __skb_tstamp_tx(struct sk_buff *orig_skb, 3870 struct skb_shared_hwtstamps *hwtstamps, 3871 struct sock *sk, int tstype) 3872 { 3873 struct sk_buff *skb; 3874 bool tsonly, opt_stats = false; 3875 3876 if (!sk) 3877 return; 3878 3879 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 3880 if (!skb_may_tx_timestamp(sk, tsonly)) 3881 return; 3882 3883 if (tsonly) { 3884 #ifdef CONFIG_INET 3885 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && 3886 sk->sk_protocol == IPPROTO_TCP && 3887 sk->sk_type == SOCK_STREAM) { 3888 skb = tcp_get_timestamping_opt_stats(sk); 3889 opt_stats = true; 3890 } else 3891 #endif 3892 skb = alloc_skb(0, GFP_ATOMIC); 3893 } else { 3894 skb = skb_clone(orig_skb, GFP_ATOMIC); 3895 } 3896 if (!skb) 3897 return; 3898 3899 if (tsonly) { 3900 skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags; 3901 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 3902 } 3903 3904 if (hwtstamps) 3905 *skb_hwtstamps(skb) = *hwtstamps; 3906 else 3907 skb->tstamp = ktime_get_real(); 3908 3909 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); 3910 } 3911 EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 3912 3913 void skb_tstamp_tx(struct sk_buff *orig_skb, 3914 struct skb_shared_hwtstamps *hwtstamps) 3915 { 3916 return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk, 3917 SCM_TSTAMP_SND); 3918 } 3919 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 3920 3921 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 3922 { 3923 struct sock *sk = skb->sk; 3924 struct sock_exterr_skb *serr; 3925 int err = 1; 3926 3927 skb->wifi_acked_valid = 1; 3928 skb->wifi_acked = acked; 3929 3930 serr = SKB_EXT_ERR(skb); 3931 memset(serr, 0, sizeof(*serr)); 3932 serr->ee.ee_errno = ENOMSG; 3933 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 3934 3935 /* Take a reference to prevent skb_orphan() from freeing the socket, 3936 * but only if the socket refcount is not zero. 
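 * (atomic_inc_not_zero() fails once the last reference is gone; in that
 * case err stays nonzero and the skb is simply freed below.)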
3937 */ 3938 if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { 3939 err = sock_queue_err_skb(sk, skb); 3940 sock_put(sk); 3941 } 3942 if (err) 3943 kfree_skb(skb); 3944 } 3945 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 3946 3947 /** 3948 * skb_partial_csum_set - set up and verify partial csum values for packet 3949 * @skb: the skb to set 3950 * @start: the number of bytes after skb->data to start checksumming. 3951 * @off: the offset from start to place the checksum. 3952 * 3953 * For untrusted partially-checksummed packets, we need to make sure the values 3954 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 3955 * 3956 * This function checks and sets those values and skb->ip_summed: if this 3957 * returns false you should drop the packet. 3958 */ 3959 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 3960 { 3961 if (unlikely(start > skb_headlen(skb)) || 3962 unlikely((int)start + off > skb_headlen(skb) - 2)) { 3963 net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", 3964 start, off, skb_headlen(skb)); 3965 return false; 3966 } 3967 skb->ip_summed = CHECKSUM_PARTIAL; 3968 skb->csum_start = skb_headroom(skb) + start; 3969 skb->csum_offset = off; 3970 skb_set_transport_header(skb, start); 3971 return true; 3972 } 3973 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 3974 3975 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 3976 unsigned int max) 3977 { 3978 if (skb_headlen(skb) >= len) 3979 return 0; 3980 3981 /* If we need to pullup then pullup to the max, so we 3982 * won't need to do it again. 3983 */ 3984 if (max > skb->len) 3985 max = skb->len; 3986 3987 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 3988 return -ENOMEM; 3989 3990 if (skb_headlen(skb) < len) 3991 return -EPROTO; 3992 3993 return 0; 3994 } 3995 3996 #define MAX_TCP_HDR_LEN (15 * 4) 3997 3998 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 3999 typeof(IPPROTO_IP) proto, 4000 unsigned int off) 4001 { 4002 switch (proto) { 4003 int err; 4004 4005 case IPPROTO_TCP: 4006 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 4007 off + MAX_TCP_HDR_LEN); 4008 if (!err && !skb_partial_csum_set(skb, off, 4009 offsetof(struct tcphdr, 4010 check))) 4011 err = -EPROTO; 4012 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 4013 4014 case IPPROTO_UDP: 4015 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 4016 off + sizeof(struct udphdr)); 4017 if (!err && !skb_partial_csum_set(skb, off, 4018 offsetof(struct udphdr, 4019 check))) 4020 err = -EPROTO; 4021 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 4022 } 4023 4024 return ERR_PTR(-EPROTO); 4025 } 4026 4027 /* This value should be large enough to cover a tagged ethernet header plus 4028 * maximally sized IP and TCP or UDP headers. 
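 * (For reference: a maximal IPv4 header is 15 * 4 = 60 bytes, and a
 * maximal TCP header, MAX_TCP_HDR_LEN above, is likewise 15 * 4 = 60
 * bytes.)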
4029 */ 4030 #define MAX_IP_HDR_LEN 128 4031 4032 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 4033 { 4034 unsigned int off; 4035 bool fragment; 4036 __sum16 *csum; 4037 int err; 4038 4039 fragment = false; 4040 4041 err = skb_maybe_pull_tail(skb, 4042 sizeof(struct iphdr), 4043 MAX_IP_HDR_LEN); 4044 if (err < 0) 4045 goto out; 4046 4047 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) 4048 fragment = true; 4049 4050 off = ip_hdrlen(skb); 4051 4052 err = -EPROTO; 4053 4054 if (fragment) 4055 goto out; 4056 4057 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 4058 if (IS_ERR(csum)) 4059 return PTR_ERR(csum); 4060 4061 if (recalculate) 4062 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 4063 ip_hdr(skb)->daddr, 4064 skb->len - off, 4065 ip_hdr(skb)->protocol, 0); 4066 err = 0; 4067 4068 out: 4069 return err; 4070 } 4071 4072 /* This value should be large enough to cover a tagged ethernet header plus 4073 * an IPv6 header, all options, and a maximal TCP or UDP header. 4074 */ 4075 #define MAX_IPV6_HDR_LEN 256 4076 4077 #define OPT_HDR(type, skb, off) \ 4078 (type *)(skb_network_header(skb) + (off)) 4079 4080 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 4081 { 4082 int err; 4083 u8 nexthdr; 4084 unsigned int off; 4085 unsigned int len; 4086 bool fragment; 4087 bool done; 4088 __sum16 *csum; 4089 4090 fragment = false; 4091 done = false; 4092 4093 off = sizeof(struct ipv6hdr); 4094 4095 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 4096 if (err < 0) 4097 goto out; 4098 4099 nexthdr = ipv6_hdr(skb)->nexthdr; 4100 4101 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 4102 while (off <= len && !done) { 4103 switch (nexthdr) { 4104 case IPPROTO_DSTOPTS: 4105 case IPPROTO_HOPOPTS: 4106 case IPPROTO_ROUTING: { 4107 struct ipv6_opt_hdr *hp; 4108 4109 err = skb_maybe_pull_tail(skb, 4110 off + 4111 sizeof(struct ipv6_opt_hdr), 4112 MAX_IPV6_HDR_LEN); 4113 if (err < 0) 4114 goto out; 4115 4116 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 4117 nexthdr = hp->nexthdr; 4118 off += ipv6_optlen(hp); 4119 break; 4120 } 4121 case IPPROTO_AH: { 4122 struct ip_auth_hdr *hp; 4123 4124 err = skb_maybe_pull_tail(skb, 4125 off + 4126 sizeof(struct ip_auth_hdr), 4127 MAX_IPV6_HDR_LEN); 4128 if (err < 0) 4129 goto out; 4130 4131 hp = OPT_HDR(struct ip_auth_hdr, skb, off); 4132 nexthdr = hp->nexthdr; 4133 off += ipv6_authlen(hp); 4134 break; 4135 } 4136 case IPPROTO_FRAGMENT: { 4137 struct frag_hdr *hp; 4138 4139 err = skb_maybe_pull_tail(skb, 4140 off + 4141 sizeof(struct frag_hdr), 4142 MAX_IPV6_HDR_LEN); 4143 if (err < 0) 4144 goto out; 4145 4146 hp = OPT_HDR(struct frag_hdr, skb, off); 4147 4148 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 4149 fragment = true; 4150 4151 nexthdr = hp->nexthdr; 4152 off += sizeof(struct frag_hdr); 4153 break; 4154 } 4155 default: 4156 done = true; 4157 break; 4158 } 4159 } 4160 4161 err = -EPROTO; 4162 4163 if (!done || fragment) 4164 goto out; 4165 4166 csum = skb_checksum_setup_ip(skb, nexthdr, off); 4167 if (IS_ERR(csum)) 4168 return PTR_ERR(csum); 4169 4170 if (recalculate) 4171 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 4172 &ipv6_hdr(skb)->daddr, 4173 skb->len - off, nexthdr, 0); 4174 err = 0; 4175 4176 out: 4177 return err; 4178 } 4179 4180 /** 4181 * skb_checksum_setup - set up partial checksum offset 4182 * @skb: the skb to set up 4183 * @recalculate: if true the pseudo-header checksum will be recalculated 4184 */ 4185 int skb_checksum_setup(struct sk_buff *skb, bool 
recalculate) 4186 { 4187 int err; 4188 4189 switch (skb->protocol) { 4190 case htons(ETH_P_IP): 4191 err = skb_checksum_setup_ipv4(skb, recalculate); 4192 break; 4193 4194 case htons(ETH_P_IPV6): 4195 err = skb_checksum_setup_ipv6(skb, recalculate); 4196 break; 4197 4198 default: 4199 err = -EPROTO; 4200 break; 4201 } 4202 4203 return err; 4204 } 4205 EXPORT_SYMBOL(skb_checksum_setup); 4206 4207 /** 4208 * skb_checksum_maybe_trim - maybe trims the given skb 4209 * @skb: the skb to check 4210 * @transport_len: the data length beyond the network header 4211 * 4212 * Checks whether the given skb has data beyond the given transport length. 4213 * If so, returns a cloned skb trimmed to this transport length. 4214 * Otherwise returns the provided skb. Returns NULL in error cases 4215 * (e.g. transport_len exceeds skb length or out-of-memory). 4216 * 4217 * Caller needs to set the skb transport header and free any returned skb if it 4218 * differs from the provided skb. 4219 */ 4220 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 4221 unsigned int transport_len) 4222 { 4223 struct sk_buff *skb_chk; 4224 unsigned int len = skb_transport_offset(skb) + transport_len; 4225 int ret; 4226 4227 if (skb->len < len) 4228 return NULL; 4229 else if (skb->len == len) 4230 return skb; 4231 4232 skb_chk = skb_clone(skb, GFP_ATOMIC); 4233 if (!skb_chk) 4234 return NULL; 4235 4236 ret = pskb_trim_rcsum(skb_chk, len); 4237 if (ret) { 4238 kfree_skb(skb_chk); 4239 return NULL; 4240 } 4241 4242 return skb_chk; 4243 } 4244 4245 /** 4246 * skb_checksum_trimmed - validate checksum of an skb 4247 * @skb: the skb to check 4248 * @transport_len: the data length beyond the network header 4249 * @skb_chkf: checksum function to use 4250 * 4251 * Applies the given checksum function skb_chkf to the provided skb. 4252 * Returns a checked and maybe trimmed skb. Returns NULL on error. 4253 * 4254 * If the skb has data beyond the given transport length, then a 4255 * trimmed & cloned skb is checked and returned. 4256 * 4257 * Caller needs to set the skb transport header and free any returned skb if it 4258 * differs from the provided skb. 
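*
* Illustrative sketch only (my_csum_check() is a hypothetical
* __sum16 (*)(struct sk_buff *) validation routine, of the kind IGMP or
* MLD checksum checking would supply):
*
* skb_chk = skb_checksum_trimmed(skb, transport_len, my_csum_check);
* if (!skb_chk)
* goto drop;
* ...
* if (skb_chk != skb)
* kfree_skb(skb_chk);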
4259 */ 4260 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 4261 unsigned int transport_len, 4262 __sum16(*skb_chkf)(struct sk_buff *skb)) 4263 { 4264 struct sk_buff *skb_chk; 4265 unsigned int offset = skb_transport_offset(skb); 4266 __sum16 ret; 4267 4268 skb_chk = skb_checksum_maybe_trim(skb, transport_len); 4269 if (!skb_chk) 4270 goto err; 4271 4272 if (!pskb_may_pull(skb_chk, offset)) 4273 goto err; 4274 4275 skb_pull_rcsum(skb_chk, offset); 4276 ret = skb_chkf(skb_chk); 4277 skb_push_rcsum(skb_chk, offset); 4278 4279 if (ret) 4280 goto err; 4281 4282 return skb_chk; 4283 4284 err: 4285 if (skb_chk && skb_chk != skb) 4286 kfree_skb(skb_chk); 4287 4288 return NULL; 4289 4290 } 4291 EXPORT_SYMBOL(skb_checksum_trimmed); 4292 4293 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 4294 { 4295 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 4296 skb->dev->name); 4297 } 4298 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 4299 4300 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 4301 { 4302 if (head_stolen) { 4303 skb_release_head_state(skb); 4304 kmem_cache_free(skbuff_head_cache, skb); 4305 } else { 4306 __kfree_skb(skb); 4307 } 4308 } 4309 EXPORT_SYMBOL(kfree_skb_partial); 4310 4311 /** 4312 * skb_try_coalesce - try to merge skb to prior one 4313 * @to: prior buffer 4314 * @from: buffer to add 4315 * @fragstolen: pointer to boolean 4316 * @delta_truesize: how much more was allocated than was requested 4317 */ 4318 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 4319 bool *fragstolen, int *delta_truesize) 4320 { 4321 int i, delta, len = from->len; 4322 4323 *fragstolen = false; 4324 4325 if (skb_cloned(to)) 4326 return false; 4327 4328 if (len <= skb_tailroom(to)) { 4329 if (len) 4330 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 4331 *delta_truesize = 0; 4332 return true; 4333 } 4334 4335 if (skb_has_frag_list(to) || skb_has_frag_list(from)) 4336 return false; 4337 4338 if (skb_headlen(from) != 0) { 4339 struct page *page; 4340 unsigned int offset; 4341 4342 if (skb_shinfo(to)->nr_frags + 4343 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 4344 return false; 4345 4346 if (skb_head_is_locked(from)) 4347 return false; 4348 4349 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 4350 4351 page = virt_to_head_page(from->head); 4352 offset = from->data - (unsigned char *)page_address(page); 4353 4354 skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, 4355 page, offset, skb_headlen(from)); 4356 *fragstolen = true; 4357 } else { 4358 if (skb_shinfo(to)->nr_frags + 4359 skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) 4360 return false; 4361 4362 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); 4363 } 4364 4365 WARN_ON_ONCE(delta < len); 4366 4367 memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, 4368 skb_shinfo(from)->frags, 4369 skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); 4370 skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; 4371 4372 if (!skb_cloned(from)) 4373 skb_shinfo(from)->nr_frags = 0; 4374 4375 /* if the skb is not cloned this does nothing 4376 * since we set nr_frags to 0. 
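* If it is cloned, 'from' and its clones still reference the frags, so
* 'to' must take its own reference on each frag it stole.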
4377 */
4378 for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
4379 skb_frag_ref(from, i);
4380
4381 to->truesize += delta;
4382 to->len += len;
4383 to->data_len += len;
4384
4385 *delta_truesize = delta;
4386 return true;
4387 }
4388 EXPORT_SYMBOL(skb_try_coalesce);
4389
4390 /**
4391 * skb_scrub_packet - scrub an skb
4392 *
4393 * @skb: buffer to clean
4394 * @xnet: packet is crossing netns
4395 *
4396 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
4397 * into/from a tunnel. Some information has to be cleared during these
4398 * operations.
4399 * skb_scrub_packet can also be used to clean an skb before injecting it into
4400 * another namespace (@xnet == true). We have to clear all information in the
4401 * skb that could impact namespace isolation.
4402 */
4403 void skb_scrub_packet(struct sk_buff *skb, bool xnet)
4404 {
4405 skb->tstamp = 0;
4406 skb->pkt_type = PACKET_HOST;
4407 skb->skb_iif = 0;
4408 skb->ignore_df = 0;
4409 skb_dst_drop(skb);
4410 secpath_reset(skb);
4411 nf_reset(skb);
4412 nf_reset_trace(skb);
4413
4414 if (!xnet)
4415 return;
4416
4417 skb_orphan(skb);
4418 skb->mark = 0;
4419 }
4420 EXPORT_SYMBOL_GPL(skb_scrub_packet);
4421
4422 /**
4423 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
4424 *
4425 * @skb: GSO skb
4426 *
4427 * skb_gso_transport_seglen is used to determine the real size of the
4428 * individual segments, including Layer4 headers (TCP/UDP).
4429 *
4430 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
4431 */
4432 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4433 {
4434 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4435 unsigned int thlen = 0;
4436
4437 if (skb->encapsulation) {
4438 thlen = skb_inner_transport_header(skb) -
4439 skb_transport_header(skb);
4440
4441 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
4442 thlen += inner_tcp_hdrlen(skb);
4443 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
4444 thlen = tcp_hdrlen(skb);
4445 } else if (unlikely(shinfo->gso_type & SKB_GSO_SCTP)) {
4446 thlen = sizeof(struct sctphdr);
4447 }
4448 /* UFO sets gso_size to the size of the fragmentation
4449 * payload, i.e. the size of the L4 (UDP) header is already
4450 * accounted for.
4451 */
4452 return thlen + shinfo->gso_size;
4453 }
4454 EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
4455
4456 /**
4457 * skb_gso_validate_mtu - Return whether an skb fits a given MTU once segmented
4458 *
4459 * @skb: GSO skb
4460 * @mtu: MTU to validate against
4461 *
4462 * skb_gso_validate_mtu validates whether a given skb will fit the wanted MTU
4463 * once split.
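*
* Illustrative sketch only (not taken from an actual caller): a
* forwarding path could gate oversized GSO packets roughly as
*
* if (!skb_gso_validate_mtu(skb, dst_mtu(skb_dst(skb))))
* goto reply_frag_needed;
*
* where reply_frag_needed stands in for a hypothetical ICMP
* "fragmentation needed" error path.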
4464 */ 4465 bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu) 4466 { 4467 const struct skb_shared_info *shinfo = skb_shinfo(skb); 4468 const struct sk_buff *iter; 4469 unsigned int hlen; 4470 4471 hlen = skb_gso_network_seglen(skb); 4472 4473 if (shinfo->gso_size != GSO_BY_FRAGS) 4474 return hlen <= mtu; 4475 4476 /* Undo this so we can re-use header sizes */ 4477 hlen -= GSO_BY_FRAGS; 4478 4479 skb_walk_frags(skb, iter) { 4480 if (hlen + skb_headlen(iter) > mtu) 4481 return false; 4482 } 4483 4484 return true; 4485 } 4486 EXPORT_SYMBOL_GPL(skb_gso_validate_mtu); 4487 4488 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 4489 { 4490 if (skb_cow(skb, skb_headroom(skb)) < 0) { 4491 kfree_skb(skb); 4492 return NULL; 4493 } 4494 4495 memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN, 4496 2 * ETH_ALEN); 4497 skb->mac_header += VLAN_HLEN; 4498 return skb; 4499 } 4500 4501 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) 4502 { 4503 struct vlan_hdr *vhdr; 4504 u16 vlan_tci; 4505 4506 if (unlikely(skb_vlan_tag_present(skb))) { 4507 /* vlan_tci is already set-up so leave this for another time */ 4508 return skb; 4509 } 4510 4511 skb = skb_share_check(skb, GFP_ATOMIC); 4512 if (unlikely(!skb)) 4513 goto err_free; 4514 4515 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) 4516 goto err_free; 4517 4518 vhdr = (struct vlan_hdr *)skb->data; 4519 vlan_tci = ntohs(vhdr->h_vlan_TCI); 4520 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); 4521 4522 skb_pull_rcsum(skb, VLAN_HLEN); 4523 vlan_set_encap_proto(skb, vhdr); 4524 4525 skb = skb_reorder_vlan_header(skb); 4526 if (unlikely(!skb)) 4527 goto err_free; 4528 4529 skb_reset_network_header(skb); 4530 skb_reset_transport_header(skb); 4531 skb_reset_mac_len(skb); 4532 4533 return skb; 4534 4535 err_free: 4536 kfree_skb(skb); 4537 return NULL; 4538 } 4539 EXPORT_SYMBOL(skb_vlan_untag); 4540 4541 int skb_ensure_writable(struct sk_buff *skb, int write_len) 4542 { 4543 if (!pskb_may_pull(skb, write_len)) 4544 return -ENOMEM; 4545 4546 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) 4547 return 0; 4548 4549 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 4550 } 4551 EXPORT_SYMBOL(skb_ensure_writable); 4552 4553 /* remove VLAN header from packet and update csum accordingly. 4554 * expects a non skb_vlan_tag_present skb with a vlan tag payload 4555 */ 4556 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) 4557 { 4558 struct vlan_hdr *vhdr; 4559 int offset = skb->data - skb_mac_header(skb); 4560 int err; 4561 4562 if (WARN_ONCE(offset, 4563 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", 4564 offset)) { 4565 return -EINVAL; 4566 } 4567 4568 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); 4569 if (unlikely(err)) 4570 return err; 4571 4572 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 4573 4574 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); 4575 *vlan_tci = ntohs(vhdr->h_vlan_TCI); 4576 4577 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); 4578 __skb_pull(skb, VLAN_HLEN); 4579 4580 vlan_set_encap_proto(skb, vhdr); 4581 skb->mac_header += VLAN_HLEN; 4582 4583 if (skb_network_offset(skb) < ETH_HLEN) 4584 skb_set_network_header(skb, ETH_HLEN); 4585 4586 skb_reset_mac_len(skb); 4587 4588 return err; 4589 } 4590 EXPORT_SYMBOL(__skb_vlan_pop); 4591 4592 /* Pop a vlan tag either from hwaccel or from payload. 4593 * Expects skb->data at mac header. 
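*
* Illustrative sketch only, as a hypothetical "pop vlan" packet action
* might use it:
*
* err = skb_vlan_pop(skb);
* if (unlikely(err))
* return err;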
4594 */
4595 int skb_vlan_pop(struct sk_buff *skb)
4596 {
4597 u16 vlan_tci;
4598 __be16 vlan_proto;
4599 int err;
4600
4601 if (likely(skb_vlan_tag_present(skb))) {
4602 skb->vlan_tci = 0;
4603 } else {
4604 if (unlikely(!eth_type_vlan(skb->protocol)))
4605 return 0;
4606
4607 err = __skb_vlan_pop(skb, &vlan_tci);
4608 if (err)
4609 return err;
4610 }
4611 /* move next vlan tag to hw accel tag */
4612 if (likely(!eth_type_vlan(skb->protocol)))
4613 return 0;
4614
4615 vlan_proto = skb->protocol;
4616 err = __skb_vlan_pop(skb, &vlan_tci);
4617 if (unlikely(err))
4618 return err;
4619
4620 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
4621 return 0;
4622 }
4623 EXPORT_SYMBOL(skb_vlan_pop);
4624
4625 /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
4626 * Expects skb->data at mac header.
4627 */
4628 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
4629 {
4630 if (skb_vlan_tag_present(skb)) {
4631 int offset = skb->data - skb_mac_header(skb);
4632 int err;
4633
4634 if (WARN_ONCE(offset,
4635 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
4636 offset)) {
4637 return -EINVAL;
4638 }
4639
4640 err = __vlan_insert_tag(skb, skb->vlan_proto,
4641 skb_vlan_tag_get(skb));
4642 if (err)
4643 return err;
4644
4645 skb->protocol = skb->vlan_proto;
4646 skb->mac_len += VLAN_HLEN;
4647
4648 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
4649 }
4650 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
4651 return 0;
4652 }
4653 EXPORT_SYMBOL(skb_vlan_push);
4654
4655 /**
4656 * alloc_skb_with_frags - allocate skb with page frags
4657 *
4658 * @header_len: size of linear part
4659 * @data_len: needed length in frags
4660 * @max_page_order: max page order desired.
4661 * @errcode: pointer to error code if any
4662 * @gfp_mask: allocation mask
4663 *
4664 * This can be used to allocate a paged skb, given a maximal order for frags.
4665 */
4666 struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
4667 unsigned long data_len,
4668 int max_page_order,
4669 int *errcode,
4670 gfp_t gfp_mask)
4671 {
4672 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
4673 unsigned long chunk;
4674 struct sk_buff *skb;
4675 struct page *page;
4676 gfp_t gfp_head;
4677 int i;
4678
4679 *errcode = -EMSGSIZE;
4680 /* Note this test could be relaxed if we succeed in allocating
4681 * high-order pages...
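* E.g. with 4 KB pages (MAX_SKB_FRAGS == 17), 24 pages of data are
* rejected here, although three order-3 pages could have backed them
* with only three frags.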
4682 */
4683 if (npages > MAX_SKB_FRAGS)
4684 return NULL;
4685
4686 gfp_head = gfp_mask;
4687 if (gfp_head & __GFP_DIRECT_RECLAIM)
4688 gfp_head |= __GFP_REPEAT;
4689
4690 *errcode = -ENOBUFS;
4691 skb = alloc_skb(header_len, gfp_head);
4692 if (!skb)
4693 return NULL;
4694
4695 skb->truesize += npages << PAGE_SHIFT;
4696
4697 for (i = 0; npages > 0; i++) {
4698 int order = max_page_order;
4699
4700 while (order) {
4701 if (npages >= 1 << order) {
4702 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
4703 __GFP_COMP |
4704 __GFP_NOWARN |
4705 __GFP_NORETRY,
4706 order);
4707 if (page)
4708 goto fill_page;
4709 /* Do not retry other high order allocations */
4710 order = 1;
4711 max_page_order = 0;
4712 }
4713 order--;
4714 }
4715 page = alloc_page(gfp_mask);
4716 if (!page)
4717 goto failure;
4718 fill_page:
4719 chunk = min_t(unsigned long, data_len,
4720 PAGE_SIZE << order);
4721 skb_fill_page_desc(skb, i, page, 0, chunk);
4722 data_len -= chunk;
4723 npages -= 1 << order;
4724 }
4725 return skb;
4726
4727 failure:
4728 kfree_skb(skb);
4729 return NULL;
4730 }
4731 EXPORT_SYMBOL(alloc_skb_with_frags);
4732
4733 /* carve out the first off bytes from skb when off < headlen */
4734 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
4735 const int headlen, gfp_t gfp_mask)
4736 {
4737 int i;
4738 int size = skb_end_offset(skb);
4739 int new_hlen = headlen - off;
4740 u8 *data;
4741
4742 size = SKB_DATA_ALIGN(size);
4743
4744 if (skb_pfmemalloc(skb))
4745 gfp_mask |= __GFP_MEMALLOC;
4746 data = kmalloc_reserve(size +
4747 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
4748 gfp_mask, NUMA_NO_NODE, NULL);
4749 if (!data)
4750 return -ENOMEM;
4751
4752 size = SKB_WITH_OVERHEAD(ksize(data));
4753
4754 /* Copy real data, and all frags */
4755 skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
4756 skb->len -= off;
4757
4758 memcpy((struct skb_shared_info *)(data + size),
4759 skb_shinfo(skb),
4760 offsetof(struct skb_shared_info,
4761 frags[skb_shinfo(skb)->nr_frags]));
4762 if (skb_cloned(skb)) {
4763 /* drop the old head gracefully */
4764 if (skb_orphan_frags(skb, gfp_mask)) {
4765 kfree(data);
4766 return -ENOMEM;
4767 }
4768 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
4769 skb_frag_ref(skb, i);
4770 if (skb_has_frag_list(skb))
4771 skb_clone_fraglist(skb);
4772 skb_release_data(skb);
4773 } else {
4774 /* we can reuse the existing refcount - all we did was
4775 * relocate values
4776 */
4777 skb_free_head(skb);
4778 }
4779
4780 skb->head = data;
4781 skb->data = data;
4782 skb->head_frag = 0;
4783 #ifdef NET_SKBUFF_DATA_USES_OFFSET
4784 skb->end = size;
4785 #else
4786 skb->end = skb->head + size;
4787 #endif
4788 skb_set_tail_pointer(skb, skb_headlen(skb));
4789 skb_headers_offset_update(skb, 0);
4790 skb->cloned = 0;
4791 skb->hdr_len = 0;
4792 skb->nohdr = 0;
4793 atomic_set(&skb_shinfo(skb)->dataref, 1);
4794
4795 return 0;
4796 }
4797
4798 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
4799
4800 /* carve out the first eat bytes from skb's frag_list. May recurse into
4801 * pskb_carve()
4802 */
4803 static int pskb_carve_frag_list(struct sk_buff *skb,
4804 struct skb_shared_info *shinfo, int eat,
4805 gfp_t gfp_mask)
4806 {
4807 struct sk_buff *list = shinfo->frag_list;
4808 struct sk_buff *clone = NULL;
4809 struct sk_buff *insp = NULL;
4810
4811 do {
4812 if (!list) {
4813 pr_err("Not enough bytes to eat. Want %d\n", eat);
4814 return -EFAULT;
4815 }
4816 if (list->len <= eat) {
4817 /* Eaten as whole.
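* (the whole skb is consumed: it is unlinked and freed
* in the "Free pulled out fragments" loop below)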
*/
4818 eat -= list->len;
4819 list = list->next;
4820 insp = list;
4821 } else {
4822 /* Eaten partially. */
4823 if (skb_shared(list)) {
4824 clone = skb_clone(list, gfp_mask);
4825 if (!clone)
4826 return -ENOMEM;
4827 insp = list->next;
4828 list = clone;
4829 } else {
4830 /* This may be pulled without problems. */
4831 insp = list;
4832 }
4833 if (pskb_carve(list, eat, gfp_mask) < 0) {
4834 kfree_skb(clone);
4835 return -ENOMEM;
4836 }
4837 break;
4838 }
4839 } while (eat);
4840
4841 /* Free pulled out fragments. */
4842 while ((list = shinfo->frag_list) != insp) {
4843 shinfo->frag_list = list->next;
4844 kfree_skb(list);
4845 }
4846 /* And insert new clone at head. */
4847 if (clone) {
4848 clone->next = list;
4849 shinfo->frag_list = clone;
4850 }
4851 return 0;
4852 }
4853
4854 /* carve off the first off bytes from skb. Split line (off) is in the
4855 * non-linear part of skb
4856 */
4857 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
4858 int pos, gfp_t gfp_mask)
4859 {
4860 int i, k = 0;
4861 int size = skb_end_offset(skb);
4862 u8 *data;
4863 const int nfrags = skb_shinfo(skb)->nr_frags;
4864 struct skb_shared_info *shinfo;
4865
4866 size = SKB_DATA_ALIGN(size);
4867
4868 if (skb_pfmemalloc(skb))
4869 gfp_mask |= __GFP_MEMALLOC;
4870 data = kmalloc_reserve(size +
4871 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
4872 gfp_mask, NUMA_NO_NODE, NULL);
4873 if (!data)
4874 return -ENOMEM;
4875
4876 size = SKB_WITH_OVERHEAD(ksize(data));
4877
4878 memcpy((struct skb_shared_info *)(data + size),
4879 skb_shinfo(skb), offsetof(struct skb_shared_info,
4880 frags[skb_shinfo(skb)->nr_frags]));
4881 if (skb_orphan_frags(skb, gfp_mask)) {
4882 kfree(data);
4883 return -ENOMEM;
4884 }
4885 shinfo = (struct skb_shared_info *)(data + size);
4886 for (i = 0; i < nfrags; i++) {
4887 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
4888
4889 if (pos + fsize > off) {
4890 shinfo->frags[k] = skb_shinfo(skb)->frags[i];
4891
4892 if (pos < off) {
4893 /* Split frag.
4894 * We have two variants in this case:
4895 * 1. Move all the frag to the second
4896 * part, if it is possible. E.g. this
4897 * approach is mandatory for TUX,
4898 * where splitting is expensive.
4899 * 2. Split accurately. This is what we do here.
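* Note that this branch can only be taken for the
* first frag we keep (k == 0 at this point), which
* is why frags[0] is used below.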
4900 */
4901 shinfo->frags[0].page_offset += off - pos;
4902 skb_frag_size_sub(&shinfo->frags[0], off - pos);
4903 }
4904 skb_frag_ref(skb, i);
4905 k++;
4906 }
4907 pos += fsize;
4908 }
4909 shinfo->nr_frags = k;
4910 if (skb_has_frag_list(skb))
4911 skb_clone_fraglist(skb);
4912
4913 if (k == 0) {
4914 /* split line is in frag list */
4915 pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
4916 }
4917 skb_release_data(skb);
4918
4919 skb->head = data;
4920 skb->head_frag = 0;
4921 skb->data = data;
4922 #ifdef NET_SKBUFF_DATA_USES_OFFSET
4923 skb->end = size;
4924 #else
4925 skb->end = skb->head + size;
4926 #endif
4927 skb_reset_tail_pointer(skb);
4928 skb_headers_offset_update(skb, 0);
4929 skb->cloned = 0;
4930 skb->hdr_len = 0;
4931 skb->nohdr = 0;
4932 skb->len -= off;
4933 skb->data_len = skb->len;
4934 atomic_set(&skb_shinfo(skb)->dataref, 1);
4935 return 0;
4936 }
4937
4938 /* remove len bytes from the beginning of the skb */
4939 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
4940 {
4941 int headlen = skb_headlen(skb);
4942
4943 if (len < headlen)
4944 return pskb_carve_inside_header(skb, len, headlen, gfp);
4945 else
4946 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
4947 }
4948
4949 /* Extract to_copy bytes starting at off from skb, and return them in
4950 * a new skb
4951 */
4952 struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
4953 int to_copy, gfp_t gfp)
4954 {
4955 struct sk_buff *clone = skb_clone(skb, gfp);
4956
4957 if (!clone)
4958 return NULL;
4959
4960 if (pskb_carve(clone, off, gfp) < 0 ||
4961 pskb_trim(clone, to_copy)) {
4962 kfree_skb(clone);
4963 return NULL;
4964 }
4965 return clone;
4966 }
4967 EXPORT_SYMBOL(pskb_extract);
4968
4969 /**
4970 * skb_condense - try to get rid of fragments/frag_list if possible
4971 * @skb: buffer
4972 *
4973 * Can be used to save memory before skb is added to a busy queue.
4974 * If packet has bytes in frags and enough tail room in skb->head,
4975 * pull all of them, so that we can free the frags right now and adjust
4976 * truesize.
4977 * Notes:
4978 * We do not reallocate skb->head, thus this cannot fail.
4979 * Caller must re-evaluate skb->truesize if needed.
4980 */
4981 void skb_condense(struct sk_buff *skb)
4982 {
4983 if (skb->data_len) {
4984 if (skb->data_len > skb->end - skb->tail ||
4985 skb_cloned(skb))
4986 return;
4987
4988 /* Nice, we can free page frag(s) right now */
4989 __pskb_pull_tail(skb, skb->data_len);
4990 }
4991 /* At this point, skb->truesize might be overestimated,
4992 * because the skb had fragments, and fragments do not tell
4993 * their truesize.
4994 * When we pulled their content into skb->head, the fragments
4995 * were freed, but __pskb_pull_tail() could not possibly
4996 * adjust skb->truesize, not knowing the frag truesize.
4997 */
4998 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4999 }
5000
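/* Illustrative sketch only (not an existing caller): a protocol that
* wants a private skb holding bytes [off, off + to_copy) of a receive
* buffer, leaving the original skb untouched, could use pskb_extract():
*
* struct sk_buff *part;
*
* part = pskb_extract(skb, off, to_copy, GFP_KERNEL);
* if (!part)
* return -ENOMEM;
*/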