1 /* 2 * Routines having to do with the 'struct sk_buff' memory handlers. 3 * 4 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk> 5 * Florian La Roche <rzsfl@rz.uni-sb.de> 6 * 7 * Fixes: 8 * Alan Cox : Fixed the worst of the load 9 * balancer bugs. 10 * Dave Platt : Interrupt stacking fix. 11 * Richard Kooijman : Timestamp fixes. 12 * Alan Cox : Changed buffer format. 13 * Alan Cox : destructor hook for AF_UNIX etc. 14 * Linus Torvalds : Better skb_clone. 15 * Alan Cox : Added skb_copy. 16 * Alan Cox : Added all the changed routines Linus 17 * only put in the headers 18 * Ray VanTassle : Fixed --skb->lock in free 19 * Alan Cox : skb_copy copy arp field 20 * Andi Kleen : slabified it. 21 * Robert Olsson : Removed skb_head_pool 22 * 23 * NOTE: 24 * The __skb_ routines should be called with interrupts 25 * disabled, or you better be *real* sure that the operation is atomic 26 * with respect to whatever list is being frobbed (e.g. via lock_sock() 27 * or via disabling bottom half handlers, etc). 28 * 29 * This program is free software; you can redistribute it and/or 30 * modify it under the terms of the GNU General Public License 31 * as published by the Free Software Foundation; either version 32 * 2 of the License, or (at your option) any later version. 33 */ 34 35 /* 36 * The functions in this file will not compile correctly with gcc 2.4.x 37 */ 38 39 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 40 41 #include <linux/module.h> 42 #include <linux/types.h> 43 #include <linux/kernel.h> 44 #include <linux/kmemcheck.h> 45 #include <linux/mm.h> 46 #include <linux/interrupt.h> 47 #include <linux/in.h> 48 #include <linux/inet.h> 49 #include <linux/slab.h> 50 #include <linux/tcp.h> 51 #include <linux/udp.h> 52 #include <linux/sctp.h> 53 #include <linux/netdevice.h> 54 #ifdef CONFIG_NET_CLS_ACT 55 #include <net/pkt_sched.h> 56 #endif 57 #include <linux/string.h> 58 #include <linux/skbuff.h> 59 #include <linux/splice.h> 60 #include <linux/cache.h> 61 #include <linux/rtnetlink.h> 62 #include <linux/init.h> 63 #include <linux/scatterlist.h> 64 #include <linux/errqueue.h> 65 #include <linux/prefetch.h> 66 #include <linux/if_vlan.h> 67 68 #include <net/protocol.h> 69 #include <net/dst.h> 70 #include <net/sock.h> 71 #include <net/checksum.h> 72 #include <net/ip6_checksum.h> 73 #include <net/xfrm.h> 74 75 #include <linux/uaccess.h> 76 #include <trace/events/skb.h> 77 #include <linux/highmem.h> 78 #include <linux/capability.h> 79 #include <linux/user_namespace.h> 80 81 struct kmem_cache *skbuff_head_cache __read_mostly; 82 static struct kmem_cache *skbuff_fclone_cache __read_mostly; 83 int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS; 84 EXPORT_SYMBOL(sysctl_max_skb_frags); 85 86 /** 87 * skb_panic - private function for out-of-line support 88 * @skb: buffer 89 * @sz: size 90 * @addr: address 91 * @msg: skb_over_panic or skb_under_panic 92 * 93 * Out-of-line support for skb_put() and skb_push(). 94 * Called via the wrapper skb_over_panic() or skb_under_panic(). 95 * Keep out of line to prevent kernel bloat. 96 * __builtin_return_address is not used because it is not always reliable. 97 */ 98 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, 99 const char msg[]) 100 { 101 pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n", 102 msg, addr, skb->len, sz, skb->head, skb->data, 103 (unsigned long)skb->tail, (unsigned long)skb->end, 104 skb->dev ? 
skb->dev->name : "<NULL>"); 105 BUG(); 106 } 107 108 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) 109 { 110 skb_panic(skb, sz, addr, __func__); 111 } 112 113 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) 114 { 115 skb_panic(skb, sz, addr, __func__); 116 } 117 118 /* 119 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells 120 * the caller if emergency pfmemalloc reserves are being used. If it is and 121 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves 122 * may be used. Otherwise, the packet data may be discarded until enough 123 * memory is free 124 */ 125 #define kmalloc_reserve(size, gfp, node, pfmemalloc) \ 126 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc) 127 128 static void *__kmalloc_reserve(size_t size, gfp_t flags, int node, 129 unsigned long ip, bool *pfmemalloc) 130 { 131 void *obj; 132 bool ret_pfmemalloc = false; 133 134 /* 135 * Try a regular allocation, when that fails and we're not entitled 136 * to the reserves, fail. 137 */ 138 obj = kmalloc_node_track_caller(size, 139 flags | __GFP_NOMEMALLOC | __GFP_NOWARN, 140 node); 141 if (obj || !(gfp_pfmemalloc_allowed(flags))) 142 goto out; 143 144 /* Try again but now we are using pfmemalloc reserves */ 145 ret_pfmemalloc = true; 146 obj = kmalloc_node_track_caller(size, flags, node); 147 148 out: 149 if (pfmemalloc) 150 *pfmemalloc = ret_pfmemalloc; 151 152 return obj; 153 } 154 155 /* Allocate a new skbuff. We do this ourselves so we can fill in a few 156 * 'private' fields and also do memory statistics to find all the 157 * [BEEP] leaks. 158 * 159 */ 160 161 struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node) 162 { 163 struct sk_buff *skb; 164 165 /* Get the HEAD */ 166 skb = kmem_cache_alloc_node(skbuff_head_cache, 167 gfp_mask & ~__GFP_DMA, node); 168 if (!skb) 169 goto out; 170 171 /* 172 * Only clear those fields we need to clear, not those that we will 173 * actually initialise below. Hence, don't put any more fields after 174 * the tail pointer in struct sk_buff! 175 */ 176 memset(skb, 0, offsetof(struct sk_buff, tail)); 177 skb->head = NULL; 178 skb->truesize = sizeof(struct sk_buff); 179 refcount_set(&skb->users, 1); 180 181 skb->mac_header = (typeof(skb->mac_header))~0U; 182 out: 183 return skb; 184 } 185 186 /** 187 * __alloc_skb - allocate a network buffer 188 * @size: size to allocate 189 * @gfp_mask: allocation mask 190 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache 191 * instead of head cache and allocate a cloned (child) skb. 192 * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for 193 * allocations in case the data is required for writeback 194 * @node: numa node to allocate memory on 195 * 196 * Allocate a new &sk_buff. The returned buffer has no headroom and a 197 * tail room of at least size bytes. The object has a reference count 198 * of one. The return is the buffer. On a failure the return is %NULL. 199 * 200 * Buffers may only be allocated from interrupts using a @gfp_mask of 201 * %GFP_ATOMIC. 202 */ 203 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, 204 int flags, int node) 205 { 206 struct kmem_cache *cache; 207 struct skb_shared_info *shinfo; 208 struct sk_buff *skb; 209 u8 *data; 210 bool pfmemalloc; 211 212 cache = (flags & SKB_ALLOC_FCLONE) 213 ? 
		skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of the allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloc()ed
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0; otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes:
 * Before IO, the driver allocates only the data buffer where the NIC
 * puts the incoming frame. The driver should add room at head
 * (NET_SKB_PAD) and MUST add room at tail
 * (SKB_DATA_ALIGN(skb_shared_info)).
 * After IO, the driver calls build_skb() to allocate the sk_buff and
 * populate it before giving the packet to the stack.
 * RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ?
: ksize(data); 308 309 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); 310 if (!skb) 311 return NULL; 312 313 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 314 315 memset(skb, 0, offsetof(struct sk_buff, tail)); 316 skb->truesize = SKB_TRUESIZE(size); 317 refcount_set(&skb->users, 1); 318 skb->head = data; 319 skb->data = data; 320 skb_reset_tail_pointer(skb); 321 skb->end = skb->tail + size; 322 skb->mac_header = (typeof(skb->mac_header))~0U; 323 skb->transport_header = (typeof(skb->transport_header))~0U; 324 325 /* make sure we initialize shinfo sequentially */ 326 shinfo = skb_shinfo(skb); 327 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); 328 atomic_set(&shinfo->dataref, 1); 329 kmemcheck_annotate_variable(shinfo->destructor_arg); 330 331 return skb; 332 } 333 334 /* build_skb() is wrapper over __build_skb(), that specifically 335 * takes care of skb->head and skb->pfmemalloc 336 * This means that if @frag_size is not zero, then @data must be backed 337 * by a page fragment, not kmalloc() or vmalloc() 338 */ 339 struct sk_buff *build_skb(void *data, unsigned int frag_size) 340 { 341 struct sk_buff *skb = __build_skb(data, frag_size); 342 343 if (skb && frag_size) { 344 skb->head_frag = 1; 345 if (page_is_pfmemalloc(virt_to_head_page(data))) 346 skb->pfmemalloc = 1; 347 } 348 return skb; 349 } 350 EXPORT_SYMBOL(build_skb); 351 352 #define NAPI_SKB_CACHE_SIZE 64 353 354 struct napi_alloc_cache { 355 struct page_frag_cache page; 356 unsigned int skb_count; 357 void *skb_cache[NAPI_SKB_CACHE_SIZE]; 358 }; 359 360 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); 361 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache); 362 363 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) 364 { 365 struct page_frag_cache *nc; 366 unsigned long flags; 367 void *data; 368 369 local_irq_save(flags); 370 nc = this_cpu_ptr(&netdev_alloc_cache); 371 data = page_frag_alloc(nc, fragsz, gfp_mask); 372 local_irq_restore(flags); 373 return data; 374 } 375 376 /** 377 * netdev_alloc_frag - allocate a page fragment 378 * @fragsz: fragment size 379 * 380 * Allocates a frag from a page for receive buffer. 381 * Uses GFP_ATOMIC allocations. 382 */ 383 void *netdev_alloc_frag(unsigned int fragsz) 384 { 385 return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD); 386 } 387 EXPORT_SYMBOL(netdev_alloc_frag); 388 389 static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) 390 { 391 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 392 393 return page_frag_alloc(&nc->page, fragsz, gfp_mask); 394 } 395 396 void *napi_alloc_frag(unsigned int fragsz) 397 { 398 return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD); 399 } 400 EXPORT_SYMBOL(napi_alloc_frag); 401 402 /** 403 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device 404 * @dev: network device to receive on 405 * @len: length to allocate 406 * @gfp_mask: get_free_pages mask, passed to alloc_skb 407 * 408 * Allocate a new &sk_buff and assign it a usage count of one. The 409 * buffer has NET_SKB_PAD headroom built in. Users should allocate 410 * the headroom they think they need without accounting for the 411 * built in space. The built in space is used for optimisations. 412 * 413 * %NULL is returned if there is no free memory. 
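 *
 * Illustrative use (a sketch, not taken from any particular driver;
 * pkt_len stands in for whatever length the caller has computed):
 *
 *	skb = __netdev_alloc_skb(dev, pkt_len, GFP_ATOMIC);
 *	if (unlikely(!skb))
 *		return NULL;
 *
 * NET_SKB_PAD of headroom has already been reserved by the call above.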
414 */ 415 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, 416 gfp_t gfp_mask) 417 { 418 struct page_frag_cache *nc; 419 unsigned long flags; 420 struct sk_buff *skb; 421 bool pfmemalloc; 422 void *data; 423 424 len += NET_SKB_PAD; 425 426 if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) || 427 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { 428 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); 429 if (!skb) 430 goto skb_fail; 431 goto skb_success; 432 } 433 434 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 435 len = SKB_DATA_ALIGN(len); 436 437 if (sk_memalloc_socks()) 438 gfp_mask |= __GFP_MEMALLOC; 439 440 local_irq_save(flags); 441 442 nc = this_cpu_ptr(&netdev_alloc_cache); 443 data = page_frag_alloc(nc, len, gfp_mask); 444 pfmemalloc = nc->pfmemalloc; 445 446 local_irq_restore(flags); 447 448 if (unlikely(!data)) 449 return NULL; 450 451 skb = __build_skb(data, len); 452 if (unlikely(!skb)) { 453 skb_free_frag(data); 454 return NULL; 455 } 456 457 /* use OR instead of assignment to avoid clearing of bits in mask */ 458 if (pfmemalloc) 459 skb->pfmemalloc = 1; 460 skb->head_frag = 1; 461 462 skb_success: 463 skb_reserve(skb, NET_SKB_PAD); 464 skb->dev = dev; 465 466 skb_fail: 467 return skb; 468 } 469 EXPORT_SYMBOL(__netdev_alloc_skb); 470 471 /** 472 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance 473 * @napi: napi instance this buffer was allocated for 474 * @len: length to allocate 475 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages 476 * 477 * Allocate a new sk_buff for use in NAPI receive. This buffer will 478 * attempt to allocate the head from a special reserved region used 479 * only for NAPI Rx allocation. By doing this we can save several 480 * CPU cycles by avoiding having to disable and re-enable IRQs. 481 * 482 * %NULL is returned if there is no free memory. 
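 *
 * Illustrative use (a sketch; assumes it runs from the driver's NAPI
 * poll callback, where the napi_alloc_skb() wrapper supplies
 * %GFP_ATOMIC):
 *
 *	skb = napi_alloc_skb(napi, frame_len);
 *	if (unlikely(!skb))
 *		break;
 *
 * The returned skb already has NET_SKB_PAD + NET_IP_ALIGN of headroom.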
483 */ 484 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, 485 gfp_t gfp_mask) 486 { 487 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 488 struct sk_buff *skb; 489 void *data; 490 491 len += NET_SKB_PAD + NET_IP_ALIGN; 492 493 if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) || 494 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { 495 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); 496 if (!skb) 497 goto skb_fail; 498 goto skb_success; 499 } 500 501 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 502 len = SKB_DATA_ALIGN(len); 503 504 if (sk_memalloc_socks()) 505 gfp_mask |= __GFP_MEMALLOC; 506 507 data = page_frag_alloc(&nc->page, len, gfp_mask); 508 if (unlikely(!data)) 509 return NULL; 510 511 skb = __build_skb(data, len); 512 if (unlikely(!skb)) { 513 skb_free_frag(data); 514 return NULL; 515 } 516 517 /* use OR instead of assignment to avoid clearing of bits in mask */ 518 if (nc->page.pfmemalloc) 519 skb->pfmemalloc = 1; 520 skb->head_frag = 1; 521 522 skb_success: 523 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); 524 skb->dev = napi->dev; 525 526 skb_fail: 527 return skb; 528 } 529 EXPORT_SYMBOL(__napi_alloc_skb); 530 531 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, 532 int size, unsigned int truesize) 533 { 534 skb_fill_page_desc(skb, i, page, off, size); 535 skb->len += size; 536 skb->data_len += size; 537 skb->truesize += truesize; 538 } 539 EXPORT_SYMBOL(skb_add_rx_frag); 540 541 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, 542 unsigned int truesize) 543 { 544 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 545 546 skb_frag_size_add(frag, size); 547 skb->len += size; 548 skb->data_len += size; 549 skb->truesize += truesize; 550 } 551 EXPORT_SYMBOL(skb_coalesce_rx_frag); 552 553 static void skb_drop_list(struct sk_buff **listp) 554 { 555 kfree_skb_list(*listp); 556 *listp = NULL; 557 } 558 559 static inline void skb_drop_fraglist(struct sk_buff *skb) 560 { 561 skb_drop_list(&skb_shinfo(skb)->frag_list); 562 } 563 564 static void skb_clone_fraglist(struct sk_buff *skb) 565 { 566 struct sk_buff *list; 567 568 skb_walk_frags(skb, list) 569 skb_get(list); 570 } 571 572 static void skb_free_head(struct sk_buff *skb) 573 { 574 unsigned char *head = skb->head; 575 576 if (skb->head_frag) 577 skb_free_frag(head); 578 else 579 kfree(head); 580 } 581 582 static void skb_release_data(struct sk_buff *skb) 583 { 584 struct skb_shared_info *shinfo = skb_shinfo(skb); 585 int i; 586 587 if (skb->cloned && 588 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, 589 &shinfo->dataref)) 590 return; 591 592 for (i = 0; i < shinfo->nr_frags; i++) 593 __skb_frag_unref(&shinfo->frags[i]); 594 595 /* 596 * If skb buf is from userspace, we need to notify the caller 597 * the lower device DMA has done; 598 */ 599 if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) { 600 struct ubuf_info *uarg; 601 602 uarg = shinfo->destructor_arg; 603 if (uarg->callback) 604 uarg->callback(uarg, true); 605 } 606 607 if (shinfo->frag_list) 608 kfree_skb_list(shinfo->frag_list); 609 610 skb_free_head(skb); 611 } 612 613 /* 614 * Free an skbuff by memory without cleaning the state. 
615 */ 616 static void kfree_skbmem(struct sk_buff *skb) 617 { 618 struct sk_buff_fclones *fclones; 619 620 switch (skb->fclone) { 621 case SKB_FCLONE_UNAVAILABLE: 622 kmem_cache_free(skbuff_head_cache, skb); 623 return; 624 625 case SKB_FCLONE_ORIG: 626 fclones = container_of(skb, struct sk_buff_fclones, skb1); 627 628 /* We usually free the clone (TX completion) before original skb 629 * This test would have no chance to be true for the clone, 630 * while here, branch prediction will be good. 631 */ 632 if (refcount_read(&fclones->fclone_ref) == 1) 633 goto fastpath; 634 break; 635 636 default: /* SKB_FCLONE_CLONE */ 637 fclones = container_of(skb, struct sk_buff_fclones, skb2); 638 break; 639 } 640 if (!refcount_dec_and_test(&fclones->fclone_ref)) 641 return; 642 fastpath: 643 kmem_cache_free(skbuff_fclone_cache, fclones); 644 } 645 646 void skb_release_head_state(struct sk_buff *skb) 647 { 648 skb_dst_drop(skb); 649 secpath_reset(skb); 650 if (skb->destructor) { 651 WARN_ON(in_irq()); 652 skb->destructor(skb); 653 } 654 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 655 nf_conntrack_put(skb_nfct(skb)); 656 #endif 657 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 658 nf_bridge_put(skb->nf_bridge); 659 #endif 660 } 661 662 /* Free everything but the sk_buff shell. */ 663 static void skb_release_all(struct sk_buff *skb) 664 { 665 skb_release_head_state(skb); 666 if (likely(skb->head)) 667 skb_release_data(skb); 668 } 669 670 /** 671 * __kfree_skb - private function 672 * @skb: buffer 673 * 674 * Free an sk_buff. Release anything attached to the buffer. 675 * Clean the state. This is an internal helper function. Users should 676 * always call kfree_skb 677 */ 678 679 void __kfree_skb(struct sk_buff *skb) 680 { 681 skb_release_all(skb); 682 kfree_skbmem(skb); 683 } 684 EXPORT_SYMBOL(__kfree_skb); 685 686 /** 687 * kfree_skb - free an sk_buff 688 * @skb: buffer to free 689 * 690 * Drop a reference to the buffer and free it if the usage count has 691 * hit zero. 692 */ 693 void kfree_skb(struct sk_buff *skb) 694 { 695 if (!skb_unref(skb)) 696 return; 697 698 trace_kfree_skb(skb, __builtin_return_address(0)); 699 __kfree_skb(skb); 700 } 701 EXPORT_SYMBOL(kfree_skb); 702 703 void kfree_skb_list(struct sk_buff *segs) 704 { 705 while (segs) { 706 struct sk_buff *next = segs->next; 707 708 kfree_skb(segs); 709 segs = next; 710 } 711 } 712 EXPORT_SYMBOL(kfree_skb_list); 713 714 /** 715 * skb_tx_error - report an sk_buff xmit error 716 * @skb: buffer that triggered an error 717 * 718 * Report xmit error if a device callback is tracking this skb. 719 * skb must be freed afterwards. 
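 *
 * Illustrative use (a sketch of a driver TX error path, assuming the
 * skb could not be handed to the hardware):
 *
 *	skb_tx_error(skb);
 *	dev_kfree_skb_any(skb);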
 */
void skb_tx_error(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = skb_shinfo(skb)->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, false);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	}
}
EXPORT_SYMBOL(skb_tx_error);

/**
 * consume_skb - free an skbuff
 * @skb: buffer to free
 *
 * Drop a ref to the buffer and free it if the usage count has hit zero.
 * Functions identically to kfree_skb(), but kfree_skb() assumes that the
 * frame is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);

/**
 * consume_stateless_skb - free an skbuff, assuming it is stateless
 * @skb: buffer to free
 *
 * Works like consume_skb(), but this variant assumes that all the head
 * states have already been dropped.
 */
void consume_stateless_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb);
	if (likely(skb->head))
		skb_release_data(skb);
	kfree_skbmem(skb);
}

void __kfree_skb_flush(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* flush skb_cache if it contains objects */
	if (nc->skb_count) {
		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}

static inline void _kfree_skb_defer(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* drop skb->head and call any destructors for packet */
	skb_release_all(skb);

	/* record skb to CPU local list */
	nc->skb_cache[nc->skb_count++] = skb;

#ifdef CONFIG_SLUB
	/* SLUB writes into objects when freeing */
	prefetchw(skb);
#endif

	/* flush skb_cache if it is filled */
	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}
void __kfree_skb_defer(struct sk_buff *skb)
{
	_kfree_skb_defer(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	if (unlikely(!skb))
		return;

	/* Zero budget indicates a non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb);

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	_kfree_skb_defer(skb);
}
EXPORT_SYMBOL(napi_consume_skb);

/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) < \
		     offsetof(struct sk_buff, headers_start)); \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) > \
		     offsetof(struct sk_buff, headers_end)); \

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	/* We do not copy old->sk */
	new->dev = old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
__nf_copy(new, old, false); 854 855 /* Note : this field could be in headers_start/headers_end section 856 * It is not yet because we do not want to have a 16 bit hole 857 */ 858 new->queue_mapping = old->queue_mapping; 859 860 memcpy(&new->headers_start, &old->headers_start, 861 offsetof(struct sk_buff, headers_end) - 862 offsetof(struct sk_buff, headers_start)); 863 CHECK_SKB_FIELD(protocol); 864 CHECK_SKB_FIELD(csum); 865 CHECK_SKB_FIELD(hash); 866 CHECK_SKB_FIELD(priority); 867 CHECK_SKB_FIELD(skb_iif); 868 CHECK_SKB_FIELD(vlan_proto); 869 CHECK_SKB_FIELD(vlan_tci); 870 CHECK_SKB_FIELD(transport_header); 871 CHECK_SKB_FIELD(network_header); 872 CHECK_SKB_FIELD(mac_header); 873 CHECK_SKB_FIELD(inner_protocol); 874 CHECK_SKB_FIELD(inner_transport_header); 875 CHECK_SKB_FIELD(inner_network_header); 876 CHECK_SKB_FIELD(inner_mac_header); 877 CHECK_SKB_FIELD(mark); 878 #ifdef CONFIG_NETWORK_SECMARK 879 CHECK_SKB_FIELD(secmark); 880 #endif 881 #ifdef CONFIG_NET_RX_BUSY_POLL 882 CHECK_SKB_FIELD(napi_id); 883 #endif 884 #ifdef CONFIG_XPS 885 CHECK_SKB_FIELD(sender_cpu); 886 #endif 887 #ifdef CONFIG_NET_SCHED 888 CHECK_SKB_FIELD(tc_index); 889 #endif 890 891 } 892 893 /* 894 * You should not add any new code to this function. Add it to 895 * __copy_skb_header above instead. 896 */ 897 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) 898 { 899 #define C(x) n->x = skb->x 900 901 n->next = n->prev = NULL; 902 n->sk = NULL; 903 __copy_skb_header(n, skb); 904 905 C(len); 906 C(data_len); 907 C(mac_len); 908 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; 909 n->cloned = 1; 910 n->nohdr = 0; 911 n->destructor = NULL; 912 C(tail); 913 C(end); 914 C(head); 915 C(head_frag); 916 C(data); 917 C(truesize); 918 refcount_set(&n->users, 1); 919 920 atomic_inc(&(skb_shinfo(skb)->dataref)); 921 skb->cloned = 1; 922 923 return n; 924 #undef C 925 } 926 927 /** 928 * skb_morph - morph one skb into another 929 * @dst: the skb to receive the contents 930 * @src: the skb to supply the contents 931 * 932 * This is identical to skb_clone except that the target skb is 933 * supplied by the user. 934 * 935 * The target skb is returned upon exit. 936 */ 937 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) 938 { 939 skb_release_all(dst); 940 return __skb_clone(dst, src); 941 } 942 EXPORT_SYMBOL_GPL(skb_morph); 943 944 /** 945 * skb_copy_ubufs - copy userspace skb frags buffers to kernel 946 * @skb: the skb to modify 947 * @gfp_mask: allocation priority 948 * 949 * This must be called on SKBTX_DEV_ZEROCOPY skb. 950 * It will copy all frags into kernel and drop the reference 951 * to userspace pages. 952 * 953 * If this function is called from an interrupt gfp_mask() must be 954 * %GFP_ATOMIC. 955 * 956 * Returns 0 on success or a negative error code on failure 957 * to allocate kernel memory to copy to. 
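 *
 * Callers in this file are typically routed through skb_orphan_frags();
 * an illustrative pattern (as used by skb_clone() and friends below):
 *
 *	if (skb_orphan_frags(skb, gfp_mask))
 *		return NULL;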
958 */ 959 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 960 { 961 int i; 962 int num_frags = skb_shinfo(skb)->nr_frags; 963 struct page *page, *head = NULL; 964 struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg; 965 966 for (i = 0; i < num_frags; i++) { 967 u8 *vaddr; 968 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 969 970 page = alloc_page(gfp_mask); 971 if (!page) { 972 while (head) { 973 struct page *next = (struct page *)page_private(head); 974 put_page(head); 975 head = next; 976 } 977 return -ENOMEM; 978 } 979 vaddr = kmap_atomic(skb_frag_page(f)); 980 memcpy(page_address(page), 981 vaddr + f->page_offset, skb_frag_size(f)); 982 kunmap_atomic(vaddr); 983 set_page_private(page, (unsigned long)head); 984 head = page; 985 } 986 987 /* skb frags release userspace buffers */ 988 for (i = 0; i < num_frags; i++) 989 skb_frag_unref(skb, i); 990 991 uarg->callback(uarg, false); 992 993 /* skb frags point to kernel buffers */ 994 for (i = num_frags - 1; i >= 0; i--) { 995 __skb_fill_page_desc(skb, i, head, 0, 996 skb_shinfo(skb)->frags[i].size); 997 head = (struct page *)page_private(head); 998 } 999 1000 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; 1001 return 0; 1002 } 1003 EXPORT_SYMBOL_GPL(skb_copy_ubufs); 1004 1005 /** 1006 * skb_clone - duplicate an sk_buff 1007 * @skb: buffer to clone 1008 * @gfp_mask: allocation priority 1009 * 1010 * Duplicate an &sk_buff. The new one is not owned by a socket. Both 1011 * copies share the same packet data but not structure. The new 1012 * buffer has a reference count of 1. If the allocation fails the 1013 * function returns %NULL otherwise the new buffer is returned. 1014 * 1015 * If this function is called from an interrupt gfp_mask() must be 1016 * %GFP_ATOMIC. 1017 */ 1018 1019 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) 1020 { 1021 struct sk_buff_fclones *fclones = container_of(skb, 1022 struct sk_buff_fclones, 1023 skb1); 1024 struct sk_buff *n; 1025 1026 if (skb_orphan_frags(skb, gfp_mask)) 1027 return NULL; 1028 1029 if (skb->fclone == SKB_FCLONE_ORIG && 1030 refcount_read(&fclones->fclone_ref) == 1) { 1031 n = &fclones->skb2; 1032 refcount_set(&fclones->fclone_ref, 2); 1033 } else { 1034 if (skb_pfmemalloc(skb)) 1035 gfp_mask |= __GFP_MEMALLOC; 1036 1037 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); 1038 if (!n) 1039 return NULL; 1040 1041 kmemcheck_annotate_bitfield(n, flags1); 1042 n->fclone = SKB_FCLONE_UNAVAILABLE; 1043 } 1044 1045 return __skb_clone(n, skb); 1046 } 1047 EXPORT_SYMBOL(skb_clone); 1048 1049 static void skb_headers_offset_update(struct sk_buff *skb, int off) 1050 { 1051 /* Only adjust this if it actually is csum_start rather than csum */ 1052 if (skb->ip_summed == CHECKSUM_PARTIAL) 1053 skb->csum_start += off; 1054 /* {transport,network,mac}_header and tail are relative to skb->head */ 1055 skb->transport_header += off; 1056 skb->network_header += off; 1057 if (skb_mac_header_was_set(skb)) 1058 skb->mac_header += off; 1059 skb->inner_transport_header += off; 1060 skb->inner_network_header += off; 1061 skb->inner_mac_header += off; 1062 } 1063 1064 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) 1065 { 1066 __copy_skb_header(new, old); 1067 1068 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 1069 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 1070 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 1071 } 1072 1073 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) 1074 { 1075 if (skb_pfmemalloc(skb)) 1076 return 
SKB_ALLOC_RX; 1077 return 0; 1078 } 1079 1080 /** 1081 * skb_copy - create private copy of an sk_buff 1082 * @skb: buffer to copy 1083 * @gfp_mask: allocation priority 1084 * 1085 * Make a copy of both an &sk_buff and its data. This is used when the 1086 * caller wishes to modify the data and needs a private copy of the 1087 * data to alter. Returns %NULL on failure or the pointer to the buffer 1088 * on success. The returned buffer has a reference count of 1. 1089 * 1090 * As by-product this function converts non-linear &sk_buff to linear 1091 * one, so that &sk_buff becomes completely private and caller is allowed 1092 * to modify all the data of returned buffer. This means that this 1093 * function is not recommended for use in circumstances when only 1094 * header is going to be modified. Use pskb_copy() instead. 1095 */ 1096 1097 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 1098 { 1099 int headerlen = skb_headroom(skb); 1100 unsigned int size = skb_end_offset(skb) + skb->data_len; 1101 struct sk_buff *n = __alloc_skb(size, gfp_mask, 1102 skb_alloc_rx_flag(skb), NUMA_NO_NODE); 1103 1104 if (!n) 1105 return NULL; 1106 1107 /* Set the data pointer */ 1108 skb_reserve(n, headerlen); 1109 /* Set the tail pointer and length */ 1110 skb_put(n, skb->len); 1111 1112 if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)) 1113 BUG(); 1114 1115 copy_skb_header(n, skb); 1116 return n; 1117 } 1118 EXPORT_SYMBOL(skb_copy); 1119 1120 /** 1121 * __pskb_copy_fclone - create copy of an sk_buff with private head. 1122 * @skb: buffer to copy 1123 * @headroom: headroom of new skb 1124 * @gfp_mask: allocation priority 1125 * @fclone: if true allocate the copy of the skb from the fclone 1126 * cache instead of the head cache; it is recommended to set this 1127 * to true for the cases where the copy will likely be cloned 1128 * 1129 * Make a copy of both an &sk_buff and part of its data, located 1130 * in header. Fragmented data remain shared. This is used when 1131 * the caller wishes to modify only header of &sk_buff and needs 1132 * private copy of the header to alter. Returns %NULL on failure 1133 * or the pointer to the buffer on success. 1134 * The returned buffer has a reference count of 1. 1135 */ 1136 1137 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, 1138 gfp_t gfp_mask, bool fclone) 1139 { 1140 unsigned int size = skb_headlen(skb) + headroom; 1141 int flags = skb_alloc_rx_flag(skb) | (fclone ? 
SKB_ALLOC_FCLONE : 0); 1142 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); 1143 1144 if (!n) 1145 goto out; 1146 1147 /* Set the data pointer */ 1148 skb_reserve(n, headroom); 1149 /* Set the tail pointer and length */ 1150 skb_put(n, skb_headlen(skb)); 1151 /* Copy the bytes */ 1152 skb_copy_from_linear_data(skb, n->data, n->len); 1153 1154 n->truesize += skb->data_len; 1155 n->data_len = skb->data_len; 1156 n->len = skb->len; 1157 1158 if (skb_shinfo(skb)->nr_frags) { 1159 int i; 1160 1161 if (skb_orphan_frags(skb, gfp_mask)) { 1162 kfree_skb(n); 1163 n = NULL; 1164 goto out; 1165 } 1166 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1167 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 1168 skb_frag_ref(skb, i); 1169 } 1170 skb_shinfo(n)->nr_frags = i; 1171 } 1172 1173 if (skb_has_frag_list(skb)) { 1174 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 1175 skb_clone_fraglist(n); 1176 } 1177 1178 copy_skb_header(n, skb); 1179 out: 1180 return n; 1181 } 1182 EXPORT_SYMBOL(__pskb_copy_fclone); 1183 1184 /** 1185 * pskb_expand_head - reallocate header of &sk_buff 1186 * @skb: buffer to reallocate 1187 * @nhead: room to add at head 1188 * @ntail: room to add at tail 1189 * @gfp_mask: allocation priority 1190 * 1191 * Expands (or creates identical copy, if @nhead and @ntail are zero) 1192 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have 1193 * reference count of 1. Returns zero in the case of success or error, 1194 * if expansion failed. In the last case, &sk_buff is not changed. 1195 * 1196 * All the pointers pointing into skb header may change and must be 1197 * reloaded after call to this function. 1198 */ 1199 1200 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 1201 gfp_t gfp_mask) 1202 { 1203 int i, osize = skb_end_offset(skb); 1204 int size = osize + nhead + ntail; 1205 long off; 1206 u8 *data; 1207 1208 BUG_ON(nhead < 0); 1209 1210 if (skb_shared(skb)) 1211 BUG(); 1212 1213 size = SKB_DATA_ALIGN(size); 1214 1215 if (skb_pfmemalloc(skb)) 1216 gfp_mask |= __GFP_MEMALLOC; 1217 data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 1218 gfp_mask, NUMA_NO_NODE, NULL); 1219 if (!data) 1220 goto nodata; 1221 size = SKB_WITH_OVERHEAD(ksize(data)); 1222 1223 /* Copy only real data... and, alas, header. This should be 1224 * optimized for the cases when header is void. 
1225 */ 1226 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); 1227 1228 memcpy((struct skb_shared_info *)(data + size), 1229 skb_shinfo(skb), 1230 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 1231 1232 /* 1233 * if shinfo is shared we must drop the old head gracefully, but if it 1234 * is not we can just drop the old head and let the existing refcount 1235 * be since all we did is relocate the values 1236 */ 1237 if (skb_cloned(skb)) { 1238 /* copy this zero copy skb frags */ 1239 if (skb_orphan_frags(skb, gfp_mask)) 1240 goto nofrags; 1241 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1242 skb_frag_ref(skb, i); 1243 1244 if (skb_has_frag_list(skb)) 1245 skb_clone_fraglist(skb); 1246 1247 skb_release_data(skb); 1248 } else { 1249 skb_free_head(skb); 1250 } 1251 off = (data + nhead) - skb->head; 1252 1253 skb->head = data; 1254 skb->head_frag = 0; 1255 skb->data += off; 1256 #ifdef NET_SKBUFF_DATA_USES_OFFSET 1257 skb->end = size; 1258 off = nhead; 1259 #else 1260 skb->end = skb->head + size; 1261 #endif 1262 skb->tail += off; 1263 skb_headers_offset_update(skb, nhead); 1264 skb->cloned = 0; 1265 skb->hdr_len = 0; 1266 skb->nohdr = 0; 1267 atomic_set(&skb_shinfo(skb)->dataref, 1); 1268 1269 /* It is not generally safe to change skb->truesize. 1270 * For the moment, we really care of rx path, or 1271 * when skb is orphaned (not attached to a socket). 1272 */ 1273 if (!skb->sk || skb->destructor == sock_edemux) 1274 skb->truesize += size - osize; 1275 1276 return 0; 1277 1278 nofrags: 1279 kfree(data); 1280 nodata: 1281 return -ENOMEM; 1282 } 1283 EXPORT_SYMBOL(pskb_expand_head); 1284 1285 /* Make private copy of skb with writable head and some headroom */ 1286 1287 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) 1288 { 1289 struct sk_buff *skb2; 1290 int delta = headroom - skb_headroom(skb); 1291 1292 if (delta <= 0) 1293 skb2 = pskb_copy(skb, GFP_ATOMIC); 1294 else { 1295 skb2 = skb_clone(skb, GFP_ATOMIC); 1296 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, 1297 GFP_ATOMIC)) { 1298 kfree_skb(skb2); 1299 skb2 = NULL; 1300 } 1301 } 1302 return skb2; 1303 } 1304 EXPORT_SYMBOL(skb_realloc_headroom); 1305 1306 /** 1307 * skb_copy_expand - copy and expand sk_buff 1308 * @skb: buffer to copy 1309 * @newheadroom: new free bytes at head 1310 * @newtailroom: new free bytes at tail 1311 * @gfp_mask: allocation priority 1312 * 1313 * Make a copy of both an &sk_buff and its data and while doing so 1314 * allocate additional space. 1315 * 1316 * This is used when the caller wishes to modify the data and needs a 1317 * private copy of the data to alter as well as more space for new fields. 1318 * Returns %NULL on failure or the pointer to the buffer 1319 * on success. The returned buffer has a reference count of 1. 1320 * 1321 * You must pass %GFP_ATOMIC as the allocation priority if this function 1322 * is called from an interrupt. 
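 *
 * Illustrative use (a sketch; extra_head is a hypothetical amount of
 * additional headroom the caller needs):
 *
 *	nskb = skb_copy_expand(skb, skb_headroom(skb) + extra_head, 0,
 *			       GFP_ATOMIC);
 *	if (!nskb)
 *		return -ENOMEM;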
1323 */ 1324 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 1325 int newheadroom, int newtailroom, 1326 gfp_t gfp_mask) 1327 { 1328 /* 1329 * Allocate the copy buffer 1330 */ 1331 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, 1332 gfp_mask, skb_alloc_rx_flag(skb), 1333 NUMA_NO_NODE); 1334 int oldheadroom = skb_headroom(skb); 1335 int head_copy_len, head_copy_off; 1336 1337 if (!n) 1338 return NULL; 1339 1340 skb_reserve(n, newheadroom); 1341 1342 /* Set the tail pointer and length */ 1343 skb_put(n, skb->len); 1344 1345 head_copy_len = oldheadroom; 1346 head_copy_off = 0; 1347 if (newheadroom <= head_copy_len) 1348 head_copy_len = newheadroom; 1349 else 1350 head_copy_off = newheadroom - head_copy_len; 1351 1352 /* Copy the linear header and data. */ 1353 if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 1354 skb->len + head_copy_len)) 1355 BUG(); 1356 1357 copy_skb_header(n, skb); 1358 1359 skb_headers_offset_update(n, newheadroom - oldheadroom); 1360 1361 return n; 1362 } 1363 EXPORT_SYMBOL(skb_copy_expand); 1364 1365 /** 1366 * skb_pad - zero pad the tail of an skb 1367 * @skb: buffer to pad 1368 * @pad: space to pad 1369 * 1370 * Ensure that a buffer is followed by a padding area that is zero 1371 * filled. Used by network drivers which may DMA or transfer data 1372 * beyond the buffer end onto the wire. 1373 * 1374 * May return error in out of memory cases. The skb is freed on error. 1375 */ 1376 1377 int skb_pad(struct sk_buff *skb, int pad) 1378 { 1379 int err; 1380 int ntail; 1381 1382 /* If the skbuff is non linear tailroom is always zero.. */ 1383 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 1384 memset(skb->data+skb->len, 0, pad); 1385 return 0; 1386 } 1387 1388 ntail = skb->data_len + pad - (skb->end - skb->tail); 1389 if (likely(skb_cloned(skb) || ntail > 0)) { 1390 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 1391 if (unlikely(err)) 1392 goto free_skb; 1393 } 1394 1395 /* FIXME: The use of this function with non-linear skb's really needs 1396 * to be audited. 1397 */ 1398 err = skb_linearize(skb); 1399 if (unlikely(err)) 1400 goto free_skb; 1401 1402 memset(skb->data + skb->len, 0, pad); 1403 return 0; 1404 1405 free_skb: 1406 kfree_skb(skb); 1407 return err; 1408 } 1409 EXPORT_SYMBOL(skb_pad); 1410 1411 /** 1412 * pskb_put - add data to the tail of a potentially fragmented buffer 1413 * @skb: start of the buffer to use 1414 * @tail: tail fragment of the buffer to use 1415 * @len: amount of data to add 1416 * 1417 * This function extends the used data area of the potentially 1418 * fragmented buffer. @tail must be the last fragment of @skb -- or 1419 * @skb itself. If this would exceed the total buffer size the kernel 1420 * will panic. A pointer to the first byte of the extra data is 1421 * returned. 1422 */ 1423 1424 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) 1425 { 1426 if (tail != skb) { 1427 skb->data_len += len; 1428 skb->len += len; 1429 } 1430 return skb_put(tail, len); 1431 } 1432 EXPORT_SYMBOL_GPL(pskb_put); 1433 1434 /** 1435 * skb_put - add data to a buffer 1436 * @skb: buffer to use 1437 * @len: amount of data to add 1438 * 1439 * This function extends the used data area of the buffer. If this would 1440 * exceed the total buffer size the kernel will panic. A pointer to the 1441 * first byte of the extra data is returned. 
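 *
 * Illustrative use (a sketch; assumes the caller knows at least four
 * bytes of tailroom are available):
 *
 *	u8 *p = skb_put(skb, 4);
 *	memset(p, 0, 4);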
1442 */ 1443 void *skb_put(struct sk_buff *skb, unsigned int len) 1444 { 1445 void *tmp = skb_tail_pointer(skb); 1446 SKB_LINEAR_ASSERT(skb); 1447 skb->tail += len; 1448 skb->len += len; 1449 if (unlikely(skb->tail > skb->end)) 1450 skb_over_panic(skb, len, __builtin_return_address(0)); 1451 return tmp; 1452 } 1453 EXPORT_SYMBOL(skb_put); 1454 1455 /** 1456 * skb_push - add data to the start of a buffer 1457 * @skb: buffer to use 1458 * @len: amount of data to add 1459 * 1460 * This function extends the used data area of the buffer at the buffer 1461 * start. If this would exceed the total buffer headroom the kernel will 1462 * panic. A pointer to the first byte of the extra data is returned. 1463 */ 1464 void *skb_push(struct sk_buff *skb, unsigned int len) 1465 { 1466 skb->data -= len; 1467 skb->len += len; 1468 if (unlikely(skb->data<skb->head)) 1469 skb_under_panic(skb, len, __builtin_return_address(0)); 1470 return skb->data; 1471 } 1472 EXPORT_SYMBOL(skb_push); 1473 1474 /** 1475 * skb_pull - remove data from the start of a buffer 1476 * @skb: buffer to use 1477 * @len: amount of data to remove 1478 * 1479 * This function removes data from the start of a buffer, returning 1480 * the memory to the headroom. A pointer to the next data in the buffer 1481 * is returned. Once the data has been pulled future pushes will overwrite 1482 * the old data. 1483 */ 1484 void *skb_pull(struct sk_buff *skb, unsigned int len) 1485 { 1486 return skb_pull_inline(skb, len); 1487 } 1488 EXPORT_SYMBOL(skb_pull); 1489 1490 /** 1491 * skb_trim - remove end from a buffer 1492 * @skb: buffer to alter 1493 * @len: new length 1494 * 1495 * Cut the length of a buffer down by removing data from the tail. If 1496 * the buffer is already under the length specified it is not modified. 1497 * The skb must be linear. 1498 */ 1499 void skb_trim(struct sk_buff *skb, unsigned int len) 1500 { 1501 if (skb->len > len) 1502 __skb_trim(skb, len); 1503 } 1504 EXPORT_SYMBOL(skb_trim); 1505 1506 /* Trims skb to length len. It can change skb pointers. 
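 *
 * Callers normally go through pskb_trim() or skb_trim(); this helper is
 * the slow path for non-linear buffers. Illustrative use (a sketch):
 *
 *	if (pskb_trim(skb, new_len))
 *		goto drop;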
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			consume_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	if (!skb->sk || skb->destructor == sock_edemux)
		skb_condense(skb);
	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/**
 * __pskb_pull_tail - advance tail of skb header
 * @skb: buffer to reallocate
 * @delta: number of bytes to advance tail
 *
 * The function makes sense only on a fragmented &sk_buff;
 * it expands the header, moving its tail forward and copying the
 * necessary data from the fragmented part.
 *
 * &sk_buff MUST have reference count of 1.
 *
 * Returns %NULL (and &sk_buff does not change) if the pull failed,
 * or the value of the new tail of the skb in the case of success.
 *
 * All the pointers pointing into skb header may change and must be
 * reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
void *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size >= eat)
			goto pull_pages;
		eat -= size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems.
					 */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

/**
 * skb_copy_bits - copy bits from skb to kernel buffer
 * @skb: source skb
 * @offset: offset in source
 * @to: destination buffer
 * @len: number of bytes to copy
 *
 * Copy the specified number of bytes from the source skb to the
 * destination buffer.
 *
 * CAUTION ! :
 *	If its prototype is ever changed,
 *	check arch/{*}/net/{*}.S files,
 *	since it is called from BPF assembly code.
 */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header.
	 */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(f);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_atomic(skb_frag_page(f));
			memcpy(to,
			       vaddr + f->page_offset + offset - start,
			       copy);
			kunmap_atomic(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static struct page *linear_to_page(struct page *page, unsigned int *len,
				   unsigned int *offset,
				   struct sock *sk)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return NULL;

	*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);

	memcpy(page_address(pfrag->page) + pfrag->offset,
	       page_address(page) + *offset, *len);
	*offset = pfrag->offset;
	pfrag->offset += *len;

	return pfrag->page;
}

static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
			     struct page *page,
			     unsigned int offset)
{
	return spd->nr_pages &&
	       spd->pages[spd->nr_pages - 1] == page &&
	       (spd->partial[spd->nr_pages - 1].offset +
		spd->partial[spd->nr_pages - 1].len == offset);
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
1865 */ 1866 static bool spd_fill_page(struct splice_pipe_desc *spd, 1867 struct pipe_inode_info *pipe, struct page *page, 1868 unsigned int *len, unsigned int offset, 1869 bool linear, 1870 struct sock *sk) 1871 { 1872 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 1873 return true; 1874 1875 if (linear) { 1876 page = linear_to_page(page, len, &offset, sk); 1877 if (!page) 1878 return true; 1879 } 1880 if (spd_can_coalesce(spd, page, offset)) { 1881 spd->partial[spd->nr_pages - 1].len += *len; 1882 return false; 1883 } 1884 get_page(page); 1885 spd->pages[spd->nr_pages] = page; 1886 spd->partial[spd->nr_pages].len = *len; 1887 spd->partial[spd->nr_pages].offset = offset; 1888 spd->nr_pages++; 1889 1890 return false; 1891 } 1892 1893 static bool __splice_segment(struct page *page, unsigned int poff, 1894 unsigned int plen, unsigned int *off, 1895 unsigned int *len, 1896 struct splice_pipe_desc *spd, bool linear, 1897 struct sock *sk, 1898 struct pipe_inode_info *pipe) 1899 { 1900 if (!*len) 1901 return true; 1902 1903 /* skip this segment if already processed */ 1904 if (*off >= plen) { 1905 *off -= plen; 1906 return false; 1907 } 1908 1909 /* ignore any bits we already processed */ 1910 poff += *off; 1911 plen -= *off; 1912 *off = 0; 1913 1914 do { 1915 unsigned int flen = min(*len, plen); 1916 1917 if (spd_fill_page(spd, pipe, page, &flen, poff, 1918 linear, sk)) 1919 return true; 1920 poff += flen; 1921 plen -= flen; 1922 *len -= flen; 1923 } while (*len && plen); 1924 1925 return false; 1926 } 1927 1928 /* 1929 * Map linear and fragment data from the skb to spd. It reports true if the 1930 * pipe is full or if we already spliced the requested length. 1931 */ 1932 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 1933 unsigned int *offset, unsigned int *len, 1934 struct splice_pipe_desc *spd, struct sock *sk) 1935 { 1936 int seg; 1937 struct sk_buff *iter; 1938 1939 /* map the linear part : 1940 * If skb->head_frag is set, this 'linear' part is backed by a 1941 * fragment, and if the head is not shared with any clones then 1942 * we can avoid a copy since we own the head portion of this page. 1943 */ 1944 if (__splice_segment(virt_to_page(skb->data), 1945 (unsigned long) skb->data & (PAGE_SIZE - 1), 1946 skb_headlen(skb), 1947 offset, len, spd, 1948 skb_head_is_locked(skb), 1949 sk, pipe)) 1950 return true; 1951 1952 /* 1953 * then map the fragments 1954 */ 1955 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 1956 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 1957 1958 if (__splice_segment(skb_frag_page(f), 1959 f->page_offset, skb_frag_size(f), 1960 offset, len, spd, false, sk, pipe)) 1961 return true; 1962 } 1963 1964 skb_walk_frags(skb, iter) { 1965 if (*offset >= iter->len) { 1966 *offset -= iter->len; 1967 continue; 1968 } 1969 /* __skb_splice_bits() only fails if the output has no room 1970 * left, so no point in going over the frag_list for the error 1971 * case. 1972 */ 1973 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) 1974 return true; 1975 } 1976 1977 return false; 1978 } 1979 1980 /* 1981 * Map data from the skb to a pipe. Should handle both the linear part, 1982 * the fragments, and the frag list. 
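 *
 * Illustrative call (a sketch of how a protocol's splice_read path might
 * invoke it; all arguments besides @skb come from the splice request):
 *
 *	ret = skb_splice_bits(skb, sk, offset, pipe, len, flags);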
1983 */ 1984 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 1985 struct pipe_inode_info *pipe, unsigned int tlen, 1986 unsigned int flags) 1987 { 1988 struct partial_page partial[MAX_SKB_FRAGS]; 1989 struct page *pages[MAX_SKB_FRAGS]; 1990 struct splice_pipe_desc spd = { 1991 .pages = pages, 1992 .partial = partial, 1993 .nr_pages_max = MAX_SKB_FRAGS, 1994 .ops = &nosteal_pipe_buf_ops, 1995 .spd_release = sock_spd_release, 1996 }; 1997 int ret = 0; 1998 1999 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); 2000 2001 if (spd.nr_pages) 2002 ret = splice_to_pipe(pipe, &spd); 2003 2004 return ret; 2005 } 2006 EXPORT_SYMBOL_GPL(skb_splice_bits); 2007 2008 /** 2009 * skb_store_bits - store bits from kernel buffer to skb 2010 * @skb: destination buffer 2011 * @offset: offset in destination 2012 * @from: source buffer 2013 * @len: number of bytes to copy 2014 * 2015 * Copy the specified number of bytes from the source buffer to the 2016 * destination skb. This function handles all the messy bits of 2017 * traversing fragment lists and such. 2018 */ 2019 2020 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 2021 { 2022 int start = skb_headlen(skb); 2023 struct sk_buff *frag_iter; 2024 int i, copy; 2025 2026 if (offset > (int)skb->len - len) 2027 goto fault; 2028 2029 if ((copy = start - offset) > 0) { 2030 if (copy > len) 2031 copy = len; 2032 skb_copy_to_linear_data_offset(skb, offset, from, copy); 2033 if ((len -= copy) == 0) 2034 return 0; 2035 offset += copy; 2036 from += copy; 2037 } 2038 2039 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2040 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2041 int end; 2042 2043 WARN_ON(start > offset + len); 2044 2045 end = start + skb_frag_size(frag); 2046 if ((copy = end - offset) > 0) { 2047 u8 *vaddr; 2048 2049 if (copy > len) 2050 copy = len; 2051 2052 vaddr = kmap_atomic(skb_frag_page(frag)); 2053 memcpy(vaddr + frag->page_offset + offset - start, 2054 from, copy); 2055 kunmap_atomic(vaddr); 2056 2057 if ((len -= copy) == 0) 2058 return 0; 2059 offset += copy; 2060 from += copy; 2061 } 2062 start = end; 2063 } 2064 2065 skb_walk_frags(skb, frag_iter) { 2066 int end; 2067 2068 WARN_ON(start > offset + len); 2069 2070 end = start + frag_iter->len; 2071 if ((copy = end - offset) > 0) { 2072 if (copy > len) 2073 copy = len; 2074 if (skb_store_bits(frag_iter, offset - start, 2075 from, copy)) 2076 goto fault; 2077 if ((len -= copy) == 0) 2078 return 0; 2079 offset += copy; 2080 from += copy; 2081 } 2082 start = end; 2083 } 2084 if (!len) 2085 return 0; 2086 2087 fault: 2088 return -EFAULT; 2089 } 2090 EXPORT_SYMBOL(skb_store_bits); 2091 2092 /* Checksum skb data. */ 2093 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 2094 __wsum csum, const struct skb_checksum_ops *ops) 2095 { 2096 int start = skb_headlen(skb); 2097 int i, copy = start - offset; 2098 struct sk_buff *frag_iter; 2099 int pos = 0; 2100 2101 /* Checksum header. 
*/ 2102 if (copy > 0) { 2103 if (copy > len) 2104 copy = len; 2105 csum = ops->update(skb->data + offset, copy, csum); 2106 if ((len -= copy) == 0) 2107 return csum; 2108 offset += copy; 2109 pos = copy; 2110 } 2111 2112 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2113 int end; 2114 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2115 2116 WARN_ON(start > offset + len); 2117 2118 end = start + skb_frag_size(frag); 2119 if ((copy = end - offset) > 0) { 2120 __wsum csum2; 2121 u8 *vaddr; 2122 2123 if (copy > len) 2124 copy = len; 2125 vaddr = kmap_atomic(skb_frag_page(frag)); 2126 csum2 = ops->update(vaddr + frag->page_offset + 2127 offset - start, copy, 0); 2128 kunmap_atomic(vaddr); 2129 csum = ops->combine(csum, csum2, pos, copy); 2130 if (!(len -= copy)) 2131 return csum; 2132 offset += copy; 2133 pos += copy; 2134 } 2135 start = end; 2136 } 2137 2138 skb_walk_frags(skb, frag_iter) { 2139 int end; 2140 2141 WARN_ON(start > offset + len); 2142 2143 end = start + frag_iter->len; 2144 if ((copy = end - offset) > 0) { 2145 __wsum csum2; 2146 if (copy > len) 2147 copy = len; 2148 csum2 = __skb_checksum(frag_iter, offset - start, 2149 copy, 0, ops); 2150 csum = ops->combine(csum, csum2, pos, copy); 2151 if ((len -= copy) == 0) 2152 return csum; 2153 offset += copy; 2154 pos += copy; 2155 } 2156 start = end; 2157 } 2158 BUG_ON(len); 2159 2160 return csum; 2161 } 2162 EXPORT_SYMBOL(__skb_checksum); 2163 2164 __wsum skb_checksum(const struct sk_buff *skb, int offset, 2165 int len, __wsum csum) 2166 { 2167 const struct skb_checksum_ops ops = { 2168 .update = csum_partial_ext, 2169 .combine = csum_block_add_ext, 2170 }; 2171 2172 return __skb_checksum(skb, offset, len, csum, &ops); 2173 } 2174 EXPORT_SYMBOL(skb_checksum); 2175 2176 /* Both of above in one bottle. */ 2177 2178 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 2179 u8 *to, int len, __wsum csum) 2180 { 2181 int start = skb_headlen(skb); 2182 int i, copy = start - offset; 2183 struct sk_buff *frag_iter; 2184 int pos = 0; 2185 2186 /* Copy header. 
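 * (copy the linear part first, folding its checksum into csum as we go)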
*/ 2187 if (copy > 0) { 2188 if (copy > len) 2189 copy = len; 2190 csum = csum_partial_copy_nocheck(skb->data + offset, to, 2191 copy, csum); 2192 if ((len -= copy) == 0) 2193 return csum; 2194 offset += copy; 2195 to += copy; 2196 pos = copy; 2197 } 2198 2199 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2200 int end; 2201 2202 WARN_ON(start > offset + len); 2203 2204 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2205 if ((copy = end - offset) > 0) { 2206 __wsum csum2; 2207 u8 *vaddr; 2208 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2209 2210 if (copy > len) 2211 copy = len; 2212 vaddr = kmap_atomic(skb_frag_page(frag)); 2213 csum2 = csum_partial_copy_nocheck(vaddr + 2214 frag->page_offset + 2215 offset - start, to, 2216 copy, 0); 2217 kunmap_atomic(vaddr); 2218 csum = csum_block_add(csum, csum2, pos); 2219 if (!(len -= copy)) 2220 return csum; 2221 offset += copy; 2222 to += copy; 2223 pos += copy; 2224 } 2225 start = end; 2226 } 2227 2228 skb_walk_frags(skb, frag_iter) { 2229 __wsum csum2; 2230 int end; 2231 2232 WARN_ON(start > offset + len); 2233 2234 end = start + frag_iter->len; 2235 if ((copy = end - offset) > 0) { 2236 if (copy > len) 2237 copy = len; 2238 csum2 = skb_copy_and_csum_bits(frag_iter, 2239 offset - start, 2240 to, copy, 0); 2241 csum = csum_block_add(csum, csum2, pos); 2242 if ((len -= copy) == 0) 2243 return csum; 2244 offset += copy; 2245 to += copy; 2246 pos += copy; 2247 } 2248 start = end; 2249 } 2250 BUG_ON(len); 2251 return csum; 2252 } 2253 EXPORT_SYMBOL(skb_copy_and_csum_bits); 2254 2255 static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) 2256 { 2257 net_warn_ratelimited( 2258 "%s: attempt to compute crc32c without libcrc32c.ko\n", 2259 __func__); 2260 return 0; 2261 } 2262 2263 static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, 2264 int offset, int len) 2265 { 2266 net_warn_ratelimited( 2267 "%s: attempt to compute crc32c without libcrc32c.ko\n", 2268 __func__); 2269 return 0; 2270 } 2271 2272 static const struct skb_checksum_ops default_crc32c_ops = { 2273 .update = warn_crc32c_csum_update, 2274 .combine = warn_crc32c_csum_combine, 2275 }; 2276 2277 const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = 2278 &default_crc32c_ops; 2279 EXPORT_SYMBOL(crc32c_csum_stub); 2280 2281 /** 2282 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 2283 * @from: source buffer 2284 * 2285 * Calculates the amount of linear headroom needed in the 'to' skb passed 2286 * into skb_zerocopy(). 2287 */ 2288 unsigned int 2289 skb_zerocopy_headlen(const struct sk_buff *from) 2290 { 2291 unsigned int hlen = 0; 2292 2293 if (!from->head_frag || 2294 skb_headlen(from) < L1_CACHE_BYTES || 2295 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 2296 hlen = skb_headlen(from); 2297 2298 if (skb_has_frag_list(from)) 2299 hlen = from->len; 2300 2301 return hlen; 2302 } 2303 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 2304 2305 /** 2306 * skb_zerocopy - Zero copy skb to skb 2307 * @to: destination buffer 2308 * @from: source buffer 2309 * @len: number of bytes to copy from source buffer 2310 * @hlen: size of linear headroom in destination buffer 2311 * 2312 * Copies up to `len` bytes from `from` to `to` by creating references 2313 * to the frags in the source buffer. 2314 * 2315 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 2316 * headroom in the `to` buffer. 
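 *
 * A minimal usage sketch (hypothetical caller, not taken from this file),
 * assuming @from is a complete packet that should be mirrored into a fresh
 * buffer; error handling is reduced to freeing the partially built skb:
 *
 *	unsigned int hlen = skb_zerocopy_headlen(from);
 *	struct sk_buff *to = alloc_skb(hlen, GFP_ATOMIC);
 *
 *	if (to && skb_zerocopy(to, from, from->len, hlen) < 0)
 *		kfree_skb(to);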
2317 * 2318 * Return value: 2319 * 0: everything is OK 2320 * -ENOMEM: couldn't orphan frags of @from due to lack of memory 2321 * -EFAULT: skb_copy_bits() found some problem with skb geometry 2322 */ 2323 int 2324 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 2325 { 2326 int i, j = 0; 2327 int plen = 0; /* length of skb->head fragment */ 2328 int ret; 2329 struct page *page; 2330 unsigned int offset; 2331 2332 BUG_ON(!from->head_frag && !hlen); 2333 2334 /* dont bother with small payloads */ 2335 if (len <= skb_tailroom(to)) 2336 return skb_copy_bits(from, 0, skb_put(to, len), len); 2337 2338 if (hlen) { 2339 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 2340 if (unlikely(ret)) 2341 return ret; 2342 len -= hlen; 2343 } else { 2344 plen = min_t(int, skb_headlen(from), len); 2345 if (plen) { 2346 page = virt_to_head_page(from->head); 2347 offset = from->data - (unsigned char *)page_address(page); 2348 __skb_fill_page_desc(to, 0, page, offset, plen); 2349 get_page(page); 2350 j = 1; 2351 len -= plen; 2352 } 2353 } 2354 2355 to->truesize += len + plen; 2356 to->len += len + plen; 2357 to->data_len += len + plen; 2358 2359 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 2360 skb_tx_error(from); 2361 return -ENOMEM; 2362 } 2363 2364 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 2365 if (!len) 2366 break; 2367 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 2368 skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len); 2369 len -= skb_shinfo(to)->frags[j].size; 2370 skb_frag_ref(to, j); 2371 j++; 2372 } 2373 skb_shinfo(to)->nr_frags = j; 2374 2375 return 0; 2376 } 2377 EXPORT_SYMBOL_GPL(skb_zerocopy); 2378 2379 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 2380 { 2381 __wsum csum; 2382 long csstart; 2383 2384 if (skb->ip_summed == CHECKSUM_PARTIAL) 2385 csstart = skb_checksum_start_offset(skb); 2386 else 2387 csstart = skb_headlen(skb); 2388 2389 BUG_ON(csstart > skb_headlen(skb)); 2390 2391 skb_copy_from_linear_data(skb, to, csstart); 2392 2393 csum = 0; 2394 if (csstart != skb->len) 2395 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 2396 skb->len - csstart, 0); 2397 2398 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2399 long csstuff = csstart + skb->csum_offset; 2400 2401 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 2402 } 2403 } 2404 EXPORT_SYMBOL(skb_copy_and_csum_dev); 2405 2406 /** 2407 * skb_dequeue - remove from the head of the queue 2408 * @list: list to dequeue from 2409 * 2410 * Remove the head of the list. The list lock is taken so the function 2411 * may be used safely with other locking list functions. The head item is 2412 * returned or %NULL if the list is empty. 2413 */ 2414 2415 struct sk_buff *skb_dequeue(struct sk_buff_head *list) 2416 { 2417 unsigned long flags; 2418 struct sk_buff *result; 2419 2420 spin_lock_irqsave(&list->lock, flags); 2421 result = __skb_dequeue(list); 2422 spin_unlock_irqrestore(&list->lock, flags); 2423 return result; 2424 } 2425 EXPORT_SYMBOL(skb_dequeue); 2426 2427 /** 2428 * skb_dequeue_tail - remove from the tail of the queue 2429 * @list: list to dequeue from 2430 * 2431 * Remove the tail of the list. The list lock is taken so the function 2432 * may be used safely with other locking list functions. The tail item is 2433 * returned or %NULL if the list is empty. 
2434 */ 2435 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 2436 { 2437 unsigned long flags; 2438 struct sk_buff *result; 2439 2440 spin_lock_irqsave(&list->lock, flags); 2441 result = __skb_dequeue_tail(list); 2442 spin_unlock_irqrestore(&list->lock, flags); 2443 return result; 2444 } 2445 EXPORT_SYMBOL(skb_dequeue_tail); 2446 2447 /** 2448 * skb_queue_purge - empty a list 2449 * @list: list to empty 2450 * 2451 * Delete all buffers on an &sk_buff list. Each buffer is removed from 2452 * the list and one reference dropped. This function takes the list 2453 * lock and is atomic with respect to other list locking functions. 2454 */ 2455 void skb_queue_purge(struct sk_buff_head *list) 2456 { 2457 struct sk_buff *skb; 2458 while ((skb = skb_dequeue(list)) != NULL) 2459 kfree_skb(skb); 2460 } 2461 EXPORT_SYMBOL(skb_queue_purge); 2462 2463 /** 2464 * skb_rbtree_purge - empty a skb rbtree 2465 * @root: root of the rbtree to empty 2466 * 2467 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from 2468 * the list and one reference dropped. This function does not take 2469 * any lock. Synchronization should be handled by the caller (e.g., TCP 2470 * out-of-order queue is protected by the socket lock). 2471 */ 2472 void skb_rbtree_purge(struct rb_root *root) 2473 { 2474 struct sk_buff *skb, *next; 2475 2476 rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode) 2477 kfree_skb(skb); 2478 2479 *root = RB_ROOT; 2480 } 2481 2482 /** 2483 * skb_queue_head - queue a buffer at the list head 2484 * @list: list to use 2485 * @newsk: buffer to queue 2486 * 2487 * Queue a buffer at the start of the list. This function takes the 2488 * list lock and can be used safely with other locking &sk_buff functions 2489 * safely. 2490 * 2491 * A buffer cannot be placed on two lists at the same time. 2492 */ 2493 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 2494 { 2495 unsigned long flags; 2496 2497 spin_lock_irqsave(&list->lock, flags); 2498 __skb_queue_head(list, newsk); 2499 spin_unlock_irqrestore(&list->lock, flags); 2500 } 2501 EXPORT_SYMBOL(skb_queue_head); 2502 2503 /** 2504 * skb_queue_tail - queue a buffer at the list tail 2505 * @list: list to use 2506 * @newsk: buffer to queue 2507 * 2508 * Queue a buffer at the tail of the list. This function takes the 2509 * list lock and can be used safely with other locking &sk_buff functions 2510 * safely. 2511 * 2512 * A buffer cannot be placed on two lists at the same time. 2513 */ 2514 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 2515 { 2516 unsigned long flags; 2517 2518 spin_lock_irqsave(&list->lock, flags); 2519 __skb_queue_tail(list, newsk); 2520 spin_unlock_irqrestore(&list->lock, flags); 2521 } 2522 EXPORT_SYMBOL(skb_queue_tail); 2523 2524 /** 2525 * skb_unlink - remove a buffer from a list 2526 * @skb: buffer to remove 2527 * @list: list to use 2528 * 2529 * Remove a packet from a list. The list locks are taken and this 2530 * function is atomic with respect to other list locked calls 2531 * 2532 * You must know what list the SKB is on. 
2533 */ 2534 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 2535 { 2536 unsigned long flags; 2537 2538 spin_lock_irqsave(&list->lock, flags); 2539 __skb_unlink(skb, list); 2540 spin_unlock_irqrestore(&list->lock, flags); 2541 } 2542 EXPORT_SYMBOL(skb_unlink); 2543 2544 /** 2545 * skb_append - append a buffer 2546 * @old: buffer to insert after 2547 * @newsk: buffer to insert 2548 * @list: list to use 2549 * 2550 * Place a packet after a given packet in a list. The list locks are taken 2551 * and this function is atomic with respect to other list locked calls. 2552 * A buffer cannot be placed on two lists at the same time. 2553 */ 2554 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 2555 { 2556 unsigned long flags; 2557 2558 spin_lock_irqsave(&list->lock, flags); 2559 __skb_queue_after(list, old, newsk); 2560 spin_unlock_irqrestore(&list->lock, flags); 2561 } 2562 EXPORT_SYMBOL(skb_append); 2563 2564 /** 2565 * skb_insert - insert a buffer 2566 * @old: buffer to insert before 2567 * @newsk: buffer to insert 2568 * @list: list to use 2569 * 2570 * Place a packet before a given packet in a list. The list locks are 2571 * taken and this function is atomic with respect to other list locked 2572 * calls. 2573 * 2574 * A buffer cannot be placed on two lists at the same time. 2575 */ 2576 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 2577 { 2578 unsigned long flags; 2579 2580 spin_lock_irqsave(&list->lock, flags); 2581 __skb_insert(newsk, old->prev, old, list); 2582 spin_unlock_irqrestore(&list->lock, flags); 2583 } 2584 EXPORT_SYMBOL(skb_insert); 2585 2586 static inline void skb_split_inside_header(struct sk_buff *skb, 2587 struct sk_buff* skb1, 2588 const u32 len, const int pos) 2589 { 2590 int i; 2591 2592 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 2593 pos - len); 2594 /* And move data appendix as is. */ 2595 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2596 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 2597 2598 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 2599 skb_shinfo(skb)->nr_frags = 0; 2600 skb1->data_len = skb->data_len; 2601 skb1->len += skb1->data_len; 2602 skb->data_len = 0; 2603 skb->len = len; 2604 skb_set_tail_pointer(skb, len); 2605 } 2606 2607 static inline void skb_split_no_header(struct sk_buff *skb, 2608 struct sk_buff* skb1, 2609 const u32 len, int pos) 2610 { 2611 int i, k = 0; 2612 const int nfrags = skb_shinfo(skb)->nr_frags; 2613 2614 skb_shinfo(skb)->nr_frags = 0; 2615 skb1->len = skb1->data_len = skb->len - len; 2616 skb->len = len; 2617 skb->data_len = len - pos; 2618 2619 for (i = 0; i < nfrags; i++) { 2620 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2621 2622 if (pos + size > len) { 2623 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 2624 2625 if (pos < len) { 2626 /* Split frag. 2627 * We have two variants in this case: 2628 * 1. Move all the frag to the second 2629 * part, if it is possible. F.e. 2630 * this approach is mandatory for TUX, 2631 * where splitting is expensive. 2632 * 2. Split is accurately. We make this. 
2633 */ 2634 skb_frag_ref(skb, i); 2635 skb_shinfo(skb1)->frags[0].page_offset += len - pos; 2636 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 2637 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 2638 skb_shinfo(skb)->nr_frags++; 2639 } 2640 k++; 2641 } else 2642 skb_shinfo(skb)->nr_frags++; 2643 pos += size; 2644 } 2645 skb_shinfo(skb1)->nr_frags = k; 2646 } 2647 2648 /** 2649 * skb_split - Split fragmented skb to two parts at length len. 2650 * @skb: the buffer to split 2651 * @skb1: the buffer to receive the second part 2652 * @len: new length for skb 2653 */ 2654 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 2655 { 2656 int pos = skb_headlen(skb); 2657 2658 skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags & 2659 SKBTX_SHARED_FRAG; 2660 if (len < pos) /* Split line is inside header. */ 2661 skb_split_inside_header(skb, skb1, len, pos); 2662 else /* Second chunk has no header, nothing to copy. */ 2663 skb_split_no_header(skb, skb1, len, pos); 2664 } 2665 EXPORT_SYMBOL(skb_split); 2666 2667 /* Shifting from/to a cloned skb is a no-go. 2668 * 2669 * Caller cannot keep skb_shinfo related pointers past calling here! 2670 */ 2671 static int skb_prepare_for_shift(struct sk_buff *skb) 2672 { 2673 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2674 } 2675 2676 /** 2677 * skb_shift - Shifts paged data partially from skb to another 2678 * @tgt: buffer into which tail data gets added 2679 * @skb: buffer from which the paged data comes from 2680 * @shiftlen: shift up to this many bytes 2681 * 2682 * Attempts to shift up to shiftlen worth of bytes, which may be less than 2683 * the length of the skb, from skb to tgt. Returns number bytes shifted. 2684 * It's up to caller to free skb if everything was shifted. 2685 * 2686 * If @tgt runs out of frags, the whole operation is aborted. 2687 * 2688 * Skb cannot include anything else but paged data while tgt is allowed 2689 * to have non-paged data as well. 2690 * 2691 * TODO: full sized shift could be optimized but that would need 2692 * specialized skb free'er to handle frags without up-to-date nr_frags. 2693 */ 2694 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 2695 { 2696 int from, to, merge, todo; 2697 struct skb_frag_struct *fragfrom, *fragto; 2698 2699 BUG_ON(shiftlen > skb->len); 2700 2701 if (skb_headlen(skb)) 2702 return 0; 2703 2704 todo = shiftlen; 2705 from = 0; 2706 to = skb_shinfo(tgt)->nr_frags; 2707 fragfrom = &skb_shinfo(skb)->frags[from]; 2708 2709 /* Actual merge is delayed until the point when we know we can 2710 * commit all, so that we don't have to undo partial changes 2711 */ 2712 if (!to || 2713 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 2714 fragfrom->page_offset)) { 2715 merge = -1; 2716 } else { 2717 merge = to - 1; 2718 2719 todo -= skb_frag_size(fragfrom); 2720 if (todo < 0) { 2721 if (skb_prepare_for_shift(skb) || 2722 skb_prepare_for_shift(tgt)) 2723 return 0; 2724 2725 /* All previous frag pointers might be stale! 
*/ 2726 fragfrom = &skb_shinfo(skb)->frags[from]; 2727 fragto = &skb_shinfo(tgt)->frags[merge]; 2728 2729 skb_frag_size_add(fragto, shiftlen); 2730 skb_frag_size_sub(fragfrom, shiftlen); 2731 fragfrom->page_offset += shiftlen; 2732 2733 goto onlymerged; 2734 } 2735 2736 from++; 2737 } 2738 2739 /* Skip full, not-fitting skb to avoid expensive operations */ 2740 if ((shiftlen == skb->len) && 2741 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2742 return 0; 2743 2744 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2745 return 0; 2746 2747 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2748 if (to == MAX_SKB_FRAGS) 2749 return 0; 2750 2751 fragfrom = &skb_shinfo(skb)->frags[from]; 2752 fragto = &skb_shinfo(tgt)->frags[to]; 2753 2754 if (todo >= skb_frag_size(fragfrom)) { 2755 *fragto = *fragfrom; 2756 todo -= skb_frag_size(fragfrom); 2757 from++; 2758 to++; 2759 2760 } else { 2761 __skb_frag_ref(fragfrom); 2762 fragto->page = fragfrom->page; 2763 fragto->page_offset = fragfrom->page_offset; 2764 skb_frag_size_set(fragto, todo); 2765 2766 fragfrom->page_offset += todo; 2767 skb_frag_size_sub(fragfrom, todo); 2768 todo = 0; 2769 2770 to++; 2771 break; 2772 } 2773 } 2774 2775 /* Ready to "commit" this state change to tgt */ 2776 skb_shinfo(tgt)->nr_frags = to; 2777 2778 if (merge >= 0) { 2779 fragfrom = &skb_shinfo(skb)->frags[0]; 2780 fragto = &skb_shinfo(tgt)->frags[merge]; 2781 2782 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 2783 __skb_frag_unref(fragfrom); 2784 } 2785 2786 /* Reposition in the original skb */ 2787 to = 0; 2788 while (from < skb_shinfo(skb)->nr_frags) 2789 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2790 skb_shinfo(skb)->nr_frags = to; 2791 2792 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2793 2794 onlymerged: 2795 /* Most likely the tgt won't ever need its checksum anymore, skb on 2796 * the other hand might need it if it needs to be resent 2797 */ 2798 tgt->ip_summed = CHECKSUM_PARTIAL; 2799 skb->ip_summed = CHECKSUM_PARTIAL; 2800 2801 /* Yak, is it really working this way? Some helper please? */ 2802 skb->len -= shiftlen; 2803 skb->data_len -= shiftlen; 2804 skb->truesize -= shiftlen; 2805 tgt->len += shiftlen; 2806 tgt->data_len += shiftlen; 2807 tgt->truesize += shiftlen; 2808 2809 return shiftlen; 2810 } 2811 2812 /** 2813 * skb_prepare_seq_read - Prepare a sequential read of skb data 2814 * @skb: the buffer to read 2815 * @from: lower offset of data to be read 2816 * @to: upper offset of data to be read 2817 * @st: state variable 2818 * 2819 * Initializes the specified state variable. Must be called before 2820 * invoking skb_seq_read() for the first time. 2821 */ 2822 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 2823 unsigned int to, struct skb_seq_state *st) 2824 { 2825 st->lower_offset = from; 2826 st->upper_offset = to; 2827 st->root_skb = st->cur_skb = skb; 2828 st->frag_idx = st->stepped_offset = 0; 2829 st->frag_data = NULL; 2830 } 2831 EXPORT_SYMBOL(skb_prepare_seq_read); 2832 2833 /** 2834 * skb_seq_read - Sequentially read skb data 2835 * @consumed: number of bytes consumed by the caller so far 2836 * @data: destination pointer for data to be returned 2837 * @st: state variable 2838 * 2839 * Reads a block of skb data at @consumed relative to the 2840 * lower offset specified to skb_prepare_seq_read(). Assigns 2841 * the head of the data block to @data and returns the length 2842 * of the block or 0 if the end of the skb data or the upper 2843 * offset has been reached. 
2844 * 2845 * The caller is not required to consume all of the data 2846 * returned, i.e. @consumed is typically set to the number 2847 * of bytes already consumed and the next call to 2848 * skb_seq_read() will return the remaining part of the block. 2849 * 2850 * Note 1: The size of each block of data returned can be arbitrary, 2851 * this limitation is the cost for zerocopy sequential 2852 * reads of potentially non linear data. 2853 * 2854 * Note 2: Fragment lists within fragments are not implemented 2855 * at the moment, state->root_skb could be replaced with 2856 * a stack for this purpose. 2857 */ 2858 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 2859 struct skb_seq_state *st) 2860 { 2861 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 2862 skb_frag_t *frag; 2863 2864 if (unlikely(abs_offset >= st->upper_offset)) { 2865 if (st->frag_data) { 2866 kunmap_atomic(st->frag_data); 2867 st->frag_data = NULL; 2868 } 2869 return 0; 2870 } 2871 2872 next_skb: 2873 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 2874 2875 if (abs_offset < block_limit && !st->frag_data) { 2876 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 2877 return block_limit - abs_offset; 2878 } 2879 2880 if (st->frag_idx == 0 && !st->frag_data) 2881 st->stepped_offset += skb_headlen(st->cur_skb); 2882 2883 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2884 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 2885 block_limit = skb_frag_size(frag) + st->stepped_offset; 2886 2887 if (abs_offset < block_limit) { 2888 if (!st->frag_data) 2889 st->frag_data = kmap_atomic(skb_frag_page(frag)); 2890 2891 *data = (u8 *) st->frag_data + frag->page_offset + 2892 (abs_offset - st->stepped_offset); 2893 2894 return block_limit - abs_offset; 2895 } 2896 2897 if (st->frag_data) { 2898 kunmap_atomic(st->frag_data); 2899 st->frag_data = NULL; 2900 } 2901 2902 st->frag_idx++; 2903 st->stepped_offset += skb_frag_size(frag); 2904 } 2905 2906 if (st->frag_data) { 2907 kunmap_atomic(st->frag_data); 2908 st->frag_data = NULL; 2909 } 2910 2911 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 2912 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 2913 st->frag_idx = 0; 2914 goto next_skb; 2915 } else if (st->cur_skb->next) { 2916 st->cur_skb = st->cur_skb->next; 2917 st->frag_idx = 0; 2918 goto next_skb; 2919 } 2920 2921 return 0; 2922 } 2923 EXPORT_SYMBOL(skb_seq_read); 2924 2925 /** 2926 * skb_abort_seq_read - Abort a sequential read of skb data 2927 * @st: state variable 2928 * 2929 * Must be called if skb_seq_read() was not called until it 2930 * returned 0. 
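 *
 * Typical read loop (a sketch, not copied from a real caller; handle()
 * stands in for whatever the caller does with the returned bytes):
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int consumed = 0, len;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		handle(data, len);
 *		consumed += len;
 *	}
 *
 * If the loop is abandoned before skb_seq_read() returns 0, call
 * skb_abort_seq_read(&st) so that any mapped fragment is released.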
2931 */ 2932 void skb_abort_seq_read(struct skb_seq_state *st) 2933 { 2934 if (st->frag_data) 2935 kunmap_atomic(st->frag_data); 2936 } 2937 EXPORT_SYMBOL(skb_abort_seq_read); 2938 2939 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 2940 2941 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 2942 struct ts_config *conf, 2943 struct ts_state *state) 2944 { 2945 return skb_seq_read(offset, text, TS_SKB_CB(state)); 2946 } 2947 2948 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 2949 { 2950 skb_abort_seq_read(TS_SKB_CB(state)); 2951 } 2952 2953 /** 2954 * skb_find_text - Find a text pattern in skb data 2955 * @skb: the buffer to look in 2956 * @from: search offset 2957 * @to: search limit 2958 * @config: textsearch configuration 2959 * 2960 * Finds a pattern in the skb data according to the specified 2961 * textsearch configuration. Use textsearch_next() to retrieve 2962 * subsequent occurrences of the pattern. Returns the offset 2963 * to the first occurrence or UINT_MAX if no match was found. 2964 */ 2965 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 2966 unsigned int to, struct ts_config *config) 2967 { 2968 struct ts_state state; 2969 unsigned int ret; 2970 2971 config->get_next_block = skb_ts_get_next_block; 2972 config->finish = skb_ts_finish; 2973 2974 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 2975 2976 ret = textsearch_find(config, &state); 2977 return (ret <= to - from ? ret : UINT_MAX); 2978 } 2979 EXPORT_SYMBOL(skb_find_text); 2980 2981 /** 2982 * skb_append_datato_frags - append the user data to a skb 2983 * @sk: sock structure 2984 * @skb: skb structure to be appended with user data. 2985 * @getfrag: call back function to be used for getting the user data 2986 * @from: pointer to user message iov 2987 * @length: length of the iov message 2988 * 2989 * Description: This procedure append the user data in the fragment part 2990 * of the skb if any page alloc fails user this procedure returns -ENOMEM 2991 */ 2992 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 2993 int (*getfrag)(void *from, char *to, int offset, 2994 int len, int odd, struct sk_buff *skb), 2995 void *from, int length) 2996 { 2997 int frg_cnt = skb_shinfo(skb)->nr_frags; 2998 int copy; 2999 int offset = 0; 3000 int ret; 3001 struct page_frag *pfrag = ¤t->task_frag; 3002 3003 do { 3004 /* Return error if we don't have space for new frag */ 3005 if (frg_cnt >= MAX_SKB_FRAGS) 3006 return -EMSGSIZE; 3007 3008 if (!sk_page_frag_refill(sk, pfrag)) 3009 return -ENOMEM; 3010 3011 /* copy the user data to page */ 3012 copy = min_t(int, length, pfrag->size - pfrag->offset); 3013 3014 ret = getfrag(from, page_address(pfrag->page) + pfrag->offset, 3015 offset, copy, 0, skb); 3016 if (ret < 0) 3017 return -EFAULT; 3018 3019 /* copy was successful so update the size parameters */ 3020 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset, 3021 copy); 3022 frg_cnt++; 3023 pfrag->offset += copy; 3024 get_page(pfrag->page); 3025 3026 skb->truesize += copy; 3027 refcount_add(copy, &sk->sk_wmem_alloc); 3028 skb->len += copy; 3029 skb->data_len += copy; 3030 offset += copy; 3031 length -= copy; 3032 3033 } while (length > 0); 3034 3035 return 0; 3036 } 3037 EXPORT_SYMBOL(skb_append_datato_frags); 3038 3039 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 3040 int offset, size_t size) 3041 { 3042 int i = skb_shinfo(skb)->nr_frags; 3043 3044 if (skb_can_coalesce(skb, i, page, offset)) { 
3045 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); 3046 } else if (i < MAX_SKB_FRAGS) { 3047 get_page(page); 3048 skb_fill_page_desc(skb, i, page, offset, size); 3049 } else { 3050 return -EMSGSIZE; 3051 } 3052 3053 return 0; 3054 } 3055 EXPORT_SYMBOL_GPL(skb_append_pagefrags); 3056 3057 /** 3058 * skb_pull_rcsum - pull skb and update receive checksum 3059 * @skb: buffer to update 3060 * @len: length of data pulled 3061 * 3062 * This function performs an skb_pull on the packet and updates 3063 * the CHECKSUM_COMPLETE checksum. It should be used on 3064 * receive path processing instead of skb_pull unless you know 3065 * that the checksum difference is zero (e.g., a valid IP header) 3066 * or you are setting ip_summed to CHECKSUM_NONE. 3067 */ 3068 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 3069 { 3070 unsigned char *data = skb->data; 3071 3072 BUG_ON(len > skb->len); 3073 __skb_pull(skb, len); 3074 skb_postpull_rcsum(skb, data, len); 3075 return skb->data; 3076 } 3077 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 3078 3079 /** 3080 * skb_segment - Perform protocol segmentation on skb. 3081 * @head_skb: buffer to segment 3082 * @features: features for the output path (see dev->features) 3083 * 3084 * This function performs segmentation on the given skb. It returns 3085 * a pointer to the first in a list of new skbs for the segments. 3086 * In case of error it returns ERR_PTR(err). 3087 */ 3088 struct sk_buff *skb_segment(struct sk_buff *head_skb, 3089 netdev_features_t features) 3090 { 3091 struct sk_buff *segs = NULL; 3092 struct sk_buff *tail = NULL; 3093 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 3094 skb_frag_t *frag = skb_shinfo(head_skb)->frags; 3095 unsigned int mss = skb_shinfo(head_skb)->gso_size; 3096 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 3097 struct sk_buff *frag_skb = head_skb; 3098 unsigned int offset = doffset; 3099 unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 3100 unsigned int partial_segs = 0; 3101 unsigned int headroom; 3102 unsigned int len = head_skb->len; 3103 __be16 proto; 3104 bool csum, sg; 3105 int nfrags = skb_shinfo(head_skb)->nr_frags; 3106 int err = -ENOMEM; 3107 int i = 0; 3108 int pos; 3109 int dummy; 3110 3111 __skb_push(head_skb, doffset); 3112 proto = skb_network_protocol(head_skb, &dummy); 3113 if (unlikely(!proto)) 3114 return ERR_PTR(-EINVAL); 3115 3116 sg = !!(features & NETIF_F_SG); 3117 csum = !!can_checksum_protocol(features, proto); 3118 3119 if (sg && csum && (mss != GSO_BY_FRAGS)) { 3120 if (!(features & NETIF_F_GSO_PARTIAL)) { 3121 struct sk_buff *iter; 3122 unsigned int frag_len; 3123 3124 if (!list_skb || 3125 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 3126 goto normal; 3127 3128 /* If we get here then all the required 3129 * GSO features except frag_list are supported. 3130 * Try to split the SKB to multiple GSO SKBs 3131 * with no frag_list. 3132 * Currently we can do that only when the buffers don't 3133 * have a linear part and all the buffers except 3134 * the last are of the same length. 3135 */ 3136 frag_len = list_skb->len; 3137 skb_walk_frags(head_skb, iter) { 3138 if (frag_len != iter->len && iter->next) 3139 goto normal; 3140 if (skb_headlen(iter) && !iter->head_frag) 3141 goto normal; 3142 3143 len -= iter->len; 3144 } 3145 3146 if (len != frag_len) 3147 goto normal; 3148 } 3149 3150 /* GSO partial only requires that we trim off any excess that 3151 * doesn't fit into an MSS sized block, so take care of that 3152 * now. 
3153 */ 3154 partial_segs = len / mss; 3155 if (partial_segs > 1) 3156 mss *= partial_segs; 3157 else 3158 partial_segs = 0; 3159 } 3160 3161 normal: 3162 headroom = skb_headroom(head_skb); 3163 pos = skb_headlen(head_skb); 3164 3165 do { 3166 struct sk_buff *nskb; 3167 skb_frag_t *nskb_frag; 3168 int hsize; 3169 int size; 3170 3171 if (unlikely(mss == GSO_BY_FRAGS)) { 3172 len = list_skb->len; 3173 } else { 3174 len = head_skb->len - offset; 3175 if (len > mss) 3176 len = mss; 3177 } 3178 3179 hsize = skb_headlen(head_skb) - offset; 3180 if (hsize < 0) 3181 hsize = 0; 3182 if (hsize > len || !sg) 3183 hsize = len; 3184 3185 if (!hsize && i >= nfrags && skb_headlen(list_skb) && 3186 (skb_headlen(list_skb) == len || sg)) { 3187 BUG_ON(skb_headlen(list_skb) > len); 3188 3189 i = 0; 3190 nfrags = skb_shinfo(list_skb)->nr_frags; 3191 frag = skb_shinfo(list_skb)->frags; 3192 frag_skb = list_skb; 3193 pos += skb_headlen(list_skb); 3194 3195 while (pos < offset + len) { 3196 BUG_ON(i >= nfrags); 3197 3198 size = skb_frag_size(frag); 3199 if (pos + size > offset + len) 3200 break; 3201 3202 i++; 3203 pos += size; 3204 frag++; 3205 } 3206 3207 nskb = skb_clone(list_skb, GFP_ATOMIC); 3208 list_skb = list_skb->next; 3209 3210 if (unlikely(!nskb)) 3211 goto err; 3212 3213 if (unlikely(pskb_trim(nskb, len))) { 3214 kfree_skb(nskb); 3215 goto err; 3216 } 3217 3218 hsize = skb_end_offset(nskb); 3219 if (skb_cow_head(nskb, doffset + headroom)) { 3220 kfree_skb(nskb); 3221 goto err; 3222 } 3223 3224 nskb->truesize += skb_end_offset(nskb) - hsize; 3225 skb_release_head_state(nskb); 3226 __skb_push(nskb, doffset); 3227 } else { 3228 nskb = __alloc_skb(hsize + doffset + headroom, 3229 GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 3230 NUMA_NO_NODE); 3231 3232 if (unlikely(!nskb)) 3233 goto err; 3234 3235 skb_reserve(nskb, headroom); 3236 __skb_put(nskb, doffset); 3237 } 3238 3239 if (segs) 3240 tail->next = nskb; 3241 else 3242 segs = nskb; 3243 tail = nskb; 3244 3245 __copy_skb_header(nskb, head_skb); 3246 3247 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 3248 skb_reset_mac_len(nskb); 3249 3250 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 3251 nskb->data - tnl_hlen, 3252 doffset + tnl_hlen); 3253 3254 if (nskb->len == len + doffset) 3255 goto perform_csum_check; 3256 3257 if (!sg) { 3258 if (!nskb->remcsum_offload) 3259 nskb->ip_summed = CHECKSUM_NONE; 3260 SKB_GSO_CB(nskb)->csum = 3261 skb_copy_and_csum_bits(head_skb, offset, 3262 skb_put(nskb, len), 3263 len, 0); 3264 SKB_GSO_CB(nskb)->csum_start = 3265 skb_headroom(nskb) + doffset; 3266 continue; 3267 } 3268 3269 nskb_frag = skb_shinfo(nskb)->frags; 3270 3271 skb_copy_from_linear_data_offset(head_skb, offset, 3272 skb_put(nskb, hsize), hsize); 3273 3274 skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags & 3275 SKBTX_SHARED_FRAG; 3276 3277 while (pos < offset + len) { 3278 if (i >= nfrags) { 3279 BUG_ON(skb_headlen(list_skb)); 3280 3281 i = 0; 3282 nfrags = skb_shinfo(list_skb)->nr_frags; 3283 frag = skb_shinfo(list_skb)->frags; 3284 frag_skb = list_skb; 3285 3286 BUG_ON(!nfrags); 3287 3288 list_skb = list_skb->next; 3289 } 3290 3291 if (unlikely(skb_shinfo(nskb)->nr_frags >= 3292 MAX_SKB_FRAGS)) { 3293 net_warn_ratelimited( 3294 "skb_segment: too many frags: %u %u\n", 3295 pos, mss); 3296 goto err; 3297 } 3298 3299 if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC))) 3300 goto err; 3301 3302 *nskb_frag = *frag; 3303 __skb_frag_ref(nskb_frag); 3304 size = skb_frag_size(nskb_frag); 3305 3306 if (pos < offset) { 3307 
nskb_frag->page_offset += offset - pos; 3308 skb_frag_size_sub(nskb_frag, offset - pos); 3309 } 3310 3311 skb_shinfo(nskb)->nr_frags++; 3312 3313 if (pos + size <= offset + len) { 3314 i++; 3315 frag++; 3316 pos += size; 3317 } else { 3318 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); 3319 goto skip_fraglist; 3320 } 3321 3322 nskb_frag++; 3323 } 3324 3325 skip_fraglist: 3326 nskb->data_len = len - hsize; 3327 nskb->len += nskb->data_len; 3328 nskb->truesize += nskb->data_len; 3329 3330 perform_csum_check: 3331 if (!csum) { 3332 if (skb_has_shared_frag(nskb)) { 3333 err = __skb_linearize(nskb); 3334 if (err) 3335 goto err; 3336 } 3337 if (!nskb->remcsum_offload) 3338 nskb->ip_summed = CHECKSUM_NONE; 3339 SKB_GSO_CB(nskb)->csum = 3340 skb_checksum(nskb, doffset, 3341 nskb->len - doffset, 0); 3342 SKB_GSO_CB(nskb)->csum_start = 3343 skb_headroom(nskb) + doffset; 3344 } 3345 } while ((offset += len) < head_skb->len); 3346 3347 /* Some callers want to get the end of the list. 3348 * Put it in segs->prev to avoid walking the list. 3349 * (see validate_xmit_skb_list() for example) 3350 */ 3351 segs->prev = tail; 3352 3353 if (partial_segs) { 3354 struct sk_buff *iter; 3355 int type = skb_shinfo(head_skb)->gso_type; 3356 unsigned short gso_size = skb_shinfo(head_skb)->gso_size; 3357 3358 /* Update type to add partial and then remove dodgy if set */ 3359 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; 3360 type &= ~SKB_GSO_DODGY; 3361 3362 /* Update GSO info and prepare to start updating headers on 3363 * our way back down the stack of protocols. 3364 */ 3365 for (iter = segs; iter; iter = iter->next) { 3366 skb_shinfo(iter)->gso_size = gso_size; 3367 skb_shinfo(iter)->gso_segs = partial_segs; 3368 skb_shinfo(iter)->gso_type = type; 3369 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; 3370 } 3371 3372 if (tail->len - doffset <= gso_size) 3373 skb_shinfo(tail)->gso_size = 0; 3374 else if (tail != segs) 3375 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); 3376 } 3377 3378 /* Following permits correct backpressure, for protocols 3379 * using skb_set_owner_w(). 3380 * Idea is to tranfert ownership from head_skb to last segment. 
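 * (the swaps below move truesize, the destructor and the socket reference
 * to the tail segment, so the sender's write-memory accounting is released
 * only when the last segment is freed)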
3381 */ 3382 if (head_skb->destructor == sock_wfree) { 3383 swap(tail->truesize, head_skb->truesize); 3384 swap(tail->destructor, head_skb->destructor); 3385 swap(tail->sk, head_skb->sk); 3386 } 3387 return segs; 3388 3389 err: 3390 kfree_skb_list(segs); 3391 return ERR_PTR(err); 3392 } 3393 EXPORT_SYMBOL_GPL(skb_segment); 3394 3395 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 3396 { 3397 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); 3398 unsigned int offset = skb_gro_offset(skb); 3399 unsigned int headlen = skb_headlen(skb); 3400 unsigned int len = skb_gro_len(skb); 3401 struct sk_buff *lp, *p = *head; 3402 unsigned int delta_truesize; 3403 3404 if (unlikely(p->len + len >= 65536)) 3405 return -E2BIG; 3406 3407 lp = NAPI_GRO_CB(p)->last; 3408 pinfo = skb_shinfo(lp); 3409 3410 if (headlen <= offset) { 3411 skb_frag_t *frag; 3412 skb_frag_t *frag2; 3413 int i = skbinfo->nr_frags; 3414 int nr_frags = pinfo->nr_frags + i; 3415 3416 if (nr_frags > MAX_SKB_FRAGS) 3417 goto merge; 3418 3419 offset -= headlen; 3420 pinfo->nr_frags = nr_frags; 3421 skbinfo->nr_frags = 0; 3422 3423 frag = pinfo->frags + nr_frags; 3424 frag2 = skbinfo->frags + i; 3425 do { 3426 *--frag = *--frag2; 3427 } while (--i); 3428 3429 frag->page_offset += offset; 3430 skb_frag_size_sub(frag, offset); 3431 3432 /* all fragments truesize : remove (head size + sk_buff) */ 3433 delta_truesize = skb->truesize - 3434 SKB_TRUESIZE(skb_end_offset(skb)); 3435 3436 skb->truesize -= skb->data_len; 3437 skb->len -= skb->data_len; 3438 skb->data_len = 0; 3439 3440 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 3441 goto done; 3442 } else if (skb->head_frag) { 3443 int nr_frags = pinfo->nr_frags; 3444 skb_frag_t *frag = pinfo->frags + nr_frags; 3445 struct page *page = virt_to_head_page(skb->head); 3446 unsigned int first_size = headlen - offset; 3447 unsigned int first_offset; 3448 3449 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 3450 goto merge; 3451 3452 first_offset = skb->data - 3453 (unsigned char *)page_address(page) + 3454 offset; 3455 3456 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 3457 3458 frag->page.p = page; 3459 frag->page_offset = first_offset; 3460 skb_frag_size_set(frag, first_size); 3461 3462 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 3463 /* We dont need to clear skbinfo->nr_frags here */ 3464 3465 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 3466 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 3467 goto done; 3468 } 3469 3470 merge: 3471 delta_truesize = skb->truesize; 3472 if (offset > headlen) { 3473 unsigned int eat = offset - headlen; 3474 3475 skbinfo->frags[0].page_offset += eat; 3476 skb_frag_size_sub(&skbinfo->frags[0], eat); 3477 skb->data_len -= eat; 3478 skb->len -= eat; 3479 offset = headlen; 3480 } 3481 3482 __skb_pull(skb, offset); 3483 3484 if (NAPI_GRO_CB(p)->last == p) 3485 skb_shinfo(p)->frag_list = skb; 3486 else 3487 NAPI_GRO_CB(p)->last->next = skb; 3488 NAPI_GRO_CB(p)->last = skb; 3489 __skb_header_release(skb); 3490 lp = p; 3491 3492 done: 3493 NAPI_GRO_CB(p)->count++; 3494 p->data_len += len; 3495 p->truesize += delta_truesize; 3496 p->len += len; 3497 if (lp != p) { 3498 lp->data_len += len; 3499 lp->truesize += delta_truesize; 3500 lp->len += len; 3501 } 3502 NAPI_GRO_CB(skb)->same_flow = 1; 3503 return 0; 3504 } 3505 EXPORT_SYMBOL_GPL(skb_gro_receive); 3506 3507 void __init skb_init(void) 3508 { 3509 skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 3510 sizeof(struct sk_buff), 3511 
0, 3512 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 3513 NULL); 3514 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 3515 sizeof(struct sk_buff_fclones), 3516 0, 3517 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 3518 NULL); 3519 } 3520 3521 static int 3522 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, 3523 unsigned int recursion_level) 3524 { 3525 int start = skb_headlen(skb); 3526 int i, copy = start - offset; 3527 struct sk_buff *frag_iter; 3528 int elt = 0; 3529 3530 if (unlikely(recursion_level >= 24)) 3531 return -EMSGSIZE; 3532 3533 if (copy > 0) { 3534 if (copy > len) 3535 copy = len; 3536 sg_set_buf(sg, skb->data + offset, copy); 3537 elt++; 3538 if ((len -= copy) == 0) 3539 return elt; 3540 offset += copy; 3541 } 3542 3543 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3544 int end; 3545 3546 WARN_ON(start > offset + len); 3547 3548 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3549 if ((copy = end - offset) > 0) { 3550 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3551 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 3552 return -EMSGSIZE; 3553 3554 if (copy > len) 3555 copy = len; 3556 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 3557 frag->page_offset+offset-start); 3558 elt++; 3559 if (!(len -= copy)) 3560 return elt; 3561 offset += copy; 3562 } 3563 start = end; 3564 } 3565 3566 skb_walk_frags(skb, frag_iter) { 3567 int end, ret; 3568 3569 WARN_ON(start > offset + len); 3570 3571 end = start + frag_iter->len; 3572 if ((copy = end - offset) > 0) { 3573 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 3574 return -EMSGSIZE; 3575 3576 if (copy > len) 3577 copy = len; 3578 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, 3579 copy, recursion_level + 1); 3580 if (unlikely(ret < 0)) 3581 return ret; 3582 elt += ret; 3583 if ((len -= copy) == 0) 3584 return elt; 3585 offset += copy; 3586 } 3587 start = end; 3588 } 3589 BUG_ON(len); 3590 return elt; 3591 } 3592 3593 /** 3594 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 3595 * @skb: Socket buffer containing the buffers to be mapped 3596 * @sg: The scatter-gather list to map into 3597 * @offset: The offset into the buffer's contents to start mapping 3598 * @len: Length of buffer space to be mapped 3599 * 3600 * Fill the specified scatter-gather list with mappings/pointers into a 3601 * region of the buffer space attached to a socket buffer. Returns either 3602 * the number of scatterlist items used, or -EMSGSIZE if the contents 3603 * could not fit. 3604 */ 3605 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3606 { 3607 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); 3608 3609 if (nsg <= 0) 3610 return nsg; 3611 3612 sg_mark_end(&sg[nsg - 1]); 3613 3614 return nsg; 3615 } 3616 EXPORT_SYMBOL_GPL(skb_to_sgvec); 3617 3618 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given 3619 * sglist without mark the sg which contain last skb data as the end. 3620 * So the caller can mannipulate sg list as will when padding new data after 3621 * the first call without calling sg_unmark_end to expend sg list. 3622 * 3623 * Scenario to use skb_to_sgvec_nomark: 3624 * 1. sg_init_table 3625 * 2. skb_to_sgvec_nomark(payload1) 3626 * 3. skb_to_sgvec_nomark(payload2) 3627 * 3628 * This is equivalent to: 3629 * 1. sg_init_table 3630 * 2. skb_to_sgvec(payload1) 3631 * 3. sg_unmark_end 3632 * 4. skb_to_sgvec(payload2) 3633 * 3634 * When mapping mutilple payload conditionally, skb_to_sgvec_nomark 3635 * is more preferable. 
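 *
 * A concrete sketch of the scenario above (all names are illustrative and
 * error handling is omitted):
 *
 *	sg_init_table(sg, nents);
 *	n  = skb_to_sgvec_nomark(skb, sg, 0, hdr_len);
 *	n += skb_to_sgvec_nomark(skb, sg + n, hdr_len, skb->len - hdr_len);
 *	sg_mark_end(&sg[n - 1]);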
3636 */ 3637 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, 3638 int offset, int len) 3639 { 3640 return __skb_to_sgvec(skb, sg, offset, len, 0); 3641 } 3642 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); 3643 3644 3645 3646 /** 3647 * skb_cow_data - Check that a socket buffer's data buffers are writable 3648 * @skb: The socket buffer to check. 3649 * @tailbits: Amount of trailing space to be added 3650 * @trailer: Returned pointer to the skb where the @tailbits space begins 3651 * 3652 * Make sure that the data buffers attached to a socket buffer are 3653 * writable. If they are not, private copies are made of the data buffers 3654 * and the socket buffer is set to use these instead. 3655 * 3656 * If @tailbits is given, make sure that there is space to write @tailbits 3657 * bytes of data beyond current end of socket buffer. @trailer will be 3658 * set to point to the skb in which this space begins. 3659 * 3660 * The number of scatterlist elements required to completely map the 3661 * COW'd and extended socket buffer will be returned. 3662 */ 3663 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 3664 { 3665 int copyflag; 3666 int elt; 3667 struct sk_buff *skb1, **skb_p; 3668 3669 /* If skb is cloned or its head is paged, reallocate 3670 * head pulling out all the pages (pages are considered not writable 3671 * at the moment even if they are anonymous). 3672 */ 3673 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 3674 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) 3675 return -ENOMEM; 3676 3677 /* Easy case. Most of packets will go this way. */ 3678 if (!skb_has_frag_list(skb)) { 3679 /* A little of trouble, not enough of space for trailer. 3680 * This should not happen, when stack is tuned to generate 3681 * good frames. OK, on miss we reallocate and reserve even more 3682 * space, 128 bytes is fair. */ 3683 3684 if (skb_tailroom(skb) < tailbits && 3685 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 3686 return -ENOMEM; 3687 3688 /* Voila! */ 3689 *trailer = skb; 3690 return 1; 3691 } 3692 3693 /* Misery. We are in troubles, going to mincer fragments... */ 3694 3695 elt = 1; 3696 skb_p = &skb_shinfo(skb)->frag_list; 3697 copyflag = 0; 3698 3699 while ((skb1 = *skb_p) != NULL) { 3700 int ntail = 0; 3701 3702 /* The fragment is partially pulled by someone, 3703 * this can happen on input. Copy it and everything 3704 * after it. */ 3705 3706 if (skb_shared(skb1)) 3707 copyflag = 1; 3708 3709 /* If the skb is the last, worry about trailer. */ 3710 3711 if (skb1->next == NULL && tailbits) { 3712 if (skb_shinfo(skb1)->nr_frags || 3713 skb_has_frag_list(skb1) || 3714 skb_tailroom(skb1) < tailbits) 3715 ntail = tailbits + 128; 3716 } 3717 3718 if (copyflag || 3719 skb_cloned(skb1) || 3720 ntail || 3721 skb_shinfo(skb1)->nr_frags || 3722 skb_has_frag_list(skb1)) { 3723 struct sk_buff *skb2; 3724 3725 /* Fuck, we are miserable poor guys... */ 3726 if (ntail == 0) 3727 skb2 = skb_copy(skb1, GFP_ATOMIC); 3728 else 3729 skb2 = skb_copy_expand(skb1, 3730 skb_headroom(skb1), 3731 ntail, 3732 GFP_ATOMIC); 3733 if (unlikely(skb2 == NULL)) 3734 return -ENOMEM; 3735 3736 if (skb1->sk) 3737 skb_set_owner_w(skb2, skb1->sk); 3738 3739 /* Looking around. Are we still alive? 
3740 * OK, link new skb, drop old one */ 3741 3742 skb2->next = skb1->next; 3743 *skb_p = skb2; 3744 kfree_skb(skb1); 3745 skb1 = skb2; 3746 } 3747 elt++; 3748 *trailer = skb1; 3749 skb_p = &skb1->next; 3750 } 3751 3752 return elt; 3753 } 3754 EXPORT_SYMBOL_GPL(skb_cow_data); 3755 3756 static void sock_rmem_free(struct sk_buff *skb) 3757 { 3758 struct sock *sk = skb->sk; 3759 3760 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3761 } 3762 3763 static void skb_set_err_queue(struct sk_buff *skb) 3764 { 3765 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. 3766 * So, it is safe to (mis)use it to mark skbs on the error queue. 3767 */ 3768 skb->pkt_type = PACKET_OUTGOING; 3769 BUILD_BUG_ON(PACKET_OUTGOING == 0); 3770 } 3771 3772 /* 3773 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 3774 */ 3775 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3776 { 3777 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 3778 (unsigned int)sk->sk_rcvbuf) 3779 return -ENOMEM; 3780 3781 skb_orphan(skb); 3782 skb->sk = sk; 3783 skb->destructor = sock_rmem_free; 3784 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3785 skb_set_err_queue(skb); 3786 3787 /* before exiting rcu section, make sure dst is refcounted */ 3788 skb_dst_force(skb); 3789 3790 skb_queue_tail(&sk->sk_error_queue, skb); 3791 if (!sock_flag(sk, SOCK_DEAD)) 3792 sk->sk_data_ready(sk); 3793 return 0; 3794 } 3795 EXPORT_SYMBOL(sock_queue_err_skb); 3796 3797 static bool is_icmp_err_skb(const struct sk_buff *skb) 3798 { 3799 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || 3800 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); 3801 } 3802 3803 struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 3804 { 3805 struct sk_buff_head *q = &sk->sk_error_queue; 3806 struct sk_buff *skb, *skb_next = NULL; 3807 bool icmp_next = false; 3808 unsigned long flags; 3809 3810 spin_lock_irqsave(&q->lock, flags); 3811 skb = __skb_dequeue(q); 3812 if (skb && (skb_next = skb_peek(q))) { 3813 icmp_next = is_icmp_err_skb(skb_next); 3814 if (icmp_next) 3815 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin; 3816 } 3817 spin_unlock_irqrestore(&q->lock, flags); 3818 3819 if (is_icmp_err_skb(skb) && !icmp_next) 3820 sk->sk_err = 0; 3821 3822 if (skb_next) 3823 sk->sk_error_report(sk); 3824 3825 return skb; 3826 } 3827 EXPORT_SYMBOL(sock_dequeue_err_skb); 3828 3829 /** 3830 * skb_clone_sk - create clone of skb, and take reference to socket 3831 * @skb: the skb to clone 3832 * 3833 * This function creates a clone of a buffer that holds a reference on 3834 * sk_refcnt. Buffers created via this function are meant to be 3835 * returned using sock_queue_err_skb, or free via kfree_skb. 3836 * 3837 * When passing buffers allocated with this function to sock_queue_err_skb 3838 * it is necessary to wrap the call with sock_hold/sock_put in order to 3839 * prevent the socket from being released prior to being enqueued on 3840 * the sk_error_queue. 
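 *
 * A common transmit-timestamping pattern (sketch only; &hwtstamps is assumed
 * to have been filled in by the driver):
 *
 *	struct sk_buff *clone = skb_clone_sk(skb);
 *
 *	if (clone)
 *		skb_complete_tx_timestamp(clone, &hwtstamps);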
3841 */ 3842 struct sk_buff *skb_clone_sk(struct sk_buff *skb) 3843 { 3844 struct sock *sk = skb->sk; 3845 struct sk_buff *clone; 3846 3847 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 3848 return NULL; 3849 3850 clone = skb_clone(skb, GFP_ATOMIC); 3851 if (!clone) { 3852 sock_put(sk); 3853 return NULL; 3854 } 3855 3856 clone->sk = sk; 3857 clone->destructor = sock_efree; 3858 3859 return clone; 3860 } 3861 EXPORT_SYMBOL(skb_clone_sk); 3862 3863 static void __skb_complete_tx_timestamp(struct sk_buff *skb, 3864 struct sock *sk, 3865 int tstype, 3866 bool opt_stats) 3867 { 3868 struct sock_exterr_skb *serr; 3869 int err; 3870 3871 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); 3872 3873 serr = SKB_EXT_ERR(skb); 3874 memset(serr, 0, sizeof(*serr)); 3875 serr->ee.ee_errno = ENOMSG; 3876 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 3877 serr->ee.ee_info = tstype; 3878 serr->opt_stats = opt_stats; 3879 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; 3880 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 3881 serr->ee.ee_data = skb_shinfo(skb)->tskey; 3882 if (sk->sk_protocol == IPPROTO_TCP && 3883 sk->sk_type == SOCK_STREAM) 3884 serr->ee.ee_data -= sk->sk_tskey; 3885 } 3886 3887 err = sock_queue_err_skb(sk, skb); 3888 3889 if (err) 3890 kfree_skb(skb); 3891 } 3892 3893 static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 3894 { 3895 bool ret; 3896 3897 if (likely(sysctl_tstamp_allow_data || tsonly)) 3898 return true; 3899 3900 read_lock_bh(&sk->sk_callback_lock); 3901 ret = sk->sk_socket && sk->sk_socket->file && 3902 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 3903 read_unlock_bh(&sk->sk_callback_lock); 3904 return ret; 3905 } 3906 3907 void skb_complete_tx_timestamp(struct sk_buff *skb, 3908 struct skb_shared_hwtstamps *hwtstamps) 3909 { 3910 struct sock *sk = skb->sk; 3911 3912 if (!skb_may_tx_timestamp(sk, false)) 3913 return; 3914 3915 /* Take a reference to prevent skb_orphan() from freeing the socket, 3916 * but only if the socket refcount is not zero. 
3917 */ 3918 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 3919 *skb_hwtstamps(skb) = *hwtstamps; 3920 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 3921 sock_put(sk); 3922 } 3923 } 3924 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 3925 3926 void __skb_tstamp_tx(struct sk_buff *orig_skb, 3927 struct skb_shared_hwtstamps *hwtstamps, 3928 struct sock *sk, int tstype) 3929 { 3930 struct sk_buff *skb; 3931 bool tsonly, opt_stats = false; 3932 3933 if (!sk) 3934 return; 3935 3936 if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && 3937 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) 3938 return; 3939 3940 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 3941 if (!skb_may_tx_timestamp(sk, tsonly)) 3942 return; 3943 3944 if (tsonly) { 3945 #ifdef CONFIG_INET 3946 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && 3947 sk->sk_protocol == IPPROTO_TCP && 3948 sk->sk_type == SOCK_STREAM) { 3949 skb = tcp_get_timestamping_opt_stats(sk); 3950 opt_stats = true; 3951 } else 3952 #endif 3953 skb = alloc_skb(0, GFP_ATOMIC); 3954 } else { 3955 skb = skb_clone(orig_skb, GFP_ATOMIC); 3956 } 3957 if (!skb) 3958 return; 3959 3960 if (tsonly) { 3961 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & 3962 SKBTX_ANY_TSTAMP; 3963 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 3964 } 3965 3966 if (hwtstamps) 3967 *skb_hwtstamps(skb) = *hwtstamps; 3968 else 3969 skb->tstamp = ktime_get_real(); 3970 3971 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); 3972 } 3973 EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 3974 3975 void skb_tstamp_tx(struct sk_buff *orig_skb, 3976 struct skb_shared_hwtstamps *hwtstamps) 3977 { 3978 return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk, 3979 SCM_TSTAMP_SND); 3980 } 3981 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 3982 3983 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 3984 { 3985 struct sock *sk = skb->sk; 3986 struct sock_exterr_skb *serr; 3987 int err = 1; 3988 3989 skb->wifi_acked_valid = 1; 3990 skb->wifi_acked = acked; 3991 3992 serr = SKB_EXT_ERR(skb); 3993 memset(serr, 0, sizeof(*serr)); 3994 serr->ee.ee_errno = ENOMSG; 3995 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 3996 3997 /* Take a reference to prevent skb_orphan() from freeing the socket, 3998 * but only if the socket refcount is not zero. 3999 */ 4000 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 4001 err = sock_queue_err_skb(sk, skb); 4002 sock_put(sk); 4003 } 4004 if (err) 4005 kfree_skb(skb); 4006 } 4007 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 4008 4009 /** 4010 * skb_partial_csum_set - set up and verify partial csum values for packet 4011 * @skb: the skb to set 4012 * @start: the number of bytes after skb->data to start checksumming. 4013 * @off: the offset from start to place the checksum. 4014 * 4015 * For untrusted partially-checksummed packets, we need to make sure the values 4016 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 4017 * 4018 * This function checks and sets those values and skb->ip_summed: if this 4019 * returns false you should drop the packet. 
4020 */ 4021 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 4022 { 4023 if (unlikely(start > skb_headlen(skb)) || 4024 unlikely((int)start + off > skb_headlen(skb) - 2)) { 4025 net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", 4026 start, off, skb_headlen(skb)); 4027 return false; 4028 } 4029 skb->ip_summed = CHECKSUM_PARTIAL; 4030 skb->csum_start = skb_headroom(skb) + start; 4031 skb->csum_offset = off; 4032 skb_set_transport_header(skb, start); 4033 return true; 4034 } 4035 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 4036 4037 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 4038 unsigned int max) 4039 { 4040 if (skb_headlen(skb) >= len) 4041 return 0; 4042 4043 /* If we need to pullup then pullup to the max, so we 4044 * won't need to do it again. 4045 */ 4046 if (max > skb->len) 4047 max = skb->len; 4048 4049 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 4050 return -ENOMEM; 4051 4052 if (skb_headlen(skb) < len) 4053 return -EPROTO; 4054 4055 return 0; 4056 } 4057 4058 #define MAX_TCP_HDR_LEN (15 * 4) 4059 4060 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 4061 typeof(IPPROTO_IP) proto, 4062 unsigned int off) 4063 { 4064 switch (proto) { 4065 int err; 4066 4067 case IPPROTO_TCP: 4068 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 4069 off + MAX_TCP_HDR_LEN); 4070 if (!err && !skb_partial_csum_set(skb, off, 4071 offsetof(struct tcphdr, 4072 check))) 4073 err = -EPROTO; 4074 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 4075 4076 case IPPROTO_UDP: 4077 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 4078 off + sizeof(struct udphdr)); 4079 if (!err && !skb_partial_csum_set(skb, off, 4080 offsetof(struct udphdr, 4081 check))) 4082 err = -EPROTO; 4083 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 4084 } 4085 4086 return ERR_PTR(-EPROTO); 4087 } 4088 4089 /* This value should be large enough to cover a tagged ethernet header plus 4090 * maximally sized IP and TCP or UDP headers. 4091 */ 4092 #define MAX_IP_HDR_LEN 128 4093 4094 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 4095 { 4096 unsigned int off; 4097 bool fragment; 4098 __sum16 *csum; 4099 int err; 4100 4101 fragment = false; 4102 4103 err = skb_maybe_pull_tail(skb, 4104 sizeof(struct iphdr), 4105 MAX_IP_HDR_LEN); 4106 if (err < 0) 4107 goto out; 4108 4109 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) 4110 fragment = true; 4111 4112 off = ip_hdrlen(skb); 4113 4114 err = -EPROTO; 4115 4116 if (fragment) 4117 goto out; 4118 4119 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 4120 if (IS_ERR(csum)) 4121 return PTR_ERR(csum); 4122 4123 if (recalculate) 4124 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 4125 ip_hdr(skb)->daddr, 4126 skb->len - off, 4127 ip_hdr(skb)->protocol, 0); 4128 err = 0; 4129 4130 out: 4131 return err; 4132 } 4133 4134 /* This value should be large enough to cover a tagged ethernet header plus 4135 * an IPv6 header, all options, and a maximal TCP or UDP header. 
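 * (Rough arithmetic as a sanity check: an Ethernet header with one VLAN tag
 * is 18 bytes, the fixed IPv6 header is 40 bytes and a maximal TCP header is
 * 60 bytes, 118 bytes in total, which leaves well over a hundred bytes of
 * the 256 defined below for extension headers.)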
4136 */ 4137 #define MAX_IPV6_HDR_LEN 256 4138 4139 #define OPT_HDR(type, skb, off) \ 4140 (type *)(skb_network_header(skb) + (off)) 4141 4142 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 4143 { 4144 int err; 4145 u8 nexthdr; 4146 unsigned int off; 4147 unsigned int len; 4148 bool fragment; 4149 bool done; 4150 __sum16 *csum; 4151 4152 fragment = false; 4153 done = false; 4154 4155 off = sizeof(struct ipv6hdr); 4156 4157 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 4158 if (err < 0) 4159 goto out; 4160 4161 nexthdr = ipv6_hdr(skb)->nexthdr; 4162 4163 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 4164 while (off <= len && !done) { 4165 switch (nexthdr) { 4166 case IPPROTO_DSTOPTS: 4167 case IPPROTO_HOPOPTS: 4168 case IPPROTO_ROUTING: { 4169 struct ipv6_opt_hdr *hp; 4170 4171 err = skb_maybe_pull_tail(skb, 4172 off + 4173 sizeof(struct ipv6_opt_hdr), 4174 MAX_IPV6_HDR_LEN); 4175 if (err < 0) 4176 goto out; 4177 4178 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 4179 nexthdr = hp->nexthdr; 4180 off += ipv6_optlen(hp); 4181 break; 4182 } 4183 case IPPROTO_AH: { 4184 struct ip_auth_hdr *hp; 4185 4186 err = skb_maybe_pull_tail(skb, 4187 off + 4188 sizeof(struct ip_auth_hdr), 4189 MAX_IPV6_HDR_LEN); 4190 if (err < 0) 4191 goto out; 4192 4193 hp = OPT_HDR(struct ip_auth_hdr, skb, off); 4194 nexthdr = hp->nexthdr; 4195 off += ipv6_authlen(hp); 4196 break; 4197 } 4198 case IPPROTO_FRAGMENT: { 4199 struct frag_hdr *hp; 4200 4201 err = skb_maybe_pull_tail(skb, 4202 off + 4203 sizeof(struct frag_hdr), 4204 MAX_IPV6_HDR_LEN); 4205 if (err < 0) 4206 goto out; 4207 4208 hp = OPT_HDR(struct frag_hdr, skb, off); 4209 4210 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 4211 fragment = true; 4212 4213 nexthdr = hp->nexthdr; 4214 off += sizeof(struct frag_hdr); 4215 break; 4216 } 4217 default: 4218 done = true; 4219 break; 4220 } 4221 } 4222 4223 err = -EPROTO; 4224 4225 if (!done || fragment) 4226 goto out; 4227 4228 csum = skb_checksum_setup_ip(skb, nexthdr, off); 4229 if (IS_ERR(csum)) 4230 return PTR_ERR(csum); 4231 4232 if (recalculate) 4233 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 4234 &ipv6_hdr(skb)->daddr, 4235 skb->len - off, nexthdr, 0); 4236 err = 0; 4237 4238 out: 4239 return err; 4240 } 4241 4242 /** 4243 * skb_checksum_setup - set up partial checksum offset 4244 * @skb: the skb to set up 4245 * @recalculate: if true the pseudo-header checksum will be recalculated 4246 */ 4247 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) 4248 { 4249 int err; 4250 4251 switch (skb->protocol) { 4252 case htons(ETH_P_IP): 4253 err = skb_checksum_setup_ipv4(skb, recalculate); 4254 break; 4255 4256 case htons(ETH_P_IPV6): 4257 err = skb_checksum_setup_ipv6(skb, recalculate); 4258 break; 4259 4260 default: 4261 err = -EPROTO; 4262 break; 4263 } 4264 4265 return err; 4266 } 4267 EXPORT_SYMBOL(skb_checksum_setup); 4268 4269 /** 4270 * skb_checksum_maybe_trim - maybe trims the given skb 4271 * @skb: the skb to check 4272 * @transport_len: the data length beyond the network header 4273 * 4274 * Checks whether the given skb has data beyond the given transport length. 4275 * If so, returns a cloned skb trimmed to this transport length. 4276 * Otherwise returns the provided skb. Returns NULL in error cases 4277 * (e.g. transport_len exceeds skb length or out-of-memory). 4278 * 4279 * Caller needs to set the skb transport header and free any returned skb if it 4280 * differs from the provided skb. 
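 *
 * The free-if-different rule looks like this in a hypothetical caller of the
 * exported wrapper skb_checksum_trimmed() below (my_csum_check, offset and
 * transport_len are placeholders, not symbols from this file):
 *
 *	skb_set_transport_header(skb, offset);
 *	skb_chk = skb_checksum_trimmed(skb, transport_len, my_csum_check);
 *	if (!skb_chk)
 *		goto drop;
 *	...
 *	if (skb_chk != skb)
 *		kfree_skb(skb_chk);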
4281 */ 4282 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 4283 unsigned int transport_len) 4284 { 4285 struct sk_buff *skb_chk; 4286 unsigned int len = skb_transport_offset(skb) + transport_len; 4287 int ret; 4288 4289 if (skb->len < len) 4290 return NULL; 4291 else if (skb->len == len) 4292 return skb; 4293 4294 skb_chk = skb_clone(skb, GFP_ATOMIC); 4295 if (!skb_chk) 4296 return NULL; 4297 4298 ret = pskb_trim_rcsum(skb_chk, len); 4299 if (ret) { 4300 kfree_skb(skb_chk); 4301 return NULL; 4302 } 4303 4304 return skb_chk; 4305 } 4306 4307 /** 4308 * skb_checksum_trimmed - validate checksum of an skb 4309 * @skb: the skb to check 4310 * @transport_len: the data length beyond the network header 4311 * @skb_chkf: checksum function to use 4312 * 4313 * Applies the given checksum function skb_chkf to the provided skb. 4314 * Returns a checked and maybe trimmed skb. Returns NULL on error. 4315 * 4316 * If the skb has data beyond the given transport length, then a 4317 * trimmed & cloned skb is checked and returned. 4318 * 4319 * Caller needs to set the skb transport header and free any returned skb if it 4320 * differs from the provided skb. 4321 */ 4322 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 4323 unsigned int transport_len, 4324 __sum16(*skb_chkf)(struct sk_buff *skb)) 4325 { 4326 struct sk_buff *skb_chk; 4327 unsigned int offset = skb_transport_offset(skb); 4328 __sum16 ret; 4329 4330 skb_chk = skb_checksum_maybe_trim(skb, transport_len); 4331 if (!skb_chk) 4332 goto err; 4333 4334 if (!pskb_may_pull(skb_chk, offset)) 4335 goto err; 4336 4337 skb_pull_rcsum(skb_chk, offset); 4338 ret = skb_chkf(skb_chk); 4339 skb_push_rcsum(skb_chk, offset); 4340 4341 if (ret) 4342 goto err; 4343 4344 return skb_chk; 4345 4346 err: 4347 if (skb_chk && skb_chk != skb) 4348 kfree_skb(skb_chk); 4349 4350 return NULL; 4351 4352 } 4353 EXPORT_SYMBOL(skb_checksum_trimmed); 4354 4355 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 4356 { 4357 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 4358 skb->dev->name); 4359 } 4360 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 4361 4362 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 4363 { 4364 if (head_stolen) { 4365 skb_release_head_state(skb); 4366 kmem_cache_free(skbuff_head_cache, skb); 4367 } else { 4368 __kfree_skb(skb); 4369 } 4370 } 4371 EXPORT_SYMBOL(kfree_skb_partial); 4372 4373 /** 4374 * skb_try_coalesce - try to merge skb to prior one 4375 * @to: prior buffer 4376 * @from: buffer to add 4377 * @fragstolen: pointer to boolean 4378 * @delta_truesize: how much more was allocated than was requested 4379 */ 4380 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 4381 bool *fragstolen, int *delta_truesize) 4382 { 4383 int i, delta, len = from->len; 4384 4385 *fragstolen = false; 4386 4387 if (skb_cloned(to)) 4388 return false; 4389 4390 if (len <= skb_tailroom(to)) { 4391 if (len) 4392 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 4393 *delta_truesize = 0; 4394 return true; 4395 } 4396 4397 if (skb_has_frag_list(to) || skb_has_frag_list(from)) 4398 return false; 4399 4400 if (skb_headlen(from) != 0) { 4401 struct page *page; 4402 unsigned int offset; 4403 4404 if (skb_shinfo(to)->nr_frags + 4405 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 4406 return false; 4407 4408 if (skb_head_is_locked(from)) 4409 return false; 4410 4411 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 4412 4413 page = virt_to_head_page(from->head); 4414 
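		/* Steal the linear data of 'from': the head page just looked
		 * up becomes a page frag of 'to', with the offset of
		 * from->data inside that page computed next.  *fragstolen is
		 * set so the caller can free 'from' via kfree_skb_partial()
		 * without releasing the stolen head; this is only safe
		 * because skb_head_is_locked() was checked above.
		 */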
offset = from->data - (unsigned char *)page_address(page); 4415 4416 skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, 4417 page, offset, skb_headlen(from)); 4418 *fragstolen = true; 4419 } else { 4420 if (skb_shinfo(to)->nr_frags + 4421 skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) 4422 return false; 4423 4424 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); 4425 } 4426 4427 WARN_ON_ONCE(delta < len); 4428 4429 memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, 4430 skb_shinfo(from)->frags, 4431 skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); 4432 skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; 4433 4434 if (!skb_cloned(from)) 4435 skb_shinfo(from)->nr_frags = 0; 4436 4437 /* if the skb is not cloned this does nothing 4438 * since we set nr_frags to 0. 4439 */ 4440 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) 4441 skb_frag_ref(from, i); 4442 4443 to->truesize += delta; 4444 to->len += len; 4445 to->data_len += len; 4446 4447 *delta_truesize = delta; 4448 return true; 4449 } 4450 EXPORT_SYMBOL(skb_try_coalesce); 4451 4452 /** 4453 * skb_scrub_packet - scrub an skb 4454 * 4455 * @skb: buffer to clean 4456 * @xnet: packet is crossing netns 4457 * 4458 * skb_scrub_packet can be used after encapsulating or decapsulting a packet 4459 * into/from a tunnel. Some information have to be cleared during these 4460 * operations. 4461 * skb_scrub_packet can also be used to clean a skb before injecting it in 4462 * another namespace (@xnet == true). We have to clear all information in the 4463 * skb that could impact namespace isolation. 4464 */ 4465 void skb_scrub_packet(struct sk_buff *skb, bool xnet) 4466 { 4467 skb->tstamp = 0; 4468 skb->pkt_type = PACKET_HOST; 4469 skb->skb_iif = 0; 4470 skb->ignore_df = 0; 4471 skb_dst_drop(skb); 4472 secpath_reset(skb); 4473 nf_reset(skb); 4474 nf_reset_trace(skb); 4475 4476 if (!xnet) 4477 return; 4478 4479 skb_orphan(skb); 4480 skb->mark = 0; 4481 } 4482 EXPORT_SYMBOL_GPL(skb_scrub_packet); 4483 4484 /** 4485 * skb_gso_transport_seglen - Return length of individual segments of a gso packet 4486 * 4487 * @skb: GSO skb 4488 * 4489 * skb_gso_transport_seglen is used to determine the real size of the 4490 * individual segments, including Layer4 headers (TCP/UDP). 4491 * 4492 * The MAC/L2 or network (IP, IPv6) headers are not accounted for. 4493 */ 4494 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) 4495 { 4496 const struct skb_shared_info *shinfo = skb_shinfo(skb); 4497 unsigned int thlen = 0; 4498 4499 if (skb->encapsulation) { 4500 thlen = skb_inner_transport_header(skb) - 4501 skb_transport_header(skb); 4502 4503 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 4504 thlen += inner_tcp_hdrlen(skb); 4505 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 4506 thlen = tcp_hdrlen(skb); 4507 } else if (unlikely(shinfo->gso_type & SKB_GSO_SCTP)) { 4508 thlen = sizeof(struct sctphdr); 4509 } 4510 /* UFO sets gso_size to the size of the fragmentation 4511 * payload, i.e. the size of the L4 (UDP) header is already 4512 * accounted for. 4513 */ 4514 return thlen + shinfo->gso_size; 4515 } 4516 EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); 4517 4518 /** 4519 * skb_gso_validate_mtu - Return in case such skb fits a given MTU 4520 * 4521 * @skb: GSO skb 4522 * @mtu: MTU to validate against 4523 * 4524 * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU 4525 * once split. 
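 *
 * Worked example (illustrative numbers only): a TCPv4 GSO skb with a 20-byte
 * IP header, a 32-byte TCP header and a gso_size of 1448 has a network-layer
 * segment length of 20 + 32 + 1448 = 1500 bytes, so it validates against a
 * 1500-byte MTU but not against anything smaller.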
4526 */ 4527 bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu) 4528 { 4529 const struct skb_shared_info *shinfo = skb_shinfo(skb); 4530 const struct sk_buff *iter; 4531 unsigned int hlen; 4532 4533 hlen = skb_gso_network_seglen(skb); 4534 4535 if (shinfo->gso_size != GSO_BY_FRAGS) 4536 return hlen <= mtu; 4537 4538 /* Undo this so we can re-use header sizes */ 4539 hlen -= GSO_BY_FRAGS; 4540 4541 skb_walk_frags(skb, iter) { 4542 if (hlen + skb_headlen(iter) > mtu) 4543 return false; 4544 } 4545 4546 return true; 4547 } 4548 EXPORT_SYMBOL_GPL(skb_gso_validate_mtu); 4549 4550 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 4551 { 4552 if (skb_cow(skb, skb_headroom(skb)) < 0) { 4553 kfree_skb(skb); 4554 return NULL; 4555 } 4556 4557 memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN, 4558 2 * ETH_ALEN); 4559 skb->mac_header += VLAN_HLEN; 4560 return skb; 4561 } 4562 4563 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) 4564 { 4565 struct vlan_hdr *vhdr; 4566 u16 vlan_tci; 4567 4568 if (unlikely(skb_vlan_tag_present(skb))) { 4569 /* vlan_tci is already set-up so leave this for another time */ 4570 return skb; 4571 } 4572 4573 skb = skb_share_check(skb, GFP_ATOMIC); 4574 if (unlikely(!skb)) 4575 goto err_free; 4576 4577 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) 4578 goto err_free; 4579 4580 vhdr = (struct vlan_hdr *)skb->data; 4581 vlan_tci = ntohs(vhdr->h_vlan_TCI); 4582 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); 4583 4584 skb_pull_rcsum(skb, VLAN_HLEN); 4585 vlan_set_encap_proto(skb, vhdr); 4586 4587 skb = skb_reorder_vlan_header(skb); 4588 if (unlikely(!skb)) 4589 goto err_free; 4590 4591 skb_reset_network_header(skb); 4592 skb_reset_transport_header(skb); 4593 skb_reset_mac_len(skb); 4594 4595 return skb; 4596 4597 err_free: 4598 kfree_skb(skb); 4599 return NULL; 4600 } 4601 EXPORT_SYMBOL(skb_vlan_untag); 4602 4603 int skb_ensure_writable(struct sk_buff *skb, int write_len) 4604 { 4605 if (!pskb_may_pull(skb, write_len)) 4606 return -ENOMEM; 4607 4608 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) 4609 return 0; 4610 4611 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 4612 } 4613 EXPORT_SYMBOL(skb_ensure_writable); 4614 4615 /* remove VLAN header from packet and update csum accordingly. 4616 * expects a non skb_vlan_tag_present skb with a vlan tag payload 4617 */ 4618 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) 4619 { 4620 struct vlan_hdr *vhdr; 4621 int offset = skb->data - skb_mac_header(skb); 4622 int err; 4623 4624 if (WARN_ONCE(offset, 4625 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", 4626 offset)) { 4627 return -EINVAL; 4628 } 4629 4630 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); 4631 if (unlikely(err)) 4632 return err; 4633 4634 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 4635 4636 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); 4637 *vlan_tci = ntohs(vhdr->h_vlan_TCI); 4638 4639 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); 4640 __skb_pull(skb, VLAN_HLEN); 4641 4642 vlan_set_encap_proto(skb, vhdr); 4643 skb->mac_header += VLAN_HLEN; 4644 4645 if (skb_network_offset(skb) < ETH_HLEN) 4646 skb_set_network_header(skb, ETH_HLEN); 4647 4648 skb_reset_mac_len(skb); 4649 4650 return err; 4651 } 4652 EXPORT_SYMBOL(__skb_vlan_pop); 4653 4654 /* Pop a vlan tag either from hwaccel or from payload. 4655 * Expects skb->data at mac header. 
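 * For a double-tagged frame this removes the outermost tag (from the hwaccel
 * field if present, otherwise from the payload) and, if another VLAN
 * ethertype follows, pulls the next tag out of the payload and parks it in
 * the hwaccel tag instead.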
4656 */ 4657 int skb_vlan_pop(struct sk_buff *skb) 4658 { 4659 u16 vlan_tci; 4660 __be16 vlan_proto; 4661 int err; 4662 4663 if (likely(skb_vlan_tag_present(skb))) { 4664 skb->vlan_tci = 0; 4665 } else { 4666 if (unlikely(!eth_type_vlan(skb->protocol))) 4667 return 0; 4668 4669 err = __skb_vlan_pop(skb, &vlan_tci); 4670 if (err) 4671 return err; 4672 } 4673 /* move next vlan tag to hw accel tag */ 4674 if (likely(!eth_type_vlan(skb->protocol))) 4675 return 0; 4676 4677 vlan_proto = skb->protocol; 4678 err = __skb_vlan_pop(skb, &vlan_tci); 4679 if (unlikely(err)) 4680 return err; 4681 4682 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 4683 return 0; 4684 } 4685 EXPORT_SYMBOL(skb_vlan_pop); 4686 4687 /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present). 4688 * Expects skb->data at mac header. 4689 */ 4690 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) 4691 { 4692 if (skb_vlan_tag_present(skb)) { 4693 int offset = skb->data - skb_mac_header(skb); 4694 int err; 4695 4696 if (WARN_ONCE(offset, 4697 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", 4698 offset)) { 4699 return -EINVAL; 4700 } 4701 4702 err = __vlan_insert_tag(skb, skb->vlan_proto, 4703 skb_vlan_tag_get(skb)); 4704 if (err) 4705 return err; 4706 4707 skb->protocol = skb->vlan_proto; 4708 skb->mac_len += VLAN_HLEN; 4709 4710 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 4711 } 4712 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 4713 return 0; 4714 } 4715 EXPORT_SYMBOL(skb_vlan_push); 4716 4717 /** 4718 * alloc_skb_with_frags - allocate skb with page frags 4719 * 4720 * @header_len: size of linear part 4721 * @data_len: needed length in frags 4722 * @max_page_order: max page order desired. 4723 * @errcode: pointer to error code if any 4724 * @gfp_mask: allocation mask 4725 * 4726 * This can be used to allocate a paged skb, given a maximal order for frags. 4727 */ 4728 struct sk_buff *alloc_skb_with_frags(unsigned long header_len, 4729 unsigned long data_len, 4730 int max_page_order, 4731 int *errcode, 4732 gfp_t gfp_mask) 4733 { 4734 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 4735 unsigned long chunk; 4736 struct sk_buff *skb; 4737 struct page *page; 4738 gfp_t gfp_head; 4739 int i; 4740 4741 *errcode = -EMSGSIZE; 4742 /* Note this test could be relaxed, if we succeed to allocate 4743 * high order pages... 
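	 * (Concretely: with 4 KB pages and the typical MAX_SKB_FRAGS of 17,
	 * any data_len above 17 * 4 KB = 68 KB is rejected here, even though
	 * fewer high-order pages could in principle hold it; that is what
	 * "relaxed" refers to.)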
4744 */ 4745 if (npages > MAX_SKB_FRAGS) 4746 return NULL; 4747 4748 gfp_head = gfp_mask; 4749 if (gfp_head & __GFP_DIRECT_RECLAIM) 4750 gfp_head |= __GFP_RETRY_MAYFAIL; 4751 4752 *errcode = -ENOBUFS; 4753 skb = alloc_skb(header_len, gfp_head); 4754 if (!skb) 4755 return NULL; 4756 4757 skb->truesize += npages << PAGE_SHIFT; 4758 4759 for (i = 0; npages > 0; i++) { 4760 int order = max_page_order; 4761 4762 while (order) { 4763 if (npages >= 1 << order) { 4764 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | 4765 __GFP_COMP | 4766 __GFP_NOWARN | 4767 __GFP_NORETRY, 4768 order); 4769 if (page) 4770 goto fill_page; 4771 /* Do not retry other high order allocations */ 4772 order = 1; 4773 max_page_order = 0; 4774 } 4775 order--; 4776 } 4777 page = alloc_page(gfp_mask); 4778 if (!page) 4779 goto failure; 4780 fill_page: 4781 chunk = min_t(unsigned long, data_len, 4782 PAGE_SIZE << order); 4783 skb_fill_page_desc(skb, i, page, 0, chunk); 4784 data_len -= chunk; 4785 npages -= 1 << order; 4786 } 4787 return skb; 4788 4789 failure: 4790 kfree_skb(skb); 4791 return NULL; 4792 } 4793 EXPORT_SYMBOL(alloc_skb_with_frags); 4794 4795 /* carve out the first off bytes from skb when off < headlen */ 4796 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, 4797 const int headlen, gfp_t gfp_mask) 4798 { 4799 int i; 4800 int size = skb_end_offset(skb); 4801 int new_hlen = headlen - off; 4802 u8 *data; 4803 4804 size = SKB_DATA_ALIGN(size); 4805 4806 if (skb_pfmemalloc(skb)) 4807 gfp_mask |= __GFP_MEMALLOC; 4808 data = kmalloc_reserve(size + 4809 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 4810 gfp_mask, NUMA_NO_NODE, NULL); 4811 if (!data) 4812 return -ENOMEM; 4813 4814 size = SKB_WITH_OVERHEAD(ksize(data)); 4815 4816 /* Copy real data, and all frags */ 4817 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); 4818 skb->len -= off; 4819 4820 memcpy((struct skb_shared_info *)(data + size), 4821 skb_shinfo(skb), 4822 offsetof(struct skb_shared_info, 4823 frags[skb_shinfo(skb)->nr_frags])); 4824 if (skb_cloned(skb)) { 4825 /* drop the old head gracefully */ 4826 if (skb_orphan_frags(skb, gfp_mask)) { 4827 kfree(data); 4828 return -ENOMEM; 4829 } 4830 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 4831 skb_frag_ref(skb, i); 4832 if (skb_has_frag_list(skb)) 4833 skb_clone_fraglist(skb); 4834 skb_release_data(skb); 4835 } else { 4836 /* we can reuse existing recount- all we did was 4837 * relocate values 4838 */ 4839 skb_free_head(skb); 4840 } 4841 4842 skb->head = data; 4843 skb->data = data; 4844 skb->head_frag = 0; 4845 #ifdef NET_SKBUFF_DATA_USES_OFFSET 4846 skb->end = size; 4847 #else 4848 skb->end = skb->head + size; 4849 #endif 4850 skb_set_tail_pointer(skb, skb_headlen(skb)); 4851 skb_headers_offset_update(skb, 0); 4852 skb->cloned = 0; 4853 skb->hdr_len = 0; 4854 skb->nohdr = 0; 4855 atomic_set(&skb_shinfo(skb)->dataref, 1); 4856 4857 return 0; 4858 } 4859 4860 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp); 4861 4862 /* carve out the first eat bytes from skb's frag_list. May recurse into 4863 * pskb_carve() 4864 */ 4865 static int pskb_carve_frag_list(struct sk_buff *skb, 4866 struct skb_shared_info *shinfo, int eat, 4867 gfp_t gfp_mask) 4868 { 4869 struct sk_buff *list = shinfo->frag_list; 4870 struct sk_buff *clone = NULL; 4871 struct sk_buff *insp = NULL; 4872 4873 do { 4874 if (!list) { 4875 pr_err("Not enough bytes to eat. Want %d\n", eat); 4876 return -EFAULT; 4877 } 4878 if (list->len <= eat) { 4879 /* Eaten as whole. 
*/ 4880 eat -= list->len; 4881 list = list->next; 4882 insp = list; 4883 } else { 4884 /* Eaten partially. */ 4885 if (skb_shared(list)) { 4886 clone = skb_clone(list, gfp_mask); 4887 if (!clone) 4888 return -ENOMEM; 4889 insp = list->next; 4890 list = clone; 4891 } else { 4892 /* This may be pulled without problems. */ 4893 insp = list; 4894 } 4895 if (pskb_carve(list, eat, gfp_mask) < 0) { 4896 kfree_skb(clone); 4897 return -ENOMEM; 4898 } 4899 break; 4900 } 4901 } while (eat); 4902 4903 /* Free pulled out fragments. */ 4904 while ((list = shinfo->frag_list) != insp) { 4905 shinfo->frag_list = list->next; 4906 kfree_skb(list); 4907 } 4908 /* And insert new clone at head. */ 4909 if (clone) { 4910 clone->next = list; 4911 shinfo->frag_list = clone; 4912 } 4913 return 0; 4914 } 4915 4916 /* carve off first len bytes from skb. Split line (off) is in the 4917 * non-linear part of skb 4918 */ 4919 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, 4920 int pos, gfp_t gfp_mask) 4921 { 4922 int i, k = 0; 4923 int size = skb_end_offset(skb); 4924 u8 *data; 4925 const int nfrags = skb_shinfo(skb)->nr_frags; 4926 struct skb_shared_info *shinfo; 4927 4928 size = SKB_DATA_ALIGN(size); 4929 4930 if (skb_pfmemalloc(skb)) 4931 gfp_mask |= __GFP_MEMALLOC; 4932 data = kmalloc_reserve(size + 4933 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 4934 gfp_mask, NUMA_NO_NODE, NULL); 4935 if (!data) 4936 return -ENOMEM; 4937 4938 size = SKB_WITH_OVERHEAD(ksize(data)); 4939 4940 memcpy((struct skb_shared_info *)(data + size), 4941 skb_shinfo(skb), offsetof(struct skb_shared_info, 4942 frags[skb_shinfo(skb)->nr_frags])); 4943 if (skb_orphan_frags(skb, gfp_mask)) { 4944 kfree(data); 4945 return -ENOMEM; 4946 } 4947 shinfo = (struct skb_shared_info *)(data + size); 4948 for (i = 0; i < nfrags; i++) { 4949 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); 4950 4951 if (pos + fsize > off) { 4952 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; 4953 4954 if (pos < off) { 4955 /* Split frag. 4956 * We have two variants in this case: 4957 * 1. Move all the frag to the second 4958 * part, if it is possible. F.e. 4959 * this approach is mandatory for TUX, 4960 * where splitting is expensive. 4961 * 2. Split is accurately. We make this. 
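				 * Here we take the second variant: the first
				 * kept frag is advanced by (off - pos) bytes
				 * and shrunk by the same amount, so the
				 * carve point lands exactly at 'off'.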
4962 */ 4963 shinfo->frags[0].page_offset += off - pos; 4964 skb_frag_size_sub(&shinfo->frags[0], off - pos); 4965 } 4966 skb_frag_ref(skb, i); 4967 k++; 4968 } 4969 pos += fsize; 4970 } 4971 shinfo->nr_frags = k; 4972 if (skb_has_frag_list(skb)) 4973 skb_clone_fraglist(skb); 4974 4975 if (k == 0) { 4976 /* split line is in frag list */ 4977 pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask); 4978 } 4979 skb_release_data(skb); 4980 4981 skb->head = data; 4982 skb->head_frag = 0; 4983 skb->data = data; 4984 #ifdef NET_SKBUFF_DATA_USES_OFFSET 4985 skb->end = size; 4986 #else 4987 skb->end = skb->head + size; 4988 #endif 4989 skb_reset_tail_pointer(skb); 4990 skb_headers_offset_update(skb, 0); 4991 skb->cloned = 0; 4992 skb->hdr_len = 0; 4993 skb->nohdr = 0; 4994 skb->len -= off; 4995 skb->data_len = skb->len; 4996 atomic_set(&skb_shinfo(skb)->dataref, 1); 4997 return 0; 4998 } 4999 5000 /* remove len bytes from the beginning of the skb */ 5001 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) 5002 { 5003 int headlen = skb_headlen(skb); 5004 5005 if (len < headlen) 5006 return pskb_carve_inside_header(skb, len, headlen, gfp); 5007 else 5008 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); 5009 } 5010 5011 /* Extract to_copy bytes starting at off from skb, and return this in 5012 * a new skb 5013 */ 5014 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, 5015 int to_copy, gfp_t gfp) 5016 { 5017 struct sk_buff *clone = skb_clone(skb, gfp); 5018 5019 if (!clone) 5020 return NULL; 5021 5022 if (pskb_carve(clone, off, gfp) < 0 || 5023 pskb_trim(clone, to_copy)) { 5024 kfree_skb(clone); 5025 return NULL; 5026 } 5027 return clone; 5028 } 5029 EXPORT_SYMBOL(pskb_extract); 5030 5031 /** 5032 * skb_condense - try to get rid of fragments/frag_list if possible 5033 * @skb: buffer 5034 * 5035 * Can be used to save memory before skb is added to a busy queue. 5036 * If packet has bytes in frags and enough tail room in skb->head, 5037 * pull all of them, so that we can free the frags right now and adjust 5038 * truesize. 5039 * Notes: 5040 * We do not reallocate skb->head thus can not fail. 5041 * Caller must re-evaluate skb->truesize if needed. 5042 */ 5043 void skb_condense(struct sk_buff *skb) 5044 { 5045 if (skb->data_len) { 5046 if (skb->data_len > skb->end - skb->tail || 5047 skb_cloned(skb)) 5048 return; 5049 5050 /* Nice, we can free page frag(s) right now */ 5051 __pskb_pull_tail(skb, skb->data_len); 5052 } 5053 /* At this point, skb->truesize might be over estimated, 5054 * because skb had a fragment, and fragments do not tell 5055 * their truesize. 5056 * When we pulled its content into skb->head, fragment 5057 * was freed, but __pskb_pull_tail() could not possibly 5058 * adjust skb->truesize, not knowing the frag truesize. 5059 */ 5060 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 5061 } 5062
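/* A hypothetical caller sketch for skb_condense(), for illustration only and
 * not part of this file: a receive backlog path might call
 *
 *	skb_condense(skb);
 *	__skb_queue_tail(&backlog, skb);
 *
 * just before queueing, then charge the possibly reduced skb->truesize to its
 * own memory accounting; 'backlog' is a placeholder for the caller's
 * sk_buff_head.
 */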