1 /* 2 * Routines having to do with the 'struct sk_buff' memory handlers. 3 * 4 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk> 5 * Florian La Roche <rzsfl@rz.uni-sb.de> 6 * 7 * Fixes: 8 * Alan Cox : Fixed the worst of the load 9 * balancer bugs. 10 * Dave Platt : Interrupt stacking fix. 11 * Richard Kooijman : Timestamp fixes. 12 * Alan Cox : Changed buffer format. 13 * Alan Cox : destructor hook for AF_UNIX etc. 14 * Linus Torvalds : Better skb_clone. 15 * Alan Cox : Added skb_copy. 16 * Alan Cox : Added all the changed routines Linus 17 * only put in the headers 18 * Ray VanTassle : Fixed --skb->lock in free 19 * Alan Cox : skb_copy copy arp field 20 * Andi Kleen : slabified it. 21 * Robert Olsson : Removed skb_head_pool 22 * 23 * NOTE: 24 * The __skb_ routines should be called with interrupts 25 * disabled, or you better be *real* sure that the operation is atomic 26 * with respect to whatever list is being frobbed (e.g. via lock_sock() 27 * or via disabling bottom half handlers, etc). 28 * 29 * This program is free software; you can redistribute it and/or 30 * modify it under the terms of the GNU General Public License 31 * as published by the Free Software Foundation; either version 32 * 2 of the License, or (at your option) any later version. 33 */ 34 35 /* 36 * The functions in this file will not compile correctly with gcc 2.4.x 37 */ 38 39 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 40 41 #include <linux/module.h> 42 #include <linux/types.h> 43 #include <linux/kernel.h> 44 #include <linux/kmemcheck.h> 45 #include <linux/mm.h> 46 #include <linux/interrupt.h> 47 #include <linux/in.h> 48 #include <linux/inet.h> 49 #include <linux/slab.h> 50 #include <linux/tcp.h> 51 #include <linux/udp.h> 52 #include <linux/netdevice.h> 53 #ifdef CONFIG_NET_CLS_ACT 54 #include <net/pkt_sched.h> 55 #endif 56 #include <linux/string.h> 57 #include <linux/skbuff.h> 58 #include <linux/splice.h> 59 #include <linux/cache.h> 60 #include <linux/rtnetlink.h> 61 #include <linux/init.h> 62 #include <linux/scatterlist.h> 63 #include <linux/errqueue.h> 64 #include <linux/prefetch.h> 65 #include <linux/if_vlan.h> 66 67 #include <net/protocol.h> 68 #include <net/dst.h> 69 #include <net/sock.h> 70 #include <net/checksum.h> 71 #include <net/ip6_checksum.h> 72 #include <net/xfrm.h> 73 74 #include <asm/uaccess.h> 75 #include <trace/events/skb.h> 76 #include <linux/highmem.h> 77 #include <linux/capability.h> 78 #include <linux/user_namespace.h> 79 80 struct kmem_cache *skbuff_head_cache __read_mostly; 81 static struct kmem_cache *skbuff_fclone_cache __read_mostly; 82 int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS; 83 EXPORT_SYMBOL(sysctl_max_skb_frags); 84 85 /** 86 * skb_panic - private function for out-of-line support 87 * @skb: buffer 88 * @sz: size 89 * @addr: address 90 * @msg: skb_over_panic or skb_under_panic 91 * 92 * Out-of-line support for skb_put() and skb_push(). 93 * Called via the wrapper skb_over_panic() or skb_under_panic(). 94 * Keep out of line to prevent kernel bloat. 95 * __builtin_return_address is not used because it is not always reliable. 96 */ 97 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, 98 const char msg[]) 99 { 100 pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n", 101 msg, addr, skb->len, sz, skb->head, skb->data, 102 (unsigned long)skb->tail, (unsigned long)skb->end, 103 skb->dev ? 
skb->dev->name : "<NULL>"); 104 BUG(); 105 } 106 107 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) 108 { 109 skb_panic(skb, sz, addr, __func__); 110 } 111 112 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) 113 { 114 skb_panic(skb, sz, addr, __func__); 115 } 116 117 /* 118 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells 119 * the caller if emergency pfmemalloc reserves are being used. If it is and 120 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves 121 * may be used. Otherwise, the packet data may be discarded until enough 122 * memory is free 123 */ 124 #define kmalloc_reserve(size, gfp, node, pfmemalloc) \ 125 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc) 126 127 static void *__kmalloc_reserve(size_t size, gfp_t flags, int node, 128 unsigned long ip, bool *pfmemalloc) 129 { 130 void *obj; 131 bool ret_pfmemalloc = false; 132 133 /* 134 * Try a regular allocation, when that fails and we're not entitled 135 * to the reserves, fail. 136 */ 137 obj = kmalloc_node_track_caller(size, 138 flags | __GFP_NOMEMALLOC | __GFP_NOWARN, 139 node); 140 if (obj || !(gfp_pfmemalloc_allowed(flags))) 141 goto out; 142 143 /* Try again but now we are using pfmemalloc reserves */ 144 ret_pfmemalloc = true; 145 obj = kmalloc_node_track_caller(size, flags, node); 146 147 out: 148 if (pfmemalloc) 149 *pfmemalloc = ret_pfmemalloc; 150 151 return obj; 152 } 153 154 /* Allocate a new skbuff. We do this ourselves so we can fill in a few 155 * 'private' fields and also do memory statistics to find all the 156 * [BEEP] leaks. 157 * 158 */ 159 160 struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node) 161 { 162 struct sk_buff *skb; 163 164 /* Get the HEAD */ 165 skb = kmem_cache_alloc_node(skbuff_head_cache, 166 gfp_mask & ~__GFP_DMA, node); 167 if (!skb) 168 goto out; 169 170 /* 171 * Only clear those fields we need to clear, not those that we will 172 * actually initialise below. Hence, don't put any more fields after 173 * the tail pointer in struct sk_buff! 174 */ 175 memset(skb, 0, offsetof(struct sk_buff, tail)); 176 skb->head = NULL; 177 skb->truesize = sizeof(struct sk_buff); 178 atomic_set(&skb->users, 1); 179 180 skb->mac_header = (typeof(skb->mac_header))~0U; 181 out: 182 return skb; 183 } 184 185 /** 186 * __alloc_skb - allocate a network buffer 187 * @size: size to allocate 188 * @gfp_mask: allocation mask 189 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache 190 * instead of head cache and allocate a cloned (child) skb. 191 * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for 192 * allocations in case the data is required for writeback 193 * @node: numa node to allocate memory on 194 * 195 * Allocate a new &sk_buff. The returned buffer has no headroom and a 196 * tail room of at least size bytes. The object has a reference count 197 * of one. The return is the buffer. On a failure the return is %NULL. 198 * 199 * Buffers may only be allocated from interrupts using a @gfp_mask of 200 * %GFP_ATOMIC. 201 */ 202 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, 203 int flags, int node) 204 { 205 struct kmem_cache *cache; 206 struct skb_shared_info *shinfo; 207 struct sk_buff *skb; 208 u8 *data; 209 bool pfmemalloc; 210 211 cache = (flags & SKB_ALLOC_FCLONE) 212 ? 
skbuff_fclone_cache : skbuff_head_cache; 213 214 if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX)) 215 gfp_mask |= __GFP_MEMALLOC; 216 217 /* Get the HEAD */ 218 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); 219 if (!skb) 220 goto out; 221 prefetchw(skb); 222 223 /* We do our best to align skb_shared_info on a separate cache 224 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives 225 * aligned memory blocks, unless SLUB/SLAB debug is enabled. 226 * Both skb->head and skb_shared_info are cache line aligned. 227 */ 228 size = SKB_DATA_ALIGN(size); 229 size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 230 data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc); 231 if (!data) 232 goto nodata; 233 /* kmalloc(size) might give us more room than requested. 234 * Put skb_shared_info exactly at the end of allocated zone, 235 * to allow max possible filling before reallocation. 236 */ 237 size = SKB_WITH_OVERHEAD(ksize(data)); 238 prefetchw(data + size); 239 240 /* 241 * Only clear those fields we need to clear, not those that we will 242 * actually initialise below. Hence, don't put any more fields after 243 * the tail pointer in struct sk_buff! 244 */ 245 memset(skb, 0, offsetof(struct sk_buff, tail)); 246 /* Account for allocated memory : skb + skb->head */ 247 skb->truesize = SKB_TRUESIZE(size); 248 skb->pfmemalloc = pfmemalloc; 249 atomic_set(&skb->users, 1); 250 skb->head = data; 251 skb->data = data; 252 skb_reset_tail_pointer(skb); 253 skb->end = skb->tail + size; 254 skb->mac_header = (typeof(skb->mac_header))~0U; 255 skb->transport_header = (typeof(skb->transport_header))~0U; 256 257 /* make sure we initialize shinfo sequentially */ 258 shinfo = skb_shinfo(skb); 259 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); 260 atomic_set(&shinfo->dataref, 1); 261 kmemcheck_annotate_variable(shinfo->destructor_arg); 262 263 if (flags & SKB_ALLOC_FCLONE) { 264 struct sk_buff_fclones *fclones; 265 266 fclones = container_of(skb, struct sk_buff_fclones, skb1); 267 268 kmemcheck_annotate_bitfield(&fclones->skb2, flags1); 269 skb->fclone = SKB_FCLONE_ORIG; 270 atomic_set(&fclones->fclone_ref, 1); 271 272 fclones->skb2.fclone = SKB_FCLONE_CLONE; 273 fclones->skb2.pfmemalloc = pfmemalloc; 274 } 275 out: 276 return skb; 277 nodata: 278 kmem_cache_free(cache, skb); 279 skb = NULL; 280 goto out; 281 } 282 EXPORT_SYMBOL(__alloc_skb); 283 284 /** 285 * __build_skb - build a network buffer 286 * @data: data buffer provided by caller 287 * @frag_size: size of data, or 0 if head was kmalloced 288 * 289 * Allocate a new &sk_buff. Caller provides space holding head and 290 * skb_shared_info. @data must have been allocated by kmalloc() only if 291 * @frag_size is 0, otherwise data should come from the page allocator 292 * or vmalloc() 293 * The return is the new skb buffer. 294 * On a failure the return is %NULL, and @data is not freed. 295 * Notes : 296 * Before IO, driver allocates only data buffer where NIC put incoming frame 297 * Driver should add room at head (NET_SKB_PAD) and 298 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)) 299 * After IO, driver calls build_skb(), to allocate sk_buff and populate it 300 * before giving packet to stack. 301 * RX rings only contains data buffers, not full skbs. 302 */ 303 struct sk_buff *__build_skb(void *data, unsigned int frag_size) 304 { 305 struct skb_shared_info *shinfo; 306 struct sk_buff *skb; 307 unsigned int size = frag_size ? 
: ksize(data); 308 309 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); 310 if (!skb) 311 return NULL; 312 313 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 314 315 memset(skb, 0, offsetof(struct sk_buff, tail)); 316 skb->truesize = SKB_TRUESIZE(size); 317 atomic_set(&skb->users, 1); 318 skb->head = data; 319 skb->data = data; 320 skb_reset_tail_pointer(skb); 321 skb->end = skb->tail + size; 322 skb->mac_header = (typeof(skb->mac_header))~0U; 323 skb->transport_header = (typeof(skb->transport_header))~0U; 324 325 /* make sure we initialize shinfo sequentially */ 326 shinfo = skb_shinfo(skb); 327 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); 328 atomic_set(&shinfo->dataref, 1); 329 kmemcheck_annotate_variable(shinfo->destructor_arg); 330 331 return skb; 332 } 333 334 /* build_skb() is wrapper over __build_skb(), that specifically 335 * takes care of skb->head and skb->pfmemalloc 336 * This means that if @frag_size is not zero, then @data must be backed 337 * by a page fragment, not kmalloc() or vmalloc() 338 */ 339 struct sk_buff *build_skb(void *data, unsigned int frag_size) 340 { 341 struct sk_buff *skb = __build_skb(data, frag_size); 342 343 if (skb && frag_size) { 344 skb->head_frag = 1; 345 if (page_is_pfmemalloc(virt_to_head_page(data))) 346 skb->pfmemalloc = 1; 347 } 348 return skb; 349 } 350 EXPORT_SYMBOL(build_skb); 351 352 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); 353 static DEFINE_PER_CPU(struct page_frag_cache, napi_alloc_cache); 354 355 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) 356 { 357 struct page_frag_cache *nc; 358 unsigned long flags; 359 void *data; 360 361 local_irq_save(flags); 362 nc = this_cpu_ptr(&netdev_alloc_cache); 363 data = __alloc_page_frag(nc, fragsz, gfp_mask); 364 local_irq_restore(flags); 365 return data; 366 } 367 368 /** 369 * netdev_alloc_frag - allocate a page fragment 370 * @fragsz: fragment size 371 * 372 * Allocates a frag from a page for receive buffer. 373 * Uses GFP_ATOMIC allocations. 374 */ 375 void *netdev_alloc_frag(unsigned int fragsz) 376 { 377 return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD); 378 } 379 EXPORT_SYMBOL(netdev_alloc_frag); 380 381 static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) 382 { 383 struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache); 384 385 return __alloc_page_frag(nc, fragsz, gfp_mask); 386 } 387 388 void *napi_alloc_frag(unsigned int fragsz) 389 { 390 return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD); 391 } 392 EXPORT_SYMBOL(napi_alloc_frag); 393 394 /** 395 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device 396 * @dev: network device to receive on 397 * @len: length to allocate 398 * @gfp_mask: get_free_pages mask, passed to alloc_skb 399 * 400 * Allocate a new &sk_buff and assign it a usage count of one. The 401 * buffer has NET_SKB_PAD headroom built in. Users should allocate 402 * the headroom they think they need without accounting for the 403 * built in space. The built in space is used for optimisations. 404 * 405 * %NULL is returned if there is no free memory. 
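 *
 * Illustrative sketch (assumed driver RX refill path, not part of this
 * file; buf_len and pkt_len are placeholders):
 *
 *	skb = __netdev_alloc_skb(dev, buf_len, GFP_ATOMIC);
 *	if (unlikely(!skb))
 *		return NULL;
 *	skb_put(skb, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);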
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	unsigned long flags;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	local_irq_save(flags);

	nc = this_cpu_ptr(&netdev_alloc_cache);
	data = __alloc_page_frag(nc, len, gfp_mask);
	pfmemalloc = nc->pfmemalloc;

	local_irq_restore(flags);

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	/* use OR instead of assignment to avoid clearing of bits in mask */
	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

/**
 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 * @napi: napi instance this buffer was allocated for
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 * Allocate a new sk_buff for use in NAPI receive. This buffer will
 * attempt to allocate the head from a special reserved region used
 * only for NAPI Rx allocation. By doing this we can save several
 * CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 * %NULL is returned if there is no free memory.
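 *
 * Illustrative sketch (assumed NAPI poll loop, not part of this file;
 * frame_len is a placeholder):
 *
 *	skb = napi_alloc_skb(napi, frame_len);
 *	if (unlikely(!skb))
 *		break;
 *	skb_put(skb, frame_len);
 *	napi_gro_receive(napi, skb);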
475 */ 476 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, 477 gfp_t gfp_mask) 478 { 479 struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache); 480 struct sk_buff *skb; 481 void *data; 482 483 len += NET_SKB_PAD + NET_IP_ALIGN; 484 485 if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) || 486 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { 487 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); 488 if (!skb) 489 goto skb_fail; 490 goto skb_success; 491 } 492 493 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 494 len = SKB_DATA_ALIGN(len); 495 496 if (sk_memalloc_socks()) 497 gfp_mask |= __GFP_MEMALLOC; 498 499 data = __alloc_page_frag(nc, len, gfp_mask); 500 if (unlikely(!data)) 501 return NULL; 502 503 skb = __build_skb(data, len); 504 if (unlikely(!skb)) { 505 skb_free_frag(data); 506 return NULL; 507 } 508 509 /* use OR instead of assignment to avoid clearing of bits in mask */ 510 if (nc->pfmemalloc) 511 skb->pfmemalloc = 1; 512 skb->head_frag = 1; 513 514 skb_success: 515 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); 516 skb->dev = napi->dev; 517 518 skb_fail: 519 return skb; 520 } 521 EXPORT_SYMBOL(__napi_alloc_skb); 522 523 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, 524 int size, unsigned int truesize) 525 { 526 skb_fill_page_desc(skb, i, page, off, size); 527 skb->len += size; 528 skb->data_len += size; 529 skb->truesize += truesize; 530 } 531 EXPORT_SYMBOL(skb_add_rx_frag); 532 533 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, 534 unsigned int truesize) 535 { 536 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 537 538 skb_frag_size_add(frag, size); 539 skb->len += size; 540 skb->data_len += size; 541 skb->truesize += truesize; 542 } 543 EXPORT_SYMBOL(skb_coalesce_rx_frag); 544 545 static void skb_drop_list(struct sk_buff **listp) 546 { 547 kfree_skb_list(*listp); 548 *listp = NULL; 549 } 550 551 static inline void skb_drop_fraglist(struct sk_buff *skb) 552 { 553 skb_drop_list(&skb_shinfo(skb)->frag_list); 554 } 555 556 static void skb_clone_fraglist(struct sk_buff *skb) 557 { 558 struct sk_buff *list; 559 560 skb_walk_frags(skb, list) 561 skb_get(list); 562 } 563 564 static void skb_free_head(struct sk_buff *skb) 565 { 566 unsigned char *head = skb->head; 567 568 if (skb->head_frag) 569 skb_free_frag(head); 570 else 571 kfree(head); 572 } 573 574 static void skb_release_data(struct sk_buff *skb) 575 { 576 struct skb_shared_info *shinfo = skb_shinfo(skb); 577 int i; 578 579 if (skb->cloned && 580 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, 581 &shinfo->dataref)) 582 return; 583 584 for (i = 0; i < shinfo->nr_frags; i++) 585 __skb_frag_unref(&shinfo->frags[i]); 586 587 /* 588 * If skb buf is from userspace, we need to notify the caller 589 * the lower device DMA has done; 590 */ 591 if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) { 592 struct ubuf_info *uarg; 593 594 uarg = shinfo->destructor_arg; 595 if (uarg->callback) 596 uarg->callback(uarg, true); 597 } 598 599 if (shinfo->frag_list) 600 kfree_skb_list(shinfo->frag_list); 601 602 skb_free_head(skb); 603 } 604 605 /* 606 * Free an skbuff by memory without cleaning the state. 
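 *
 * A head allocated from skbuff_head_cache (skb->fclone ==
 * SKB_FCLONE_UNAVAILABLE) is returned to that cache directly; a head that
 * is part of a struct sk_buff_fclones pair is only returned to
 * skbuff_fclone_cache once both the original and its clone have dropped
 * their reference on fclone_ref.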
607 */ 608 static void kfree_skbmem(struct sk_buff *skb) 609 { 610 struct sk_buff_fclones *fclones; 611 612 switch (skb->fclone) { 613 case SKB_FCLONE_UNAVAILABLE: 614 kmem_cache_free(skbuff_head_cache, skb); 615 return; 616 617 case SKB_FCLONE_ORIG: 618 fclones = container_of(skb, struct sk_buff_fclones, skb1); 619 620 /* We usually free the clone (TX completion) before original skb 621 * This test would have no chance to be true for the clone, 622 * while here, branch prediction will be good. 623 */ 624 if (atomic_read(&fclones->fclone_ref) == 1) 625 goto fastpath; 626 break; 627 628 default: /* SKB_FCLONE_CLONE */ 629 fclones = container_of(skb, struct sk_buff_fclones, skb2); 630 break; 631 } 632 if (!atomic_dec_and_test(&fclones->fclone_ref)) 633 return; 634 fastpath: 635 kmem_cache_free(skbuff_fclone_cache, fclones); 636 } 637 638 static void skb_release_head_state(struct sk_buff *skb) 639 { 640 skb_dst_drop(skb); 641 #ifdef CONFIG_XFRM 642 secpath_put(skb->sp); 643 #endif 644 if (skb->destructor) { 645 WARN_ON(in_irq()); 646 skb->destructor(skb); 647 } 648 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 649 nf_conntrack_put(skb->nfct); 650 #endif 651 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 652 nf_bridge_put(skb->nf_bridge); 653 #endif 654 } 655 656 /* Free everything but the sk_buff shell. */ 657 static void skb_release_all(struct sk_buff *skb) 658 { 659 skb_release_head_state(skb); 660 if (likely(skb->head)) 661 skb_release_data(skb); 662 } 663 664 /** 665 * __kfree_skb - private function 666 * @skb: buffer 667 * 668 * Free an sk_buff. Release anything attached to the buffer. 669 * Clean the state. This is an internal helper function. Users should 670 * always call kfree_skb 671 */ 672 673 void __kfree_skb(struct sk_buff *skb) 674 { 675 skb_release_all(skb); 676 kfree_skbmem(skb); 677 } 678 EXPORT_SYMBOL(__kfree_skb); 679 680 /** 681 * kfree_skb - free an sk_buff 682 * @skb: buffer to free 683 * 684 * Drop a reference to the buffer and free it if the usage count has 685 * hit zero. 686 */ 687 void kfree_skb(struct sk_buff *skb) 688 { 689 if (unlikely(!skb)) 690 return; 691 if (likely(atomic_read(&skb->users) == 1)) 692 smp_rmb(); 693 else if (likely(!atomic_dec_and_test(&skb->users))) 694 return; 695 trace_kfree_skb(skb, __builtin_return_address(0)); 696 __kfree_skb(skb); 697 } 698 EXPORT_SYMBOL(kfree_skb); 699 700 void kfree_skb_list(struct sk_buff *segs) 701 { 702 while (segs) { 703 struct sk_buff *next = segs->next; 704 705 kfree_skb(segs); 706 segs = next; 707 } 708 } 709 EXPORT_SYMBOL(kfree_skb_list); 710 711 /** 712 * skb_tx_error - report an sk_buff xmit error 713 * @skb: buffer that triggered an error 714 * 715 * Report xmit error if a device callback is tracking this skb. 716 * skb must be freed afterwards. 
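 *
 * Illustrative sketch (assumed zerocopy TX error path in a driver, not
 * part of this file; dev and mapping are placeholders):
 *
 *	if (dma_mapping_error(dev, mapping)) {
 *		skb_tx_error(skb);
 *		dev_kfree_skb_any(skb);
 *		return NETDEV_TX_OK;
 *	}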
717 */ 718 void skb_tx_error(struct sk_buff *skb) 719 { 720 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 721 struct ubuf_info *uarg; 722 723 uarg = skb_shinfo(skb)->destructor_arg; 724 if (uarg->callback) 725 uarg->callback(uarg, false); 726 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; 727 } 728 } 729 EXPORT_SYMBOL(skb_tx_error); 730 731 /** 732 * consume_skb - free an skbuff 733 * @skb: buffer to free 734 * 735 * Drop a ref to the buffer and free it if the usage count has hit zero 736 * Functions identically to kfree_skb, but kfree_skb assumes that the frame 737 * is being dropped after a failure and notes that 738 */ 739 void consume_skb(struct sk_buff *skb) 740 { 741 if (unlikely(!skb)) 742 return; 743 if (likely(atomic_read(&skb->users) == 1)) 744 smp_rmb(); 745 else if (likely(!atomic_dec_and_test(&skb->users))) 746 return; 747 trace_consume_skb(skb); 748 __kfree_skb(skb); 749 } 750 EXPORT_SYMBOL(consume_skb); 751 752 /* Make sure a field is enclosed inside headers_start/headers_end section */ 753 #define CHECK_SKB_FIELD(field) \ 754 BUILD_BUG_ON(offsetof(struct sk_buff, field) < \ 755 offsetof(struct sk_buff, headers_start)); \ 756 BUILD_BUG_ON(offsetof(struct sk_buff, field) > \ 757 offsetof(struct sk_buff, headers_end)); \ 758 759 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) 760 { 761 new->tstamp = old->tstamp; 762 /* We do not copy old->sk */ 763 new->dev = old->dev; 764 memcpy(new->cb, old->cb, sizeof(old->cb)); 765 skb_dst_copy(new, old); 766 #ifdef CONFIG_XFRM 767 new->sp = secpath_get(old->sp); 768 #endif 769 __nf_copy(new, old, false); 770 771 /* Note : this field could be in headers_start/headers_end section 772 * It is not yet because we do not want to have a 16 bit hole 773 */ 774 new->queue_mapping = old->queue_mapping; 775 776 memcpy(&new->headers_start, &old->headers_start, 777 offsetof(struct sk_buff, headers_end) - 778 offsetof(struct sk_buff, headers_start)); 779 CHECK_SKB_FIELD(protocol); 780 CHECK_SKB_FIELD(csum); 781 CHECK_SKB_FIELD(hash); 782 CHECK_SKB_FIELD(priority); 783 CHECK_SKB_FIELD(skb_iif); 784 CHECK_SKB_FIELD(vlan_proto); 785 CHECK_SKB_FIELD(vlan_tci); 786 CHECK_SKB_FIELD(transport_header); 787 CHECK_SKB_FIELD(network_header); 788 CHECK_SKB_FIELD(mac_header); 789 CHECK_SKB_FIELD(inner_protocol); 790 CHECK_SKB_FIELD(inner_transport_header); 791 CHECK_SKB_FIELD(inner_network_header); 792 CHECK_SKB_FIELD(inner_mac_header); 793 CHECK_SKB_FIELD(mark); 794 #ifdef CONFIG_NETWORK_SECMARK 795 CHECK_SKB_FIELD(secmark); 796 #endif 797 #ifdef CONFIG_NET_RX_BUSY_POLL 798 CHECK_SKB_FIELD(napi_id); 799 #endif 800 #ifdef CONFIG_XPS 801 CHECK_SKB_FIELD(sender_cpu); 802 #endif 803 #ifdef CONFIG_NET_SCHED 804 CHECK_SKB_FIELD(tc_index); 805 #ifdef CONFIG_NET_CLS_ACT 806 CHECK_SKB_FIELD(tc_verd); 807 #endif 808 #endif 809 810 } 811 812 /* 813 * You should not add any new code to this function. Add it to 814 * __copy_skb_header above instead. 815 */ 816 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) 817 { 818 #define C(x) n->x = skb->x 819 820 n->next = n->prev = NULL; 821 n->sk = NULL; 822 __copy_skb_header(n, skb); 823 824 C(len); 825 C(data_len); 826 C(mac_len); 827 n->hdr_len = skb->nohdr ? 
skb_headroom(skb) : skb->hdr_len; 828 n->cloned = 1; 829 n->nohdr = 0; 830 n->destructor = NULL; 831 C(tail); 832 C(end); 833 C(head); 834 C(head_frag); 835 C(data); 836 C(truesize); 837 atomic_set(&n->users, 1); 838 839 atomic_inc(&(skb_shinfo(skb)->dataref)); 840 skb->cloned = 1; 841 842 return n; 843 #undef C 844 } 845 846 /** 847 * skb_morph - morph one skb into another 848 * @dst: the skb to receive the contents 849 * @src: the skb to supply the contents 850 * 851 * This is identical to skb_clone except that the target skb is 852 * supplied by the user. 853 * 854 * The target skb is returned upon exit. 855 */ 856 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) 857 { 858 skb_release_all(dst); 859 return __skb_clone(dst, src); 860 } 861 EXPORT_SYMBOL_GPL(skb_morph); 862 863 /** 864 * skb_copy_ubufs - copy userspace skb frags buffers to kernel 865 * @skb: the skb to modify 866 * @gfp_mask: allocation priority 867 * 868 * This must be called on SKBTX_DEV_ZEROCOPY skb. 869 * It will copy all frags into kernel and drop the reference 870 * to userspace pages. 871 * 872 * If this function is called from an interrupt gfp_mask() must be 873 * %GFP_ATOMIC. 874 * 875 * Returns 0 on success or a negative error code on failure 876 * to allocate kernel memory to copy to. 877 */ 878 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 879 { 880 int i; 881 int num_frags = skb_shinfo(skb)->nr_frags; 882 struct page *page, *head = NULL; 883 struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg; 884 885 for (i = 0; i < num_frags; i++) { 886 u8 *vaddr; 887 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 888 889 page = alloc_page(gfp_mask); 890 if (!page) { 891 while (head) { 892 struct page *next = (struct page *)page_private(head); 893 put_page(head); 894 head = next; 895 } 896 return -ENOMEM; 897 } 898 vaddr = kmap_atomic(skb_frag_page(f)); 899 memcpy(page_address(page), 900 vaddr + f->page_offset, skb_frag_size(f)); 901 kunmap_atomic(vaddr); 902 set_page_private(page, (unsigned long)head); 903 head = page; 904 } 905 906 /* skb frags release userspace buffers */ 907 for (i = 0; i < num_frags; i++) 908 skb_frag_unref(skb, i); 909 910 uarg->callback(uarg, false); 911 912 /* skb frags point to kernel buffers */ 913 for (i = num_frags - 1; i >= 0; i--) { 914 __skb_fill_page_desc(skb, i, head, 0, 915 skb_shinfo(skb)->frags[i].size); 916 head = (struct page *)page_private(head); 917 } 918 919 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; 920 return 0; 921 } 922 EXPORT_SYMBOL_GPL(skb_copy_ubufs); 923 924 /** 925 * skb_clone - duplicate an sk_buff 926 * @skb: buffer to clone 927 * @gfp_mask: allocation priority 928 * 929 * Duplicate an &sk_buff. The new one is not owned by a socket. Both 930 * copies share the same packet data but not structure. The new 931 * buffer has a reference count of 1. If the allocation fails the 932 * function returns %NULL otherwise the new buffer is returned. 933 * 934 * If this function is called from an interrupt gfp_mask() must be 935 * %GFP_ATOMIC. 
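 *
 * Illustrative sketch (assumed caller, not part of this file): keep using
 * @skb while queueing a clone to another consumer:
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (nskb)
 *		netif_rx(nskb);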
936 */ 937 938 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) 939 { 940 struct sk_buff_fclones *fclones = container_of(skb, 941 struct sk_buff_fclones, 942 skb1); 943 struct sk_buff *n; 944 945 if (skb_orphan_frags(skb, gfp_mask)) 946 return NULL; 947 948 if (skb->fclone == SKB_FCLONE_ORIG && 949 atomic_read(&fclones->fclone_ref) == 1) { 950 n = &fclones->skb2; 951 atomic_set(&fclones->fclone_ref, 2); 952 } else { 953 if (skb_pfmemalloc(skb)) 954 gfp_mask |= __GFP_MEMALLOC; 955 956 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); 957 if (!n) 958 return NULL; 959 960 kmemcheck_annotate_bitfield(n, flags1); 961 n->fclone = SKB_FCLONE_UNAVAILABLE; 962 } 963 964 return __skb_clone(n, skb); 965 } 966 EXPORT_SYMBOL(skb_clone); 967 968 static void skb_headers_offset_update(struct sk_buff *skb, int off) 969 { 970 /* Only adjust this if it actually is csum_start rather than csum */ 971 if (skb->ip_summed == CHECKSUM_PARTIAL) 972 skb->csum_start += off; 973 /* {transport,network,mac}_header and tail are relative to skb->head */ 974 skb->transport_header += off; 975 skb->network_header += off; 976 if (skb_mac_header_was_set(skb)) 977 skb->mac_header += off; 978 skb->inner_transport_header += off; 979 skb->inner_network_header += off; 980 skb->inner_mac_header += off; 981 } 982 983 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) 984 { 985 __copy_skb_header(new, old); 986 987 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 988 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 989 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 990 } 991 992 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) 993 { 994 if (skb_pfmemalloc(skb)) 995 return SKB_ALLOC_RX; 996 return 0; 997 } 998 999 /** 1000 * skb_copy - create private copy of an sk_buff 1001 * @skb: buffer to copy 1002 * @gfp_mask: allocation priority 1003 * 1004 * Make a copy of both an &sk_buff and its data. This is used when the 1005 * caller wishes to modify the data and needs a private copy of the 1006 * data to alter. Returns %NULL on failure or the pointer to the buffer 1007 * on success. The returned buffer has a reference count of 1. 1008 * 1009 * As by-product this function converts non-linear &sk_buff to linear 1010 * one, so that &sk_buff becomes completely private and caller is allowed 1011 * to modify all the data of returned buffer. This means that this 1012 * function is not recommended for use in circumstances when only 1013 * header is going to be modified. Use pskb_copy() instead. 1014 */ 1015 1016 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 1017 { 1018 int headerlen = skb_headroom(skb); 1019 unsigned int size = skb_end_offset(skb) + skb->data_len; 1020 struct sk_buff *n = __alloc_skb(size, gfp_mask, 1021 skb_alloc_rx_flag(skb), NUMA_NO_NODE); 1022 1023 if (!n) 1024 return NULL; 1025 1026 /* Set the data pointer */ 1027 skb_reserve(n, headerlen); 1028 /* Set the tail pointer and length */ 1029 skb_put(n, skb->len); 1030 1031 if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)) 1032 BUG(); 1033 1034 copy_skb_header(n, skb); 1035 return n; 1036 } 1037 EXPORT_SYMBOL(skb_copy); 1038 1039 /** 1040 * __pskb_copy_fclone - create copy of an sk_buff with private head. 
1041 * @skb: buffer to copy 1042 * @headroom: headroom of new skb 1043 * @gfp_mask: allocation priority 1044 * @fclone: if true allocate the copy of the skb from the fclone 1045 * cache instead of the head cache; it is recommended to set this 1046 * to true for the cases where the copy will likely be cloned 1047 * 1048 * Make a copy of both an &sk_buff and part of its data, located 1049 * in header. Fragmented data remain shared. This is used when 1050 * the caller wishes to modify only header of &sk_buff and needs 1051 * private copy of the header to alter. Returns %NULL on failure 1052 * or the pointer to the buffer on success. 1053 * The returned buffer has a reference count of 1. 1054 */ 1055 1056 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, 1057 gfp_t gfp_mask, bool fclone) 1058 { 1059 unsigned int size = skb_headlen(skb) + headroom; 1060 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); 1061 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); 1062 1063 if (!n) 1064 goto out; 1065 1066 /* Set the data pointer */ 1067 skb_reserve(n, headroom); 1068 /* Set the tail pointer and length */ 1069 skb_put(n, skb_headlen(skb)); 1070 /* Copy the bytes */ 1071 skb_copy_from_linear_data(skb, n->data, n->len); 1072 1073 n->truesize += skb->data_len; 1074 n->data_len = skb->data_len; 1075 n->len = skb->len; 1076 1077 if (skb_shinfo(skb)->nr_frags) { 1078 int i; 1079 1080 if (skb_orphan_frags(skb, gfp_mask)) { 1081 kfree_skb(n); 1082 n = NULL; 1083 goto out; 1084 } 1085 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1086 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 1087 skb_frag_ref(skb, i); 1088 } 1089 skb_shinfo(n)->nr_frags = i; 1090 } 1091 1092 if (skb_has_frag_list(skb)) { 1093 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 1094 skb_clone_fraglist(n); 1095 } 1096 1097 copy_skb_header(n, skb); 1098 out: 1099 return n; 1100 } 1101 EXPORT_SYMBOL(__pskb_copy_fclone); 1102 1103 /** 1104 * pskb_expand_head - reallocate header of &sk_buff 1105 * @skb: buffer to reallocate 1106 * @nhead: room to add at head 1107 * @ntail: room to add at tail 1108 * @gfp_mask: allocation priority 1109 * 1110 * Expands (or creates identical copy, if @nhead and @ntail are zero) 1111 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have 1112 * reference count of 1. Returns zero in the case of success or error, 1113 * if expansion failed. In the last case, &sk_buff is not changed. 1114 * 1115 * All the pointers pointing into skb header may change and must be 1116 * reloaded after call to this function. 1117 */ 1118 1119 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 1120 gfp_t gfp_mask) 1121 { 1122 int i; 1123 u8 *data; 1124 int size = nhead + skb_end_offset(skb) + ntail; 1125 long off; 1126 1127 BUG_ON(nhead < 0); 1128 1129 if (skb_shared(skb)) 1130 BUG(); 1131 1132 size = SKB_DATA_ALIGN(size); 1133 1134 if (skb_pfmemalloc(skb)) 1135 gfp_mask |= __GFP_MEMALLOC; 1136 data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 1137 gfp_mask, NUMA_NO_NODE, NULL); 1138 if (!data) 1139 goto nodata; 1140 size = SKB_WITH_OVERHEAD(ksize(data)); 1141 1142 /* Copy only real data... and, alas, header. This should be 1143 * optimized for the cases when header is void. 
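	 * (Concretely, the memcpy below moves everything between skb->head
	 * and skb->tail, i.e. the old headroom plus the linear data; the
	 * skb_shared_info that sits past the data area is copied separately
	 * just after it.)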
1144 */ 1145 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); 1146 1147 memcpy((struct skb_shared_info *)(data + size), 1148 skb_shinfo(skb), 1149 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 1150 1151 /* 1152 * if shinfo is shared we must drop the old head gracefully, but if it 1153 * is not we can just drop the old head and let the existing refcount 1154 * be since all we did is relocate the values 1155 */ 1156 if (skb_cloned(skb)) { 1157 /* copy this zero copy skb frags */ 1158 if (skb_orphan_frags(skb, gfp_mask)) 1159 goto nofrags; 1160 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1161 skb_frag_ref(skb, i); 1162 1163 if (skb_has_frag_list(skb)) 1164 skb_clone_fraglist(skb); 1165 1166 skb_release_data(skb); 1167 } else { 1168 skb_free_head(skb); 1169 } 1170 off = (data + nhead) - skb->head; 1171 1172 skb->head = data; 1173 skb->head_frag = 0; 1174 skb->data += off; 1175 #ifdef NET_SKBUFF_DATA_USES_OFFSET 1176 skb->end = size; 1177 off = nhead; 1178 #else 1179 skb->end = skb->head + size; 1180 #endif 1181 skb->tail += off; 1182 skb_headers_offset_update(skb, nhead); 1183 skb->cloned = 0; 1184 skb->hdr_len = 0; 1185 skb->nohdr = 0; 1186 atomic_set(&skb_shinfo(skb)->dataref, 1); 1187 return 0; 1188 1189 nofrags: 1190 kfree(data); 1191 nodata: 1192 return -ENOMEM; 1193 } 1194 EXPORT_SYMBOL(pskb_expand_head); 1195 1196 /* Make private copy of skb with writable head and some headroom */ 1197 1198 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) 1199 { 1200 struct sk_buff *skb2; 1201 int delta = headroom - skb_headroom(skb); 1202 1203 if (delta <= 0) 1204 skb2 = pskb_copy(skb, GFP_ATOMIC); 1205 else { 1206 skb2 = skb_clone(skb, GFP_ATOMIC); 1207 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, 1208 GFP_ATOMIC)) { 1209 kfree_skb(skb2); 1210 skb2 = NULL; 1211 } 1212 } 1213 return skb2; 1214 } 1215 EXPORT_SYMBOL(skb_realloc_headroom); 1216 1217 /** 1218 * skb_copy_expand - copy and expand sk_buff 1219 * @skb: buffer to copy 1220 * @newheadroom: new free bytes at head 1221 * @newtailroom: new free bytes at tail 1222 * @gfp_mask: allocation priority 1223 * 1224 * Make a copy of both an &sk_buff and its data and while doing so 1225 * allocate additional space. 1226 * 1227 * This is used when the caller wishes to modify the data and needs a 1228 * private copy of the data to alter as well as more space for new fields. 1229 * Returns %NULL on failure or the pointer to the buffer 1230 * on success. The returned buffer has a reference count of 1. 1231 * 1232 * You must pass %GFP_ATOMIC as the allocation priority if this function 1233 * is called from an interrupt. 1234 */ 1235 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 1236 int newheadroom, int newtailroom, 1237 gfp_t gfp_mask) 1238 { 1239 /* 1240 * Allocate the copy buffer 1241 */ 1242 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, 1243 gfp_mask, skb_alloc_rx_flag(skb), 1244 NUMA_NO_NODE); 1245 int oldheadroom = skb_headroom(skb); 1246 int head_copy_len, head_copy_off; 1247 1248 if (!n) 1249 return NULL; 1250 1251 skb_reserve(n, newheadroom); 1252 1253 /* Set the tail pointer and length */ 1254 skb_put(n, skb->len); 1255 1256 head_copy_len = oldheadroom; 1257 head_copy_off = 0; 1258 if (newheadroom <= head_copy_len) 1259 head_copy_len = newheadroom; 1260 else 1261 head_copy_off = newheadroom - head_copy_len; 1262 1263 /* Copy the linear header and data. 
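	 * (The negative offset passed to skb_copy_bits() makes the copy start
	 * head_copy_len bytes inside the old headroom, so that much of the
	 * original headroom is preserved in addition to the skb->len bytes
	 * of data.)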
*/ 1264 if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 1265 skb->len + head_copy_len)) 1266 BUG(); 1267 1268 copy_skb_header(n, skb); 1269 1270 skb_headers_offset_update(n, newheadroom - oldheadroom); 1271 1272 return n; 1273 } 1274 EXPORT_SYMBOL(skb_copy_expand); 1275 1276 /** 1277 * skb_pad - zero pad the tail of an skb 1278 * @skb: buffer to pad 1279 * @pad: space to pad 1280 * 1281 * Ensure that a buffer is followed by a padding area that is zero 1282 * filled. Used by network drivers which may DMA or transfer data 1283 * beyond the buffer end onto the wire. 1284 * 1285 * May return error in out of memory cases. The skb is freed on error. 1286 */ 1287 1288 int skb_pad(struct sk_buff *skb, int pad) 1289 { 1290 int err; 1291 int ntail; 1292 1293 /* If the skbuff is non linear tailroom is always zero.. */ 1294 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 1295 memset(skb->data+skb->len, 0, pad); 1296 return 0; 1297 } 1298 1299 ntail = skb->data_len + pad - (skb->end - skb->tail); 1300 if (likely(skb_cloned(skb) || ntail > 0)) { 1301 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 1302 if (unlikely(err)) 1303 goto free_skb; 1304 } 1305 1306 /* FIXME: The use of this function with non-linear skb's really needs 1307 * to be audited. 1308 */ 1309 err = skb_linearize(skb); 1310 if (unlikely(err)) 1311 goto free_skb; 1312 1313 memset(skb->data + skb->len, 0, pad); 1314 return 0; 1315 1316 free_skb: 1317 kfree_skb(skb); 1318 return err; 1319 } 1320 EXPORT_SYMBOL(skb_pad); 1321 1322 /** 1323 * pskb_put - add data to the tail of a potentially fragmented buffer 1324 * @skb: start of the buffer to use 1325 * @tail: tail fragment of the buffer to use 1326 * @len: amount of data to add 1327 * 1328 * This function extends the used data area of the potentially 1329 * fragmented buffer. @tail must be the last fragment of @skb -- or 1330 * @skb itself. If this would exceed the total buffer size the kernel 1331 * will panic. A pointer to the first byte of the extra data is 1332 * returned. 1333 */ 1334 1335 unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) 1336 { 1337 if (tail != skb) { 1338 skb->data_len += len; 1339 skb->len += len; 1340 } 1341 return skb_put(tail, len); 1342 } 1343 EXPORT_SYMBOL_GPL(pskb_put); 1344 1345 /** 1346 * skb_put - add data to a buffer 1347 * @skb: buffer to use 1348 * @len: amount of data to add 1349 * 1350 * This function extends the used data area of the buffer. If this would 1351 * exceed the total buffer size the kernel will panic. A pointer to the 1352 * first byte of the extra data is returned. 1353 */ 1354 unsigned char *skb_put(struct sk_buff *skb, unsigned int len) 1355 { 1356 unsigned char *tmp = skb_tail_pointer(skb); 1357 SKB_LINEAR_ASSERT(skb); 1358 skb->tail += len; 1359 skb->len += len; 1360 if (unlikely(skb->tail > skb->end)) 1361 skb_over_panic(skb, len, __builtin_return_address(0)); 1362 return tmp; 1363 } 1364 EXPORT_SYMBOL(skb_put); 1365 1366 /** 1367 * skb_push - add data to the start of a buffer 1368 * @skb: buffer to use 1369 * @len: amount of data to add 1370 * 1371 * This function extends the used data area of the buffer at the buffer 1372 * start. If this would exceed the total buffer headroom the kernel will 1373 * panic. A pointer to the first byte of the extra data is returned. 
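 *
 * Illustrative sketch (assumed caller, not part of this file; dest and dev
 * are placeholders): prepend an Ethernet header to a buffer allocated with
 * sufficient headroom:
 *
 *	struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
 *
 *	ether_addr_copy(eth->h_dest, dest);
 *	ether_addr_copy(eth->h_source, dev->dev_addr);
 *	eth->h_proto = htons(ETH_P_IP);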
1374 */ 1375 unsigned char *skb_push(struct sk_buff *skb, unsigned int len) 1376 { 1377 skb->data -= len; 1378 skb->len += len; 1379 if (unlikely(skb->data<skb->head)) 1380 skb_under_panic(skb, len, __builtin_return_address(0)); 1381 return skb->data; 1382 } 1383 EXPORT_SYMBOL(skb_push); 1384 1385 /** 1386 * skb_pull - remove data from the start of a buffer 1387 * @skb: buffer to use 1388 * @len: amount of data to remove 1389 * 1390 * This function removes data from the start of a buffer, returning 1391 * the memory to the headroom. A pointer to the next data in the buffer 1392 * is returned. Once the data has been pulled future pushes will overwrite 1393 * the old data. 1394 */ 1395 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) 1396 { 1397 return skb_pull_inline(skb, len); 1398 } 1399 EXPORT_SYMBOL(skb_pull); 1400 1401 /** 1402 * skb_trim - remove end from a buffer 1403 * @skb: buffer to alter 1404 * @len: new length 1405 * 1406 * Cut the length of a buffer down by removing data from the tail. If 1407 * the buffer is already under the length specified it is not modified. 1408 * The skb must be linear. 1409 */ 1410 void skb_trim(struct sk_buff *skb, unsigned int len) 1411 { 1412 if (skb->len > len) 1413 __skb_trim(skb, len); 1414 } 1415 EXPORT_SYMBOL(skb_trim); 1416 1417 /* Trims skb to length len. It can change skb pointers. 1418 */ 1419 1420 int ___pskb_trim(struct sk_buff *skb, unsigned int len) 1421 { 1422 struct sk_buff **fragp; 1423 struct sk_buff *frag; 1424 int offset = skb_headlen(skb); 1425 int nfrags = skb_shinfo(skb)->nr_frags; 1426 int i; 1427 int err; 1428 1429 if (skb_cloned(skb) && 1430 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 1431 return err; 1432 1433 i = 0; 1434 if (offset >= len) 1435 goto drop_pages; 1436 1437 for (; i < nfrags; i++) { 1438 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 1439 1440 if (end < len) { 1441 offset = end; 1442 continue; 1443 } 1444 1445 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 1446 1447 drop_pages: 1448 skb_shinfo(skb)->nr_frags = i; 1449 1450 for (; i < nfrags; i++) 1451 skb_frag_unref(skb, i); 1452 1453 if (skb_has_frag_list(skb)) 1454 skb_drop_fraglist(skb); 1455 goto done; 1456 } 1457 1458 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 1459 fragp = &frag->next) { 1460 int end = offset + frag->len; 1461 1462 if (skb_shared(frag)) { 1463 struct sk_buff *nfrag; 1464 1465 nfrag = skb_clone(frag, GFP_ATOMIC); 1466 if (unlikely(!nfrag)) 1467 return -ENOMEM; 1468 1469 nfrag->next = frag->next; 1470 consume_skb(frag); 1471 frag = nfrag; 1472 *fragp = frag; 1473 } 1474 1475 if (end < len) { 1476 offset = end; 1477 continue; 1478 } 1479 1480 if (end > len && 1481 unlikely((err = pskb_trim(frag, len - offset)))) 1482 return err; 1483 1484 if (frag->next) 1485 skb_drop_list(&frag->next); 1486 break; 1487 } 1488 1489 done: 1490 if (len > skb_headlen(skb)) { 1491 skb->data_len -= skb->len - len; 1492 skb->len = len; 1493 } else { 1494 skb->len = len; 1495 skb->data_len = 0; 1496 skb_set_tail_pointer(skb, len); 1497 } 1498 1499 return 0; 1500 } 1501 EXPORT_SYMBOL(___pskb_trim); 1502 1503 /** 1504 * __pskb_pull_tail - advance tail of skb header 1505 * @skb: buffer to reallocate 1506 * @delta: number of bytes to advance tail 1507 * 1508 * The function makes a sense only on a fragmented &sk_buff, 1509 * it expands header moving its tail forward and copying necessary 1510 * data from fragmented part. 1511 * 1512 * &sk_buff MUST have reference count of 1. 
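 *
 * Most callers do not invoke this directly; pskb_may_pull() calls it when
 * the bytes it needs are not yet in the linear header, e.g.:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;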
 *
 * Returns %NULL (and &sk_buff does not change) if pull failed
 * or value of new tail of skb in the case of success.
 *
 * All the pointers pointing into skb header may change and must be
 * reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size >= eat)
			goto pull_pages;
		eat -= size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data.
*/ 1616 1617 pull_pages: 1618 eat = delta; 1619 k = 0; 1620 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1621 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 1622 1623 if (size <= eat) { 1624 skb_frag_unref(skb, i); 1625 eat -= size; 1626 } else { 1627 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 1628 if (eat) { 1629 skb_shinfo(skb)->frags[k].page_offset += eat; 1630 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); 1631 eat = 0; 1632 } 1633 k++; 1634 } 1635 } 1636 skb_shinfo(skb)->nr_frags = k; 1637 1638 skb->tail += delta; 1639 skb->data_len -= delta; 1640 1641 return skb_tail_pointer(skb); 1642 } 1643 EXPORT_SYMBOL(__pskb_pull_tail); 1644 1645 /** 1646 * skb_copy_bits - copy bits from skb to kernel buffer 1647 * @skb: source skb 1648 * @offset: offset in source 1649 * @to: destination buffer 1650 * @len: number of bytes to copy 1651 * 1652 * Copy the specified number of bytes from the source skb to the 1653 * destination buffer. 1654 * 1655 * CAUTION ! : 1656 * If its prototype is ever changed, 1657 * check arch/{*}/net/{*}.S files, 1658 * since it is called from BPF assembly code. 1659 */ 1660 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 1661 { 1662 int start = skb_headlen(skb); 1663 struct sk_buff *frag_iter; 1664 int i, copy; 1665 1666 if (offset > (int)skb->len - len) 1667 goto fault; 1668 1669 /* Copy header. */ 1670 if ((copy = start - offset) > 0) { 1671 if (copy > len) 1672 copy = len; 1673 skb_copy_from_linear_data_offset(skb, offset, to, copy); 1674 if ((len -= copy) == 0) 1675 return 0; 1676 offset += copy; 1677 to += copy; 1678 } 1679 1680 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1681 int end; 1682 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 1683 1684 WARN_ON(start > offset + len); 1685 1686 end = start + skb_frag_size(f); 1687 if ((copy = end - offset) > 0) { 1688 u8 *vaddr; 1689 1690 if (copy > len) 1691 copy = len; 1692 1693 vaddr = kmap_atomic(skb_frag_page(f)); 1694 memcpy(to, 1695 vaddr + f->page_offset + offset - start, 1696 copy); 1697 kunmap_atomic(vaddr); 1698 1699 if ((len -= copy) == 0) 1700 return 0; 1701 offset += copy; 1702 to += copy; 1703 } 1704 start = end; 1705 } 1706 1707 skb_walk_frags(skb, frag_iter) { 1708 int end; 1709 1710 WARN_ON(start > offset + len); 1711 1712 end = start + frag_iter->len; 1713 if ((copy = end - offset) > 0) { 1714 if (copy > len) 1715 copy = len; 1716 if (skb_copy_bits(frag_iter, offset - start, to, copy)) 1717 goto fault; 1718 if ((len -= copy) == 0) 1719 return 0; 1720 offset += copy; 1721 to += copy; 1722 } 1723 start = end; 1724 } 1725 1726 if (!len) 1727 return 0; 1728 1729 fault: 1730 return -EFAULT; 1731 } 1732 EXPORT_SYMBOL(skb_copy_bits); 1733 1734 /* 1735 * Callback from splice_to_pipe(), if we need to release some pages 1736 * at the end of the spd in case we error'ed out in filling the pipe. 
1737 */ 1738 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 1739 { 1740 put_page(spd->pages[i]); 1741 } 1742 1743 static struct page *linear_to_page(struct page *page, unsigned int *len, 1744 unsigned int *offset, 1745 struct sock *sk) 1746 { 1747 struct page_frag *pfrag = sk_page_frag(sk); 1748 1749 if (!sk_page_frag_refill(sk, pfrag)) 1750 return NULL; 1751 1752 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 1753 1754 memcpy(page_address(pfrag->page) + pfrag->offset, 1755 page_address(page) + *offset, *len); 1756 *offset = pfrag->offset; 1757 pfrag->offset += *len; 1758 1759 return pfrag->page; 1760 } 1761 1762 static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 1763 struct page *page, 1764 unsigned int offset) 1765 { 1766 return spd->nr_pages && 1767 spd->pages[spd->nr_pages - 1] == page && 1768 (spd->partial[spd->nr_pages - 1].offset + 1769 spd->partial[spd->nr_pages - 1].len == offset); 1770 } 1771 1772 /* 1773 * Fill page/offset/length into spd, if it can hold more pages. 1774 */ 1775 static bool spd_fill_page(struct splice_pipe_desc *spd, 1776 struct pipe_inode_info *pipe, struct page *page, 1777 unsigned int *len, unsigned int offset, 1778 bool linear, 1779 struct sock *sk) 1780 { 1781 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 1782 return true; 1783 1784 if (linear) { 1785 page = linear_to_page(page, len, &offset, sk); 1786 if (!page) 1787 return true; 1788 } 1789 if (spd_can_coalesce(spd, page, offset)) { 1790 spd->partial[spd->nr_pages - 1].len += *len; 1791 return false; 1792 } 1793 get_page(page); 1794 spd->pages[spd->nr_pages] = page; 1795 spd->partial[spd->nr_pages].len = *len; 1796 spd->partial[spd->nr_pages].offset = offset; 1797 spd->nr_pages++; 1798 1799 return false; 1800 } 1801 1802 static bool __splice_segment(struct page *page, unsigned int poff, 1803 unsigned int plen, unsigned int *off, 1804 unsigned int *len, 1805 struct splice_pipe_desc *spd, bool linear, 1806 struct sock *sk, 1807 struct pipe_inode_info *pipe) 1808 { 1809 if (!*len) 1810 return true; 1811 1812 /* skip this segment if already processed */ 1813 if (*off >= plen) { 1814 *off -= plen; 1815 return false; 1816 } 1817 1818 /* ignore any bits we already processed */ 1819 poff += *off; 1820 plen -= *off; 1821 *off = 0; 1822 1823 do { 1824 unsigned int flen = min(*len, plen); 1825 1826 if (spd_fill_page(spd, pipe, page, &flen, poff, 1827 linear, sk)) 1828 return true; 1829 poff += flen; 1830 plen -= flen; 1831 *len -= flen; 1832 } while (*len && plen); 1833 1834 return false; 1835 } 1836 1837 /* 1838 * Map linear and fragment data from the skb to spd. It reports true if the 1839 * pipe is full or if we already spliced the requested length. 1840 */ 1841 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 1842 unsigned int *offset, unsigned int *len, 1843 struct splice_pipe_desc *spd, struct sock *sk) 1844 { 1845 int seg; 1846 1847 /* map the linear part : 1848 * If skb->head_frag is set, this 'linear' part is backed by a 1849 * fragment, and if the head is not shared with any clones then 1850 * we can avoid a copy since we own the head portion of this page. 
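	 * (skb_head_is_locked() below reports the opposite case, in which
	 * __splice_segment() copies the head into a private page via
	 * linear_to_page() instead of handing out a reference.)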
1851 */ 1852 if (__splice_segment(virt_to_page(skb->data), 1853 (unsigned long) skb->data & (PAGE_SIZE - 1), 1854 skb_headlen(skb), 1855 offset, len, spd, 1856 skb_head_is_locked(skb), 1857 sk, pipe)) 1858 return true; 1859 1860 /* 1861 * then map the fragments 1862 */ 1863 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 1864 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 1865 1866 if (__splice_segment(skb_frag_page(f), 1867 f->page_offset, skb_frag_size(f), 1868 offset, len, spd, false, sk, pipe)) 1869 return true; 1870 } 1871 1872 return false; 1873 } 1874 1875 ssize_t skb_socket_splice(struct sock *sk, 1876 struct pipe_inode_info *pipe, 1877 struct splice_pipe_desc *spd) 1878 { 1879 int ret; 1880 1881 /* Drop the socket lock, otherwise we have reverse 1882 * locking dependencies between sk_lock and i_mutex 1883 * here as compared to sendfile(). We enter here 1884 * with the socket lock held, and splice_to_pipe() will 1885 * grab the pipe inode lock. For sendfile() emulation, 1886 * we call into ->sendpage() with the i_mutex lock held 1887 * and networking will grab the socket lock. 1888 */ 1889 release_sock(sk); 1890 ret = splice_to_pipe(pipe, spd); 1891 lock_sock(sk); 1892 1893 return ret; 1894 } 1895 1896 /* 1897 * Map data from the skb to a pipe. Should handle both the linear part, 1898 * the fragments, and the frag list. It does NOT handle frag lists within 1899 * the frag list, if such a thing exists. We'd probably need to recurse to 1900 * handle that cleanly. 1901 */ 1902 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 1903 struct pipe_inode_info *pipe, unsigned int tlen, 1904 unsigned int flags, 1905 ssize_t (*splice_cb)(struct sock *, 1906 struct pipe_inode_info *, 1907 struct splice_pipe_desc *)) 1908 { 1909 struct partial_page partial[MAX_SKB_FRAGS]; 1910 struct page *pages[MAX_SKB_FRAGS]; 1911 struct splice_pipe_desc spd = { 1912 .pages = pages, 1913 .partial = partial, 1914 .nr_pages_max = MAX_SKB_FRAGS, 1915 .flags = flags, 1916 .ops = &nosteal_pipe_buf_ops, 1917 .spd_release = sock_spd_release, 1918 }; 1919 struct sk_buff *frag_iter; 1920 int ret = 0; 1921 1922 /* 1923 * __skb_splice_bits() only fails if the output has no room left, 1924 * so no point in going over the frag_list for the error case. 1925 */ 1926 if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) 1927 goto done; 1928 else if (!tlen) 1929 goto done; 1930 1931 /* 1932 * now see if we have a frag_list to map 1933 */ 1934 skb_walk_frags(skb, frag_iter) { 1935 if (!tlen) 1936 break; 1937 if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk)) 1938 break; 1939 } 1940 1941 done: 1942 if (spd.nr_pages) 1943 ret = splice_cb(sk, pipe, &spd); 1944 1945 return ret; 1946 } 1947 EXPORT_SYMBOL_GPL(skb_splice_bits); 1948 1949 /** 1950 * skb_store_bits - store bits from kernel buffer to skb 1951 * @skb: destination buffer 1952 * @offset: offset in destination 1953 * @from: source buffer 1954 * @len: number of bytes to copy 1955 * 1956 * Copy the specified number of bytes from the source buffer to the 1957 * destination skb. This function handles all the messy bits of 1958 * traversing fragment lists and such. 
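 *
 * Illustrative sketch (assumed caller, not part of this file): overwrite
 * four bytes at @offset, wherever they happen to live:
 *
 *	__be32 val = htonl(0xdeadbeef);
 *
 *	if (skb_store_bits(skb, offset, &val, sizeof(val)))
 *		return -EFAULT;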
1959 */ 1960 1961 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 1962 { 1963 int start = skb_headlen(skb); 1964 struct sk_buff *frag_iter; 1965 int i, copy; 1966 1967 if (offset > (int)skb->len - len) 1968 goto fault; 1969 1970 if ((copy = start - offset) > 0) { 1971 if (copy > len) 1972 copy = len; 1973 skb_copy_to_linear_data_offset(skb, offset, from, copy); 1974 if ((len -= copy) == 0) 1975 return 0; 1976 offset += copy; 1977 from += copy; 1978 } 1979 1980 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1981 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1982 int end; 1983 1984 WARN_ON(start > offset + len); 1985 1986 end = start + skb_frag_size(frag); 1987 if ((copy = end - offset) > 0) { 1988 u8 *vaddr; 1989 1990 if (copy > len) 1991 copy = len; 1992 1993 vaddr = kmap_atomic(skb_frag_page(frag)); 1994 memcpy(vaddr + frag->page_offset + offset - start, 1995 from, copy); 1996 kunmap_atomic(vaddr); 1997 1998 if ((len -= copy) == 0) 1999 return 0; 2000 offset += copy; 2001 from += copy; 2002 } 2003 start = end; 2004 } 2005 2006 skb_walk_frags(skb, frag_iter) { 2007 int end; 2008 2009 WARN_ON(start > offset + len); 2010 2011 end = start + frag_iter->len; 2012 if ((copy = end - offset) > 0) { 2013 if (copy > len) 2014 copy = len; 2015 if (skb_store_bits(frag_iter, offset - start, 2016 from, copy)) 2017 goto fault; 2018 if ((len -= copy) == 0) 2019 return 0; 2020 offset += copy; 2021 from += copy; 2022 } 2023 start = end; 2024 } 2025 if (!len) 2026 return 0; 2027 2028 fault: 2029 return -EFAULT; 2030 } 2031 EXPORT_SYMBOL(skb_store_bits); 2032 2033 /* Checksum skb data. */ 2034 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 2035 __wsum csum, const struct skb_checksum_ops *ops) 2036 { 2037 int start = skb_headlen(skb); 2038 int i, copy = start - offset; 2039 struct sk_buff *frag_iter; 2040 int pos = 0; 2041 2042 /* Checksum header. 
*/ 2043 if (copy > 0) { 2044 if (copy > len) 2045 copy = len; 2046 csum = ops->update(skb->data + offset, copy, csum); 2047 if ((len -= copy) == 0) 2048 return csum; 2049 offset += copy; 2050 pos = copy; 2051 } 2052 2053 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2054 int end; 2055 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2056 2057 WARN_ON(start > offset + len); 2058 2059 end = start + skb_frag_size(frag); 2060 if ((copy = end - offset) > 0) { 2061 __wsum csum2; 2062 u8 *vaddr; 2063 2064 if (copy > len) 2065 copy = len; 2066 vaddr = kmap_atomic(skb_frag_page(frag)); 2067 csum2 = ops->update(vaddr + frag->page_offset + 2068 offset - start, copy, 0); 2069 kunmap_atomic(vaddr); 2070 csum = ops->combine(csum, csum2, pos, copy); 2071 if (!(len -= copy)) 2072 return csum; 2073 offset += copy; 2074 pos += copy; 2075 } 2076 start = end; 2077 } 2078 2079 skb_walk_frags(skb, frag_iter) { 2080 int end; 2081 2082 WARN_ON(start > offset + len); 2083 2084 end = start + frag_iter->len; 2085 if ((copy = end - offset) > 0) { 2086 __wsum csum2; 2087 if (copy > len) 2088 copy = len; 2089 csum2 = __skb_checksum(frag_iter, offset - start, 2090 copy, 0, ops); 2091 csum = ops->combine(csum, csum2, pos, copy); 2092 if ((len -= copy) == 0) 2093 return csum; 2094 offset += copy; 2095 pos += copy; 2096 } 2097 start = end; 2098 } 2099 BUG_ON(len); 2100 2101 return csum; 2102 } 2103 EXPORT_SYMBOL(__skb_checksum); 2104 2105 __wsum skb_checksum(const struct sk_buff *skb, int offset, 2106 int len, __wsum csum) 2107 { 2108 const struct skb_checksum_ops ops = { 2109 .update = csum_partial_ext, 2110 .combine = csum_block_add_ext, 2111 }; 2112 2113 return __skb_checksum(skb, offset, len, csum, &ops); 2114 } 2115 EXPORT_SYMBOL(skb_checksum); 2116 2117 /* Both of above in one bottle. */ 2118 2119 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 2120 u8 *to, int len, __wsum csum) 2121 { 2122 int start = skb_headlen(skb); 2123 int i, copy = start - offset; 2124 struct sk_buff *frag_iter; 2125 int pos = 0; 2126 2127 /* Copy header. 
*/ 2128 if (copy > 0) { 2129 if (copy > len) 2130 copy = len; 2131 csum = csum_partial_copy_nocheck(skb->data + offset, to, 2132 copy, csum); 2133 if ((len -= copy) == 0) 2134 return csum; 2135 offset += copy; 2136 to += copy; 2137 pos = copy; 2138 } 2139 2140 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2141 int end; 2142 2143 WARN_ON(start > offset + len); 2144 2145 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2146 if ((copy = end - offset) > 0) { 2147 __wsum csum2; 2148 u8 *vaddr; 2149 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2150 2151 if (copy > len) 2152 copy = len; 2153 vaddr = kmap_atomic(skb_frag_page(frag)); 2154 csum2 = csum_partial_copy_nocheck(vaddr + 2155 frag->page_offset + 2156 offset - start, to, 2157 copy, 0); 2158 kunmap_atomic(vaddr); 2159 csum = csum_block_add(csum, csum2, pos); 2160 if (!(len -= copy)) 2161 return csum; 2162 offset += copy; 2163 to += copy; 2164 pos += copy; 2165 } 2166 start = end; 2167 } 2168 2169 skb_walk_frags(skb, frag_iter) { 2170 __wsum csum2; 2171 int end; 2172 2173 WARN_ON(start > offset + len); 2174 2175 end = start + frag_iter->len; 2176 if ((copy = end - offset) > 0) { 2177 if (copy > len) 2178 copy = len; 2179 csum2 = skb_copy_and_csum_bits(frag_iter, 2180 offset - start, 2181 to, copy, 0); 2182 csum = csum_block_add(csum, csum2, pos); 2183 if ((len -= copy) == 0) 2184 return csum; 2185 offset += copy; 2186 to += copy; 2187 pos += copy; 2188 } 2189 start = end; 2190 } 2191 BUG_ON(len); 2192 return csum; 2193 } 2194 EXPORT_SYMBOL(skb_copy_and_csum_bits); 2195 2196 /** 2197 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 2198 * @from: source buffer 2199 * 2200 * Calculates the amount of linear headroom needed in the 'to' skb passed 2201 * into skb_zerocopy(). 2202 */ 2203 unsigned int 2204 skb_zerocopy_headlen(const struct sk_buff *from) 2205 { 2206 unsigned int hlen = 0; 2207 2208 if (!from->head_frag || 2209 skb_headlen(from) < L1_CACHE_BYTES || 2210 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 2211 hlen = skb_headlen(from); 2212 2213 if (skb_has_frag_list(from)) 2214 hlen = from->len; 2215 2216 return hlen; 2217 } 2218 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 2219 2220 /** 2221 * skb_zerocopy - Zero copy skb to skb 2222 * @to: destination buffer 2223 * @from: source buffer 2224 * @len: number of bytes to copy from source buffer 2225 * @hlen: size of linear headroom in destination buffer 2226 * 2227 * Copies up to `len` bytes from `from` to `to` by creating references 2228 * to the frags in the source buffer. 2229 * 2230 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 2231 * headroom in the `to` buffer. 
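 *
 * Typical usage, shown as an illustrative sketch that is not part of the
 * original file ('to', 'from', 'hlen' and 'err' are placeholders): size
 * the linear area of the new skb with skb_zerocopy_headlen() and let
 * skb_zerocopy() take references on the remaining frags:
 *
 *     hlen = skb_zerocopy_headlen(from);
 *     to = alloc_skb(hlen, GFP_ATOMIC);
 *     if (to)
 *         err = skb_zerocopy(to, from, from->len, hlen);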
2232 * 2233 * Return value: 2234 * 0: everything is OK 2235 * -ENOMEM: couldn't orphan frags of @from due to lack of memory 2236 * -EFAULT: skb_copy_bits() found some problem with skb geometry 2237 */ 2238 int 2239 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 2240 { 2241 int i, j = 0; 2242 int plen = 0; /* length of skb->head fragment */ 2243 int ret; 2244 struct page *page; 2245 unsigned int offset; 2246 2247 BUG_ON(!from->head_frag && !hlen); 2248 2249 /* dont bother with small payloads */ 2250 if (len <= skb_tailroom(to)) 2251 return skb_copy_bits(from, 0, skb_put(to, len), len); 2252 2253 if (hlen) { 2254 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 2255 if (unlikely(ret)) 2256 return ret; 2257 len -= hlen; 2258 } else { 2259 plen = min_t(int, skb_headlen(from), len); 2260 if (plen) { 2261 page = virt_to_head_page(from->head); 2262 offset = from->data - (unsigned char *)page_address(page); 2263 __skb_fill_page_desc(to, 0, page, offset, plen); 2264 get_page(page); 2265 j = 1; 2266 len -= plen; 2267 } 2268 } 2269 2270 to->truesize += len + plen; 2271 to->len += len + plen; 2272 to->data_len += len + plen; 2273 2274 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 2275 skb_tx_error(from); 2276 return -ENOMEM; 2277 } 2278 2279 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 2280 if (!len) 2281 break; 2282 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 2283 skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len); 2284 len -= skb_shinfo(to)->frags[j].size; 2285 skb_frag_ref(to, j); 2286 j++; 2287 } 2288 skb_shinfo(to)->nr_frags = j; 2289 2290 return 0; 2291 } 2292 EXPORT_SYMBOL_GPL(skb_zerocopy); 2293 2294 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 2295 { 2296 __wsum csum; 2297 long csstart; 2298 2299 if (skb->ip_summed == CHECKSUM_PARTIAL) 2300 csstart = skb_checksum_start_offset(skb); 2301 else 2302 csstart = skb_headlen(skb); 2303 2304 BUG_ON(csstart > skb_headlen(skb)); 2305 2306 skb_copy_from_linear_data(skb, to, csstart); 2307 2308 csum = 0; 2309 if (csstart != skb->len) 2310 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 2311 skb->len - csstart, 0); 2312 2313 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2314 long csstuff = csstart + skb->csum_offset; 2315 2316 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 2317 } 2318 } 2319 EXPORT_SYMBOL(skb_copy_and_csum_dev); 2320 2321 /** 2322 * skb_dequeue - remove from the head of the queue 2323 * @list: list to dequeue from 2324 * 2325 * Remove the head of the list. The list lock is taken so the function 2326 * may be used safely with other locking list functions. The head item is 2327 * returned or %NULL if the list is empty. 2328 */ 2329 2330 struct sk_buff *skb_dequeue(struct sk_buff_head *list) 2331 { 2332 unsigned long flags; 2333 struct sk_buff *result; 2334 2335 spin_lock_irqsave(&list->lock, flags); 2336 result = __skb_dequeue(list); 2337 spin_unlock_irqrestore(&list->lock, flags); 2338 return result; 2339 } 2340 EXPORT_SYMBOL(skb_dequeue); 2341 2342 /** 2343 * skb_dequeue_tail - remove from the tail of the queue 2344 * @list: list to dequeue from 2345 * 2346 * Remove the tail of the list. The list lock is taken so the function 2347 * may be used safely with other locking list functions. The tail item is 2348 * returned or %NULL if the list is empty. 
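 *
 * A minimal illustrative sketch, not part of the original file ('q' and
 * 'limit' are placeholders): a user that wants to drop the newest buffer
 * once its private queue grows too long could do
 *
 *     if (skb_queue_len(&q) > limit)
 *         kfree_skb(skb_dequeue_tail(&q));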
2349 */ 2350 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 2351 { 2352 unsigned long flags; 2353 struct sk_buff *result; 2354 2355 spin_lock_irqsave(&list->lock, flags); 2356 result = __skb_dequeue_tail(list); 2357 spin_unlock_irqrestore(&list->lock, flags); 2358 return result; 2359 } 2360 EXPORT_SYMBOL(skb_dequeue_tail); 2361 2362 /** 2363 * skb_queue_purge - empty a list 2364 * @list: list to empty 2365 * 2366 * Delete all buffers on an &sk_buff list. Each buffer is removed from 2367 * the list and one reference dropped. This function takes the list 2368 * lock and is atomic with respect to other list locking functions. 2369 */ 2370 void skb_queue_purge(struct sk_buff_head *list) 2371 { 2372 struct sk_buff *skb; 2373 while ((skb = skb_dequeue(list)) != NULL) 2374 kfree_skb(skb); 2375 } 2376 EXPORT_SYMBOL(skb_queue_purge); 2377 2378 /** 2379 * skb_queue_head - queue a buffer at the list head 2380 * @list: list to use 2381 * @newsk: buffer to queue 2382 * 2383 * Queue a buffer at the start of the list. This function takes the 2384 * list lock and can be used safely with other locking &sk_buff functions 2385 * safely. 2386 * 2387 * A buffer cannot be placed on two lists at the same time. 2388 */ 2389 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 2390 { 2391 unsigned long flags; 2392 2393 spin_lock_irqsave(&list->lock, flags); 2394 __skb_queue_head(list, newsk); 2395 spin_unlock_irqrestore(&list->lock, flags); 2396 } 2397 EXPORT_SYMBOL(skb_queue_head); 2398 2399 /** 2400 * skb_queue_tail - queue a buffer at the list tail 2401 * @list: list to use 2402 * @newsk: buffer to queue 2403 * 2404 * Queue a buffer at the tail of the list. This function takes the 2405 * list lock and can be used safely with other locking &sk_buff functions 2406 * safely. 2407 * 2408 * A buffer cannot be placed on two lists at the same time. 2409 */ 2410 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 2411 { 2412 unsigned long flags; 2413 2414 spin_lock_irqsave(&list->lock, flags); 2415 __skb_queue_tail(list, newsk); 2416 spin_unlock_irqrestore(&list->lock, flags); 2417 } 2418 EXPORT_SYMBOL(skb_queue_tail); 2419 2420 /** 2421 * skb_unlink - remove a buffer from a list 2422 * @skb: buffer to remove 2423 * @list: list to use 2424 * 2425 * Remove a packet from a list. The list locks are taken and this 2426 * function is atomic with respect to other list locked calls 2427 * 2428 * You must know what list the SKB is on. 2429 */ 2430 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 2431 { 2432 unsigned long flags; 2433 2434 spin_lock_irqsave(&list->lock, flags); 2435 __skb_unlink(skb, list); 2436 spin_unlock_irqrestore(&list->lock, flags); 2437 } 2438 EXPORT_SYMBOL(skb_unlink); 2439 2440 /** 2441 * skb_append - append a buffer 2442 * @old: buffer to insert after 2443 * @newsk: buffer to insert 2444 * @list: list to use 2445 * 2446 * Place a packet after a given packet in a list. The list locks are taken 2447 * and this function is atomic with respect to other list locked calls. 2448 * A buffer cannot be placed on two lists at the same time. 
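 *
 * Illustrative sketch, not part of the original file ('old', 'nskb' and
 * 'list' are placeholders): to replace a queued buffer while keeping its
 * position, queue the new skb behind the old one and then unlink the old:
 *
 *     skb_append(old, nskb, list);
 *     skb_unlink(old, list);
 *     kfree_skb(old);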
2449 */ 2450 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 2451 { 2452 unsigned long flags; 2453 2454 spin_lock_irqsave(&list->lock, flags); 2455 __skb_queue_after(list, old, newsk); 2456 spin_unlock_irqrestore(&list->lock, flags); 2457 } 2458 EXPORT_SYMBOL(skb_append); 2459 2460 /** 2461 * skb_insert - insert a buffer 2462 * @old: buffer to insert before 2463 * @newsk: buffer to insert 2464 * @list: list to use 2465 * 2466 * Place a packet before a given packet in a list. The list locks are 2467 * taken and this function is atomic with respect to other list locked 2468 * calls. 2469 * 2470 * A buffer cannot be placed on two lists at the same time. 2471 */ 2472 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 2473 { 2474 unsigned long flags; 2475 2476 spin_lock_irqsave(&list->lock, flags); 2477 __skb_insert(newsk, old->prev, old, list); 2478 spin_unlock_irqrestore(&list->lock, flags); 2479 } 2480 EXPORT_SYMBOL(skb_insert); 2481 2482 static inline void skb_split_inside_header(struct sk_buff *skb, 2483 struct sk_buff* skb1, 2484 const u32 len, const int pos) 2485 { 2486 int i; 2487 2488 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 2489 pos - len); 2490 /* And move data appendix as is. */ 2491 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 2492 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 2493 2494 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 2495 skb_shinfo(skb)->nr_frags = 0; 2496 skb1->data_len = skb->data_len; 2497 skb1->len += skb1->data_len; 2498 skb->data_len = 0; 2499 skb->len = len; 2500 skb_set_tail_pointer(skb, len); 2501 } 2502 2503 static inline void skb_split_no_header(struct sk_buff *skb, 2504 struct sk_buff* skb1, 2505 const u32 len, int pos) 2506 { 2507 int i, k = 0; 2508 const int nfrags = skb_shinfo(skb)->nr_frags; 2509 2510 skb_shinfo(skb)->nr_frags = 0; 2511 skb1->len = skb1->data_len = skb->len - len; 2512 skb->len = len; 2513 skb->data_len = len - pos; 2514 2515 for (i = 0; i < nfrags; i++) { 2516 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2517 2518 if (pos + size > len) { 2519 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 2520 2521 if (pos < len) { 2522 /* Split frag. 2523 * We have two variants in this case: 2524 * 1. Move all the frag to the second 2525 * part, if it is possible. F.e. 2526 * this approach is mandatory for TUX, 2527 * where splitting is expensive. 2528 * 2. Split is accurately. We make this. 2529 */ 2530 skb_frag_ref(skb, i); 2531 skb_shinfo(skb1)->frags[0].page_offset += len - pos; 2532 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 2533 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 2534 skb_shinfo(skb)->nr_frags++; 2535 } 2536 k++; 2537 } else 2538 skb_shinfo(skb)->nr_frags++; 2539 pos += size; 2540 } 2541 skb_shinfo(skb1)->nr_frags = k; 2542 } 2543 2544 /** 2545 * skb_split - Split fragmented skb to two parts at length len. 2546 * @skb: the buffer to split 2547 * @skb1: the buffer to receive the second part 2548 * @len: new length for skb 2549 */ 2550 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 2551 { 2552 int pos = skb_headlen(skb); 2553 2554 skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; 2555 if (len < pos) /* Split line is inside header. */ 2556 skb_split_inside_header(skb, skb1, len, pos); 2557 else /* Second chunk has no header, nothing to copy. 
*/ 2558 skb_split_no_header(skb, skb1, len, pos); 2559 } 2560 EXPORT_SYMBOL(skb_split); 2561 2562 /* Shifting from/to a cloned skb is a no-go. 2563 * 2564 * Caller cannot keep skb_shinfo related pointers past calling here! 2565 */ 2566 static int skb_prepare_for_shift(struct sk_buff *skb) 2567 { 2568 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2569 } 2570 2571 /** 2572 * skb_shift - Shifts paged data partially from skb to another 2573 * @tgt: buffer into which tail data gets added 2574 * @skb: buffer from which the paged data comes from 2575 * @shiftlen: shift up to this many bytes 2576 * 2577 * Attempts to shift up to shiftlen worth of bytes, which may be less than 2578 * the length of the skb, from skb to tgt. Returns number bytes shifted. 2579 * It's up to caller to free skb if everything was shifted. 2580 * 2581 * If @tgt runs out of frags, the whole operation is aborted. 2582 * 2583 * Skb cannot include anything else but paged data while tgt is allowed 2584 * to have non-paged data as well. 2585 * 2586 * TODO: full sized shift could be optimized but that would need 2587 * specialized skb free'er to handle frags without up-to-date nr_frags. 2588 */ 2589 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 2590 { 2591 int from, to, merge, todo; 2592 struct skb_frag_struct *fragfrom, *fragto; 2593 2594 BUG_ON(shiftlen > skb->len); 2595 BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ 2596 2597 todo = shiftlen; 2598 from = 0; 2599 to = skb_shinfo(tgt)->nr_frags; 2600 fragfrom = &skb_shinfo(skb)->frags[from]; 2601 2602 /* Actual merge is delayed until the point when we know we can 2603 * commit all, so that we don't have to undo partial changes 2604 */ 2605 if (!to || 2606 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 2607 fragfrom->page_offset)) { 2608 merge = -1; 2609 } else { 2610 merge = to - 1; 2611 2612 todo -= skb_frag_size(fragfrom); 2613 if (todo < 0) { 2614 if (skb_prepare_for_shift(skb) || 2615 skb_prepare_for_shift(tgt)) 2616 return 0; 2617 2618 /* All previous frag pointers might be stale! 
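 * skb_prepare_for_shift() may have called pskb_expand_head(), which
 * reallocates the head and the shared info area, so the frag pointers
 * are reloaded from skb_shinfo() before they are used again.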
*/ 2619 fragfrom = &skb_shinfo(skb)->frags[from]; 2620 fragto = &skb_shinfo(tgt)->frags[merge]; 2621 2622 skb_frag_size_add(fragto, shiftlen); 2623 skb_frag_size_sub(fragfrom, shiftlen); 2624 fragfrom->page_offset += shiftlen; 2625 2626 goto onlymerged; 2627 } 2628 2629 from++; 2630 } 2631 2632 /* Skip full, not-fitting skb to avoid expensive operations */ 2633 if ((shiftlen == skb->len) && 2634 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2635 return 0; 2636 2637 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2638 return 0; 2639 2640 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2641 if (to == MAX_SKB_FRAGS) 2642 return 0; 2643 2644 fragfrom = &skb_shinfo(skb)->frags[from]; 2645 fragto = &skb_shinfo(tgt)->frags[to]; 2646 2647 if (todo >= skb_frag_size(fragfrom)) { 2648 *fragto = *fragfrom; 2649 todo -= skb_frag_size(fragfrom); 2650 from++; 2651 to++; 2652 2653 } else { 2654 __skb_frag_ref(fragfrom); 2655 fragto->page = fragfrom->page; 2656 fragto->page_offset = fragfrom->page_offset; 2657 skb_frag_size_set(fragto, todo); 2658 2659 fragfrom->page_offset += todo; 2660 skb_frag_size_sub(fragfrom, todo); 2661 todo = 0; 2662 2663 to++; 2664 break; 2665 } 2666 } 2667 2668 /* Ready to "commit" this state change to tgt */ 2669 skb_shinfo(tgt)->nr_frags = to; 2670 2671 if (merge >= 0) { 2672 fragfrom = &skb_shinfo(skb)->frags[0]; 2673 fragto = &skb_shinfo(tgt)->frags[merge]; 2674 2675 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 2676 __skb_frag_unref(fragfrom); 2677 } 2678 2679 /* Reposition in the original skb */ 2680 to = 0; 2681 while (from < skb_shinfo(skb)->nr_frags) 2682 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2683 skb_shinfo(skb)->nr_frags = to; 2684 2685 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2686 2687 onlymerged: 2688 /* Most likely the tgt won't ever need its checksum anymore, skb on 2689 * the other hand might need it if it needs to be resent 2690 */ 2691 tgt->ip_summed = CHECKSUM_PARTIAL; 2692 skb->ip_summed = CHECKSUM_PARTIAL; 2693 2694 /* Yak, is it really working this way? Some helper please? */ 2695 skb->len -= shiftlen; 2696 skb->data_len -= shiftlen; 2697 skb->truesize -= shiftlen; 2698 tgt->len += shiftlen; 2699 tgt->data_len += shiftlen; 2700 tgt->truesize += shiftlen; 2701 2702 return shiftlen; 2703 } 2704 2705 /** 2706 * skb_prepare_seq_read - Prepare a sequential read of skb data 2707 * @skb: the buffer to read 2708 * @from: lower offset of data to be read 2709 * @to: upper offset of data to be read 2710 * @st: state variable 2711 * 2712 * Initializes the specified state variable. Must be called before 2713 * invoking skb_seq_read() for the first time. 2714 */ 2715 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 2716 unsigned int to, struct skb_seq_state *st) 2717 { 2718 st->lower_offset = from; 2719 st->upper_offset = to; 2720 st->root_skb = st->cur_skb = skb; 2721 st->frag_idx = st->stepped_offset = 0; 2722 st->frag_data = NULL; 2723 } 2724 EXPORT_SYMBOL(skb_prepare_seq_read); 2725 2726 /** 2727 * skb_seq_read - Sequentially read skb data 2728 * @consumed: number of bytes consumed by the caller so far 2729 * @data: destination pointer for data to be returned 2730 * @st: state variable 2731 * 2732 * Reads a block of skb data at @consumed relative to the 2733 * lower offset specified to skb_prepare_seq_read(). Assigns 2734 * the head of the data block to @data and returns the length 2735 * of the block or 0 if the end of the skb data or the upper 2736 * offset has been reached. 
2737 * 2738 * The caller is not required to consume all of the data 2739 * returned, i.e. @consumed is typically set to the number 2740 * of bytes already consumed and the next call to 2741 * skb_seq_read() will return the remaining part of the block. 2742 * 2743 * Note 1: The size of each block of data returned can be arbitrary, 2744 * this limitation is the cost for zerocopy sequential 2745 * reads of potentially non linear data. 2746 * 2747 * Note 2: Fragment lists within fragments are not implemented 2748 * at the moment, state->root_skb could be replaced with 2749 * a stack for this purpose. 2750 */ 2751 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 2752 struct skb_seq_state *st) 2753 { 2754 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 2755 skb_frag_t *frag; 2756 2757 if (unlikely(abs_offset >= st->upper_offset)) { 2758 if (st->frag_data) { 2759 kunmap_atomic(st->frag_data); 2760 st->frag_data = NULL; 2761 } 2762 return 0; 2763 } 2764 2765 next_skb: 2766 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 2767 2768 if (abs_offset < block_limit && !st->frag_data) { 2769 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 2770 return block_limit - abs_offset; 2771 } 2772 2773 if (st->frag_idx == 0 && !st->frag_data) 2774 st->stepped_offset += skb_headlen(st->cur_skb); 2775 2776 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2777 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 2778 block_limit = skb_frag_size(frag) + st->stepped_offset; 2779 2780 if (abs_offset < block_limit) { 2781 if (!st->frag_data) 2782 st->frag_data = kmap_atomic(skb_frag_page(frag)); 2783 2784 *data = (u8 *) st->frag_data + frag->page_offset + 2785 (abs_offset - st->stepped_offset); 2786 2787 return block_limit - abs_offset; 2788 } 2789 2790 if (st->frag_data) { 2791 kunmap_atomic(st->frag_data); 2792 st->frag_data = NULL; 2793 } 2794 2795 st->frag_idx++; 2796 st->stepped_offset += skb_frag_size(frag); 2797 } 2798 2799 if (st->frag_data) { 2800 kunmap_atomic(st->frag_data); 2801 st->frag_data = NULL; 2802 } 2803 2804 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 2805 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 2806 st->frag_idx = 0; 2807 goto next_skb; 2808 } else if (st->cur_skb->next) { 2809 st->cur_skb = st->cur_skb->next; 2810 st->frag_idx = 0; 2811 goto next_skb; 2812 } 2813 2814 return 0; 2815 } 2816 EXPORT_SYMBOL(skb_seq_read); 2817 2818 /** 2819 * skb_abort_seq_read - Abort a sequential read of skb data 2820 * @st: state variable 2821 * 2822 * Must be called if skb_seq_read() was not called until it 2823 * returned 0. 
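 *
 * A minimal illustrative sketch of the whole sequential-read cycle, not
 * part of the original file ('skb', 'len' and consume() are placeholders);
 * if the loop stops before skb_seq_read() returns 0, the abort call below
 * is what releases any fragment mapping still held:
 *
 *     struct skb_seq_state st;
 *     const u8 *data;
 *     unsigned int consumed = 0, avail;
 *
 *     skb_prepare_seq_read(skb, 0, len, &st);
 *     while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
 *         if (!consume(data, avail))
 *             break;
 *         consumed += avail;
 *     }
 *     skb_abort_seq_read(&st);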
2824 */ 2825 void skb_abort_seq_read(struct skb_seq_state *st) 2826 { 2827 if (st->frag_data) 2828 kunmap_atomic(st->frag_data); 2829 } 2830 EXPORT_SYMBOL(skb_abort_seq_read); 2831 2832 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 2833 2834 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 2835 struct ts_config *conf, 2836 struct ts_state *state) 2837 { 2838 return skb_seq_read(offset, text, TS_SKB_CB(state)); 2839 } 2840 2841 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 2842 { 2843 skb_abort_seq_read(TS_SKB_CB(state)); 2844 } 2845 2846 /** 2847 * skb_find_text - Find a text pattern in skb data 2848 * @skb: the buffer to look in 2849 * @from: search offset 2850 * @to: search limit 2851 * @config: textsearch configuration 2852 * 2853 * Finds a pattern in the skb data according to the specified 2854 * textsearch configuration. Use textsearch_next() to retrieve 2855 * subsequent occurrences of the pattern. Returns the offset 2856 * to the first occurrence or UINT_MAX if no match was found. 2857 */ 2858 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 2859 unsigned int to, struct ts_config *config) 2860 { 2861 struct ts_state state; 2862 unsigned int ret; 2863 2864 config->get_next_block = skb_ts_get_next_block; 2865 config->finish = skb_ts_finish; 2866 2867 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 2868 2869 ret = textsearch_find(config, &state); 2870 return (ret <= to - from ? ret : UINT_MAX); 2871 } 2872 EXPORT_SYMBOL(skb_find_text); 2873 2874 /** 2875 * skb_append_datato_frags - append the user data to a skb 2876 * @sk: sock structure 2877 * @skb: skb structure to be appended with user data. 2878 * @getfrag: callback function to be used for getting the user data 2879 * @from: pointer to user message iov 2880 * @length: length of the iov message 2881 * 2882 * Description: This procedure appends the user data to the fragment part 2883 * of the skb. If any page allocation fails, this procedure returns -ENOMEM. 2884 */ 2885 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 2886 int (*getfrag)(void *from, char *to, int offset, 2887 int len, int odd, struct sk_buff *skb), 2888 void *from, int length) 2889 { 2890 int frg_cnt = skb_shinfo(skb)->nr_frags; 2891 int copy; 2892 int offset = 0; 2893 int ret; 2894 struct page_frag *pfrag = &current->task_frag; 2895 2896 do { 2897 /* Return error if we don't have space for new frag */ 2898 if (frg_cnt >= MAX_SKB_FRAGS) 2899 return -EMSGSIZE; 2900 2901 if (!sk_page_frag_refill(sk, pfrag)) 2902 return -ENOMEM; 2903 2904 /* copy the user data to page */ 2905 copy = min_t(int, length, pfrag->size - pfrag->offset); 2906 2907 ret = getfrag(from, page_address(pfrag->page) + pfrag->offset, 2908 offset, copy, 0, skb); 2909 if (ret < 0) 2910 return -EFAULT; 2911 2912 /* copy was successful so update the size parameters */ 2913 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset, 2914 copy); 2915 frg_cnt++; 2916 pfrag->offset += copy; 2917 get_page(pfrag->page); 2918 2919 skb->truesize += copy; 2920 atomic_add(copy, &sk->sk_wmem_alloc); 2921 skb->len += copy; 2922 skb->data_len += copy; 2923 offset += copy; 2924 length -= copy; 2925 2926 } while (length > 0); 2927 2928 return 0; 2929 } 2930 EXPORT_SYMBOL(skb_append_datato_frags); 2931 2932 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 2933 int offset, size_t size) 2934 { 2935 int i = skb_shinfo(skb)->nr_frags; 2936 2937 if (skb_can_coalesce(skb, i, page, offset)) { 2938
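		/* The new chunk is contiguous with the last fragment already
		 * in the skb (same page, adjoining offset), so just grow that
		 * fragment instead of consuming a new slot.
		 */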
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); 2939 } else if (i < MAX_SKB_FRAGS) { 2940 get_page(page); 2941 skb_fill_page_desc(skb, i, page, offset, size); 2942 } else { 2943 return -EMSGSIZE; 2944 } 2945 2946 return 0; 2947 } 2948 EXPORT_SYMBOL_GPL(skb_append_pagefrags); 2949 2950 /** 2951 * skb_pull_rcsum - pull skb and update receive checksum 2952 * @skb: buffer to update 2953 * @len: length of data pulled 2954 * 2955 * This function performs an skb_pull on the packet and updates 2956 * the CHECKSUM_COMPLETE checksum. It should be used on 2957 * receive path processing instead of skb_pull unless you know 2958 * that the checksum difference is zero (e.g., a valid IP header) 2959 * or you are setting ip_summed to CHECKSUM_NONE. 2960 */ 2961 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 2962 { 2963 unsigned char *data = skb->data; 2964 2965 BUG_ON(len > skb->len); 2966 __skb_pull(skb, len); 2967 skb_postpull_rcsum(skb, data, len); 2968 return skb->data; 2969 } 2970 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2971 2972 /** 2973 * skb_segment - Perform protocol segmentation on skb. 2974 * @head_skb: buffer to segment 2975 * @features: features for the output path (see dev->features) 2976 * 2977 * This function performs segmentation on the given skb. It returns 2978 * a pointer to the first in a list of new skbs for the segments. 2979 * In case of error it returns ERR_PTR(err). 2980 */ 2981 struct sk_buff *skb_segment(struct sk_buff *head_skb, 2982 netdev_features_t features) 2983 { 2984 struct sk_buff *segs = NULL; 2985 struct sk_buff *tail = NULL; 2986 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 2987 skb_frag_t *frag = skb_shinfo(head_skb)->frags; 2988 unsigned int mss = skb_shinfo(head_skb)->gso_size; 2989 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 2990 struct sk_buff *frag_skb = head_skb; 2991 unsigned int offset = doffset; 2992 unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 2993 unsigned int headroom; 2994 unsigned int len; 2995 __be16 proto; 2996 bool csum; 2997 int sg = !!(features & NETIF_F_SG); 2998 int nfrags = skb_shinfo(head_skb)->nr_frags; 2999 int err = -ENOMEM; 3000 int i = 0; 3001 int pos; 3002 int dummy; 3003 3004 __skb_push(head_skb, doffset); 3005 proto = skb_network_protocol(head_skb, &dummy); 3006 if (unlikely(!proto)) 3007 return ERR_PTR(-EINVAL); 3008 3009 csum = !head_skb->encap_hdr_csum && 3010 !!can_checksum_protocol(features, proto); 3011 3012 headroom = skb_headroom(head_skb); 3013 pos = skb_headlen(head_skb); 3014 3015 do { 3016 struct sk_buff *nskb; 3017 skb_frag_t *nskb_frag; 3018 int hsize; 3019 int size; 3020 3021 len = head_skb->len - offset; 3022 if (len > mss) 3023 len = mss; 3024 3025 hsize = skb_headlen(head_skb) - offset; 3026 if (hsize < 0) 3027 hsize = 0; 3028 if (hsize > len || !sg) 3029 hsize = len; 3030 3031 if (!hsize && i >= nfrags && skb_headlen(list_skb) && 3032 (skb_headlen(list_skb) == len || sg)) { 3033 BUG_ON(skb_headlen(list_skb) > len); 3034 3035 i = 0; 3036 nfrags = skb_shinfo(list_skb)->nr_frags; 3037 frag = skb_shinfo(list_skb)->frags; 3038 frag_skb = list_skb; 3039 pos += skb_headlen(list_skb); 3040 3041 while (pos < offset + len) { 3042 BUG_ON(i >= nfrags); 3043 3044 size = skb_frag_size(frag); 3045 if (pos + size > offset + len) 3046 break; 3047 3048 i++; 3049 pos += size; 3050 frag++; 3051 } 3052 3053 nskb = skb_clone(list_skb, GFP_ATOMIC); 3054 list_skb = list_skb->next; 3055 3056 if (unlikely(!nskb)) 3057 goto err; 3058 3059 if (unlikely(pskb_trim(nskb, len))) 
{ 3060 kfree_skb(nskb); 3061 goto err; 3062 } 3063 3064 hsize = skb_end_offset(nskb); 3065 if (skb_cow_head(nskb, doffset + headroom)) { 3066 kfree_skb(nskb); 3067 goto err; 3068 } 3069 3070 nskb->truesize += skb_end_offset(nskb) - hsize; 3071 skb_release_head_state(nskb); 3072 __skb_push(nskb, doffset); 3073 } else { 3074 nskb = __alloc_skb(hsize + doffset + headroom, 3075 GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 3076 NUMA_NO_NODE); 3077 3078 if (unlikely(!nskb)) 3079 goto err; 3080 3081 skb_reserve(nskb, headroom); 3082 __skb_put(nskb, doffset); 3083 } 3084 3085 if (segs) 3086 tail->next = nskb; 3087 else 3088 segs = nskb; 3089 tail = nskb; 3090 3091 __copy_skb_header(nskb, head_skb); 3092 3093 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 3094 skb_reset_mac_len(nskb); 3095 3096 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 3097 nskb->data - tnl_hlen, 3098 doffset + tnl_hlen); 3099 3100 if (nskb->len == len + doffset) 3101 goto perform_csum_check; 3102 3103 if (!sg && !nskb->remcsum_offload) { 3104 nskb->ip_summed = CHECKSUM_NONE; 3105 nskb->csum = skb_copy_and_csum_bits(head_skb, offset, 3106 skb_put(nskb, len), 3107 len, 0); 3108 SKB_GSO_CB(nskb)->csum_start = 3109 skb_headroom(nskb) + doffset; 3110 continue; 3111 } 3112 3113 nskb_frag = skb_shinfo(nskb)->frags; 3114 3115 skb_copy_from_linear_data_offset(head_skb, offset, 3116 skb_put(nskb, hsize), hsize); 3117 3118 skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags & 3119 SKBTX_SHARED_FRAG; 3120 3121 while (pos < offset + len) { 3122 if (i >= nfrags) { 3123 BUG_ON(skb_headlen(list_skb)); 3124 3125 i = 0; 3126 nfrags = skb_shinfo(list_skb)->nr_frags; 3127 frag = skb_shinfo(list_skb)->frags; 3128 frag_skb = list_skb; 3129 3130 BUG_ON(!nfrags); 3131 3132 list_skb = list_skb->next; 3133 } 3134 3135 if (unlikely(skb_shinfo(nskb)->nr_frags >= 3136 MAX_SKB_FRAGS)) { 3137 net_warn_ratelimited( 3138 "skb_segment: too many frags: %u %u\n", 3139 pos, mss); 3140 goto err; 3141 } 3142 3143 if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC))) 3144 goto err; 3145 3146 *nskb_frag = *frag; 3147 __skb_frag_ref(nskb_frag); 3148 size = skb_frag_size(nskb_frag); 3149 3150 if (pos < offset) { 3151 nskb_frag->page_offset += offset - pos; 3152 skb_frag_size_sub(nskb_frag, offset - pos); 3153 } 3154 3155 skb_shinfo(nskb)->nr_frags++; 3156 3157 if (pos + size <= offset + len) { 3158 i++; 3159 frag++; 3160 pos += size; 3161 } else { 3162 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); 3163 goto skip_fraglist; 3164 } 3165 3166 nskb_frag++; 3167 } 3168 3169 skip_fraglist: 3170 nskb->data_len = len - hsize; 3171 nskb->len += nskb->data_len; 3172 nskb->truesize += nskb->data_len; 3173 3174 perform_csum_check: 3175 if (!csum && !nskb->remcsum_offload) { 3176 nskb->csum = skb_checksum(nskb, doffset, 3177 nskb->len - doffset, 0); 3178 nskb->ip_summed = CHECKSUM_NONE; 3179 SKB_GSO_CB(nskb)->csum_start = 3180 skb_headroom(nskb) + doffset; 3181 } 3182 } while ((offset += len) < head_skb->len); 3183 3184 /* Some callers want to get the end of the list. 3185 * Put it in segs->prev to avoid walking the list. 3186 * (see validate_xmit_skb_list() for example) 3187 */ 3188 segs->prev = tail; 3189 3190 /* Following permits correct backpressure, for protocols 3191 * using skb_set_owner_w(). 3192 * Idea is to tranfert ownership from head_skb to last segment. 
3193 */ 3194 if (head_skb->destructor == sock_wfree) { 3195 swap(tail->truesize, head_skb->truesize); 3196 swap(tail->destructor, head_skb->destructor); 3197 swap(tail->sk, head_skb->sk); 3198 } 3199 return segs; 3200 3201 err: 3202 kfree_skb_list(segs); 3203 return ERR_PTR(err); 3204 } 3205 EXPORT_SYMBOL_GPL(skb_segment); 3206 3207 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 3208 { 3209 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); 3210 unsigned int offset = skb_gro_offset(skb); 3211 unsigned int headlen = skb_headlen(skb); 3212 unsigned int len = skb_gro_len(skb); 3213 struct sk_buff *lp, *p = *head; 3214 unsigned int delta_truesize; 3215 3216 if (unlikely(p->len + len >= 65536)) 3217 return -E2BIG; 3218 3219 lp = NAPI_GRO_CB(p)->last; 3220 pinfo = skb_shinfo(lp); 3221 3222 if (headlen <= offset) { 3223 skb_frag_t *frag; 3224 skb_frag_t *frag2; 3225 int i = skbinfo->nr_frags; 3226 int nr_frags = pinfo->nr_frags + i; 3227 3228 if (nr_frags > MAX_SKB_FRAGS) 3229 goto merge; 3230 3231 offset -= headlen; 3232 pinfo->nr_frags = nr_frags; 3233 skbinfo->nr_frags = 0; 3234 3235 frag = pinfo->frags + nr_frags; 3236 frag2 = skbinfo->frags + i; 3237 do { 3238 *--frag = *--frag2; 3239 } while (--i); 3240 3241 frag->page_offset += offset; 3242 skb_frag_size_sub(frag, offset); 3243 3244 /* all fragments truesize : remove (head size + sk_buff) */ 3245 delta_truesize = skb->truesize - 3246 SKB_TRUESIZE(skb_end_offset(skb)); 3247 3248 skb->truesize -= skb->data_len; 3249 skb->len -= skb->data_len; 3250 skb->data_len = 0; 3251 3252 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 3253 goto done; 3254 } else if (skb->head_frag) { 3255 int nr_frags = pinfo->nr_frags; 3256 skb_frag_t *frag = pinfo->frags + nr_frags; 3257 struct page *page = virt_to_head_page(skb->head); 3258 unsigned int first_size = headlen - offset; 3259 unsigned int first_offset; 3260 3261 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 3262 goto merge; 3263 3264 first_offset = skb->data - 3265 (unsigned char *)page_address(page) + 3266 offset; 3267 3268 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 3269 3270 frag->page.p = page; 3271 frag->page_offset = first_offset; 3272 skb_frag_size_set(frag, first_size); 3273 3274 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 3275 /* We dont need to clear skbinfo->nr_frags here */ 3276 3277 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 3278 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 3279 goto done; 3280 } 3281 3282 merge: 3283 delta_truesize = skb->truesize; 3284 if (offset > headlen) { 3285 unsigned int eat = offset - headlen; 3286 3287 skbinfo->frags[0].page_offset += eat; 3288 skb_frag_size_sub(&skbinfo->frags[0], eat); 3289 skb->data_len -= eat; 3290 skb->len -= eat; 3291 offset = headlen; 3292 } 3293 3294 __skb_pull(skb, offset); 3295 3296 if (NAPI_GRO_CB(p)->last == p) 3297 skb_shinfo(p)->frag_list = skb; 3298 else 3299 NAPI_GRO_CB(p)->last->next = skb; 3300 NAPI_GRO_CB(p)->last = skb; 3301 __skb_header_release(skb); 3302 lp = p; 3303 3304 done: 3305 NAPI_GRO_CB(p)->count++; 3306 p->data_len += len; 3307 p->truesize += delta_truesize; 3308 p->len += len; 3309 if (lp != p) { 3310 lp->data_len += len; 3311 lp->truesize += delta_truesize; 3312 lp->len += len; 3313 } 3314 NAPI_GRO_CB(skb)->same_flow = 1; 3315 return 0; 3316 } 3317 3318 void __init skb_init(void) 3319 { 3320 skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 3321 sizeof(struct sk_buff), 3322 0, 3323 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 
3324 NULL); 3325 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 3326 sizeof(struct sk_buff_fclones), 3327 0, 3328 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 3329 NULL); 3330 } 3331 3332 /** 3333 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 3334 * @skb: Socket buffer containing the buffers to be mapped 3335 * @sg: The scatter-gather list to map into 3336 * @offset: The offset into the buffer's contents to start mapping 3337 * @len: Length of buffer space to be mapped 3338 * 3339 * Fill the specified scatter-gather list with mappings/pointers into a 3340 * region of the buffer space attached to a socket buffer. 3341 */ 3342 static int 3343 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3344 { 3345 int start = skb_headlen(skb); 3346 int i, copy = start - offset; 3347 struct sk_buff *frag_iter; 3348 int elt = 0; 3349 3350 if (copy > 0) { 3351 if (copy > len) 3352 copy = len; 3353 sg_set_buf(sg, skb->data + offset, copy); 3354 elt++; 3355 if ((len -= copy) == 0) 3356 return elt; 3357 offset += copy; 3358 } 3359 3360 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 3361 int end; 3362 3363 WARN_ON(start > offset + len); 3364 3365 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3366 if ((copy = end - offset) > 0) { 3367 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3368 3369 if (copy > len) 3370 copy = len; 3371 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 3372 frag->page_offset+offset-start); 3373 elt++; 3374 if (!(len -= copy)) 3375 return elt; 3376 offset += copy; 3377 } 3378 start = end; 3379 } 3380 3381 skb_walk_frags(skb, frag_iter) { 3382 int end; 3383 3384 WARN_ON(start > offset + len); 3385 3386 end = start + frag_iter->len; 3387 if ((copy = end - offset) > 0) { 3388 if (copy > len) 3389 copy = len; 3390 elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, 3391 copy); 3392 if ((len -= copy) == 0) 3393 return elt; 3394 offset += copy; 3395 } 3396 start = end; 3397 } 3398 BUG_ON(len); 3399 return elt; 3400 } 3401 3402 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given 3403 * sglist without mark the sg which contain last skb data as the end. 3404 * So the caller can mannipulate sg list as will when padding new data after 3405 * the first call without calling sg_unmark_end to expend sg list. 3406 * 3407 * Scenario to use skb_to_sgvec_nomark: 3408 * 1. sg_init_table 3409 * 2. skb_to_sgvec_nomark(payload1) 3410 * 3. skb_to_sgvec_nomark(payload2) 3411 * 3412 * This is equivalent to: 3413 * 1. sg_init_table 3414 * 2. skb_to_sgvec(payload1) 3415 * 3. sg_unmark_end 3416 * 4. skb_to_sgvec(payload2) 3417 * 3418 * When mapping mutilple payload conditionally, skb_to_sgvec_nomark 3419 * is more preferable. 3420 */ 3421 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, 3422 int offset, int len) 3423 { 3424 return __skb_to_sgvec(skb, sg, offset, len); 3425 } 3426 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); 3427 3428 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3429 { 3430 int nsg = __skb_to_sgvec(skb, sg, offset, len); 3431 3432 sg_mark_end(&sg[nsg - 1]); 3433 3434 return nsg; 3435 } 3436 EXPORT_SYMBOL_GPL(skb_to_sgvec); 3437 3438 /** 3439 * skb_cow_data - Check that a socket buffer's data buffers are writable 3440 * @skb: The socket buffer to check. 
3441 * @tailbits: Amount of trailing space to be added 3442 * @trailer: Returned pointer to the skb where the @tailbits space begins 3443 * 3444 * Make sure that the data buffers attached to a socket buffer are 3445 * writable. If they are not, private copies are made of the data buffers 3446 * and the socket buffer is set to use these instead. 3447 * 3448 * If @tailbits is given, make sure that there is space to write @tailbits 3449 * bytes of data beyond current end of socket buffer. @trailer will be 3450 * set to point to the skb in which this space begins. 3451 * 3452 * The number of scatterlist elements required to completely map the 3453 * COW'd and extended socket buffer will be returned. 3454 */ 3455 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 3456 { 3457 int copyflag; 3458 int elt; 3459 struct sk_buff *skb1, **skb_p; 3460 3461 /* If skb is cloned or its head is paged, reallocate 3462 * head pulling out all the pages (pages are considered not writable 3463 * at the moment even if they are anonymous). 3464 */ 3465 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 3466 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) 3467 return -ENOMEM; 3468 3469 /* Easy case. Most of packets will go this way. */ 3470 if (!skb_has_frag_list(skb)) { 3471 /* A little of trouble, not enough of space for trailer. 3472 * This should not happen, when stack is tuned to generate 3473 * good frames. OK, on miss we reallocate and reserve even more 3474 * space, 128 bytes is fair. */ 3475 3476 if (skb_tailroom(skb) < tailbits && 3477 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 3478 return -ENOMEM; 3479 3480 /* Voila! */ 3481 *trailer = skb; 3482 return 1; 3483 } 3484 3485 /* Misery. We are in troubles, going to mincer fragments... */ 3486 3487 elt = 1; 3488 skb_p = &skb_shinfo(skb)->frag_list; 3489 copyflag = 0; 3490 3491 while ((skb1 = *skb_p) != NULL) { 3492 int ntail = 0; 3493 3494 /* The fragment is partially pulled by someone, 3495 * this can happen on input. Copy it and everything 3496 * after it. */ 3497 3498 if (skb_shared(skb1)) 3499 copyflag = 1; 3500 3501 /* If the skb is the last, worry about trailer. */ 3502 3503 if (skb1->next == NULL && tailbits) { 3504 if (skb_shinfo(skb1)->nr_frags || 3505 skb_has_frag_list(skb1) || 3506 skb_tailroom(skb1) < tailbits) 3507 ntail = tailbits + 128; 3508 } 3509 3510 if (copyflag || 3511 skb_cloned(skb1) || 3512 ntail || 3513 skb_shinfo(skb1)->nr_frags || 3514 skb_has_frag_list(skb1)) { 3515 struct sk_buff *skb2; 3516 3517 /* Fuck, we are miserable poor guys... */ 3518 if (ntail == 0) 3519 skb2 = skb_copy(skb1, GFP_ATOMIC); 3520 else 3521 skb2 = skb_copy_expand(skb1, 3522 skb_headroom(skb1), 3523 ntail, 3524 GFP_ATOMIC); 3525 if (unlikely(skb2 == NULL)) 3526 return -ENOMEM; 3527 3528 if (skb1->sk) 3529 skb_set_owner_w(skb2, skb1->sk); 3530 3531 /* Looking around. Are we still alive? 
3532 * OK, link new skb, drop old one */ 3533 3534 skb2->next = skb1->next; 3535 *skb_p = skb2; 3536 kfree_skb(skb1); 3537 skb1 = skb2; 3538 } 3539 elt++; 3540 *trailer = skb1; 3541 skb_p = &skb1->next; 3542 } 3543 3544 return elt; 3545 } 3546 EXPORT_SYMBOL_GPL(skb_cow_data); 3547 3548 static void sock_rmem_free(struct sk_buff *skb) 3549 { 3550 struct sock *sk = skb->sk; 3551 3552 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3553 } 3554 3555 /* 3556 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 3557 */ 3558 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3559 { 3560 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 3561 (unsigned int)sk->sk_rcvbuf) 3562 return -ENOMEM; 3563 3564 skb_orphan(skb); 3565 skb->sk = sk; 3566 skb->destructor = sock_rmem_free; 3567 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3568 3569 /* before exiting rcu section, make sure dst is refcounted */ 3570 skb_dst_force(skb); 3571 3572 skb_queue_tail(&sk->sk_error_queue, skb); 3573 if (!sock_flag(sk, SOCK_DEAD)) 3574 sk->sk_data_ready(sk); 3575 return 0; 3576 } 3577 EXPORT_SYMBOL(sock_queue_err_skb); 3578 3579 struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 3580 { 3581 struct sk_buff_head *q = &sk->sk_error_queue; 3582 struct sk_buff *skb, *skb_next; 3583 unsigned long flags; 3584 int err = 0; 3585 3586 spin_lock_irqsave(&q->lock, flags); 3587 skb = __skb_dequeue(q); 3588 if (skb && (skb_next = skb_peek(q))) 3589 err = SKB_EXT_ERR(skb_next)->ee.ee_errno; 3590 spin_unlock_irqrestore(&q->lock, flags); 3591 3592 sk->sk_err = err; 3593 if (err) 3594 sk->sk_error_report(sk); 3595 3596 return skb; 3597 } 3598 EXPORT_SYMBOL(sock_dequeue_err_skb); 3599 3600 /** 3601 * skb_clone_sk - create clone of skb, and take reference to socket 3602 * @skb: the skb to clone 3603 * 3604 * This function creates a clone of a buffer that holds a reference on 3605 * sk_refcnt. Buffers created via this function are meant to be 3606 * returned using sock_queue_err_skb, or free via kfree_skb. 3607 * 3608 * When passing buffers allocated with this function to sock_queue_err_skb 3609 * it is necessary to wrap the call with sock_hold/sock_put in order to 3610 * prevent the socket from being released prior to being enqueued on 3611 * the sk_error_queue. 
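 *
 * Illustrative sketch of that pattern, not part of the original file:
 *
 *     clone = skb_clone_sk(skb);
 *     if (clone) {
 *         struct sock *sk = clone->sk;
 *
 *         sock_hold(sk);
 *         if (sock_queue_err_skb(sk, clone))
 *             kfree_skb(clone);
 *         sock_put(sk);
 *     }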
3612 */ 3613 struct sk_buff *skb_clone_sk(struct sk_buff *skb) 3614 { 3615 struct sock *sk = skb->sk; 3616 struct sk_buff *clone; 3617 3618 if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) 3619 return NULL; 3620 3621 clone = skb_clone(skb, GFP_ATOMIC); 3622 if (!clone) { 3623 sock_put(sk); 3624 return NULL; 3625 } 3626 3627 clone->sk = sk; 3628 clone->destructor = sock_efree; 3629 3630 return clone; 3631 } 3632 EXPORT_SYMBOL(skb_clone_sk); 3633 3634 static void __skb_complete_tx_timestamp(struct sk_buff *skb, 3635 struct sock *sk, 3636 int tstype) 3637 { 3638 struct sock_exterr_skb *serr; 3639 int err; 3640 3641 serr = SKB_EXT_ERR(skb); 3642 memset(serr, 0, sizeof(*serr)); 3643 serr->ee.ee_errno = ENOMSG; 3644 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 3645 serr->ee.ee_info = tstype; 3646 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 3647 serr->ee.ee_data = skb_shinfo(skb)->tskey; 3648 if (sk->sk_protocol == IPPROTO_TCP && 3649 sk->sk_type == SOCK_STREAM) 3650 serr->ee.ee_data -= sk->sk_tskey; 3651 } 3652 3653 err = sock_queue_err_skb(sk, skb); 3654 3655 if (err) 3656 kfree_skb(skb); 3657 } 3658 3659 static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 3660 { 3661 bool ret; 3662 3663 if (likely(sysctl_tstamp_allow_data || tsonly)) 3664 return true; 3665 3666 read_lock_bh(&sk->sk_callback_lock); 3667 ret = sk->sk_socket && sk->sk_socket->file && 3668 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 3669 read_unlock_bh(&sk->sk_callback_lock); 3670 return ret; 3671 } 3672 3673 void skb_complete_tx_timestamp(struct sk_buff *skb, 3674 struct skb_shared_hwtstamps *hwtstamps) 3675 { 3676 struct sock *sk = skb->sk; 3677 3678 if (!skb_may_tx_timestamp(sk, false)) 3679 return; 3680 3681 /* take a reference to prevent skb_orphan() from freeing the socket */ 3682 sock_hold(sk); 3683 3684 *skb_hwtstamps(skb) = *hwtstamps; 3685 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); 3686 3687 sock_put(sk); 3688 } 3689 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 3690 3691 void __skb_tstamp_tx(struct sk_buff *orig_skb, 3692 struct skb_shared_hwtstamps *hwtstamps, 3693 struct sock *sk, int tstype) 3694 { 3695 struct sk_buff *skb; 3696 bool tsonly; 3697 3698 if (!sk) 3699 return; 3700 3701 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 3702 if (!skb_may_tx_timestamp(sk, tsonly)) 3703 return; 3704 3705 if (tsonly) 3706 skb = alloc_skb(0, GFP_ATOMIC); 3707 else 3708 skb = skb_clone(orig_skb, GFP_ATOMIC); 3709 if (!skb) 3710 return; 3711 3712 if (tsonly) { 3713 skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags; 3714 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 3715 } 3716 3717 if (hwtstamps) 3718 *skb_hwtstamps(skb) = *hwtstamps; 3719 else 3720 skb->tstamp = ktime_get_real(); 3721 3722 __skb_complete_tx_timestamp(skb, sk, tstype); 3723 } 3724 EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 3725 3726 void skb_tstamp_tx(struct sk_buff *orig_skb, 3727 struct skb_shared_hwtstamps *hwtstamps) 3728 { 3729 return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk, 3730 SCM_TSTAMP_SND); 3731 } 3732 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 3733 3734 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 3735 { 3736 struct sock *sk = skb->sk; 3737 struct sock_exterr_skb *serr; 3738 int err; 3739 3740 skb->wifi_acked_valid = 1; 3741 skb->wifi_acked = acked; 3742 3743 serr = SKB_EXT_ERR(skb); 3744 memset(serr, 0, sizeof(*serr)); 3745 serr->ee.ee_errno = ENOMSG; 3746 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 3747 3748 /* take a reference to prevent skb_orphan() from freeing 
the socket */ 3749 sock_hold(sk); 3750 3751 err = sock_queue_err_skb(sk, skb); 3752 if (err) 3753 kfree_skb(skb); 3754 3755 sock_put(sk); 3756 } 3757 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 3758 3759 /** 3760 * skb_partial_csum_set - set up and verify partial csum values for packet 3761 * @skb: the skb to set 3762 * @start: the number of bytes after skb->data to start checksumming. 3763 * @off: the offset from start to place the checksum. 3764 * 3765 * For untrusted partially-checksummed packets, we need to make sure the values 3766 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 3767 * 3768 * This function checks and sets those values and skb->ip_summed: if this 3769 * returns false you should drop the packet. 3770 */ 3771 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 3772 { 3773 if (unlikely(start > skb_headlen(skb)) || 3774 unlikely((int)start + off > skb_headlen(skb) - 2)) { 3775 net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", 3776 start, off, skb_headlen(skb)); 3777 return false; 3778 } 3779 skb->ip_summed = CHECKSUM_PARTIAL; 3780 skb->csum_start = skb_headroom(skb) + start; 3781 skb->csum_offset = off; 3782 skb_set_transport_header(skb, start); 3783 return true; 3784 } 3785 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 3786 3787 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 3788 unsigned int max) 3789 { 3790 if (skb_headlen(skb) >= len) 3791 return 0; 3792 3793 /* If we need to pullup then pullup to the max, so we 3794 * won't need to do it again. 3795 */ 3796 if (max > skb->len) 3797 max = skb->len; 3798 3799 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 3800 return -ENOMEM; 3801 3802 if (skb_headlen(skb) < len) 3803 return -EPROTO; 3804 3805 return 0; 3806 } 3807 3808 #define MAX_TCP_HDR_LEN (15 * 4) 3809 3810 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 3811 typeof(IPPROTO_IP) proto, 3812 unsigned int off) 3813 { 3814 switch (proto) { 3815 int err; 3816 3817 case IPPROTO_TCP: 3818 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 3819 off + MAX_TCP_HDR_LEN); 3820 if (!err && !skb_partial_csum_set(skb, off, 3821 offsetof(struct tcphdr, 3822 check))) 3823 err = -EPROTO; 3824 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 3825 3826 case IPPROTO_UDP: 3827 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 3828 off + sizeof(struct udphdr)); 3829 if (!err && !skb_partial_csum_set(skb, off, 3830 offsetof(struct udphdr, 3831 check))) 3832 err = -EPROTO; 3833 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 3834 } 3835 3836 return ERR_PTR(-EPROTO); 3837 } 3838 3839 /* This value should be large enough to cover a tagged ethernet header plus 3840 * maximally sized IP and TCP or UDP headers. 
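 * (A maximal IPv4 header is 60 bytes and a maximal TCP header is another
 * 60 bytes.)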
3841 */ 3842 #define MAX_IP_HDR_LEN 128 3843 3844 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 3845 { 3846 unsigned int off; 3847 bool fragment; 3848 __sum16 *csum; 3849 int err; 3850 3851 fragment = false; 3852 3853 err = skb_maybe_pull_tail(skb, 3854 sizeof(struct iphdr), 3855 MAX_IP_HDR_LEN); 3856 if (err < 0) 3857 goto out; 3858 3859 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) 3860 fragment = true; 3861 3862 off = ip_hdrlen(skb); 3863 3864 err = -EPROTO; 3865 3866 if (fragment) 3867 goto out; 3868 3869 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 3870 if (IS_ERR(csum)) 3871 return PTR_ERR(csum); 3872 3873 if (recalculate) 3874 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 3875 ip_hdr(skb)->daddr, 3876 skb->len - off, 3877 ip_hdr(skb)->protocol, 0); 3878 err = 0; 3879 3880 out: 3881 return err; 3882 } 3883 3884 /* This value should be large enough to cover a tagged ethernet header plus 3885 * an IPv6 header, all options, and a maximal TCP or UDP header. 3886 */ 3887 #define MAX_IPV6_HDR_LEN 256 3888 3889 #define OPT_HDR(type, skb, off) \ 3890 (type *)(skb_network_header(skb) + (off)) 3891 3892 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 3893 { 3894 int err; 3895 u8 nexthdr; 3896 unsigned int off; 3897 unsigned int len; 3898 bool fragment; 3899 bool done; 3900 __sum16 *csum; 3901 3902 fragment = false; 3903 done = false; 3904 3905 off = sizeof(struct ipv6hdr); 3906 3907 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 3908 if (err < 0) 3909 goto out; 3910 3911 nexthdr = ipv6_hdr(skb)->nexthdr; 3912 3913 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 3914 while (off <= len && !done) { 3915 switch (nexthdr) { 3916 case IPPROTO_DSTOPTS: 3917 case IPPROTO_HOPOPTS: 3918 case IPPROTO_ROUTING: { 3919 struct ipv6_opt_hdr *hp; 3920 3921 err = skb_maybe_pull_tail(skb, 3922 off + 3923 sizeof(struct ipv6_opt_hdr), 3924 MAX_IPV6_HDR_LEN); 3925 if (err < 0) 3926 goto out; 3927 3928 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 3929 nexthdr = hp->nexthdr; 3930 off += ipv6_optlen(hp); 3931 break; 3932 } 3933 case IPPROTO_AH: { 3934 struct ip_auth_hdr *hp; 3935 3936 err = skb_maybe_pull_tail(skb, 3937 off + 3938 sizeof(struct ip_auth_hdr), 3939 MAX_IPV6_HDR_LEN); 3940 if (err < 0) 3941 goto out; 3942 3943 hp = OPT_HDR(struct ip_auth_hdr, skb, off); 3944 nexthdr = hp->nexthdr; 3945 off += ipv6_authlen(hp); 3946 break; 3947 } 3948 case IPPROTO_FRAGMENT: { 3949 struct frag_hdr *hp; 3950 3951 err = skb_maybe_pull_tail(skb, 3952 off + 3953 sizeof(struct frag_hdr), 3954 MAX_IPV6_HDR_LEN); 3955 if (err < 0) 3956 goto out; 3957 3958 hp = OPT_HDR(struct frag_hdr, skb, off); 3959 3960 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 3961 fragment = true; 3962 3963 nexthdr = hp->nexthdr; 3964 off += sizeof(struct frag_hdr); 3965 break; 3966 } 3967 default: 3968 done = true; 3969 break; 3970 } 3971 } 3972 3973 err = -EPROTO; 3974 3975 if (!done || fragment) 3976 goto out; 3977 3978 csum = skb_checksum_setup_ip(skb, nexthdr, off); 3979 if (IS_ERR(csum)) 3980 return PTR_ERR(csum); 3981 3982 if (recalculate) 3983 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 3984 &ipv6_hdr(skb)->daddr, 3985 skb->len - off, nexthdr, 0); 3986 err = 0; 3987 3988 out: 3989 return err; 3990 } 3991 3992 /** 3993 * skb_checksum_setup - set up partial checksum offset 3994 * @skb: the skb to set up 3995 * @recalculate: if true the pseudo-header checksum will be recalculated 3996 */ 3997 int skb_checksum_setup(struct sk_buff *skb, bool 
recalculate) 3998 { 3999 int err; 4000 4001 switch (skb->protocol) { 4002 case htons(ETH_P_IP): 4003 err = skb_checksum_setup_ipv4(skb, recalculate); 4004 break; 4005 4006 case htons(ETH_P_IPV6): 4007 err = skb_checksum_setup_ipv6(skb, recalculate); 4008 break; 4009 4010 default: 4011 err = -EPROTO; 4012 break; 4013 } 4014 4015 return err; 4016 } 4017 EXPORT_SYMBOL(skb_checksum_setup); 4018 4019 /** 4020 * skb_checksum_maybe_trim - maybe trims the given skb 4021 * @skb: the skb to check 4022 * @transport_len: the data length beyond the network header 4023 * 4024 * Checks whether the given skb has data beyond the given transport length. 4025 * If so, returns a cloned skb trimmed to this transport length. 4026 * Otherwise returns the provided skb. Returns NULL in error cases 4027 * (e.g. transport_len exceeds skb length or out-of-memory). 4028 * 4029 * Caller needs to set the skb transport header and free any returned skb if it 4030 * differs from the provided skb. 4031 */ 4032 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 4033 unsigned int transport_len) 4034 { 4035 struct sk_buff *skb_chk; 4036 unsigned int len = skb_transport_offset(skb) + transport_len; 4037 int ret; 4038 4039 if (skb->len < len) 4040 return NULL; 4041 else if (skb->len == len) 4042 return skb; 4043 4044 skb_chk = skb_clone(skb, GFP_ATOMIC); 4045 if (!skb_chk) 4046 return NULL; 4047 4048 ret = pskb_trim_rcsum(skb_chk, len); 4049 if (ret) { 4050 kfree_skb(skb_chk); 4051 return NULL; 4052 } 4053 4054 return skb_chk; 4055 } 4056 4057 /** 4058 * skb_checksum_trimmed - validate checksum of an skb 4059 * @skb: the skb to check 4060 * @transport_len: the data length beyond the network header 4061 * @skb_chkf: checksum function to use 4062 * 4063 * Applies the given checksum function skb_chkf to the provided skb. 4064 * Returns a checked and maybe trimmed skb. Returns NULL on error. 4065 * 4066 * If the skb has data beyond the given transport length, then a 4067 * trimmed & cloned skb is checked and returned. 4068 * 4069 * Caller needs to set the skb transport header and free any returned skb if it 4070 * differs from the provided skb. 
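 *
 * Illustrative sketch, not part of the original file ('tlen' and
 * my_chkf() are placeholders), assuming the transport header has already
 * been set on @skb:
 *
 *     skb_chk = skb_checksum_trimmed(skb, tlen, my_chkf);
 *     if (!skb_chk)
 *         return -EINVAL;
 *
 *     ... use skb_chk ...
 *
 *     if (skb_chk != skb)
 *         kfree_skb(skb_chk);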

/**
 * skb_checksum_maybe_trim - maybe trims the given skb
 * @skb: the skb to check
 * @transport_len: the data length beyond the network header
 *
 * Checks whether the given skb has data beyond the given transport length.
 * If so, returns a cloned skb trimmed to this transport length.
 * Otherwise returns the provided skb. Returns NULL in error cases
 * (e.g. transport_len exceeds skb length or out-of-memory).
 *
 * Caller needs to set the skb transport header and free any returned skb if it
 * differs from the provided skb.
 */
static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
					       unsigned int transport_len)
{
	struct sk_buff *skb_chk;
	unsigned int len = skb_transport_offset(skb) + transport_len;
	int ret;

	if (skb->len < len)
		return NULL;
	else if (skb->len == len)
		return skb;

	skb_chk = skb_clone(skb, GFP_ATOMIC);
	if (!skb_chk)
		return NULL;

	ret = pskb_trim_rcsum(skb_chk, len);
	if (ret) {
		kfree_skb(skb_chk);
		return NULL;
	}

	return skb_chk;
}

/**
 * skb_checksum_trimmed - validate checksum of an skb
 * @skb: the skb to check
 * @transport_len: the data length beyond the network header
 * @skb_chkf: checksum function to use
 *
 * Applies the given checksum function skb_chkf to the provided skb.
 * Returns a checked and maybe trimmed skb. Returns NULL on error.
 *
 * If the skb has data beyond the given transport length, then a
 * trimmed & cloned skb is checked and returned.
 *
 * Caller needs to set the skb transport header and free any returned skb if it
 * differs from the provided skb.
 */
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb))
{
	struct sk_buff *skb_chk;
	unsigned int offset = skb_transport_offset(skb);
	__sum16 ret;

	skb_chk = skb_checksum_maybe_trim(skb, transport_len);
	if (!skb_chk)
		goto err;

	if (!pskb_may_pull(skb_chk, offset))
		goto err;

	__skb_pull(skb_chk, offset);
	ret = skb_chkf(skb_chk);
	__skb_push(skb_chk, offset);

	if (ret)
		goto err;

	return skb_chk;

err:
	if (skb_chk && skb_chk != skb)
		kfree_skb(skb_chk);

	return NULL;
}
EXPORT_SYMBOL(skb_checksum_trimmed);

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
			     skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
	if (head_stolen) {
		skb_release_head_state(skb);
		kmem_cache_free(skbuff_head_cache, skb);
	} else {
		__kfree_skb(skb);
	}
}
EXPORT_SYMBOL(kfree_skb_partial);
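
/* Example (illustrative sketch, not part of the kernel): validating the
 * checksum of a transport payload that may be followed by padding, in the
 * style of the multicast snooping code.  The helper and caller names are
 * hypothetical; skb_checksum_simple_validate() is the stock full-packet
 * checksum check from linux/skbuff.h.
 */
static __sum16 __maybe_unused example_validate_csum(struct sk_buff *skb)
{
	return skb_checksum_simple_validate(skb);
}

static int __maybe_unused example_check_report(struct sk_buff *skb,
					       unsigned int transport_len)
{
	struct sk_buff *skb_chk;

	/* Caller is assumed to have set the transport header already. */
	skb_chk = skb_checksum_trimmed(skb, transport_len,
				       example_validate_csum);
	if (!skb_chk)
		return -EINVAL;

	/* ... parse the checked (and possibly trimmed) packet here ... */

	if (skb_chk != skb)
		kfree_skb(skb_chk);

	return 0;
}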

/**
 * skb_try_coalesce - try to merge skb to prior one
 * @to: prior buffer
 * @from: buffer to add
 * @fragstolen: pointer to boolean
 * @delta_truesize: how much more was allocated than was requested
 */
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize)
{
	int i, delta, len = from->len;

	*fragstolen = false;

	if (skb_cloned(to))
		return false;

	if (len <= skb_tailroom(to)) {
		if (len)
			BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
		*delta_truesize = 0;
		return true;
	}

	if (skb_has_frag_list(to) || skb_has_frag_list(from))
		return false;

	if (skb_headlen(from) != 0) {
		struct page *page;
		unsigned int offset;

		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
			return false;

		if (skb_head_is_locked(from))
			return false;

		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));

		page = virt_to_head_page(from->head);
		offset = from->data - (unsigned char *)page_address(page);

		skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
				   page, offset, skb_headlen(from));
		*fragstolen = true;
	} else {
		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
			return false;

		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
	}

	WARN_ON_ONCE(delta < len);

	memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
	       skb_shinfo(from)->frags,
	       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
	skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;

	if (!skb_cloned(from))
		skb_shinfo(from)->nr_frags = 0;

	/* if the skb is not cloned this does nothing
	 * since we set nr_frags to 0.
	 */
	for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
		skb_frag_ref(from, i);

	to->truesize += delta;
	to->len += len;
	to->data_len += len;

	*delta_truesize = delta;
	return true;
}
EXPORT_SYMBOL(skb_try_coalesce);

/**
 * skb_scrub_packet - scrub an skb
 *
 * @skb: buffer to clean
 * @xnet: packet is crossing netns
 *
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information has to be cleared during these
 * operations.
 * skb_scrub_packet can also be used to clean an skb before injecting it into
 * another namespace (@xnet == true). We have to clear all information in the
 * skb that could impact namespace isolation.
 */
void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb->ignore_df = 0;
	skb_dst_drop(skb);
	skb_sender_cpu_clear(skb);
	secpath_reset(skb);
	nf_reset(skb);
	nf_reset_trace(skb);

	if (!xnet)
		return;

	skb_orphan(skb);
	skb->mark = 0;
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);
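
/* Example (illustrative sketch): a tunnel receive path typically scrubs the
 * decapsulated skb, requesting the full cross-namespace cleanup only when
 * the inner packet is about to leave the namespace it arrived in.  The
 * function and parameter names are hypothetical; net_eq()/dev_net() are the
 * usual namespace comparison helpers.
 */
static void __maybe_unused example_tunnel_decap_scrub(struct sk_buff *skb,
						      struct net_device *tunnel_dev)
{
	bool xnet = !net_eq(dev_net(tunnel_dev), dev_net(skb->dev));

	skb_scrub_packet(skb, xnet);
	skb->dev = tunnel_dev;
}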

/**
 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_transport_seglen is used to determine the real size of the
 * individual segments, including Layer4 headers (TCP/UDP).
 *
 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
 */
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int thlen = 0;

	if (skb->encapsulation) {
		thlen = skb_inner_transport_header(skb) -
			skb_transport_header(skb);

		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			thlen += inner_tcp_hdrlen(skb);
	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
		thlen = tcp_hdrlen(skb);
	}
	/* UFO sets gso_size to the size of the fragmentation
	 * payload, i.e. the size of the L4 (UDP) header is already
	 * accounted for.
	 */
	return thlen + shinfo->gso_size;
}
EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);

static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0) {
		kfree_skb(skb);
		return NULL;
	}

	memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
		2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	return skb;
}

struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(skb_vlan_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = skb_reorder_vlan_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(skb_vlan_untag);

int skb_ensure_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
EXPORT_SYMBOL(skb_ensure_writable);
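
/* Example (illustrative sketch): before rewriting packet bytes in place, a
 * caller must make sure the bytes are in the linear head and not shared with
 * clones.  This hypothetical helper decrements the IPv4 TTL after calling
 * skb_ensure_writable() on the network header; the incremental checksum
 * update follows the usual csum_replace2() pattern.
 */
static int __maybe_unused example_decrement_ttl(struct sk_buff *skb)
{
	struct iphdr *iph;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (err)
		return err;

	iph = ip_hdr(skb);
	if (iph->ttl <= 1)
		return -EINVAL;

	/* TTL occupies the high byte of the 16-bit word covered by the diff */
	csum_replace2(&iph->check, htons(iph->ttl << 8),
		      htons((iph->ttl - 1) << 8));
	iph->ttl--;

	return 0;
}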

/* remove VLAN header from packet and update csum accordingly. */
static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_hdr *vhdr;
	unsigned int offset = skb->data - skb_mac_header(skb);
	int err;

	__skb_push(skb, offset);
	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		goto pull;

	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*vlan_tci = ntohs(vhdr->h_vlan_TCI);

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;

	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);

	skb_reset_mac_len(skb);
pull:
	__skb_pull(skb, offset);

	return err;
}

int skb_vlan_pop(struct sk_buff *skb)
{
	u16 vlan_tci;
	__be16 vlan_proto;
	int err;

	if (likely(skb_vlan_tag_present(skb))) {
		skb->vlan_tci = 0;
	} else {
		if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
			      skb->protocol != htons(ETH_P_8021AD)) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __skb_vlan_pop(skb, &vlan_tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely((skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	vlan_proto = skb->protocol;
	err = __skb_vlan_pop(skb, &vlan_tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_pop);

int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		unsigned int offset = skb->data - skb_mac_header(skb);
		int err;

		/* __vlan_insert_tag expects skb->data pointing to mac header.
		 * So change skb->data before calling it and change back to
		 * original position later
		 */
		__skb_push(skb, offset);
		err = __vlan_insert_tag(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
		if (err)
			return err;
		skb->protocol = skb->vlan_proto;
		skb->mac_len += VLAN_HLEN;
		__skb_pull(skb, offset);

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
	}
	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_push);
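
/* Example (illustrative sketch): rewriting the outermost VLAN tag of a packet
 * by popping it and pushing a replacement, as a vlan-modify action might.
 * The function name and policy are hypothetical; both helpers rely on the
 * mac header offset being set, adjust skb->data internally, and return 0 or
 * a negative errno.
 */
static int __maybe_unused example_rewrite_vlan(struct sk_buff *skb,
					       __be16 new_proto, u16 new_tci)
{
	int err;

	err = skb_vlan_pop(skb);
	if (err)
		return err;

	return skb_vlan_push(skb, new_proto, new_tci);
}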

/**
 * alloc_skb_with_frags - allocate skb with page frags
 *
 * @header_len: size of linear part
 * @data_len: needed length in frags
 * @max_page_order: max page order desired.
 * @errcode: pointer to error code if any
 * @gfp_mask: allocation mask
 *
 * This can be used to allocate a paged skb, given a maximal order for frags.
 */
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask)
{
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	unsigned long chunk;
	struct sk_buff *skb;
	struct page *page;
	gfp_t gfp_head;
	int i;

	*errcode = -EMSGSIZE;
	/* Note this test could be relaxed, if we succeed in allocating
	 * high order pages...
	 */
	if (npages > MAX_SKB_FRAGS)
		return NULL;

	gfp_head = gfp_mask;
	if (gfp_head & __GFP_DIRECT_RECLAIM)
		gfp_head |= __GFP_REPEAT;

	*errcode = -ENOBUFS;
	skb = alloc_skb(header_len, gfp_head);
	if (!skb)
		return NULL;

	skb->truesize += npages << PAGE_SHIFT;

	for (i = 0; npages > 0; i++) {
		int order = max_page_order;

		while (order) {
			if (npages >= 1 << order) {
				page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
						   __GFP_COMP |
						   __GFP_NOWARN |
						   __GFP_NORETRY,
						   order);
				if (page)
					goto fill_page;
				/* Do not retry other high order allocations */
				order = 1;
				max_page_order = 0;
			}
			order--;
		}
		page = alloc_page(gfp_mask);
		if (!page)
			goto failure;
fill_page:
		chunk = min_t(unsigned long, data_len,
			      PAGE_SIZE << order);
		skb_fill_page_desc(skb, i, page, 0, chunk);
		data_len -= chunk;
		npages -= 1 << order;
	}
	return skb;

failure:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(alloc_skb_with_frags);
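
/* Example (illustrative sketch): allocating a mostly-paged skb for a large
 * datagram, keeping only up to one page of payload in the linear area and the
 * rest in page fragments, in the style of the sock_alloc_send_pskb() callers.
 * Names and size policy here are hypothetical; after allocation the caller
 * accounts the linear bytes with skb_put() and the frag bytes via
 * skb->data_len before copying the payload in.
 */
static __maybe_unused struct sk_buff *example_alloc_large_skb(size_t size,
							      int *errcode)
{
	size_t linear = min_t(size_t, size, PAGE_SIZE);
	struct sk_buff *skb;

	skb = alloc_skb_with_frags(linear, size - linear,
				   PAGE_ALLOC_COSTLY_ORDER,
				   errcode, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_put(skb, linear);
	skb->data_len = size - linear;
	skb->len = size;

	return skb;
}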