// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>

#include "datagram.h"

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
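
/* Illustrative usage sketch (header_len, payload_len and fill_payload() are
 * hypothetical placeholders, not part of this file): a caller building an
 * outgoing packet with alloc_skb() typically reserves the headroom it will
 * need and only then appends payload with skb_put().
 *
 *	struct sk_buff *skb = alloc_skb(header_len + payload_len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, header_len);			leave headroom
 *	fill_payload(skb_put(skb, payload_len));	append payload
 *	...	headers are later prepended with skb_push()
 */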
306 */ 307 struct sk_buff *__build_skb(void *data, unsigned int frag_size) 308 { 309 struct sk_buff *skb; 310 311 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); 312 if (unlikely(!skb)) 313 return NULL; 314 315 memset(skb, 0, offsetof(struct sk_buff, tail)); 316 317 return __build_skb_around(skb, data, frag_size); 318 } 319 320 /* build_skb() is wrapper over __build_skb(), that specifically 321 * takes care of skb->head and skb->pfmemalloc 322 * This means that if @frag_size is not zero, then @data must be backed 323 * by a page fragment, not kmalloc() or vmalloc() 324 */ 325 struct sk_buff *build_skb(void *data, unsigned int frag_size) 326 { 327 struct sk_buff *skb = __build_skb(data, frag_size); 328 329 if (skb && frag_size) { 330 skb->head_frag = 1; 331 if (page_is_pfmemalloc(virt_to_head_page(data))) 332 skb->pfmemalloc = 1; 333 } 334 return skb; 335 } 336 EXPORT_SYMBOL(build_skb); 337 338 /** 339 * build_skb_around - build a network buffer around provided skb 340 * @skb: sk_buff provide by caller, must be memset cleared 341 * @data: data buffer provided by caller 342 * @frag_size: size of data, or 0 if head was kmalloced 343 */ 344 struct sk_buff *build_skb_around(struct sk_buff *skb, 345 void *data, unsigned int frag_size) 346 { 347 if (unlikely(!skb)) 348 return NULL; 349 350 skb = __build_skb_around(skb, data, frag_size); 351 352 if (skb && frag_size) { 353 skb->head_frag = 1; 354 if (page_is_pfmemalloc(virt_to_head_page(data))) 355 skb->pfmemalloc = 1; 356 } 357 return skb; 358 } 359 EXPORT_SYMBOL(build_skb_around); 360 361 #define NAPI_SKB_CACHE_SIZE 64 362 363 struct napi_alloc_cache { 364 struct page_frag_cache page; 365 unsigned int skb_count; 366 void *skb_cache[NAPI_SKB_CACHE_SIZE]; 367 }; 368 369 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); 370 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache); 371 372 static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) 373 { 374 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 375 376 return page_frag_alloc(&nc->page, fragsz, gfp_mask); 377 } 378 379 void *napi_alloc_frag(unsigned int fragsz) 380 { 381 fragsz = SKB_DATA_ALIGN(fragsz); 382 383 return __napi_alloc_frag(fragsz, GFP_ATOMIC); 384 } 385 EXPORT_SYMBOL(napi_alloc_frag); 386 387 /** 388 * netdev_alloc_frag - allocate a page fragment 389 * @fragsz: fragment size 390 * 391 * Allocates a frag from a page for receive buffer. 392 * Uses GFP_ATOMIC allocations. 393 */ 394 void *netdev_alloc_frag(unsigned int fragsz) 395 { 396 struct page_frag_cache *nc; 397 void *data; 398 399 fragsz = SKB_DATA_ALIGN(fragsz); 400 if (in_irq() || irqs_disabled()) { 401 nc = this_cpu_ptr(&netdev_alloc_cache); 402 data = page_frag_alloc(nc, fragsz, GFP_ATOMIC); 403 } else { 404 local_bh_disable(); 405 data = __napi_alloc_frag(fragsz, GFP_ATOMIC); 406 local_bh_enable(); 407 } 408 return data; 409 } 410 EXPORT_SYMBOL(netdev_alloc_frag); 411 412 /** 413 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device 414 * @dev: network device to receive on 415 * @len: length to allocate 416 * @gfp_mask: get_free_pages mask, passed to alloc_skb 417 * 418 * Allocate a new &sk_buff and assign it a usage count of one. The 419 * buffer has NET_SKB_PAD headroom built in. Users should allocate 420 * the headroom they think they need without accounting for the 421 * built in space. The built in space is used for optimisations. 422 * 423 * %NULL is returned if there is no free memory. 
424 */ 425 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, 426 gfp_t gfp_mask) 427 { 428 struct page_frag_cache *nc; 429 struct sk_buff *skb; 430 bool pfmemalloc; 431 void *data; 432 433 len += NET_SKB_PAD; 434 435 if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) || 436 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { 437 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); 438 if (!skb) 439 goto skb_fail; 440 goto skb_success; 441 } 442 443 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 444 len = SKB_DATA_ALIGN(len); 445 446 if (sk_memalloc_socks()) 447 gfp_mask |= __GFP_MEMALLOC; 448 449 if (in_irq() || irqs_disabled()) { 450 nc = this_cpu_ptr(&netdev_alloc_cache); 451 data = page_frag_alloc(nc, len, gfp_mask); 452 pfmemalloc = nc->pfmemalloc; 453 } else { 454 local_bh_disable(); 455 nc = this_cpu_ptr(&napi_alloc_cache.page); 456 data = page_frag_alloc(nc, len, gfp_mask); 457 pfmemalloc = nc->pfmemalloc; 458 local_bh_enable(); 459 } 460 461 if (unlikely(!data)) 462 return NULL; 463 464 skb = __build_skb(data, len); 465 if (unlikely(!skb)) { 466 skb_free_frag(data); 467 return NULL; 468 } 469 470 if (pfmemalloc) 471 skb->pfmemalloc = 1; 472 skb->head_frag = 1; 473 474 skb_success: 475 skb_reserve(skb, NET_SKB_PAD); 476 skb->dev = dev; 477 478 skb_fail: 479 return skb; 480 } 481 EXPORT_SYMBOL(__netdev_alloc_skb); 482 483 /** 484 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance 485 * @napi: napi instance this buffer was allocated for 486 * @len: length to allocate 487 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages 488 * 489 * Allocate a new sk_buff for use in NAPI receive. This buffer will 490 * attempt to allocate the head from a special reserved region used 491 * only for NAPI Rx allocation. By doing this we can save several 492 * CPU cycles by avoiding having to disable and re-enable IRQs. 493 * 494 * %NULL is returned if there is no free memory. 
495 */ 496 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, 497 gfp_t gfp_mask) 498 { 499 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 500 struct sk_buff *skb; 501 void *data; 502 503 len += NET_SKB_PAD + NET_IP_ALIGN; 504 505 if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) || 506 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { 507 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); 508 if (!skb) 509 goto skb_fail; 510 goto skb_success; 511 } 512 513 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 514 len = SKB_DATA_ALIGN(len); 515 516 if (sk_memalloc_socks()) 517 gfp_mask |= __GFP_MEMALLOC; 518 519 data = page_frag_alloc(&nc->page, len, gfp_mask); 520 if (unlikely(!data)) 521 return NULL; 522 523 skb = __build_skb(data, len); 524 if (unlikely(!skb)) { 525 skb_free_frag(data); 526 return NULL; 527 } 528 529 if (nc->page.pfmemalloc) 530 skb->pfmemalloc = 1; 531 skb->head_frag = 1; 532 533 skb_success: 534 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); 535 skb->dev = napi->dev; 536 537 skb_fail: 538 return skb; 539 } 540 EXPORT_SYMBOL(__napi_alloc_skb); 541 542 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, 543 int size, unsigned int truesize) 544 { 545 skb_fill_page_desc(skb, i, page, off, size); 546 skb->len += size; 547 skb->data_len += size; 548 skb->truesize += truesize; 549 } 550 EXPORT_SYMBOL(skb_add_rx_frag); 551 552 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, 553 unsigned int truesize) 554 { 555 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 556 557 skb_frag_size_add(frag, size); 558 skb->len += size; 559 skb->data_len += size; 560 skb->truesize += truesize; 561 } 562 EXPORT_SYMBOL(skb_coalesce_rx_frag); 563 564 static void skb_drop_list(struct sk_buff **listp) 565 { 566 kfree_skb_list(*listp); 567 *listp = NULL; 568 } 569 570 static inline void skb_drop_fraglist(struct sk_buff *skb) 571 { 572 skb_drop_list(&skb_shinfo(skb)->frag_list); 573 } 574 575 static void skb_clone_fraglist(struct sk_buff *skb) 576 { 577 struct sk_buff *list; 578 579 skb_walk_frags(skb, list) 580 skb_get(list); 581 } 582 583 static void skb_free_head(struct sk_buff *skb) 584 { 585 unsigned char *head = skb->head; 586 587 if (skb->head_frag) 588 skb_free_frag(head); 589 else 590 kfree(head); 591 } 592 593 static void skb_release_data(struct sk_buff *skb) 594 { 595 struct skb_shared_info *shinfo = skb_shinfo(skb); 596 int i; 597 598 if (skb->cloned && 599 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, 600 &shinfo->dataref)) 601 return; 602 603 for (i = 0; i < shinfo->nr_frags; i++) 604 __skb_frag_unref(&shinfo->frags[i]); 605 606 if (shinfo->frag_list) 607 kfree_skb_list(shinfo->frag_list); 608 609 skb_zcopy_clear(skb, true); 610 skb_free_head(skb); 611 } 612 613 /* 614 * Free an skbuff by memory without cleaning the state. 615 */ 616 static void kfree_skbmem(struct sk_buff *skb) 617 { 618 struct sk_buff_fclones *fclones; 619 620 switch (skb->fclone) { 621 case SKB_FCLONE_UNAVAILABLE: 622 kmem_cache_free(skbuff_head_cache, skb); 623 return; 624 625 case SKB_FCLONE_ORIG: 626 fclones = container_of(skb, struct sk_buff_fclones, skb1); 627 628 /* We usually free the clone (TX completion) before original skb 629 * This test would have no chance to be true for the clone, 630 * while here, branch prediction will be good. 
631 */ 632 if (refcount_read(&fclones->fclone_ref) == 1) 633 goto fastpath; 634 break; 635 636 default: /* SKB_FCLONE_CLONE */ 637 fclones = container_of(skb, struct sk_buff_fclones, skb2); 638 break; 639 } 640 if (!refcount_dec_and_test(&fclones->fclone_ref)) 641 return; 642 fastpath: 643 kmem_cache_free(skbuff_fclone_cache, fclones); 644 } 645 646 void skb_release_head_state(struct sk_buff *skb) 647 { 648 skb_dst_drop(skb); 649 if (skb->destructor) { 650 WARN_ON(in_irq()); 651 skb->destructor(skb); 652 } 653 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 654 nf_conntrack_put(skb_nfct(skb)); 655 #endif 656 skb_ext_put(skb); 657 } 658 659 /* Free everything but the sk_buff shell. */ 660 static void skb_release_all(struct sk_buff *skb) 661 { 662 skb_release_head_state(skb); 663 if (likely(skb->head)) 664 skb_release_data(skb); 665 } 666 667 /** 668 * __kfree_skb - private function 669 * @skb: buffer 670 * 671 * Free an sk_buff. Release anything attached to the buffer. 672 * Clean the state. This is an internal helper function. Users should 673 * always call kfree_skb 674 */ 675 676 void __kfree_skb(struct sk_buff *skb) 677 { 678 skb_release_all(skb); 679 kfree_skbmem(skb); 680 } 681 EXPORT_SYMBOL(__kfree_skb); 682 683 /** 684 * kfree_skb - free an sk_buff 685 * @skb: buffer to free 686 * 687 * Drop a reference to the buffer and free it if the usage count has 688 * hit zero. 689 */ 690 void kfree_skb(struct sk_buff *skb) 691 { 692 if (!skb_unref(skb)) 693 return; 694 695 trace_kfree_skb(skb, __builtin_return_address(0)); 696 __kfree_skb(skb); 697 } 698 EXPORT_SYMBOL(kfree_skb); 699 700 void kfree_skb_list(struct sk_buff *segs) 701 { 702 while (segs) { 703 struct sk_buff *next = segs->next; 704 705 kfree_skb(segs); 706 segs = next; 707 } 708 } 709 EXPORT_SYMBOL(kfree_skb_list); 710 711 /* Dump skb information and contents. 712 * 713 * Must only be called from net_ratelimit()-ed paths. 714 * 715 * Dumps up to can_dump_full whole packets if full_pkt, headers otherwise. 716 */ 717 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) 718 { 719 static atomic_t can_dump_full = ATOMIC_INIT(5); 720 struct skb_shared_info *sh = skb_shinfo(skb); 721 struct net_device *dev = skb->dev; 722 struct sock *sk = skb->sk; 723 struct sk_buff *list_skb; 724 bool has_mac, has_trans; 725 int headroom, tailroom; 726 int i, len, seg_len; 727 728 if (full_pkt) 729 full_pkt = atomic_dec_if_positive(&can_dump_full) >= 0; 730 731 if (full_pkt) 732 len = skb->len; 733 else 734 len = min_t(int, skb->len, MAX_HEADER + 128); 735 736 headroom = skb_headroom(skb); 737 tailroom = skb_tailroom(skb); 738 739 has_mac = skb_mac_header_was_set(skb); 740 has_trans = skb_transport_header_was_set(skb); 741 742 printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n" 743 "mac=(%d,%d) net=(%d,%d) trans=%d\n" 744 "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n" 745 "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n" 746 "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n", 747 level, skb->len, headroom, skb_headlen(skb), tailroom, 748 has_mac ? skb->mac_header : -1, 749 has_mac ? skb_mac_header_len(skb) : -1, 750 skb->network_header, 751 has_trans ? skb_network_header_len(skb) : -1, 752 has_trans ? 

/* Dump skb information and contents.
 *
 * Must only be called from net_ratelimit()-ed paths.
 *
 * Dumps up to can_dump_full whole packets if full_pkt, headers otherwise.
 */
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
	static atomic_t can_dump_full = ATOMIC_INIT(5);
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct net_device *dev = skb->dev;
	struct sock *sk = skb->sk;
	struct sk_buff *list_skb;
	bool has_mac, has_trans;
	int headroom, tailroom;
	int i, len, seg_len;

	if (full_pkt)
		full_pkt = atomic_dec_if_positive(&can_dump_full) >= 0;

	if (full_pkt)
		len = skb->len;
	else
		len = min_t(int, skb->len, MAX_HEADER + 128);

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	has_mac = skb_mac_header_was_set(skb);
	has_trans = skb_transport_header_was_set(skb);

	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
	       "mac=(%d,%d) net=(%d,%d) trans=%d\n"
	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
	       "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
	       level, skb->len, headroom, skb_headlen(skb), tailroom,
	       has_mac ? skb->mac_header : -1,
	       has_mac ? skb_mac_header_len(skb) : -1,
	       skb->network_header,
	       has_trans ? skb_network_header_len(skb) : -1,
	       has_trans ? skb->transport_header : -1,
	       sh->tx_flags, sh->nr_frags,
	       sh->gso_size, sh->gso_type, sh->gso_segs,
	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
	       skb->csum_valid, skb->csum_level,
	       skb->hash, skb->sw_hash, skb->l4_hash,
	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);

	if (dev)
		printk("%sdev name=%s feat=0x%pNF\n",
		       level, dev->name, &dev->features);
	if (sk)
		printk("%ssk family=%hu type=%u proto=%u\n",
		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);

	if (full_pkt && headroom)
		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->head, headroom, false);

	seg_len = min_t(int, skb_headlen(skb), len);
	if (seg_len)
		print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->data, seg_len, false);
	len -= seg_len;

	if (full_pkt && tailroom)
		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb_tail_pointer(skb), tailroom, false);

	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(frag, skb_frag_off(frag),
				      skb_frag_size(frag), p, p_off, p_len,
				      copied) {
			seg_len = min_t(int, p_len, len);
			vaddr = kmap_atomic(p);
			print_hex_dump(level, "skb frag: ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, vaddr + p_off, seg_len, false);
			kunmap_atomic(vaddr);
			len -= seg_len;
			if (!len)
				break;
		}
	}

	if (full_pkt && skb_has_frag_list(skb)) {
		printk("skb fraglist:\n");
		skb_walk_frags(skb, list_skb)
			skb_dump(level, list_skb, true);
	}
}
EXPORT_SYMBOL(skb_dump);

/**
 * skb_tx_error - report an sk_buff xmit error
 * @skb: buffer that triggered an error
 *
 * Report xmit error if a device callback is tracking this skb.
 * skb must be freed afterwards.
816 */ 817 void skb_tx_error(struct sk_buff *skb) 818 { 819 skb_zcopy_clear(skb, true); 820 } 821 EXPORT_SYMBOL(skb_tx_error); 822 823 #ifdef CONFIG_TRACEPOINTS 824 /** 825 * consume_skb - free an skbuff 826 * @skb: buffer to free 827 * 828 * Drop a ref to the buffer and free it if the usage count has hit zero 829 * Functions identically to kfree_skb, but kfree_skb assumes that the frame 830 * is being dropped after a failure and notes that 831 */ 832 void consume_skb(struct sk_buff *skb) 833 { 834 if (!skb_unref(skb)) 835 return; 836 837 trace_consume_skb(skb); 838 __kfree_skb(skb); 839 } 840 EXPORT_SYMBOL(consume_skb); 841 #endif 842 843 /** 844 * consume_stateless_skb - free an skbuff, assuming it is stateless 845 * @skb: buffer to free 846 * 847 * Alike consume_skb(), but this variant assumes that this is the last 848 * skb reference and all the head states have been already dropped 849 */ 850 void __consume_stateless_skb(struct sk_buff *skb) 851 { 852 trace_consume_skb(skb); 853 skb_release_data(skb); 854 kfree_skbmem(skb); 855 } 856 857 void __kfree_skb_flush(void) 858 { 859 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 860 861 /* flush skb_cache if containing objects */ 862 if (nc->skb_count) { 863 kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count, 864 nc->skb_cache); 865 nc->skb_count = 0; 866 } 867 } 868 869 static inline void _kfree_skb_defer(struct sk_buff *skb) 870 { 871 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); 872 873 /* drop skb->head and call any destructors for packet */ 874 skb_release_all(skb); 875 876 /* record skb to CPU local list */ 877 nc->skb_cache[nc->skb_count++] = skb; 878 879 #ifdef CONFIG_SLUB 880 /* SLUB writes into objects when freeing */ 881 prefetchw(skb); 882 #endif 883 884 /* flush skb_cache if it is filled */ 885 if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) { 886 kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE, 887 nc->skb_cache); 888 nc->skb_count = 0; 889 } 890 } 891 void __kfree_skb_defer(struct sk_buff *skb) 892 { 893 _kfree_skb_defer(skb); 894 } 895 896 void napi_consume_skb(struct sk_buff *skb, int budget) 897 { 898 if (unlikely(!skb)) 899 return; 900 901 /* Zero budget indicate non-NAPI context called us, like netpoll */ 902 if (unlikely(!budget)) { 903 dev_consume_skb_any(skb); 904 return; 905 } 906 907 if (!skb_unref(skb)) 908 return; 909 910 /* if reaching here SKB is ready to free */ 911 trace_consume_skb(skb); 912 913 /* if SKB is a clone, don't handle this case */ 914 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { 915 __kfree_skb(skb); 916 return; 917 } 918 919 _kfree_skb_defer(skb); 920 } 921 EXPORT_SYMBOL(napi_consume_skb); 922 923 /* Make sure a field is enclosed inside headers_start/headers_end section */ 924 #define CHECK_SKB_FIELD(field) \ 925 BUILD_BUG_ON(offsetof(struct sk_buff, field) < \ 926 offsetof(struct sk_buff, headers_start)); \ 927 BUILD_BUG_ON(offsetof(struct sk_buff, field) > \ 928 offsetof(struct sk_buff, headers_end)); \ 929 930 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) 931 { 932 new->tstamp = old->tstamp; 933 /* We do not copy old->sk */ 934 new->dev = old->dev; 935 memcpy(new->cb, old->cb, sizeof(old->cb)); 936 skb_dst_copy(new, old); 937 __skb_ext_copy(new, old); 938 __nf_copy(new, old, false); 939 940 /* Note : this field could be in headers_start/headers_end section 941 * It is not yet because we do not want to have a 16 bit hole 942 */ 943 new->queue_mapping = old->queue_mapping; 944 945 memcpy(&new->headers_start, 

/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	/* We do not copy old->sk */
	new->dev = old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
	__skb_ext_copy(new, old);
	__nf_copy(new, old, false);

	/* Note : this field could be in headers_start/headers_end section
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif

}

/*
 * You should not add any new code to this function. Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->peeked = 0;
	C(pfmemalloc);
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	refcount_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
 * @first: first sk_buff of the msg
 */
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
{
	struct sk_buff *n;

	n = alloc_skb(0, GFP_ATOMIC);
	if (!n)
		return NULL;

	n->len = first->len;
	n->data_len = first->len;
	n->truesize = first->truesize;

	skb_shinfo(n)->frag_list = first;

	__copy_skb_header(n, first);
	n->destructor = NULL;

	return n;
}
EXPORT_SYMBOL_GPL(alloc_skb_for_msg);

/**
 * skb_morph - morph one skb into another
 * @dst: the skb to receive the contents
 * @src: the skb to supply the contents
 *
 * This is identical to skb_clone except that the target skb is
 * supplied by the user.
 *
 * The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
{
	unsigned long max_pg, num_pg, new_pg, old_pg;
	struct user_struct *user;

	if (capable(CAP_IPC_LOCK) || !size)
		return 0;

	num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */
	max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	user = mmp->user ? : current_user();

	do {
		old_pg = atomic_long_read(&user->locked_vm);
		new_pg = old_pg + num_pg;
		if (new_pg > max_pg)
			return -ENOBUFS;
	} while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
		 old_pg);

	if (!mmp->user) {
		mmp->user = get_uid(user);
		mmp->num_pg = num_pg;
	} else {
		mmp->num_pg += num_pg;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mm_account_pinned_pages);

void mm_unaccount_pinned_pages(struct mmpin *mmp)
{
	if (mmp->user) {
		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
		free_uid(mmp->user);
	}
}
EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);

struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
{
	struct ubuf_info *uarg;
	struct sk_buff *skb;

	WARN_ON_ONCE(!in_task());

	skb = sock_omalloc(sk, 0, GFP_KERNEL);
	if (!skb)
		return NULL;

	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
	uarg = (void *)skb->cb;
	uarg->mmp.user = NULL;

	if (mm_account_pinned_pages(&uarg->mmp, size)) {
		kfree_skb(skb);
		return NULL;
	}

	uarg->callback = sock_zerocopy_callback;
	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
	uarg->len = 1;
	uarg->bytelen = size;
	uarg->zerocopy = 1;
	refcount_set(&uarg->refcnt, 1);
	sock_hold(sk);

	return uarg;
}
EXPORT_SYMBOL_GPL(sock_zerocopy_alloc);

static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
{
	return container_of((void *)uarg, struct sk_buff, cb);
}

struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
					struct ubuf_info *uarg)
{
	if (uarg) {
		const u32 byte_limit = 1 << 19;		/* limit to a few TSO */
		u32 bytelen, next;

		/* realloc only when socket is locked (TCP, UDP cork),
		 * so uarg->len and sk_zckey access is serialized
		 */
		if (!sock_owned_by_user(sk)) {
			WARN_ON_ONCE(1);
			return NULL;
		}

		bytelen = uarg->bytelen + size;
		if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
			/* TCP can create new skb to attach new uarg */
			if (sk->sk_type == SOCK_STREAM)
				goto new_alloc;
			return NULL;
		}

		next = (u32)atomic_read(&sk->sk_zckey);
		if ((u32)(uarg->id + uarg->len) == next) {
			if (mm_account_pinned_pages(&uarg->mmp, size))
				return NULL;
			uarg->len++;
			uarg->bytelen = bytelen;
			atomic_set(&sk->sk_zckey, ++next);

			/* no extra ref when appending to datagram (MSG_MORE) */
			if (sk->sk_type == SOCK_STREAM)
				sock_zerocopy_get(uarg);

			return uarg;
		}
	}

new_alloc:
	return sock_zerocopy_alloc(sk, size);
}
EXPORT_SYMBOL_GPL(sock_zerocopy_realloc);
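
/* Illustrative sketch of the receiving side of these notifications
 * (userspace-style pseudocode, not kernel code): each completion queued by
 * sock_zerocopy_callback() below reaches the application on the socket's
 * error queue as a range [ee_info, ee_data] of send call IDs, and
 * SO_EE_CODE_ZEROCOPY_COPIED marks sends that fell back to copying.
 *
 *	recvmsg(fd, &msg, MSG_ERRQUEUE);
 *	cm = CMSG_FIRSTHDR(&msg);
 *	serr = (void *)CMSG_DATA(cm);
 *	if (serr->ee_origin == SO_EE_ORIGIN_ZEROCOPY) {
 *		lo = serr->ee_info;
 *		hi = serr->ee_data;
 *		...	sends lo..hi have completed
 *	}
 */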

static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	u32 old_lo, old_hi;
	u64 sum_len;

	old_lo = serr->ee.ee_info;
	old_hi = serr->ee.ee_data;
	sum_len = old_hi - old_lo + 1ULL + len;

	if (sum_len >= (1ULL << 32))
		return false;

	if (lo != old_hi + 1)
		return false;

	serr->ee.ee_data += len;
	return true;
}

void sock_zerocopy_callback(struct ubuf_info *uarg, bool success)
{
	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
	struct sock_exterr_skb *serr;
	struct sock *sk = skb->sk;
	struct sk_buff_head *q;
	unsigned long flags;
	u32 lo, hi;
	u16 len;

	mm_unaccount_pinned_pages(&uarg->mmp);

	/* if !len, there was only 1 call, and it was aborted
	 * so do not queue a completion notification
	 */
	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
		goto release;

	len = uarg->len;
	lo = uarg->id;
	hi = uarg->id + len - 1;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = 0;
	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
	serr->ee.ee_data = hi;
	serr->ee.ee_info = lo;
	if (!success)
		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;

	q = &sk->sk_error_queue;
	spin_lock_irqsave(&q->lock, flags);
	tail = skb_peek_tail(q);
	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
	    !skb_zerocopy_notify_extend(tail, lo, len)) {
		__skb_queue_tail(q, skb);
		skb = NULL;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	sk->sk_error_report(sk);

release:
	consume_skb(skb);
	sock_put(sk);
}
EXPORT_SYMBOL_GPL(sock_zerocopy_callback);

void sock_zerocopy_put(struct ubuf_info *uarg)
{
	if (uarg && refcount_dec_and_test(&uarg->refcnt)) {
		if (uarg->callback)
			uarg->callback(uarg, uarg->zerocopy);
		else
			consume_skb(skb_from_uarg(uarg));
	}
}
EXPORT_SYMBOL_GPL(sock_zerocopy_put);

void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
	if (uarg) {
		struct sock *sk = skb_from_uarg(uarg)->sk;

		atomic_dec(&sk->sk_zckey);
		uarg->len--;

		if (have_uref)
			sock_zerocopy_put(uarg);
	}
}
EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);

int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
{
	return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);

int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg)
{
	struct ubuf_info *orig_uarg = skb_zcopy(skb);
	struct iov_iter orig_iter = msg->msg_iter;
	int err, orig_len = skb->len;

	/* An skb can only point to one uarg. This edge case happens when
	 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
	 */
	if (orig_uarg && uarg != orig_uarg)
		return -EEXIST;

	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
		struct sock *save_sk = skb->sk;

		/* Streams do not free skb on error. Reset to prev state. */
		msg->msg_iter = orig_iter;
		skb->sk = sk;
		___pskb_trim(skb, orig_len);
		skb->sk = save_sk;
		return err;
	}

	skb_zcopy_set(skb, uarg, NULL);
	return skb->len - orig_len;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);

static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
			      gfp_t gfp_mask)
{
	if (skb_zcopy(orig)) {
		if (skb_zcopy(nskb)) {
			/* !gfp_mask callers are verified to !skb_zcopy(nskb) */
			if (!gfp_mask) {
				WARN_ON_ONCE(1);
				return -ENOMEM;
			}
			if (skb_uarg(nskb) == skb_uarg(orig))
				return 0;
			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
				return -EIO;
		}
		skb_zcopy_set(nskb, skb_uarg(orig), NULL);
	}
	return 0;
}

/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	int i, new_frags;
	u32 d_off;

	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
		return -EINVAL;

	if (!num_frags)
		goto release;

	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < new_frags; i++) {
		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	page = head;
	d_off = 0;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
				      p, p_off, p_len, copied) {
			u32 copy, done = 0;
			vaddr = kmap_atomic(p);

			while (done < p_len) {
				if (d_off == PAGE_SIZE) {
					d_off = 0;
					page = (struct page *)page_private(page);
				}
				copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
				memcpy(page_address(page) + d_off,
				       vaddr + p_off + done, copy);
				done += copy;
				d_off += copy;
			}
			kunmap_atomic(vaddr);
		}
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	/* skb frags point to kernel buffers */
	for (i = 0; i < new_frags - 1; i++) {
		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
		head = (struct page *)page_private(head);
	}
	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
	skb_shinfo(skb)->nr_frags = new_frags;

release:
	skb_zcopy_clear(skb, false);
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff_fclones *fclones = container_of(skb,
						       struct sk_buff_fclones,
						       skb1);
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	if (skb->fclone == SKB_FCLONE_ORIG &&
	    refcount_read(&fclones->fclone_ref) == 1) {
		n = &fclones->skb2;
		refcount_set(&fclones->fclone_ref, 2);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
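
/* Illustrative sketch (xmit_on_backup_path() is a placeholder): a clone
 * shares the packet data with the original, so it is the right tool when
 * the data is only read, e.g. sending the same frame on a second path.
 * Code that intends to modify the data should take a private copy with
 * skb_copy() or pskb_copy() (below) instead of a clone.
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (clone)
 *		xmit_on_backup_path(clone);	data is shared, read-only
 */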

void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}
EXPORT_SYMBOL(skb_headers_offset_update);

void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}
EXPORT_SYMBOL(skb_copy_header);

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As by-product this function converts non-linear &sk_buff to linear
 *	one, so that &sk_buff becomes completely private and caller is allowed
 *	to modify all the data of returned buffer. This means that this
 *	function is not recommended for use in circumstances when only
 *	header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));

	skb_copy_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *	@fclone: if true allocate the copy of the skb from the fclone
 *	cache instead of the head cache; it is recommended to set this
 *	to true for the cases where the copy will likely be cloned
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone)
{
	unsigned int size = skb_headlen(skb) + headroom;
	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask) ||
		    skb_zerocopy_clone(n, skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	skb_copy_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy_fclone);

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success or error,
 *	if expansion failed. In the last case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i, osize = skb_end_offset(skb);
	int size = osize + nhead + ntail;
	long off;
	u8 *data;

	BUG_ON(nhead < 0);

	BUG_ON(skb_shared(skb));

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		if (skb_zcopy(skb))
			refcount_inc(&skb_uarg(skb)->refcnt);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->head_frag = 0;
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	skb->tail += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	skb_metadata_clear(skb);

	/* It is not generally safe to change skb->truesize.
	 * For the moment, we really care of rx path, or
	 * when skb is orphaned (not attached to a socket).
	 */
	if (!skb->sk || skb->destructor == sock_edemux)
		skb->truesize += size - osize;

	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
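
/* Illustrative sketch (hdr_len and the header-building code are
 * placeholders): a caller needing more headroom, e.g. to push an
 * encapsulation header, expands the head first and only then writes
 * through the possibly relocated header pointers. skb_cow_head() wraps
 * this pattern for the common case.
 *
 *	if (skb_headroom(skb) < hdr_len || skb_cloned(skb)) {
 *		if (pskb_expand_head(skb, SKB_DATA_ALIGN(hdr_len), 0,
 *				     GFP_ATOMIC))
 *			goto drop;
 *	}
 *	hdr = skb_push(skb, hdr_len);	safe: headroom is private now
 *	... fill in hdr ...
 */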

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
					gfp_mask, skb_alloc_rx_flag(skb),
					NUMA_NO_NODE);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			     skb->len + head_copy_len));

	skb_copy_header(n, skb);

	skb_headers_offset_update(n, newheadroom - oldheadroom);

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	__skb_pad		-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *	@free_on_error: free buffer on error
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error
 *	if @free_on_error is true.
 */

int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
{
	int err;
	int ntail;

	/* If the skbuff is non linear tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data+skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	if (free_on_error)
		kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(__skb_pad);

/**
 *	pskb_put - add data to the tail of a potentially fragmented buffer
 *	@skb: start of the buffer to use
 *	@tail: tail fragment of the buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the potentially
 *	fragmented buffer. @tail must be the last fragment of @skb -- or
 *	@skb itself. If this would exceed the total buffer size the kernel
 *	will panic. A pointer to the first byte of the extra data is
 *	returned.
 */

void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
void *skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
void *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
void *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);
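
/* Illustrative sketch (the header structs stand in for real protocol
 * headers): on transmit, headers are built back to front with skb_push()
 * once the payload has been placed with skb_put(); on receive, skb_pull()
 * advances past a header after it has been parsed.
 *
 *	payload = skb_put(skb, payload_len);		append payload
 *	udph = skb_push(skb, sizeof(*udph));		prepend UDP header
 *	iph = skb_push(skb, sizeof(*iph));		prepend IP header
 *
 * and on the receive side, once a header is consumed:
 *
 *	iph = (struct iphdr *)skb->data;
 *	skb_pull(skb, iph->ihl * 4);			advance past IP header
 */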

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			consume_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	if (!skb->sk || skb->destructor == sock_edemux)
		skb_condense(skb);
	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/* Note : use pskb_trim_rcsum() instead of calling this directly
 */
int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		int delta = skb->len - len;

		skb->csum = csum_block_sub(skb->csum,
					   skb_checksum(skb, len, delta, 0),
					   len);
	}
	return __pskb_trim(skb, len);
}
EXPORT_SYMBOL(pskb_trim_rcsum_slow);

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff,
 *	it expands header moving its tail forward and copying necessary
 *	data from fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
void *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
2056 */ 2057 int i, k, eat = (skb->tail + delta) - skb->end; 2058 2059 if (eat > 0 || skb_cloned(skb)) { 2060 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 2061 GFP_ATOMIC)) 2062 return NULL; 2063 } 2064 2065 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), 2066 skb_tail_pointer(skb), delta)); 2067 2068 /* Optimization: no fragments, no reasons to preestimate 2069 * size of pulled pages. Superb. 2070 */ 2071 if (!skb_has_frag_list(skb)) 2072 goto pull_pages; 2073 2074 /* Estimate size of pulled pages. */ 2075 eat = delta; 2076 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2077 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2078 2079 if (size >= eat) 2080 goto pull_pages; 2081 eat -= size; 2082 } 2083 2084 /* If we need update frag list, we are in troubles. 2085 * Certainly, it is possible to add an offset to skb data, 2086 * but taking into account that pulling is expected to 2087 * be very rare operation, it is worth to fight against 2088 * further bloating skb head and crucify ourselves here instead. 2089 * Pure masohism, indeed. 8)8) 2090 */ 2091 if (eat) { 2092 struct sk_buff *list = skb_shinfo(skb)->frag_list; 2093 struct sk_buff *clone = NULL; 2094 struct sk_buff *insp = NULL; 2095 2096 do { 2097 if (list->len <= eat) { 2098 /* Eaten as whole. */ 2099 eat -= list->len; 2100 list = list->next; 2101 insp = list; 2102 } else { 2103 /* Eaten partially. */ 2104 2105 if (skb_shared(list)) { 2106 /* Sucks! We need to fork list. :-( */ 2107 clone = skb_clone(list, GFP_ATOMIC); 2108 if (!clone) 2109 return NULL; 2110 insp = list->next; 2111 list = clone; 2112 } else { 2113 /* This may be pulled without 2114 * problems. */ 2115 insp = list; 2116 } 2117 if (!pskb_pull(list, eat)) { 2118 kfree_skb(clone); 2119 return NULL; 2120 } 2121 break; 2122 } 2123 } while (eat); 2124 2125 /* Free pulled out fragments. */ 2126 while ((list = skb_shinfo(skb)->frag_list) != insp) { 2127 skb_shinfo(skb)->frag_list = list->next; 2128 kfree_skb(list); 2129 } 2130 /* And insert new clone at head. */ 2131 if (clone) { 2132 clone->next = list; 2133 skb_shinfo(skb)->frag_list = clone; 2134 } 2135 } 2136 /* Success! Now we may commit changes to skb data. */ 2137 2138 pull_pages: 2139 eat = delta; 2140 k = 0; 2141 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2142 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2143 2144 if (size <= eat) { 2145 skb_frag_unref(skb, i); 2146 eat -= size; 2147 } else { 2148 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2149 2150 *frag = skb_shinfo(skb)->frags[i]; 2151 if (eat) { 2152 skb_frag_off_add(frag, eat); 2153 skb_frag_size_sub(frag, eat); 2154 if (!i) 2155 goto end; 2156 eat = 0; 2157 } 2158 k++; 2159 } 2160 } 2161 skb_shinfo(skb)->nr_frags = k; 2162 2163 end: 2164 skb->tail += delta; 2165 skb->data_len -= delta; 2166 2167 if (!skb->data_len) 2168 skb_zcopy_clear(skb, false); 2169 2170 return skb_tail_pointer(skb); 2171 } 2172 EXPORT_SYMBOL(__pskb_pull_tail); 2173 2174 /** 2175 * skb_copy_bits - copy bits from skb to kernel buffer 2176 * @skb: source skb 2177 * @offset: offset in source 2178 * @to: destination buffer 2179 * @len: number of bytes to copy 2180 * 2181 * Copy the specified number of bytes from the source skb to the 2182 * destination buffer. 2183 * 2184 * CAUTION ! : 2185 * If its prototype is ever changed, 2186 * check arch/{*}/net/{*}.S files, 2187 * since it is called from BPF assembly code. 
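 *
 *	Illustrative example (offset and error label are hypothetical):
 *	reading a UDP header out of a possibly non-linear skb:
 *
 *		struct udphdr uh;
 *
 *		if (skb_copy_bits(skb, offset, &uh, sizeof(uh)) < 0)
 *			goto drop;
 *
 *	As implemented below, the function returns 0 on success and
 *	-EFAULT when the requested range does not fit inside the skb.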
2188 */ 2189 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 2190 { 2191 int start = skb_headlen(skb); 2192 struct sk_buff *frag_iter; 2193 int i, copy; 2194 2195 if (offset > (int)skb->len - len) 2196 goto fault; 2197 2198 /* Copy header. */ 2199 if ((copy = start - offset) > 0) { 2200 if (copy > len) 2201 copy = len; 2202 skb_copy_from_linear_data_offset(skb, offset, to, copy); 2203 if ((len -= copy) == 0) 2204 return 0; 2205 offset += copy; 2206 to += copy; 2207 } 2208 2209 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2210 int end; 2211 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 2212 2213 WARN_ON(start > offset + len); 2214 2215 end = start + skb_frag_size(f); 2216 if ((copy = end - offset) > 0) { 2217 u32 p_off, p_len, copied; 2218 struct page *p; 2219 u8 *vaddr; 2220 2221 if (copy > len) 2222 copy = len; 2223 2224 skb_frag_foreach_page(f, 2225 skb_frag_off(f) + offset - start, 2226 copy, p, p_off, p_len, copied) { 2227 vaddr = kmap_atomic(p); 2228 memcpy(to + copied, vaddr + p_off, p_len); 2229 kunmap_atomic(vaddr); 2230 } 2231 2232 if ((len -= copy) == 0) 2233 return 0; 2234 offset += copy; 2235 to += copy; 2236 } 2237 start = end; 2238 } 2239 2240 skb_walk_frags(skb, frag_iter) { 2241 int end; 2242 2243 WARN_ON(start > offset + len); 2244 2245 end = start + frag_iter->len; 2246 if ((copy = end - offset) > 0) { 2247 if (copy > len) 2248 copy = len; 2249 if (skb_copy_bits(frag_iter, offset - start, to, copy)) 2250 goto fault; 2251 if ((len -= copy) == 0) 2252 return 0; 2253 offset += copy; 2254 to += copy; 2255 } 2256 start = end; 2257 } 2258 2259 if (!len) 2260 return 0; 2261 2262 fault: 2263 return -EFAULT; 2264 } 2265 EXPORT_SYMBOL(skb_copy_bits); 2266 2267 /* 2268 * Callback from splice_to_pipe(), if we need to release some pages 2269 * at the end of the spd in case we error'ed out in filling the pipe. 2270 */ 2271 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 2272 { 2273 put_page(spd->pages[i]); 2274 } 2275 2276 static struct page *linear_to_page(struct page *page, unsigned int *len, 2277 unsigned int *offset, 2278 struct sock *sk) 2279 { 2280 struct page_frag *pfrag = sk_page_frag(sk); 2281 2282 if (!sk_page_frag_refill(sk, pfrag)) 2283 return NULL; 2284 2285 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 2286 2287 memcpy(page_address(pfrag->page) + pfrag->offset, 2288 page_address(page) + *offset, *len); 2289 *offset = pfrag->offset; 2290 pfrag->offset += *len; 2291 2292 return pfrag->page; 2293 } 2294 2295 static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 2296 struct page *page, 2297 unsigned int offset) 2298 { 2299 return spd->nr_pages && 2300 spd->pages[spd->nr_pages - 1] == page && 2301 (spd->partial[spd->nr_pages - 1].offset + 2302 spd->partial[spd->nr_pages - 1].len == offset); 2303 } 2304 2305 /* 2306 * Fill page/offset/length into spd, if it can hold more pages. 
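 * Returns true when the caller should stop: either the descriptor
 * already holds MAX_SKB_FRAGS pages or the bounce copy of linear data in
 * linear_to_page() failed. When the new range is contiguous with the
 * previous entry on the same page, spd_can_coalesce() above lets it be
 * merged into that entry instead of consuming a new slot.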
2307 */ 2308 static bool spd_fill_page(struct splice_pipe_desc *spd, 2309 struct pipe_inode_info *pipe, struct page *page, 2310 unsigned int *len, unsigned int offset, 2311 bool linear, 2312 struct sock *sk) 2313 { 2314 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 2315 return true; 2316 2317 if (linear) { 2318 page = linear_to_page(page, len, &offset, sk); 2319 if (!page) 2320 return true; 2321 } 2322 if (spd_can_coalesce(spd, page, offset)) { 2323 spd->partial[spd->nr_pages - 1].len += *len; 2324 return false; 2325 } 2326 get_page(page); 2327 spd->pages[spd->nr_pages] = page; 2328 spd->partial[spd->nr_pages].len = *len; 2329 spd->partial[spd->nr_pages].offset = offset; 2330 spd->nr_pages++; 2331 2332 return false; 2333 } 2334 2335 static bool __splice_segment(struct page *page, unsigned int poff, 2336 unsigned int plen, unsigned int *off, 2337 unsigned int *len, 2338 struct splice_pipe_desc *spd, bool linear, 2339 struct sock *sk, 2340 struct pipe_inode_info *pipe) 2341 { 2342 if (!*len) 2343 return true; 2344 2345 /* skip this segment if already processed */ 2346 if (*off >= plen) { 2347 *off -= plen; 2348 return false; 2349 } 2350 2351 /* ignore any bits we already processed */ 2352 poff += *off; 2353 plen -= *off; 2354 *off = 0; 2355 2356 do { 2357 unsigned int flen = min(*len, plen); 2358 2359 if (spd_fill_page(spd, pipe, page, &flen, poff, 2360 linear, sk)) 2361 return true; 2362 poff += flen; 2363 plen -= flen; 2364 *len -= flen; 2365 } while (*len && plen); 2366 2367 return false; 2368 } 2369 2370 /* 2371 * Map linear and fragment data from the skb to spd. It reports true if the 2372 * pipe is full or if we already spliced the requested length. 2373 */ 2374 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 2375 unsigned int *offset, unsigned int *len, 2376 struct splice_pipe_desc *spd, struct sock *sk) 2377 { 2378 int seg; 2379 struct sk_buff *iter; 2380 2381 /* map the linear part : 2382 * If skb->head_frag is set, this 'linear' part is backed by a 2383 * fragment, and if the head is not shared with any clones then 2384 * we can avoid a copy since we own the head portion of this page. 2385 */ 2386 if (__splice_segment(virt_to_page(skb->data), 2387 (unsigned long) skb->data & (PAGE_SIZE - 1), 2388 skb_headlen(skb), 2389 offset, len, spd, 2390 skb_head_is_locked(skb), 2391 sk, pipe)) 2392 return true; 2393 2394 /* 2395 * then map the fragments 2396 */ 2397 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 2398 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 2399 2400 if (__splice_segment(skb_frag_page(f), 2401 skb_frag_off(f), skb_frag_size(f), 2402 offset, len, spd, false, sk, pipe)) 2403 return true; 2404 } 2405 2406 skb_walk_frags(skb, iter) { 2407 if (*offset >= iter->len) { 2408 *offset -= iter->len; 2409 continue; 2410 } 2411 /* __skb_splice_bits() only fails if the output has no room 2412 * left, so no point in going over the frag_list for the error 2413 * case. 2414 */ 2415 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) 2416 return true; 2417 } 2418 2419 return false; 2420 } 2421 2422 /* 2423 * Map data from the skb to a pipe. Should handle both the linear part, 2424 * the fragments, and the frag list. 
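 *
 * Illustrative call from a protocol's splice read path (locking and
 * surrounding bookkeeping omitted; names are hypothetical):
 *
 *	spliced = skb_splice_bits(skb, sk, offset, pipe, want, flags);
 *
 * The return value is whatever splice_to_pipe() accepted once pages were
 * collected, or 0 when nothing could be mapped.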
2425 */ 2426 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 2427 struct pipe_inode_info *pipe, unsigned int tlen, 2428 unsigned int flags) 2429 { 2430 struct partial_page partial[MAX_SKB_FRAGS]; 2431 struct page *pages[MAX_SKB_FRAGS]; 2432 struct splice_pipe_desc spd = { 2433 .pages = pages, 2434 .partial = partial, 2435 .nr_pages_max = MAX_SKB_FRAGS, 2436 .ops = &nosteal_pipe_buf_ops, 2437 .spd_release = sock_spd_release, 2438 }; 2439 int ret = 0; 2440 2441 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); 2442 2443 if (spd.nr_pages) 2444 ret = splice_to_pipe(pipe, &spd); 2445 2446 return ret; 2447 } 2448 EXPORT_SYMBOL_GPL(skb_splice_bits); 2449 2450 /* Send skb data on a socket. Socket must be locked. */ 2451 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, 2452 int len) 2453 { 2454 unsigned int orig_len = len; 2455 struct sk_buff *head = skb; 2456 unsigned short fragidx; 2457 int slen, ret; 2458 2459 do_frag_list: 2460 2461 /* Deal with head data */ 2462 while (offset < skb_headlen(skb) && len) { 2463 struct kvec kv; 2464 struct msghdr msg; 2465 2466 slen = min_t(int, len, skb_headlen(skb) - offset); 2467 kv.iov_base = skb->data + offset; 2468 kv.iov_len = slen; 2469 memset(&msg, 0, sizeof(msg)); 2470 msg.msg_flags = MSG_DONTWAIT; 2471 2472 ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen); 2473 if (ret <= 0) 2474 goto error; 2475 2476 offset += ret; 2477 len -= ret; 2478 } 2479 2480 /* All the data was skb head? */ 2481 if (!len) 2482 goto out; 2483 2484 /* Make offset relative to start of frags */ 2485 offset -= skb_headlen(skb); 2486 2487 /* Find where we are in frag list */ 2488 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 2489 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 2490 2491 if (offset < skb_frag_size(frag)) 2492 break; 2493 2494 offset -= skb_frag_size(frag); 2495 } 2496 2497 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 2498 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 2499 2500 slen = min_t(size_t, len, skb_frag_size(frag) - offset); 2501 2502 while (slen) { 2503 ret = kernel_sendpage_locked(sk, skb_frag_page(frag), 2504 skb_frag_off(frag) + offset, 2505 slen, MSG_DONTWAIT); 2506 if (ret <= 0) 2507 goto error; 2508 2509 len -= ret; 2510 offset += ret; 2511 slen -= ret; 2512 } 2513 2514 offset = 0; 2515 } 2516 2517 if (len) { 2518 /* Process any frag lists */ 2519 2520 if (skb == head) { 2521 if (skb_has_frag_list(skb)) { 2522 skb = skb_shinfo(skb)->frag_list; 2523 goto do_frag_list; 2524 } 2525 } else if (skb->next) { 2526 skb = skb->next; 2527 goto do_frag_list; 2528 } 2529 } 2530 2531 out: 2532 return orig_len - len; 2533 2534 error: 2535 return orig_len == len ? ret : orig_len - len; 2536 } 2537 EXPORT_SYMBOL_GPL(skb_send_sock_locked); 2538 2539 /** 2540 * skb_store_bits - store bits from kernel buffer to skb 2541 * @skb: destination buffer 2542 * @offset: offset in destination 2543 * @from: source buffer 2544 * @len: number of bytes to copy 2545 * 2546 * Copy the specified number of bytes from the source buffer to the 2547 * destination skb. This function handles all the messy bits of 2548 * traversing fragment lists and such. 
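 *
 *	Illustrative example (field offset and error label are
 *	hypothetical): rewriting a 16-bit field in place, wherever it
 *	happens to live in the skb:
 *
 *		__be16 val = htons(new_id);
 *
 *		if (skb_store_bits(skb, offset, &val, sizeof(val)))
 *			goto drop;
 *
 *	Like skb_copy_bits(), it returns 0 on success and -EFAULT if the
 *	range falls outside the buffer.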
2549 */ 2550 2551 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 2552 { 2553 int start = skb_headlen(skb); 2554 struct sk_buff *frag_iter; 2555 int i, copy; 2556 2557 if (offset > (int)skb->len - len) 2558 goto fault; 2559 2560 if ((copy = start - offset) > 0) { 2561 if (copy > len) 2562 copy = len; 2563 skb_copy_to_linear_data_offset(skb, offset, from, copy); 2564 if ((len -= copy) == 0) 2565 return 0; 2566 offset += copy; 2567 from += copy; 2568 } 2569 2570 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2571 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2572 int end; 2573 2574 WARN_ON(start > offset + len); 2575 2576 end = start + skb_frag_size(frag); 2577 if ((copy = end - offset) > 0) { 2578 u32 p_off, p_len, copied; 2579 struct page *p; 2580 u8 *vaddr; 2581 2582 if (copy > len) 2583 copy = len; 2584 2585 skb_frag_foreach_page(frag, 2586 skb_frag_off(frag) + offset - start, 2587 copy, p, p_off, p_len, copied) { 2588 vaddr = kmap_atomic(p); 2589 memcpy(vaddr + p_off, from + copied, p_len); 2590 kunmap_atomic(vaddr); 2591 } 2592 2593 if ((len -= copy) == 0) 2594 return 0; 2595 offset += copy; 2596 from += copy; 2597 } 2598 start = end; 2599 } 2600 2601 skb_walk_frags(skb, frag_iter) { 2602 int end; 2603 2604 WARN_ON(start > offset + len); 2605 2606 end = start + frag_iter->len; 2607 if ((copy = end - offset) > 0) { 2608 if (copy > len) 2609 copy = len; 2610 if (skb_store_bits(frag_iter, offset - start, 2611 from, copy)) 2612 goto fault; 2613 if ((len -= copy) == 0) 2614 return 0; 2615 offset += copy; 2616 from += copy; 2617 } 2618 start = end; 2619 } 2620 if (!len) 2621 return 0; 2622 2623 fault: 2624 return -EFAULT; 2625 } 2626 EXPORT_SYMBOL(skb_store_bits); 2627 2628 /* Checksum skb data. */ 2629 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 2630 __wsum csum, const struct skb_checksum_ops *ops) 2631 { 2632 int start = skb_headlen(skb); 2633 int i, copy = start - offset; 2634 struct sk_buff *frag_iter; 2635 int pos = 0; 2636 2637 /* Checksum header. 
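 * (pos tracks how many bytes have been folded into csum so far; the
 * ops->combine callback needs that running offset so a block checksum
 * that starts at an odd byte boundary can be byte-rotated into place,
 * as csum_block_add() does.)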
*/ 2638 if (copy > 0) { 2639 if (copy > len) 2640 copy = len; 2641 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, 2642 skb->data + offset, copy, csum); 2643 if ((len -= copy) == 0) 2644 return csum; 2645 offset += copy; 2646 pos = copy; 2647 } 2648 2649 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2650 int end; 2651 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2652 2653 WARN_ON(start > offset + len); 2654 2655 end = start + skb_frag_size(frag); 2656 if ((copy = end - offset) > 0) { 2657 u32 p_off, p_len, copied; 2658 struct page *p; 2659 __wsum csum2; 2660 u8 *vaddr; 2661 2662 if (copy > len) 2663 copy = len; 2664 2665 skb_frag_foreach_page(frag, 2666 skb_frag_off(frag) + offset - start, 2667 copy, p, p_off, p_len, copied) { 2668 vaddr = kmap_atomic(p); 2669 csum2 = INDIRECT_CALL_1(ops->update, 2670 csum_partial_ext, 2671 vaddr + p_off, p_len, 0); 2672 kunmap_atomic(vaddr); 2673 csum = INDIRECT_CALL_1(ops->combine, 2674 csum_block_add_ext, csum, 2675 csum2, pos, p_len); 2676 pos += p_len; 2677 } 2678 2679 if (!(len -= copy)) 2680 return csum; 2681 offset += copy; 2682 } 2683 start = end; 2684 } 2685 2686 skb_walk_frags(skb, frag_iter) { 2687 int end; 2688 2689 WARN_ON(start > offset + len); 2690 2691 end = start + frag_iter->len; 2692 if ((copy = end - offset) > 0) { 2693 __wsum csum2; 2694 if (copy > len) 2695 copy = len; 2696 csum2 = __skb_checksum(frag_iter, offset - start, 2697 copy, 0, ops); 2698 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, 2699 csum, csum2, pos, copy); 2700 if ((len -= copy) == 0) 2701 return csum; 2702 offset += copy; 2703 pos += copy; 2704 } 2705 start = end; 2706 } 2707 BUG_ON(len); 2708 2709 return csum; 2710 } 2711 EXPORT_SYMBOL(__skb_checksum); 2712 2713 __wsum skb_checksum(const struct sk_buff *skb, int offset, 2714 int len, __wsum csum) 2715 { 2716 const struct skb_checksum_ops ops = { 2717 .update = csum_partial_ext, 2718 .combine = csum_block_add_ext, 2719 }; 2720 2721 return __skb_checksum(skb, offset, len, csum, &ops); 2722 } 2723 EXPORT_SYMBOL(skb_checksum); 2724 2725 /* Both of above in one bottle. */ 2726 2727 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 2728 u8 *to, int len, __wsum csum) 2729 { 2730 int start = skb_headlen(skb); 2731 int i, copy = start - offset; 2732 struct sk_buff *frag_iter; 2733 int pos = 0; 2734 2735 /* Copy header. 
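 * (Same walk as __skb_checksum() above, except each chunk is also copied
 * into @to while it is summed; skb_copy_and_csum_dev() further below
 * relies on this to checksum a frame while copying it into a linear
 * output buffer.)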
*/ 2736 if (copy > 0) { 2737 if (copy > len) 2738 copy = len; 2739 csum = csum_partial_copy_nocheck(skb->data + offset, to, 2740 copy, csum); 2741 if ((len -= copy) == 0) 2742 return csum; 2743 offset += copy; 2744 to += copy; 2745 pos = copy; 2746 } 2747 2748 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2749 int end; 2750 2751 WARN_ON(start > offset + len); 2752 2753 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2754 if ((copy = end - offset) > 0) { 2755 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2756 u32 p_off, p_len, copied; 2757 struct page *p; 2758 __wsum csum2; 2759 u8 *vaddr; 2760 2761 if (copy > len) 2762 copy = len; 2763 2764 skb_frag_foreach_page(frag, 2765 skb_frag_off(frag) + offset - start, 2766 copy, p, p_off, p_len, copied) { 2767 vaddr = kmap_atomic(p); 2768 csum2 = csum_partial_copy_nocheck(vaddr + p_off, 2769 to + copied, 2770 p_len, 0); 2771 kunmap_atomic(vaddr); 2772 csum = csum_block_add(csum, csum2, pos); 2773 pos += p_len; 2774 } 2775 2776 if (!(len -= copy)) 2777 return csum; 2778 offset += copy; 2779 to += copy; 2780 } 2781 start = end; 2782 } 2783 2784 skb_walk_frags(skb, frag_iter) { 2785 __wsum csum2; 2786 int end; 2787 2788 WARN_ON(start > offset + len); 2789 2790 end = start + frag_iter->len; 2791 if ((copy = end - offset) > 0) { 2792 if (copy > len) 2793 copy = len; 2794 csum2 = skb_copy_and_csum_bits(frag_iter, 2795 offset - start, 2796 to, copy, 0); 2797 csum = csum_block_add(csum, csum2, pos); 2798 if ((len -= copy) == 0) 2799 return csum; 2800 offset += copy; 2801 to += copy; 2802 pos += copy; 2803 } 2804 start = end; 2805 } 2806 BUG_ON(len); 2807 return csum; 2808 } 2809 EXPORT_SYMBOL(skb_copy_and_csum_bits); 2810 2811 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) 2812 { 2813 __sum16 sum; 2814 2815 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); 2816 /* See comments in __skb_checksum_complete(). */ 2817 if (likely(!sum)) { 2818 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 2819 !skb->csum_complete_sw) 2820 netdev_rx_csum_fault(skb->dev, skb); 2821 } 2822 if (!skb_shared(skb)) 2823 skb->csum_valid = !sum; 2824 return sum; 2825 } 2826 EXPORT_SYMBOL(__skb_checksum_complete_head); 2827 2828 /* This function assumes skb->csum already holds pseudo header's checksum, 2829 * which has been changed from the hardware checksum, for example, by 2830 * __skb_checksum_validate_complete(). And, the original skb->csum must 2831 * have been validated unsuccessfully for CHECKSUM_COMPLETE case. 2832 * 2833 * It returns non-zero if the recomputed checksum is still invalid, otherwise 2834 * zero. The new checksum is stored back into skb->csum unless the skb is 2835 * shared. 2836 */ 2837 __sum16 __skb_checksum_complete(struct sk_buff *skb) 2838 { 2839 __wsum csum; 2840 __sum16 sum; 2841 2842 csum = skb_checksum(skb, 0, skb->len, 0); 2843 2844 sum = csum_fold(csum_add(skb->csum, csum)); 2845 /* This check is inverted, because we already knew the hardware 2846 * checksum is invalid before calling this function. So, if the 2847 * re-computed checksum is valid instead, then we have a mismatch 2848 * between the original skb->csum and skb_checksum(). This means either 2849 * the original hardware checksum is incorrect or we screw up skb->csum 2850 * when moving skb->data around. 
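 *
 * Illustrative use (hypothetical receive path): a caller that has set up
 * skb->csum as described above can force the full software recomputation
 * with
 *
 *	if (__skb_checksum_complete(skb))
 *		goto csum_error;
 *
 * where csum_error is the caller's own error label.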
2851 */ 2852 if (likely(!sum)) { 2853 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 2854 !skb->csum_complete_sw) 2855 netdev_rx_csum_fault(skb->dev, skb); 2856 } 2857 2858 if (!skb_shared(skb)) { 2859 /* Save full packet checksum */ 2860 skb->csum = csum; 2861 skb->ip_summed = CHECKSUM_COMPLETE; 2862 skb->csum_complete_sw = 1; 2863 skb->csum_valid = !sum; 2864 } 2865 2866 return sum; 2867 } 2868 EXPORT_SYMBOL(__skb_checksum_complete); 2869 2870 static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) 2871 { 2872 net_warn_ratelimited( 2873 "%s: attempt to compute crc32c without libcrc32c.ko\n", 2874 __func__); 2875 return 0; 2876 } 2877 2878 static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, 2879 int offset, int len) 2880 { 2881 net_warn_ratelimited( 2882 "%s: attempt to compute crc32c without libcrc32c.ko\n", 2883 __func__); 2884 return 0; 2885 } 2886 2887 static const struct skb_checksum_ops default_crc32c_ops = { 2888 .update = warn_crc32c_csum_update, 2889 .combine = warn_crc32c_csum_combine, 2890 }; 2891 2892 const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = 2893 &default_crc32c_ops; 2894 EXPORT_SYMBOL(crc32c_csum_stub); 2895 2896 /** 2897 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 2898 * @from: source buffer 2899 * 2900 * Calculates the amount of linear headroom needed in the 'to' skb passed 2901 * into skb_zerocopy(). 2902 */ 2903 unsigned int 2904 skb_zerocopy_headlen(const struct sk_buff *from) 2905 { 2906 unsigned int hlen = 0; 2907 2908 if (!from->head_frag || 2909 skb_headlen(from) < L1_CACHE_BYTES || 2910 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 2911 hlen = skb_headlen(from); 2912 2913 if (skb_has_frag_list(from)) 2914 hlen = from->len; 2915 2916 return hlen; 2917 } 2918 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 2919 2920 /** 2921 * skb_zerocopy - Zero copy skb to skb 2922 * @to: destination buffer 2923 * @from: source buffer 2924 * @len: number of bytes to copy from source buffer 2925 * @hlen: size of linear headroom in destination buffer 2926 * 2927 * Copies up to `len` bytes from `from` to `to` by creating references 2928 * to the frags in the source buffer. 2929 * 2930 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 2931 * headroom in the `to` buffer. 
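 *
 * Illustrative usage sketch (allocation sizing kept minimal; names are
 * hypothetical):
 *
 *	hlen = skb_zerocopy_headlen(from);
 *	to = alloc_skb(hlen, GFP_ATOMIC);
 *	if (!to)
 *		return -ENOMEM;
 *	err = skb_zerocopy(to, from, from->len, hlen);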
2932 * 2933 * Return value: 2934 * 0: everything is OK 2935 * -ENOMEM: couldn't orphan frags of @from due to lack of memory 2936 * -EFAULT: skb_copy_bits() found some problem with skb geometry 2937 */ 2938 int 2939 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 2940 { 2941 int i, j = 0; 2942 int plen = 0; /* length of skb->head fragment */ 2943 int ret; 2944 struct page *page; 2945 unsigned int offset; 2946 2947 BUG_ON(!from->head_frag && !hlen); 2948 2949 /* dont bother with small payloads */ 2950 if (len <= skb_tailroom(to)) 2951 return skb_copy_bits(from, 0, skb_put(to, len), len); 2952 2953 if (hlen) { 2954 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 2955 if (unlikely(ret)) 2956 return ret; 2957 len -= hlen; 2958 } else { 2959 plen = min_t(int, skb_headlen(from), len); 2960 if (plen) { 2961 page = virt_to_head_page(from->head); 2962 offset = from->data - (unsigned char *)page_address(page); 2963 __skb_fill_page_desc(to, 0, page, offset, plen); 2964 get_page(page); 2965 j = 1; 2966 len -= plen; 2967 } 2968 } 2969 2970 to->truesize += len + plen; 2971 to->len += len + plen; 2972 to->data_len += len + plen; 2973 2974 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 2975 skb_tx_error(from); 2976 return -ENOMEM; 2977 } 2978 skb_zerocopy_clone(to, from, GFP_ATOMIC); 2979 2980 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 2981 int size; 2982 2983 if (!len) 2984 break; 2985 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 2986 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), 2987 len); 2988 skb_frag_size_set(&skb_shinfo(to)->frags[j], size); 2989 len -= size; 2990 skb_frag_ref(to, j); 2991 j++; 2992 } 2993 skb_shinfo(to)->nr_frags = j; 2994 2995 return 0; 2996 } 2997 EXPORT_SYMBOL_GPL(skb_zerocopy); 2998 2999 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 3000 { 3001 __wsum csum; 3002 long csstart; 3003 3004 if (skb->ip_summed == CHECKSUM_PARTIAL) 3005 csstart = skb_checksum_start_offset(skb); 3006 else 3007 csstart = skb_headlen(skb); 3008 3009 BUG_ON(csstart > skb_headlen(skb)); 3010 3011 skb_copy_from_linear_data(skb, to, csstart); 3012 3013 csum = 0; 3014 if (csstart != skb->len) 3015 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 3016 skb->len - csstart, 0); 3017 3018 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3019 long csstuff = csstart + skb->csum_offset; 3020 3021 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 3022 } 3023 } 3024 EXPORT_SYMBOL(skb_copy_and_csum_dev); 3025 3026 /** 3027 * skb_dequeue - remove from the head of the queue 3028 * @list: list to dequeue from 3029 * 3030 * Remove the head of the list. The list lock is taken so the function 3031 * may be used safely with other locking list functions. The head item is 3032 * returned or %NULL if the list is empty. 3033 */ 3034 3035 struct sk_buff *skb_dequeue(struct sk_buff_head *list) 3036 { 3037 unsigned long flags; 3038 struct sk_buff *result; 3039 3040 spin_lock_irqsave(&list->lock, flags); 3041 result = __skb_dequeue(list); 3042 spin_unlock_irqrestore(&list->lock, flags); 3043 return result; 3044 } 3045 EXPORT_SYMBOL(skb_dequeue); 3046 3047 /** 3048 * skb_dequeue_tail - remove from the tail of the queue 3049 * @list: list to dequeue from 3050 * 3051 * Remove the tail of the list. The list lock is taken so the function 3052 * may be used safely with other locking list functions. The tail item is 3053 * returned or %NULL if the list is empty. 
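 *
 * Illustrative producer/consumer pattern for this queue family
 * (hypothetical driver fields; the queue is assumed to have been set up
 * with skb_queue_head_init()): a producer does
 *
 *	skb_queue_tail(&priv->rxq, skb);
 *
 * while a consumer drains it with
 *
 *	while ((skb = skb_dequeue(&priv->rxq)) != NULL)
 *		netif_rx(skb);
 *
 * No extra locking is needed around either call; each takes the queue's
 * internal lock.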
3054 */ 3055 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 3056 { 3057 unsigned long flags; 3058 struct sk_buff *result; 3059 3060 spin_lock_irqsave(&list->lock, flags); 3061 result = __skb_dequeue_tail(list); 3062 spin_unlock_irqrestore(&list->lock, flags); 3063 return result; 3064 } 3065 EXPORT_SYMBOL(skb_dequeue_tail); 3066 3067 /** 3068 * skb_queue_purge - empty a list 3069 * @list: list to empty 3070 * 3071 * Delete all buffers on an &sk_buff list. Each buffer is removed from 3072 * the list and one reference dropped. This function takes the list 3073 * lock and is atomic with respect to other list locking functions. 3074 */ 3075 void skb_queue_purge(struct sk_buff_head *list) 3076 { 3077 struct sk_buff *skb; 3078 while ((skb = skb_dequeue(list)) != NULL) 3079 kfree_skb(skb); 3080 } 3081 EXPORT_SYMBOL(skb_queue_purge); 3082 3083 /** 3084 * skb_rbtree_purge - empty a skb rbtree 3085 * @root: root of the rbtree to empty 3086 * Return value: the sum of truesizes of all purged skbs. 3087 * 3088 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from 3089 * the list and one reference dropped. This function does not take 3090 * any lock. Synchronization should be handled by the caller (e.g., TCP 3091 * out-of-order queue is protected by the socket lock). 3092 */ 3093 unsigned int skb_rbtree_purge(struct rb_root *root) 3094 { 3095 struct rb_node *p = rb_first(root); 3096 unsigned int sum = 0; 3097 3098 while (p) { 3099 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); 3100 3101 p = rb_next(p); 3102 rb_erase(&skb->rbnode, root); 3103 sum += skb->truesize; 3104 kfree_skb(skb); 3105 } 3106 return sum; 3107 } 3108 3109 /** 3110 * skb_queue_head - queue a buffer at the list head 3111 * @list: list to use 3112 * @newsk: buffer to queue 3113 * 3114 * Queue a buffer at the start of the list. This function takes the 3115 * list lock and can be used safely with other locking &sk_buff functions 3116 * safely. 3117 * 3118 * A buffer cannot be placed on two lists at the same time. 3119 */ 3120 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 3121 { 3122 unsigned long flags; 3123 3124 spin_lock_irqsave(&list->lock, flags); 3125 __skb_queue_head(list, newsk); 3126 spin_unlock_irqrestore(&list->lock, flags); 3127 } 3128 EXPORT_SYMBOL(skb_queue_head); 3129 3130 /** 3131 * skb_queue_tail - queue a buffer at the list tail 3132 * @list: list to use 3133 * @newsk: buffer to queue 3134 * 3135 * Queue a buffer at the tail of the list. This function takes the 3136 * list lock and can be used safely with other locking &sk_buff functions 3137 * safely. 3138 * 3139 * A buffer cannot be placed on two lists at the same time. 3140 */ 3141 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 3142 { 3143 unsigned long flags; 3144 3145 spin_lock_irqsave(&list->lock, flags); 3146 __skb_queue_tail(list, newsk); 3147 spin_unlock_irqrestore(&list->lock, flags); 3148 } 3149 EXPORT_SYMBOL(skb_queue_tail); 3150 3151 /** 3152 * skb_unlink - remove a buffer from a list 3153 * @skb: buffer to remove 3154 * @list: list to use 3155 * 3156 * Remove a packet from a list. The list locks are taken and this 3157 * function is atomic with respect to other list locked calls 3158 * 3159 * You must know what list the SKB is on. 
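 *
 * Illustrative example (hypothetical: dropping a buffer previously
 * located on a socket's receive queue):
 *
 *	skb_unlink(skb, &sk->sk_receive_queue);
 *	kfree_skb(skb);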
3160 */ 3161 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 3162 { 3163 unsigned long flags; 3164 3165 spin_lock_irqsave(&list->lock, flags); 3166 __skb_unlink(skb, list); 3167 spin_unlock_irqrestore(&list->lock, flags); 3168 } 3169 EXPORT_SYMBOL(skb_unlink); 3170 3171 /** 3172 * skb_append - append a buffer 3173 * @old: buffer to insert after 3174 * @newsk: buffer to insert 3175 * @list: list to use 3176 * 3177 * Place a packet after a given packet in a list. The list locks are taken 3178 * and this function is atomic with respect to other list locked calls. 3179 * A buffer cannot be placed on two lists at the same time. 3180 */ 3181 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 3182 { 3183 unsigned long flags; 3184 3185 spin_lock_irqsave(&list->lock, flags); 3186 __skb_queue_after(list, old, newsk); 3187 spin_unlock_irqrestore(&list->lock, flags); 3188 } 3189 EXPORT_SYMBOL(skb_append); 3190 3191 static inline void skb_split_inside_header(struct sk_buff *skb, 3192 struct sk_buff* skb1, 3193 const u32 len, const int pos) 3194 { 3195 int i; 3196 3197 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 3198 pos - len); 3199 /* And move data appendix as is. */ 3200 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 3201 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 3202 3203 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 3204 skb_shinfo(skb)->nr_frags = 0; 3205 skb1->data_len = skb->data_len; 3206 skb1->len += skb1->data_len; 3207 skb->data_len = 0; 3208 skb->len = len; 3209 skb_set_tail_pointer(skb, len); 3210 } 3211 3212 static inline void skb_split_no_header(struct sk_buff *skb, 3213 struct sk_buff* skb1, 3214 const u32 len, int pos) 3215 { 3216 int i, k = 0; 3217 const int nfrags = skb_shinfo(skb)->nr_frags; 3218 3219 skb_shinfo(skb)->nr_frags = 0; 3220 skb1->len = skb1->data_len = skb->len - len; 3221 skb->len = len; 3222 skb->data_len = len - pos; 3223 3224 for (i = 0; i < nfrags; i++) { 3225 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 3226 3227 if (pos + size > len) { 3228 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 3229 3230 if (pos < len) { 3231 /* Split frag. 3232 * We have two variants in this case: 3233 * 1. Move all the frag to the second 3234 * part, if it is possible. F.e. 3235 * this approach is mandatory for TUX, 3236 * where splitting is expensive. 3237 * 2. Split is accurately. We make this. 3238 */ 3239 skb_frag_ref(skb, i); 3240 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); 3241 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 3242 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 3243 skb_shinfo(skb)->nr_frags++; 3244 } 3245 k++; 3246 } else 3247 skb_shinfo(skb)->nr_frags++; 3248 pos += size; 3249 } 3250 skb_shinfo(skb1)->nr_frags = k; 3251 } 3252 3253 /** 3254 * skb_split - Split fragmented skb to two parts at length len. 3255 * @skb: the buffer to split 3256 * @skb1: the buffer to receive the second part 3257 * @len: new length for skb 3258 */ 3259 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 3260 { 3261 int pos = skb_headlen(skb); 3262 3263 skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags & 3264 SKBTX_SHARED_FRAG; 3265 skb_zerocopy_clone(skb1, skb, 0); 3266 if (len < pos) /* Split line is inside header. */ 3267 skb_split_inside_header(skb, skb1, len, pos); 3268 else /* Second chunk has no header, nothing to copy. 
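 * Worked illustration of this branch: with headlen pos = 100, two
 * 200-byte frags (skb->len = 500) and a split at len = 300, frag 0
 * (bytes 100-299) stays on skb and frag 1 (bytes 300-499) moves to skb1,
 * leaving skb with len 300 and skb1 with len 200. If the split point had
 * landed inside a frag, that frag would be referenced by both skbs with
 * its offset and size adjusted, as skb_split_no_header() above does.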
*/ 3269 skb_split_no_header(skb, skb1, len, pos); 3270 } 3271 EXPORT_SYMBOL(skb_split); 3272 3273 /* Shifting from/to a cloned skb is a no-go. 3274 * 3275 * Caller cannot keep skb_shinfo related pointers past calling here! 3276 */ 3277 static int skb_prepare_for_shift(struct sk_buff *skb) 3278 { 3279 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 3280 } 3281 3282 /** 3283 * skb_shift - Shifts paged data partially from skb to another 3284 * @tgt: buffer into which tail data gets added 3285 * @skb: buffer from which the paged data comes from 3286 * @shiftlen: shift up to this many bytes 3287 * 3288 * Attempts to shift up to shiftlen worth of bytes, which may be less than 3289 * the length of the skb, from skb to tgt. Returns number bytes shifted. 3290 * It's up to caller to free skb if everything was shifted. 3291 * 3292 * If @tgt runs out of frags, the whole operation is aborted. 3293 * 3294 * Skb cannot include anything else but paged data while tgt is allowed 3295 * to have non-paged data as well. 3296 * 3297 * TODO: full sized shift could be optimized but that would need 3298 * specialized skb free'er to handle frags without up-to-date nr_frags. 3299 */ 3300 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 3301 { 3302 int from, to, merge, todo; 3303 skb_frag_t *fragfrom, *fragto; 3304 3305 BUG_ON(shiftlen > skb->len); 3306 3307 if (skb_headlen(skb)) 3308 return 0; 3309 if (skb_zcopy(tgt) || skb_zcopy(skb)) 3310 return 0; 3311 3312 todo = shiftlen; 3313 from = 0; 3314 to = skb_shinfo(tgt)->nr_frags; 3315 fragfrom = &skb_shinfo(skb)->frags[from]; 3316 3317 /* Actual merge is delayed until the point when we know we can 3318 * commit all, so that we don't have to undo partial changes 3319 */ 3320 if (!to || 3321 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 3322 skb_frag_off(fragfrom))) { 3323 merge = -1; 3324 } else { 3325 merge = to - 1; 3326 3327 todo -= skb_frag_size(fragfrom); 3328 if (todo < 0) { 3329 if (skb_prepare_for_shift(skb) || 3330 skb_prepare_for_shift(tgt)) 3331 return 0; 3332 3333 /* All previous frag pointers might be stale! 
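 * (skb_prepare_for_shift() may have called pskb_expand_head(), which
 * reallocates the head and the shared info area, so fragfrom and fragto
 * are re-read from skb_shinfo() immediately below)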
*/ 3334 fragfrom = &skb_shinfo(skb)->frags[from]; 3335 fragto = &skb_shinfo(tgt)->frags[merge]; 3336 3337 skb_frag_size_add(fragto, shiftlen); 3338 skb_frag_size_sub(fragfrom, shiftlen); 3339 skb_frag_off_add(fragfrom, shiftlen); 3340 3341 goto onlymerged; 3342 } 3343 3344 from++; 3345 } 3346 3347 /* Skip full, not-fitting skb to avoid expensive operations */ 3348 if ((shiftlen == skb->len) && 3349 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 3350 return 0; 3351 3352 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 3353 return 0; 3354 3355 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 3356 if (to == MAX_SKB_FRAGS) 3357 return 0; 3358 3359 fragfrom = &skb_shinfo(skb)->frags[from]; 3360 fragto = &skb_shinfo(tgt)->frags[to]; 3361 3362 if (todo >= skb_frag_size(fragfrom)) { 3363 *fragto = *fragfrom; 3364 todo -= skb_frag_size(fragfrom); 3365 from++; 3366 to++; 3367 3368 } else { 3369 __skb_frag_ref(fragfrom); 3370 skb_frag_page_copy(fragto, fragfrom); 3371 skb_frag_off_copy(fragto, fragfrom); 3372 skb_frag_size_set(fragto, todo); 3373 3374 skb_frag_off_add(fragfrom, todo); 3375 skb_frag_size_sub(fragfrom, todo); 3376 todo = 0; 3377 3378 to++; 3379 break; 3380 } 3381 } 3382 3383 /* Ready to "commit" this state change to tgt */ 3384 skb_shinfo(tgt)->nr_frags = to; 3385 3386 if (merge >= 0) { 3387 fragfrom = &skb_shinfo(skb)->frags[0]; 3388 fragto = &skb_shinfo(tgt)->frags[merge]; 3389 3390 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 3391 __skb_frag_unref(fragfrom); 3392 } 3393 3394 /* Reposition in the original skb */ 3395 to = 0; 3396 while (from < skb_shinfo(skb)->nr_frags) 3397 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 3398 skb_shinfo(skb)->nr_frags = to; 3399 3400 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 3401 3402 onlymerged: 3403 /* Most likely the tgt won't ever need its checksum anymore, skb on 3404 * the other hand might need it if it needs to be resent 3405 */ 3406 tgt->ip_summed = CHECKSUM_PARTIAL; 3407 skb->ip_summed = CHECKSUM_PARTIAL; 3408 3409 /* Yak, is it really working this way? Some helper please? */ 3410 skb->len -= shiftlen; 3411 skb->data_len -= shiftlen; 3412 skb->truesize -= shiftlen; 3413 tgt->len += shiftlen; 3414 tgt->data_len += shiftlen; 3415 tgt->truesize += shiftlen; 3416 3417 return shiftlen; 3418 } 3419 3420 /** 3421 * skb_prepare_seq_read - Prepare a sequential read of skb data 3422 * @skb: the buffer to read 3423 * @from: lower offset of data to be read 3424 * @to: upper offset of data to be read 3425 * @st: state variable 3426 * 3427 * Initializes the specified state variable. Must be called before 3428 * invoking skb_seq_read() for the first time. 3429 */ 3430 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 3431 unsigned int to, struct skb_seq_state *st) 3432 { 3433 st->lower_offset = from; 3434 st->upper_offset = to; 3435 st->root_skb = st->cur_skb = skb; 3436 st->frag_idx = st->stepped_offset = 0; 3437 st->frag_data = NULL; 3438 } 3439 EXPORT_SYMBOL(skb_prepare_seq_read); 3440 3441 /** 3442 * skb_seq_read - Sequentially read skb data 3443 * @consumed: number of bytes consumed by the caller so far 3444 * @data: destination pointer for data to be returned 3445 * @st: state variable 3446 * 3447 * Reads a block of skb data at @consumed relative to the 3448 * lower offset specified to skb_prepare_seq_read(). 
Assigns 3449 * the head of the data block to @data and returns the length 3450 * of the block or 0 if the end of the skb data or the upper 3451 * offset has been reached. 3452 * 3453 * The caller is not required to consume all of the data 3454 * returned, i.e. @consumed is typically set to the number 3455 * of bytes already consumed and the next call to 3456 * skb_seq_read() will return the remaining part of the block. 3457 * 3458 * Note 1: The size of each block of data returned can be arbitrary, 3459 * this limitation is the cost for zerocopy sequential 3460 * reads of potentially non linear data. 3461 * 3462 * Note 2: Fragment lists within fragments are not implemented 3463 * at the moment, state->root_skb could be replaced with 3464 * a stack for this purpose. 3465 */ 3466 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 3467 struct skb_seq_state *st) 3468 { 3469 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 3470 skb_frag_t *frag; 3471 3472 if (unlikely(abs_offset >= st->upper_offset)) { 3473 if (st->frag_data) { 3474 kunmap_atomic(st->frag_data); 3475 st->frag_data = NULL; 3476 } 3477 return 0; 3478 } 3479 3480 next_skb: 3481 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 3482 3483 if (abs_offset < block_limit && !st->frag_data) { 3484 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 3485 return block_limit - abs_offset; 3486 } 3487 3488 if (st->frag_idx == 0 && !st->frag_data) 3489 st->stepped_offset += skb_headlen(st->cur_skb); 3490 3491 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 3492 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 3493 block_limit = skb_frag_size(frag) + st->stepped_offset; 3494 3495 if (abs_offset < block_limit) { 3496 if (!st->frag_data) 3497 st->frag_data = kmap_atomic(skb_frag_page(frag)); 3498 3499 *data = (u8 *) st->frag_data + skb_frag_off(frag) + 3500 (abs_offset - st->stepped_offset); 3501 3502 return block_limit - abs_offset; 3503 } 3504 3505 if (st->frag_data) { 3506 kunmap_atomic(st->frag_data); 3507 st->frag_data = NULL; 3508 } 3509 3510 st->frag_idx++; 3511 st->stepped_offset += skb_frag_size(frag); 3512 } 3513 3514 if (st->frag_data) { 3515 kunmap_atomic(st->frag_data); 3516 st->frag_data = NULL; 3517 } 3518 3519 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 3520 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 3521 st->frag_idx = 0; 3522 goto next_skb; 3523 } else if (st->cur_skb->next) { 3524 st->cur_skb = st->cur_skb->next; 3525 st->frag_idx = 0; 3526 goto next_skb; 3527 } 3528 3529 return 0; 3530 } 3531 EXPORT_SYMBOL(skb_seq_read); 3532 3533 /** 3534 * skb_abort_seq_read - Abort a sequential read of skb data 3535 * @st: state variable 3536 * 3537 * Must be called if skb_seq_read() was not called until it 3538 * returned 0. 
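 *
 * Illustrative use of the whole sequential-read API (error handling
 * omitted; process() is a hypothetical consumer):
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int consumed = 0, len;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		process(data, len);
 *		consumed += len;
 *	}
 *
 * Because the loop above runs skb_seq_read() until it returns 0, no
 * skb_abort_seq_read() is needed there; call it only when bailing out
 * early.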
3539 */ 3540 void skb_abort_seq_read(struct skb_seq_state *st) 3541 { 3542 if (st->frag_data) 3543 kunmap_atomic(st->frag_data); 3544 } 3545 EXPORT_SYMBOL(skb_abort_seq_read); 3546 3547 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 3548 3549 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 3550 struct ts_config *conf, 3551 struct ts_state *state) 3552 { 3553 return skb_seq_read(offset, text, TS_SKB_CB(state)); 3554 } 3555 3556 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 3557 { 3558 skb_abort_seq_read(TS_SKB_CB(state)); 3559 } 3560 3561 /** 3562 * skb_find_text - Find a text pattern in skb data 3563 * @skb: the buffer to look in 3564 * @from: search offset 3565 * @to: search limit 3566 * @config: textsearch configuration 3567 * 3568 * Finds a pattern in the skb data according to the specified 3569 * textsearch configuration. Use textsearch_next() to retrieve 3570 * subsequent occurrences of the pattern. Returns the offset 3571 * to the first occurrence or UINT_MAX if no match was found. 3572 */ 3573 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 3574 unsigned int to, struct ts_config *config) 3575 { 3576 struct ts_state state; 3577 unsigned int ret; 3578 3579 config->get_next_block = skb_ts_get_next_block; 3580 config->finish = skb_ts_finish; 3581 3582 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 3583 3584 ret = textsearch_find(config, &state); 3585 return (ret <= to - from ? ret : UINT_MAX); 3586 } 3587 EXPORT_SYMBOL(skb_find_text); 3588 3589 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 3590 int offset, size_t size) 3591 { 3592 int i = skb_shinfo(skb)->nr_frags; 3593 3594 if (skb_can_coalesce(skb, i, page, offset)) { 3595 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); 3596 } else if (i < MAX_SKB_FRAGS) { 3597 get_page(page); 3598 skb_fill_page_desc(skb, i, page, offset, size); 3599 } else { 3600 return -EMSGSIZE; 3601 } 3602 3603 return 0; 3604 } 3605 EXPORT_SYMBOL_GPL(skb_append_pagefrags); 3606 3607 /** 3608 * skb_pull_rcsum - pull skb and update receive checksum 3609 * @skb: buffer to update 3610 * @len: length of data pulled 3611 * 3612 * This function performs an skb_pull on the packet and updates 3613 * the CHECKSUM_COMPLETE checksum. It should be used on 3614 * receive path processing instead of skb_pull unless you know 3615 * that the checksum difference is zero (e.g., a valid IP header) 3616 * or you are setting ip_summed to CHECKSUM_NONE. 
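 *
 * Illustrative example (a hypothetical 4-byte tag being stripped on
 * receive; drop is the caller's error label):
 *
 *	if (!pskb_may_pull(skb, 4))
 *		goto drop;
 *	skb_pull_rcsum(skb, 4);
 *
 * keeps a CHECKSUM_COMPLETE value in skb->csum consistent with the
 * shortened data.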
3617 */ 3618 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 3619 { 3620 unsigned char *data = skb->data; 3621 3622 BUG_ON(len > skb->len); 3623 __skb_pull(skb, len); 3624 skb_postpull_rcsum(skb, data, len); 3625 return skb->data; 3626 } 3627 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 3628 3629 static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) 3630 { 3631 skb_frag_t head_frag; 3632 struct page *page; 3633 3634 page = virt_to_head_page(frag_skb->head); 3635 __skb_frag_set_page(&head_frag, page); 3636 skb_frag_off_set(&head_frag, frag_skb->data - 3637 (unsigned char *)page_address(page)); 3638 skb_frag_size_set(&head_frag, skb_headlen(frag_skb)); 3639 return head_frag; 3640 } 3641 3642 struct sk_buff *skb_segment_list(struct sk_buff *skb, 3643 netdev_features_t features, 3644 unsigned int offset) 3645 { 3646 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; 3647 unsigned int tnl_hlen = skb_tnl_header_len(skb); 3648 unsigned int delta_truesize = 0; 3649 unsigned int delta_len = 0; 3650 struct sk_buff *tail = NULL; 3651 struct sk_buff *nskb; 3652 3653 skb_push(skb, -skb_network_offset(skb) + offset); 3654 3655 skb_shinfo(skb)->frag_list = NULL; 3656 3657 do { 3658 nskb = list_skb; 3659 list_skb = list_skb->next; 3660 3661 if (!tail) 3662 skb->next = nskb; 3663 else 3664 tail->next = nskb; 3665 3666 tail = nskb; 3667 3668 delta_len += nskb->len; 3669 delta_truesize += nskb->truesize; 3670 3671 skb_push(nskb, -skb_network_offset(nskb) + offset); 3672 3673 skb_release_head_state(nskb); 3674 __copy_skb_header(nskb, skb); 3675 3676 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); 3677 skb_copy_from_linear_data_offset(skb, -tnl_hlen, 3678 nskb->data - tnl_hlen, 3679 offset + tnl_hlen); 3680 3681 if (skb_needs_linearize(nskb, features) && 3682 __skb_linearize(nskb)) 3683 goto err_linearize; 3684 3685 } while (list_skb); 3686 3687 skb->truesize = skb->truesize - delta_truesize; 3688 skb->data_len = skb->data_len - delta_len; 3689 skb->len = skb->len - delta_len; 3690 3691 skb_gso_reset(skb); 3692 3693 skb->prev = tail; 3694 3695 if (skb_needs_linearize(skb, features) && 3696 __skb_linearize(skb)) 3697 goto err_linearize; 3698 3699 skb_get(skb); 3700 3701 return skb; 3702 3703 err_linearize: 3704 kfree_skb_list(skb->next); 3705 skb->next = NULL; 3706 return ERR_PTR(-ENOMEM); 3707 } 3708 EXPORT_SYMBOL_GPL(skb_segment_list); 3709 3710 int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb) 3711 { 3712 if (unlikely(p->len + skb->len >= 65536)) 3713 return -E2BIG; 3714 3715 if (NAPI_GRO_CB(p)->last == p) 3716 skb_shinfo(p)->frag_list = skb; 3717 else 3718 NAPI_GRO_CB(p)->last->next = skb; 3719 3720 skb_pull(skb, skb_gro_offset(skb)); 3721 3722 NAPI_GRO_CB(p)->last = skb; 3723 NAPI_GRO_CB(p)->count++; 3724 p->data_len += skb->len; 3725 p->truesize += skb->truesize; 3726 p->len += skb->len; 3727 3728 NAPI_GRO_CB(skb)->same_flow = 1; 3729 3730 return 0; 3731 } 3732 3733 /** 3734 * skb_segment - Perform protocol segmentation on skb. 3735 * @head_skb: buffer to segment 3736 * @features: features for the output path (see dev->features) 3737 * 3738 * This function performs segmentation on the given skb. It returns 3739 * a pointer to the first in a list of new skbs for the segments. 3740 * In case of error it returns ERR_PTR(err). 
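 *
 * Illustrative caller sketch (the real GSO entry points add further
 * checks around this):
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *
 *	consume_skb(skb);
 *	skb = segs;
 *
 * after which the caller walks the ->next chain, transmitting one
 * segment at a time.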
3741 */ 3742 struct sk_buff *skb_segment(struct sk_buff *head_skb, 3743 netdev_features_t features) 3744 { 3745 struct sk_buff *segs = NULL; 3746 struct sk_buff *tail = NULL; 3747 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 3748 skb_frag_t *frag = skb_shinfo(head_skb)->frags; 3749 unsigned int mss = skb_shinfo(head_skb)->gso_size; 3750 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 3751 struct sk_buff *frag_skb = head_skb; 3752 unsigned int offset = doffset; 3753 unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 3754 unsigned int partial_segs = 0; 3755 unsigned int headroom; 3756 unsigned int len = head_skb->len; 3757 __be16 proto; 3758 bool csum, sg; 3759 int nfrags = skb_shinfo(head_skb)->nr_frags; 3760 int err = -ENOMEM; 3761 int i = 0; 3762 int pos; 3763 3764 if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) && 3765 (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) { 3766 /* gso_size is untrusted, and we have a frag_list with a linear 3767 * non head_frag head. 3768 * 3769 * (we assume checking the first list_skb member suffices; 3770 * i.e if either of the list_skb members have non head_frag 3771 * head, then the first one has too). 3772 * 3773 * If head_skb's headlen does not fit requested gso_size, it 3774 * means that the frag_list members do NOT terminate on exact 3775 * gso_size boundaries. Hence we cannot perform skb_frag_t page 3776 * sharing. Therefore we must fallback to copying the frag_list 3777 * skbs; we do so by disabling SG. 3778 */ 3779 if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) 3780 features &= ~NETIF_F_SG; 3781 } 3782 3783 __skb_push(head_skb, doffset); 3784 proto = skb_network_protocol(head_skb, NULL); 3785 if (unlikely(!proto)) 3786 return ERR_PTR(-EINVAL); 3787 3788 sg = !!(features & NETIF_F_SG); 3789 csum = !!can_checksum_protocol(features, proto); 3790 3791 if (sg && csum && (mss != GSO_BY_FRAGS)) { 3792 if (!(features & NETIF_F_GSO_PARTIAL)) { 3793 struct sk_buff *iter; 3794 unsigned int frag_len; 3795 3796 if (!list_skb || 3797 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 3798 goto normal; 3799 3800 /* If we get here then all the required 3801 * GSO features except frag_list are supported. 3802 * Try to split the SKB to multiple GSO SKBs 3803 * with no frag_list. 3804 * Currently we can do that only when the buffers don't 3805 * have a linear part and all the buffers except 3806 * the last are of the same length. 3807 */ 3808 frag_len = list_skb->len; 3809 skb_walk_frags(head_skb, iter) { 3810 if (frag_len != iter->len && iter->next) 3811 goto normal; 3812 if (skb_headlen(iter) && !iter->head_frag) 3813 goto normal; 3814 3815 len -= iter->len; 3816 } 3817 3818 if (len != frag_len) 3819 goto normal; 3820 } 3821 3822 /* GSO partial only requires that we trim off any excess that 3823 * doesn't fit into an MSS sized block, so take care of that 3824 * now. 
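 * Worked illustration: with gso_size 1500 and a 6200-byte payload,
 * partial_segs below becomes 4 and mss is scaled to 6000, so the main
 * loop emits one 6000-byte segment plus a 200-byte remainder; the
 * per-segment gso_size and gso_segs are restored near the end of this
 * function.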
3825 */ 3826 partial_segs = len / mss; 3827 if (partial_segs > 1) 3828 mss *= partial_segs; 3829 else 3830 partial_segs = 0; 3831 } 3832 3833 normal: 3834 headroom = skb_headroom(head_skb); 3835 pos = skb_headlen(head_skb); 3836 3837 do { 3838 struct sk_buff *nskb; 3839 skb_frag_t *nskb_frag; 3840 int hsize; 3841 int size; 3842 3843 if (unlikely(mss == GSO_BY_FRAGS)) { 3844 len = list_skb->len; 3845 } else { 3846 len = head_skb->len - offset; 3847 if (len > mss) 3848 len = mss; 3849 } 3850 3851 hsize = skb_headlen(head_skb) - offset; 3852 if (hsize < 0) 3853 hsize = 0; 3854 if (hsize > len || !sg) 3855 hsize = len; 3856 3857 if (!hsize && i >= nfrags && skb_headlen(list_skb) && 3858 (skb_headlen(list_skb) == len || sg)) { 3859 BUG_ON(skb_headlen(list_skb) > len); 3860 3861 i = 0; 3862 nfrags = skb_shinfo(list_skb)->nr_frags; 3863 frag = skb_shinfo(list_skb)->frags; 3864 frag_skb = list_skb; 3865 pos += skb_headlen(list_skb); 3866 3867 while (pos < offset + len) { 3868 BUG_ON(i >= nfrags); 3869 3870 size = skb_frag_size(frag); 3871 if (pos + size > offset + len) 3872 break; 3873 3874 i++; 3875 pos += size; 3876 frag++; 3877 } 3878 3879 nskb = skb_clone(list_skb, GFP_ATOMIC); 3880 list_skb = list_skb->next; 3881 3882 if (unlikely(!nskb)) 3883 goto err; 3884 3885 if (unlikely(pskb_trim(nskb, len))) { 3886 kfree_skb(nskb); 3887 goto err; 3888 } 3889 3890 hsize = skb_end_offset(nskb); 3891 if (skb_cow_head(nskb, doffset + headroom)) { 3892 kfree_skb(nskb); 3893 goto err; 3894 } 3895 3896 nskb->truesize += skb_end_offset(nskb) - hsize; 3897 skb_release_head_state(nskb); 3898 __skb_push(nskb, doffset); 3899 } else { 3900 nskb = __alloc_skb(hsize + doffset + headroom, 3901 GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 3902 NUMA_NO_NODE); 3903 3904 if (unlikely(!nskb)) 3905 goto err; 3906 3907 skb_reserve(nskb, headroom); 3908 __skb_put(nskb, doffset); 3909 } 3910 3911 if (segs) 3912 tail->next = nskb; 3913 else 3914 segs = nskb; 3915 tail = nskb; 3916 3917 __copy_skb_header(nskb, head_skb); 3918 3919 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 3920 skb_reset_mac_len(nskb); 3921 3922 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 3923 nskb->data - tnl_hlen, 3924 doffset + tnl_hlen); 3925 3926 if (nskb->len == len + doffset) 3927 goto perform_csum_check; 3928 3929 if (!sg) { 3930 if (!csum) { 3931 if (!nskb->remcsum_offload) 3932 nskb->ip_summed = CHECKSUM_NONE; 3933 SKB_GSO_CB(nskb)->csum = 3934 skb_copy_and_csum_bits(head_skb, offset, 3935 skb_put(nskb, 3936 len), 3937 len, 0); 3938 SKB_GSO_CB(nskb)->csum_start = 3939 skb_headroom(nskb) + doffset; 3940 } else { 3941 skb_copy_bits(head_skb, offset, 3942 skb_put(nskb, len), 3943 len); 3944 } 3945 continue; 3946 } 3947 3948 nskb_frag = skb_shinfo(nskb)->frags; 3949 3950 skb_copy_from_linear_data_offset(head_skb, offset, 3951 skb_put(nskb, hsize), hsize); 3952 3953 skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags & 3954 SKBTX_SHARED_FRAG; 3955 3956 if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || 3957 skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) 3958 goto err; 3959 3960 while (pos < offset + len) { 3961 if (i >= nfrags) { 3962 i = 0; 3963 nfrags = skb_shinfo(list_skb)->nr_frags; 3964 frag = skb_shinfo(list_skb)->frags; 3965 frag_skb = list_skb; 3966 if (!skb_headlen(list_skb)) { 3967 BUG_ON(!nfrags); 3968 } else { 3969 BUG_ON(!list_skb->head_frag); 3970 3971 /* to make room for head_frag. 
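 * (stepping i back to -1 makes the next pass of the loop take the
 * skb_head_frag_to_page_desc() branch further down, so the linear head
 * of this frag_list member is emitted as a pseudo-frag before its real
 * frags)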
*/ 3972 i--; 3973 frag--; 3974 } 3975 if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || 3976 skb_zerocopy_clone(nskb, frag_skb, 3977 GFP_ATOMIC)) 3978 goto err; 3979 3980 list_skb = list_skb->next; 3981 } 3982 3983 if (unlikely(skb_shinfo(nskb)->nr_frags >= 3984 MAX_SKB_FRAGS)) { 3985 net_warn_ratelimited( 3986 "skb_segment: too many frags: %u %u\n", 3987 pos, mss); 3988 err = -EINVAL; 3989 goto err; 3990 } 3991 3992 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; 3993 __skb_frag_ref(nskb_frag); 3994 size = skb_frag_size(nskb_frag); 3995 3996 if (pos < offset) { 3997 skb_frag_off_add(nskb_frag, offset - pos); 3998 skb_frag_size_sub(nskb_frag, offset - pos); 3999 } 4000 4001 skb_shinfo(nskb)->nr_frags++; 4002 4003 if (pos + size <= offset + len) { 4004 i++; 4005 frag++; 4006 pos += size; 4007 } else { 4008 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); 4009 goto skip_fraglist; 4010 } 4011 4012 nskb_frag++; 4013 } 4014 4015 skip_fraglist: 4016 nskb->data_len = len - hsize; 4017 nskb->len += nskb->data_len; 4018 nskb->truesize += nskb->data_len; 4019 4020 perform_csum_check: 4021 if (!csum) { 4022 if (skb_has_shared_frag(nskb) && 4023 __skb_linearize(nskb)) 4024 goto err; 4025 4026 if (!nskb->remcsum_offload) 4027 nskb->ip_summed = CHECKSUM_NONE; 4028 SKB_GSO_CB(nskb)->csum = 4029 skb_checksum(nskb, doffset, 4030 nskb->len - doffset, 0); 4031 SKB_GSO_CB(nskb)->csum_start = 4032 skb_headroom(nskb) + doffset; 4033 } 4034 } while ((offset += len) < head_skb->len); 4035 4036 /* Some callers want to get the end of the list. 4037 * Put it in segs->prev to avoid walking the list. 4038 * (see validate_xmit_skb_list() for example) 4039 */ 4040 segs->prev = tail; 4041 4042 if (partial_segs) { 4043 struct sk_buff *iter; 4044 int type = skb_shinfo(head_skb)->gso_type; 4045 unsigned short gso_size = skb_shinfo(head_skb)->gso_size; 4046 4047 /* Update type to add partial and then remove dodgy if set */ 4048 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; 4049 type &= ~SKB_GSO_DODGY; 4050 4051 /* Update GSO info and prepare to start updating headers on 4052 * our way back down the stack of protocols. 4053 */ 4054 for (iter = segs; iter; iter = iter->next) { 4055 skb_shinfo(iter)->gso_size = gso_size; 4056 skb_shinfo(iter)->gso_segs = partial_segs; 4057 skb_shinfo(iter)->gso_type = type; 4058 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; 4059 } 4060 4061 if (tail->len - doffset <= gso_size) 4062 skb_shinfo(tail)->gso_size = 0; 4063 else if (tail != segs) 4064 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); 4065 } 4066 4067 /* Following permits correct backpressure, for protocols 4068 * using skb_set_owner_w(). 4069 * Idea is to tranfert ownership from head_skb to last segment. 
4070 */ 4071 if (head_skb->destructor == sock_wfree) { 4072 swap(tail->truesize, head_skb->truesize); 4073 swap(tail->destructor, head_skb->destructor); 4074 swap(tail->sk, head_skb->sk); 4075 } 4076 return segs; 4077 4078 err: 4079 kfree_skb_list(segs); 4080 return ERR_PTR(err); 4081 } 4082 EXPORT_SYMBOL_GPL(skb_segment); 4083 4084 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) 4085 { 4086 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); 4087 unsigned int offset = skb_gro_offset(skb); 4088 unsigned int headlen = skb_headlen(skb); 4089 unsigned int len = skb_gro_len(skb); 4090 unsigned int delta_truesize; 4091 struct sk_buff *lp; 4092 4093 if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) 4094 return -E2BIG; 4095 4096 lp = NAPI_GRO_CB(p)->last; 4097 pinfo = skb_shinfo(lp); 4098 4099 if (headlen <= offset) { 4100 skb_frag_t *frag; 4101 skb_frag_t *frag2; 4102 int i = skbinfo->nr_frags; 4103 int nr_frags = pinfo->nr_frags + i; 4104 4105 if (nr_frags > MAX_SKB_FRAGS) 4106 goto merge; 4107 4108 offset -= headlen; 4109 pinfo->nr_frags = nr_frags; 4110 skbinfo->nr_frags = 0; 4111 4112 frag = pinfo->frags + nr_frags; 4113 frag2 = skbinfo->frags + i; 4114 do { 4115 *--frag = *--frag2; 4116 } while (--i); 4117 4118 skb_frag_off_add(frag, offset); 4119 skb_frag_size_sub(frag, offset); 4120 4121 /* all fragments truesize : remove (head size + sk_buff) */ 4122 delta_truesize = skb->truesize - 4123 SKB_TRUESIZE(skb_end_offset(skb)); 4124 4125 skb->truesize -= skb->data_len; 4126 skb->len -= skb->data_len; 4127 skb->data_len = 0; 4128 4129 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 4130 goto done; 4131 } else if (skb->head_frag) { 4132 int nr_frags = pinfo->nr_frags; 4133 skb_frag_t *frag = pinfo->frags + nr_frags; 4134 struct page *page = virt_to_head_page(skb->head); 4135 unsigned int first_size = headlen - offset; 4136 unsigned int first_offset; 4137 4138 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 4139 goto merge; 4140 4141 first_offset = skb->data - 4142 (unsigned char *)page_address(page) + 4143 offset; 4144 4145 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 4146 4147 __skb_frag_set_page(frag, page); 4148 skb_frag_off_set(frag, first_offset); 4149 skb_frag_size_set(frag, first_size); 4150 4151 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 4152 /* We dont need to clear skbinfo->nr_frags here */ 4153 4154 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 4155 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 4156 goto done; 4157 } 4158 4159 merge: 4160 delta_truesize = skb->truesize; 4161 if (offset > headlen) { 4162 unsigned int eat = offset - headlen; 4163 4164 skb_frag_off_add(&skbinfo->frags[0], eat); 4165 skb_frag_size_sub(&skbinfo->frags[0], eat); 4166 skb->data_len -= eat; 4167 skb->len -= eat; 4168 offset = headlen; 4169 } 4170 4171 __skb_pull(skb, offset); 4172 4173 if (NAPI_GRO_CB(p)->last == p) 4174 skb_shinfo(p)->frag_list = skb; 4175 else 4176 NAPI_GRO_CB(p)->last->next = skb; 4177 NAPI_GRO_CB(p)->last = skb; 4178 __skb_header_release(skb); 4179 lp = p; 4180 4181 done: 4182 NAPI_GRO_CB(p)->count++; 4183 p->data_len += len; 4184 p->truesize += delta_truesize; 4185 p->len += len; 4186 if (lp != p) { 4187 lp->data_len += len; 4188 lp->truesize += delta_truesize; 4189 lp->len += len; 4190 } 4191 NAPI_GRO_CB(skb)->same_flow = 1; 4192 return 0; 4193 } 4194 4195 #ifdef CONFIG_SKB_EXTENSIONS 4196 #define SKB_EXT_ALIGN_VALUE 8 4197 #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), 
SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) 4198 4199 static const u8 skb_ext_type_len[] = { 4200 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4201 [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), 4202 #endif 4203 #ifdef CONFIG_XFRM 4204 [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), 4205 #endif 4206 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 4207 [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), 4208 #endif 4209 #if IS_ENABLED(CONFIG_MPTCP) 4210 [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), 4211 #endif 4212 }; 4213 4214 static __always_inline unsigned int skb_ext_total_length(void) 4215 { 4216 return SKB_EXT_CHUNKSIZEOF(struct skb_ext) + 4217 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4218 skb_ext_type_len[SKB_EXT_BRIDGE_NF] + 4219 #endif 4220 #ifdef CONFIG_XFRM 4221 skb_ext_type_len[SKB_EXT_SEC_PATH] + 4222 #endif 4223 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 4224 skb_ext_type_len[TC_SKB_EXT] + 4225 #endif 4226 #if IS_ENABLED(CONFIG_MPTCP) 4227 skb_ext_type_len[SKB_EXT_MPTCP] + 4228 #endif 4229 0; 4230 } 4231 4232 static void skb_extensions_init(void) 4233 { 4234 BUILD_BUG_ON(SKB_EXT_NUM >= 8); 4235 BUILD_BUG_ON(skb_ext_total_length() > 255); 4236 4237 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", 4238 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), 4239 0, 4240 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4241 NULL); 4242 } 4243 #else 4244 static void skb_extensions_init(void) {} 4245 #endif 4246 4247 void __init skb_init(void) 4248 { 4249 skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache", 4250 sizeof(struct sk_buff), 4251 0, 4252 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4253 offsetof(struct sk_buff, cb), 4254 sizeof_field(struct sk_buff, cb), 4255 NULL); 4256 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 4257 sizeof(struct sk_buff_fclones), 4258 0, 4259 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4260 NULL); 4261 skb_extensions_init(); 4262 } 4263 4264 static int 4265 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, 4266 unsigned int recursion_level) 4267 { 4268 int start = skb_headlen(skb); 4269 int i, copy = start - offset; 4270 struct sk_buff *frag_iter; 4271 int elt = 0; 4272 4273 if (unlikely(recursion_level >= 24)) 4274 return -EMSGSIZE; 4275 4276 if (copy > 0) { 4277 if (copy > len) 4278 copy = len; 4279 sg_set_buf(sg, skb->data + offset, copy); 4280 elt++; 4281 if ((len -= copy) == 0) 4282 return elt; 4283 offset += copy; 4284 } 4285 4286 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 4287 int end; 4288 4289 WARN_ON(start > offset + len); 4290 4291 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 4292 if ((copy = end - offset) > 0) { 4293 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4294 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 4295 return -EMSGSIZE; 4296 4297 if (copy > len) 4298 copy = len; 4299 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 4300 skb_frag_off(frag) + offset - start); 4301 elt++; 4302 if (!(len -= copy)) 4303 return elt; 4304 offset += copy; 4305 } 4306 start = end; 4307 } 4308 4309 skb_walk_frags(skb, frag_iter) { 4310 int end, ret; 4311 4312 WARN_ON(start > offset + len); 4313 4314 end = start + frag_iter->len; 4315 if ((copy = end - offset) > 0) { 4316 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 4317 return -EMSGSIZE; 4318 4319 if (copy > len) 4320 copy = len; 4321 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, 4322 copy, recursion_level + 1); 4323 if (unlikely(ret < 0)) 4324 return ret; 4325 elt += ret; 4326 if ((len -= copy) == 0) 4327 return elt; 4328 
			offset += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return elt;
}

/**
 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
 * @skb: Socket buffer containing the buffers to be mapped
 * @sg: The scatter-gather list to map into
 * @offset: The offset into the buffer's contents to start mapping
 * @len: Length of buffer space to be mapped
 *
 * Fill the specified scatter-gather list with mappings/pointers into a
 * region of the buffer space attached to a socket buffer. Returns either
 * the number of scatterlist items used, or -EMSGSIZE if the contents
 * could not fit.
 */
int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);

	if (nsg <= 0)
		return nsg;

	sg_mark_end(&sg[nsg - 1]);

	return nsg;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);

/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
 * given sglist without marking the sg entry that holds the last skb data as
 * the end. So the caller can manipulate the sg list at will when padding new
 * data after the first call, without calling sg_unmark_end to extend the
 * sg list.
 *
 * Scenario to use skb_to_sgvec_nomark:
 * 1. sg_init_table
 * 2. skb_to_sgvec_nomark(payload1)
 * 3. skb_to_sgvec_nomark(payload2)
 *
 * This is equivalent to:
 * 1. sg_init_table
 * 2. skb_to_sgvec(payload1)
 * 3. sg_unmark_end
 * 4. skb_to_sgvec(payload2)
 *
 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
 * is preferable.
 */
int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
			int offset, int len)
{
	return __skb_to_sgvec(skb, sg, offset, len, 0);
}
EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);



/**
 * skb_cow_data - Check that a socket buffer's data buffers are writable
 * @skb: The socket buffer to check.
 * @tailbits: Amount of trailing space to be added
 * @trailer: Returned pointer to the skb where the @tailbits space begins
 *
 * Make sure that the data buffers attached to a socket buffer are
 * writable. If they are not, private copies are made of the data buffers
 * and the socket buffer is set to use these instead.
 *
 * If @tailbits is given, make sure that there is space to write @tailbits
 * bytes of data beyond current end of socket buffer. @trailer will be
 * set to point to the skb in which this space begins.
 *
 * The number of scatterlist elements required to completely map the
 * COW'd and extended socket buffer will be returned.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    !__pskb_pull_tail(skb, __skb_pagelen(skb)))
		return -ENOMEM;

	/* Easy case. Most packets will go this way. */
	if (!skb_has_frag_list(skb)) {
		/* A bit of trouble: there is not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames.
OK, on miss we reallocate and reserve even more 4425 * space, 128 bytes is fair. */ 4426 4427 if (skb_tailroom(skb) < tailbits && 4428 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 4429 return -ENOMEM; 4430 4431 /* Voila! */ 4432 *trailer = skb; 4433 return 1; 4434 } 4435 4436 /* Misery. We are in troubles, going to mincer fragments... */ 4437 4438 elt = 1; 4439 skb_p = &skb_shinfo(skb)->frag_list; 4440 copyflag = 0; 4441 4442 while ((skb1 = *skb_p) != NULL) { 4443 int ntail = 0; 4444 4445 /* The fragment is partially pulled by someone, 4446 * this can happen on input. Copy it and everything 4447 * after it. */ 4448 4449 if (skb_shared(skb1)) 4450 copyflag = 1; 4451 4452 /* If the skb is the last, worry about trailer. */ 4453 4454 if (skb1->next == NULL && tailbits) { 4455 if (skb_shinfo(skb1)->nr_frags || 4456 skb_has_frag_list(skb1) || 4457 skb_tailroom(skb1) < tailbits) 4458 ntail = tailbits + 128; 4459 } 4460 4461 if (copyflag || 4462 skb_cloned(skb1) || 4463 ntail || 4464 skb_shinfo(skb1)->nr_frags || 4465 skb_has_frag_list(skb1)) { 4466 struct sk_buff *skb2; 4467 4468 /* Fuck, we are miserable poor guys... */ 4469 if (ntail == 0) 4470 skb2 = skb_copy(skb1, GFP_ATOMIC); 4471 else 4472 skb2 = skb_copy_expand(skb1, 4473 skb_headroom(skb1), 4474 ntail, 4475 GFP_ATOMIC); 4476 if (unlikely(skb2 == NULL)) 4477 return -ENOMEM; 4478 4479 if (skb1->sk) 4480 skb_set_owner_w(skb2, skb1->sk); 4481 4482 /* Looking around. Are we still alive? 4483 * OK, link new skb, drop old one */ 4484 4485 skb2->next = skb1->next; 4486 *skb_p = skb2; 4487 kfree_skb(skb1); 4488 skb1 = skb2; 4489 } 4490 elt++; 4491 *trailer = skb1; 4492 skb_p = &skb1->next; 4493 } 4494 4495 return elt; 4496 } 4497 EXPORT_SYMBOL_GPL(skb_cow_data); 4498 4499 static void sock_rmem_free(struct sk_buff *skb) 4500 { 4501 struct sock *sk = skb->sk; 4502 4503 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 4504 } 4505 4506 static void skb_set_err_queue(struct sk_buff *skb) 4507 { 4508 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. 4509 * So, it is safe to (mis)use it to mark skbs on the error queue. 
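	 *
	 * Illustrative consequence: code inspecting sk->sk_error_queue can
	 * recognise skbs that travelled through this path simply by testing
	 * skb->pkt_type == PACKET_OUTGOING.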
4510 */ 4511 skb->pkt_type = PACKET_OUTGOING; 4512 BUILD_BUG_ON(PACKET_OUTGOING == 0); 4513 } 4514 4515 /* 4516 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 4517 */ 4518 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 4519 { 4520 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 4521 (unsigned int)READ_ONCE(sk->sk_rcvbuf)) 4522 return -ENOMEM; 4523 4524 skb_orphan(skb); 4525 skb->sk = sk; 4526 skb->destructor = sock_rmem_free; 4527 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 4528 skb_set_err_queue(skb); 4529 4530 /* before exiting rcu section, make sure dst is refcounted */ 4531 skb_dst_force(skb); 4532 4533 skb_queue_tail(&sk->sk_error_queue, skb); 4534 if (!sock_flag(sk, SOCK_DEAD)) 4535 sk->sk_error_report(sk); 4536 return 0; 4537 } 4538 EXPORT_SYMBOL(sock_queue_err_skb); 4539 4540 static bool is_icmp_err_skb(const struct sk_buff *skb) 4541 { 4542 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || 4543 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); 4544 } 4545 4546 struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 4547 { 4548 struct sk_buff_head *q = &sk->sk_error_queue; 4549 struct sk_buff *skb, *skb_next = NULL; 4550 bool icmp_next = false; 4551 unsigned long flags; 4552 4553 spin_lock_irqsave(&q->lock, flags); 4554 skb = __skb_dequeue(q); 4555 if (skb && (skb_next = skb_peek(q))) { 4556 icmp_next = is_icmp_err_skb(skb_next); 4557 if (icmp_next) 4558 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin; 4559 } 4560 spin_unlock_irqrestore(&q->lock, flags); 4561 4562 if (is_icmp_err_skb(skb) && !icmp_next) 4563 sk->sk_err = 0; 4564 4565 if (skb_next) 4566 sk->sk_error_report(sk); 4567 4568 return skb; 4569 } 4570 EXPORT_SYMBOL(sock_dequeue_err_skb); 4571 4572 /** 4573 * skb_clone_sk - create clone of skb, and take reference to socket 4574 * @skb: the skb to clone 4575 * 4576 * This function creates a clone of a buffer that holds a reference on 4577 * sk_refcnt. Buffers created via this function are meant to be 4578 * returned using sock_queue_err_skb, or free via kfree_skb. 4579 * 4580 * When passing buffers allocated with this function to sock_queue_err_skb 4581 * it is necessary to wrap the call with sock_hold/sock_put in order to 4582 * prevent the socket from being released prior to being enqueued on 4583 * the sk_error_queue. 4584 */ 4585 struct sk_buff *skb_clone_sk(struct sk_buff *skb) 4586 { 4587 struct sock *sk = skb->sk; 4588 struct sk_buff *clone; 4589 4590 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 4591 return NULL; 4592 4593 clone = skb_clone(skb, GFP_ATOMIC); 4594 if (!clone) { 4595 sock_put(sk); 4596 return NULL; 4597 } 4598 4599 clone->sk = sk; 4600 clone->destructor = sock_efree; 4601 4602 return clone; 4603 } 4604 EXPORT_SYMBOL(skb_clone_sk); 4605 4606 static void __skb_complete_tx_timestamp(struct sk_buff *skb, 4607 struct sock *sk, 4608 int tstype, 4609 bool opt_stats) 4610 { 4611 struct sock_exterr_skb *serr; 4612 int err; 4613 4614 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); 4615 4616 serr = SKB_EXT_ERR(skb); 4617 memset(serr, 0, sizeof(*serr)); 4618 serr->ee.ee_errno = ENOMSG; 4619 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 4620 serr->ee.ee_info = tstype; 4621 serr->opt_stats = opt_stats; 4622 serr->header.h4.iif = skb->dev ? 
skb->dev->ifindex : 0; 4623 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 4624 serr->ee.ee_data = skb_shinfo(skb)->tskey; 4625 if (sk->sk_protocol == IPPROTO_TCP && 4626 sk->sk_type == SOCK_STREAM) 4627 serr->ee.ee_data -= sk->sk_tskey; 4628 } 4629 4630 err = sock_queue_err_skb(sk, skb); 4631 4632 if (err) 4633 kfree_skb(skb); 4634 } 4635 4636 static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 4637 { 4638 bool ret; 4639 4640 if (likely(sysctl_tstamp_allow_data || tsonly)) 4641 return true; 4642 4643 read_lock_bh(&sk->sk_callback_lock); 4644 ret = sk->sk_socket && sk->sk_socket->file && 4645 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 4646 read_unlock_bh(&sk->sk_callback_lock); 4647 return ret; 4648 } 4649 4650 void skb_complete_tx_timestamp(struct sk_buff *skb, 4651 struct skb_shared_hwtstamps *hwtstamps) 4652 { 4653 struct sock *sk = skb->sk; 4654 4655 if (!skb_may_tx_timestamp(sk, false)) 4656 goto err; 4657 4658 /* Take a reference to prevent skb_orphan() from freeing the socket, 4659 * but only if the socket refcount is not zero. 4660 */ 4661 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 4662 *skb_hwtstamps(skb) = *hwtstamps; 4663 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 4664 sock_put(sk); 4665 return; 4666 } 4667 4668 err: 4669 kfree_skb(skb); 4670 } 4671 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 4672 4673 void __skb_tstamp_tx(struct sk_buff *orig_skb, 4674 struct skb_shared_hwtstamps *hwtstamps, 4675 struct sock *sk, int tstype) 4676 { 4677 struct sk_buff *skb; 4678 bool tsonly, opt_stats = false; 4679 4680 if (!sk) 4681 return; 4682 4683 if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && 4684 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) 4685 return; 4686 4687 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 4688 if (!skb_may_tx_timestamp(sk, tsonly)) 4689 return; 4690 4691 if (tsonly) { 4692 #ifdef CONFIG_INET 4693 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && 4694 sk->sk_protocol == IPPROTO_TCP && 4695 sk->sk_type == SOCK_STREAM) { 4696 skb = tcp_get_timestamping_opt_stats(sk, orig_skb); 4697 opt_stats = true; 4698 } else 4699 #endif 4700 skb = alloc_skb(0, GFP_ATOMIC); 4701 } else { 4702 skb = skb_clone(orig_skb, GFP_ATOMIC); 4703 } 4704 if (!skb) 4705 return; 4706 4707 if (tsonly) { 4708 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & 4709 SKBTX_ANY_TSTAMP; 4710 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 4711 } 4712 4713 if (hwtstamps) 4714 *skb_hwtstamps(skb) = *hwtstamps; 4715 else 4716 skb->tstamp = ktime_get_real(); 4717 4718 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); 4719 } 4720 EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 4721 4722 void skb_tstamp_tx(struct sk_buff *orig_skb, 4723 struct skb_shared_hwtstamps *hwtstamps) 4724 { 4725 return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk, 4726 SCM_TSTAMP_SND); 4727 } 4728 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 4729 4730 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 4731 { 4732 struct sock *sk = skb->sk; 4733 struct sock_exterr_skb *serr; 4734 int err = 1; 4735 4736 skb->wifi_acked_valid = 1; 4737 skb->wifi_acked = acked; 4738 4739 serr = SKB_EXT_ERR(skb); 4740 memset(serr, 0, sizeof(*serr)); 4741 serr->ee.ee_errno = ENOMSG; 4742 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 4743 4744 /* Take a reference to prevent skb_orphan() from freeing the socket, 4745 * but only if the socket refcount is not zero. 
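	 * If the refcount is already zero the socket is being destroyed, so
	 * the skb is not queued and is simply freed below (err stays set).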
4746 */ 4747 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 4748 err = sock_queue_err_skb(sk, skb); 4749 sock_put(sk); 4750 } 4751 if (err) 4752 kfree_skb(skb); 4753 } 4754 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 4755 4756 /** 4757 * skb_partial_csum_set - set up and verify partial csum values for packet 4758 * @skb: the skb to set 4759 * @start: the number of bytes after skb->data to start checksumming. 4760 * @off: the offset from start to place the checksum. 4761 * 4762 * For untrusted partially-checksummed packets, we need to make sure the values 4763 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 4764 * 4765 * This function checks and sets those values and skb->ip_summed: if this 4766 * returns false you should drop the packet. 4767 */ 4768 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 4769 { 4770 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); 4771 u32 csum_start = skb_headroom(skb) + (u32)start; 4772 4773 if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) { 4774 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", 4775 start, off, skb_headroom(skb), skb_headlen(skb)); 4776 return false; 4777 } 4778 skb->ip_summed = CHECKSUM_PARTIAL; 4779 skb->csum_start = csum_start; 4780 skb->csum_offset = off; 4781 skb_set_transport_header(skb, start); 4782 return true; 4783 } 4784 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 4785 4786 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 4787 unsigned int max) 4788 { 4789 if (skb_headlen(skb) >= len) 4790 return 0; 4791 4792 /* If we need to pullup then pullup to the max, so we 4793 * won't need to do it again. 4794 */ 4795 if (max > skb->len) 4796 max = skb->len; 4797 4798 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 4799 return -ENOMEM; 4800 4801 if (skb_headlen(skb) < len) 4802 return -EPROTO; 4803 4804 return 0; 4805 } 4806 4807 #define MAX_TCP_HDR_LEN (15 * 4) 4808 4809 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 4810 typeof(IPPROTO_IP) proto, 4811 unsigned int off) 4812 { 4813 int err; 4814 4815 switch (proto) { 4816 case IPPROTO_TCP: 4817 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 4818 off + MAX_TCP_HDR_LEN); 4819 if (!err && !skb_partial_csum_set(skb, off, 4820 offsetof(struct tcphdr, 4821 check))) 4822 err = -EPROTO; 4823 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 4824 4825 case IPPROTO_UDP: 4826 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 4827 off + sizeof(struct udphdr)); 4828 if (!err && !skb_partial_csum_set(skb, off, 4829 offsetof(struct udphdr, 4830 check))) 4831 err = -EPROTO; 4832 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 4833 } 4834 4835 return ERR_PTR(-EPROTO); 4836 } 4837 4838 /* This value should be large enough to cover a tagged ethernet header plus 4839 * maximally sized IP and TCP or UDP headers. 
4840 */ 4841 #define MAX_IP_HDR_LEN 128 4842 4843 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 4844 { 4845 unsigned int off; 4846 bool fragment; 4847 __sum16 *csum; 4848 int err; 4849 4850 fragment = false; 4851 4852 err = skb_maybe_pull_tail(skb, 4853 sizeof(struct iphdr), 4854 MAX_IP_HDR_LEN); 4855 if (err < 0) 4856 goto out; 4857 4858 if (ip_is_fragment(ip_hdr(skb))) 4859 fragment = true; 4860 4861 off = ip_hdrlen(skb); 4862 4863 err = -EPROTO; 4864 4865 if (fragment) 4866 goto out; 4867 4868 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 4869 if (IS_ERR(csum)) 4870 return PTR_ERR(csum); 4871 4872 if (recalculate) 4873 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 4874 ip_hdr(skb)->daddr, 4875 skb->len - off, 4876 ip_hdr(skb)->protocol, 0); 4877 err = 0; 4878 4879 out: 4880 return err; 4881 } 4882 4883 /* This value should be large enough to cover a tagged ethernet header plus 4884 * an IPv6 header, all options, and a maximal TCP or UDP header. 4885 */ 4886 #define MAX_IPV6_HDR_LEN 256 4887 4888 #define OPT_HDR(type, skb, off) \ 4889 (type *)(skb_network_header(skb) + (off)) 4890 4891 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 4892 { 4893 int err; 4894 u8 nexthdr; 4895 unsigned int off; 4896 unsigned int len; 4897 bool fragment; 4898 bool done; 4899 __sum16 *csum; 4900 4901 fragment = false; 4902 done = false; 4903 4904 off = sizeof(struct ipv6hdr); 4905 4906 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 4907 if (err < 0) 4908 goto out; 4909 4910 nexthdr = ipv6_hdr(skb)->nexthdr; 4911 4912 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 4913 while (off <= len && !done) { 4914 switch (nexthdr) { 4915 case IPPROTO_DSTOPTS: 4916 case IPPROTO_HOPOPTS: 4917 case IPPROTO_ROUTING: { 4918 struct ipv6_opt_hdr *hp; 4919 4920 err = skb_maybe_pull_tail(skb, 4921 off + 4922 sizeof(struct ipv6_opt_hdr), 4923 MAX_IPV6_HDR_LEN); 4924 if (err < 0) 4925 goto out; 4926 4927 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 4928 nexthdr = hp->nexthdr; 4929 off += ipv6_optlen(hp); 4930 break; 4931 } 4932 case IPPROTO_AH: { 4933 struct ip_auth_hdr *hp; 4934 4935 err = skb_maybe_pull_tail(skb, 4936 off + 4937 sizeof(struct ip_auth_hdr), 4938 MAX_IPV6_HDR_LEN); 4939 if (err < 0) 4940 goto out; 4941 4942 hp = OPT_HDR(struct ip_auth_hdr, skb, off); 4943 nexthdr = hp->nexthdr; 4944 off += ipv6_authlen(hp); 4945 break; 4946 } 4947 case IPPROTO_FRAGMENT: { 4948 struct frag_hdr *hp; 4949 4950 err = skb_maybe_pull_tail(skb, 4951 off + 4952 sizeof(struct frag_hdr), 4953 MAX_IPV6_HDR_LEN); 4954 if (err < 0) 4955 goto out; 4956 4957 hp = OPT_HDR(struct frag_hdr, skb, off); 4958 4959 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 4960 fragment = true; 4961 4962 nexthdr = hp->nexthdr; 4963 off += sizeof(struct frag_hdr); 4964 break; 4965 } 4966 default: 4967 done = true; 4968 break; 4969 } 4970 } 4971 4972 err = -EPROTO; 4973 4974 if (!done || fragment) 4975 goto out; 4976 4977 csum = skb_checksum_setup_ip(skb, nexthdr, off); 4978 if (IS_ERR(csum)) 4979 return PTR_ERR(csum); 4980 4981 if (recalculate) 4982 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 4983 &ipv6_hdr(skb)->daddr, 4984 skb->len - off, nexthdr, 0); 4985 err = 0; 4986 4987 out: 4988 return err; 4989 } 4990 4991 /** 4992 * skb_checksum_setup - set up partial checksum offset 4993 * @skb: the skb to set up 4994 * @recalculate: if true the pseudo-header checksum will be recalculated 4995 */ 4996 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) 4997 { 
4998 int err; 4999 5000 switch (skb->protocol) { 5001 case htons(ETH_P_IP): 5002 err = skb_checksum_setup_ipv4(skb, recalculate); 5003 break; 5004 5005 case htons(ETH_P_IPV6): 5006 err = skb_checksum_setup_ipv6(skb, recalculate); 5007 break; 5008 5009 default: 5010 err = -EPROTO; 5011 break; 5012 } 5013 5014 return err; 5015 } 5016 EXPORT_SYMBOL(skb_checksum_setup); 5017 5018 /** 5019 * skb_checksum_maybe_trim - maybe trims the given skb 5020 * @skb: the skb to check 5021 * @transport_len: the data length beyond the network header 5022 * 5023 * Checks whether the given skb has data beyond the given transport length. 5024 * If so, returns a cloned skb trimmed to this transport length. 5025 * Otherwise returns the provided skb. Returns NULL in error cases 5026 * (e.g. transport_len exceeds skb length or out-of-memory). 5027 * 5028 * Caller needs to set the skb transport header and free any returned skb if it 5029 * differs from the provided skb. 5030 */ 5031 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 5032 unsigned int transport_len) 5033 { 5034 struct sk_buff *skb_chk; 5035 unsigned int len = skb_transport_offset(skb) + transport_len; 5036 int ret; 5037 5038 if (skb->len < len) 5039 return NULL; 5040 else if (skb->len == len) 5041 return skb; 5042 5043 skb_chk = skb_clone(skb, GFP_ATOMIC); 5044 if (!skb_chk) 5045 return NULL; 5046 5047 ret = pskb_trim_rcsum(skb_chk, len); 5048 if (ret) { 5049 kfree_skb(skb_chk); 5050 return NULL; 5051 } 5052 5053 return skb_chk; 5054 } 5055 5056 /** 5057 * skb_checksum_trimmed - validate checksum of an skb 5058 * @skb: the skb to check 5059 * @transport_len: the data length beyond the network header 5060 * @skb_chkf: checksum function to use 5061 * 5062 * Applies the given checksum function skb_chkf to the provided skb. 5063 * Returns a checked and maybe trimmed skb. Returns NULL on error. 5064 * 5065 * If the skb has data beyond the given transport length, then a 5066 * trimmed & cloned skb is checked and returned. 5067 * 5068 * Caller needs to set the skb transport header and free any returned skb if it 5069 * differs from the provided skb. 
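 *
 * Example (illustrative sketch; validate_csum stands for any helper with
 * the __sum16 (*)(struct sk_buff *) signature expected by @skb_chkf):
 *
 *	skb_chk = skb_checksum_trimmed(skb, transport_len, validate_csum);
 *	if (!skb_chk)
 *		return -EINVAL;
 *	if (skb_chk != skb)
 *		kfree_skb(skb_chk);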
5070 */ 5071 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 5072 unsigned int transport_len, 5073 __sum16(*skb_chkf)(struct sk_buff *skb)) 5074 { 5075 struct sk_buff *skb_chk; 5076 unsigned int offset = skb_transport_offset(skb); 5077 __sum16 ret; 5078 5079 skb_chk = skb_checksum_maybe_trim(skb, transport_len); 5080 if (!skb_chk) 5081 goto err; 5082 5083 if (!pskb_may_pull(skb_chk, offset)) 5084 goto err; 5085 5086 skb_pull_rcsum(skb_chk, offset); 5087 ret = skb_chkf(skb_chk); 5088 skb_push_rcsum(skb_chk, offset); 5089 5090 if (ret) 5091 goto err; 5092 5093 return skb_chk; 5094 5095 err: 5096 if (skb_chk && skb_chk != skb) 5097 kfree_skb(skb_chk); 5098 5099 return NULL; 5100 5101 } 5102 EXPORT_SYMBOL(skb_checksum_trimmed); 5103 5104 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 5105 { 5106 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 5107 skb->dev->name); 5108 } 5109 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 5110 5111 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 5112 { 5113 if (head_stolen) { 5114 skb_release_head_state(skb); 5115 kmem_cache_free(skbuff_head_cache, skb); 5116 } else { 5117 __kfree_skb(skb); 5118 } 5119 } 5120 EXPORT_SYMBOL(kfree_skb_partial); 5121 5122 /** 5123 * skb_try_coalesce - try to merge skb to prior one 5124 * @to: prior buffer 5125 * @from: buffer to add 5126 * @fragstolen: pointer to boolean 5127 * @delta_truesize: how much more was allocated than was requested 5128 */ 5129 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 5130 bool *fragstolen, int *delta_truesize) 5131 { 5132 struct skb_shared_info *to_shinfo, *from_shinfo; 5133 int i, delta, len = from->len; 5134 5135 *fragstolen = false; 5136 5137 if (skb_cloned(to)) 5138 return false; 5139 5140 if (len <= skb_tailroom(to)) { 5141 if (len) 5142 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 5143 *delta_truesize = 0; 5144 return true; 5145 } 5146 5147 to_shinfo = skb_shinfo(to); 5148 from_shinfo = skb_shinfo(from); 5149 if (to_shinfo->frag_list || from_shinfo->frag_list) 5150 return false; 5151 if (skb_zcopy(to) || skb_zcopy(from)) 5152 return false; 5153 5154 if (skb_headlen(from) != 0) { 5155 struct page *page; 5156 unsigned int offset; 5157 5158 if (to_shinfo->nr_frags + 5159 from_shinfo->nr_frags >= MAX_SKB_FRAGS) 5160 return false; 5161 5162 if (skb_head_is_locked(from)) 5163 return false; 5164 5165 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 5166 5167 page = virt_to_head_page(from->head); 5168 offset = from->data - (unsigned char *)page_address(page); 5169 5170 skb_fill_page_desc(to, to_shinfo->nr_frags, 5171 page, offset, skb_headlen(from)); 5172 *fragstolen = true; 5173 } else { 5174 if (to_shinfo->nr_frags + 5175 from_shinfo->nr_frags > MAX_SKB_FRAGS) 5176 return false; 5177 5178 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); 5179 } 5180 5181 WARN_ON_ONCE(delta < len); 5182 5183 memcpy(to_shinfo->frags + to_shinfo->nr_frags, 5184 from_shinfo->frags, 5185 from_shinfo->nr_frags * sizeof(skb_frag_t)); 5186 to_shinfo->nr_frags += from_shinfo->nr_frags; 5187 5188 if (!skb_cloned(from)) 5189 from_shinfo->nr_frags = 0; 5190 5191 /* if the skb is not cloned this does nothing 5192 * since we set nr_frags to 0. 
5193 */ 5194 for (i = 0; i < from_shinfo->nr_frags; i++) 5195 __skb_frag_ref(&from_shinfo->frags[i]); 5196 5197 to->truesize += delta; 5198 to->len += len; 5199 to->data_len += len; 5200 5201 *delta_truesize = delta; 5202 return true; 5203 } 5204 EXPORT_SYMBOL(skb_try_coalesce); 5205 5206 /** 5207 * skb_scrub_packet - scrub an skb 5208 * 5209 * @skb: buffer to clean 5210 * @xnet: packet is crossing netns 5211 * 5212 * skb_scrub_packet can be used after encapsulating or decapsulting a packet 5213 * into/from a tunnel. Some information have to be cleared during these 5214 * operations. 5215 * skb_scrub_packet can also be used to clean a skb before injecting it in 5216 * another namespace (@xnet == true). We have to clear all information in the 5217 * skb that could impact namespace isolation. 5218 */ 5219 void skb_scrub_packet(struct sk_buff *skb, bool xnet) 5220 { 5221 skb->pkt_type = PACKET_HOST; 5222 skb->skb_iif = 0; 5223 skb->ignore_df = 0; 5224 skb_dst_drop(skb); 5225 skb_ext_reset(skb); 5226 nf_reset_ct(skb); 5227 nf_reset_trace(skb); 5228 5229 #ifdef CONFIG_NET_SWITCHDEV 5230 skb->offload_fwd_mark = 0; 5231 skb->offload_l3_fwd_mark = 0; 5232 #endif 5233 5234 if (!xnet) 5235 return; 5236 5237 ipvs_reset(skb); 5238 skb->mark = 0; 5239 skb->tstamp = 0; 5240 } 5241 EXPORT_SYMBOL_GPL(skb_scrub_packet); 5242 5243 /** 5244 * skb_gso_transport_seglen - Return length of individual segments of a gso packet 5245 * 5246 * @skb: GSO skb 5247 * 5248 * skb_gso_transport_seglen is used to determine the real size of the 5249 * individual segments, including Layer4 headers (TCP/UDP). 5250 * 5251 * The MAC/L2 or network (IP, IPv6) headers are not accounted for. 5252 */ 5253 static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) 5254 { 5255 const struct skb_shared_info *shinfo = skb_shinfo(skb); 5256 unsigned int thlen = 0; 5257 5258 if (skb->encapsulation) { 5259 thlen = skb_inner_transport_header(skb) - 5260 skb_transport_header(skb); 5261 5262 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 5263 thlen += inner_tcp_hdrlen(skb); 5264 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 5265 thlen = tcp_hdrlen(skb); 5266 } else if (unlikely(skb_is_gso_sctp(skb))) { 5267 thlen = sizeof(struct sctphdr); 5268 } else if (shinfo->gso_type & SKB_GSO_UDP_L4) { 5269 thlen = sizeof(struct udphdr); 5270 } 5271 /* UFO sets gso_size to the size of the fragmentation 5272 * payload, i.e. the size of the L4 (UDP) header is already 5273 * accounted for. 5274 */ 5275 return thlen + shinfo->gso_size; 5276 } 5277 5278 /** 5279 * skb_gso_network_seglen - Return length of individual segments of a gso packet 5280 * 5281 * @skb: GSO skb 5282 * 5283 * skb_gso_network_seglen is used to determine the real size of the 5284 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). 5285 * 5286 * The MAC/L2 header is not accounted for. 5287 */ 5288 static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) 5289 { 5290 unsigned int hdr_len = skb_transport_header(skb) - 5291 skb_network_header(skb); 5292 5293 return hdr_len + skb_gso_transport_seglen(skb); 5294 } 5295 5296 /** 5297 * skb_gso_mac_seglen - Return length of individual segments of a gso packet 5298 * 5299 * @skb: GSO skb 5300 * 5301 * skb_gso_mac_seglen is used to determine the real size of the 5302 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 5303 * headers (TCP/UDP). 
5304 */ 5305 static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) 5306 { 5307 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 5308 5309 return hdr_len + skb_gso_transport_seglen(skb); 5310 } 5311 5312 /** 5313 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS 5314 * 5315 * There are a couple of instances where we have a GSO skb, and we 5316 * want to determine what size it would be after it is segmented. 5317 * 5318 * We might want to check: 5319 * - L3+L4+payload size (e.g. IP forwarding) 5320 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver) 5321 * 5322 * This is a helper to do that correctly considering GSO_BY_FRAGS. 5323 * 5324 * @skb: GSO skb 5325 * 5326 * @seg_len: The segmented length (from skb_gso_*_seglen). In the 5327 * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS]. 5328 * 5329 * @max_len: The maximum permissible length. 5330 * 5331 * Returns true if the segmented length <= max length. 5332 */ 5333 static inline bool skb_gso_size_check(const struct sk_buff *skb, 5334 unsigned int seg_len, 5335 unsigned int max_len) { 5336 const struct skb_shared_info *shinfo = skb_shinfo(skb); 5337 const struct sk_buff *iter; 5338 5339 if (shinfo->gso_size != GSO_BY_FRAGS) 5340 return seg_len <= max_len; 5341 5342 /* Undo this so we can re-use header sizes */ 5343 seg_len -= GSO_BY_FRAGS; 5344 5345 skb_walk_frags(skb, iter) { 5346 if (seg_len + skb_headlen(iter) > max_len) 5347 return false; 5348 } 5349 5350 return true; 5351 } 5352 5353 /** 5354 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU? 5355 * 5356 * @skb: GSO skb 5357 * @mtu: MTU to validate against 5358 * 5359 * skb_gso_validate_network_len validates if a given skb will fit a 5360 * wanted MTU once split. It considers L3 headers, L4 headers, and the 5361 * payload. 5362 */ 5363 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) 5364 { 5365 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); 5366 } 5367 EXPORT_SYMBOL_GPL(skb_gso_validate_network_len); 5368 5369 /** 5370 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? 5371 * 5372 * @skb: GSO skb 5373 * @len: length to validate against 5374 * 5375 * skb_gso_validate_mac_len validates if a given skb will fit a wanted 5376 * length once split, including L2, L3 and L4 headers and the payload. 
5377 */ 5378 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len) 5379 { 5380 return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len); 5381 } 5382 EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len); 5383 5384 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 5385 { 5386 int mac_len, meta_len; 5387 void *meta; 5388 5389 if (skb_cow(skb, skb_headroom(skb)) < 0) { 5390 kfree_skb(skb); 5391 return NULL; 5392 } 5393 5394 mac_len = skb->data - skb_mac_header(skb); 5395 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { 5396 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), 5397 mac_len - VLAN_HLEN - ETH_TLEN); 5398 } 5399 5400 meta_len = skb_metadata_len(skb); 5401 if (meta_len) { 5402 meta = skb_metadata_end(skb) - meta_len; 5403 memmove(meta + VLAN_HLEN, meta, meta_len); 5404 } 5405 5406 skb->mac_header += VLAN_HLEN; 5407 return skb; 5408 } 5409 5410 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) 5411 { 5412 struct vlan_hdr *vhdr; 5413 u16 vlan_tci; 5414 5415 if (unlikely(skb_vlan_tag_present(skb))) { 5416 /* vlan_tci is already set-up so leave this for another time */ 5417 return skb; 5418 } 5419 5420 skb = skb_share_check(skb, GFP_ATOMIC); 5421 if (unlikely(!skb)) 5422 goto err_free; 5423 /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */ 5424 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) 5425 goto err_free; 5426 5427 vhdr = (struct vlan_hdr *)skb->data; 5428 vlan_tci = ntohs(vhdr->h_vlan_TCI); 5429 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); 5430 5431 skb_pull_rcsum(skb, VLAN_HLEN); 5432 vlan_set_encap_proto(skb, vhdr); 5433 5434 skb = skb_reorder_vlan_header(skb); 5435 if (unlikely(!skb)) 5436 goto err_free; 5437 5438 skb_reset_network_header(skb); 5439 skb_reset_transport_header(skb); 5440 skb_reset_mac_len(skb); 5441 5442 return skb; 5443 5444 err_free: 5445 kfree_skb(skb); 5446 return NULL; 5447 } 5448 EXPORT_SYMBOL(skb_vlan_untag); 5449 5450 int skb_ensure_writable(struct sk_buff *skb, int write_len) 5451 { 5452 if (!pskb_may_pull(skb, write_len)) 5453 return -ENOMEM; 5454 5455 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) 5456 return 0; 5457 5458 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 5459 } 5460 EXPORT_SYMBOL(skb_ensure_writable); 5461 5462 /* remove VLAN header from packet and update csum accordingly. 5463 * expects a non skb_vlan_tag_present skb with a vlan tag payload 5464 */ 5465 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) 5466 { 5467 struct vlan_hdr *vhdr; 5468 int offset = skb->data - skb_mac_header(skb); 5469 int err; 5470 5471 if (WARN_ONCE(offset, 5472 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", 5473 offset)) { 5474 return -EINVAL; 5475 } 5476 5477 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); 5478 if (unlikely(err)) 5479 return err; 5480 5481 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 5482 5483 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); 5484 *vlan_tci = ntohs(vhdr->h_vlan_TCI); 5485 5486 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); 5487 __skb_pull(skb, VLAN_HLEN); 5488 5489 vlan_set_encap_proto(skb, vhdr); 5490 skb->mac_header += VLAN_HLEN; 5491 5492 if (skb_network_offset(skb) < ETH_HLEN) 5493 skb_set_network_header(skb, ETH_HLEN); 5494 5495 skb_reset_mac_len(skb); 5496 5497 return err; 5498 } 5499 EXPORT_SYMBOL(__skb_vlan_pop); 5500 5501 /* Pop a vlan tag either from hwaccel or from payload. 5502 * Expects skb->data at mac header. 
5503 */ 5504 int skb_vlan_pop(struct sk_buff *skb) 5505 { 5506 u16 vlan_tci; 5507 __be16 vlan_proto; 5508 int err; 5509 5510 if (likely(skb_vlan_tag_present(skb))) { 5511 __vlan_hwaccel_clear_tag(skb); 5512 } else { 5513 if (unlikely(!eth_type_vlan(skb->protocol))) 5514 return 0; 5515 5516 err = __skb_vlan_pop(skb, &vlan_tci); 5517 if (err) 5518 return err; 5519 } 5520 /* move next vlan tag to hw accel tag */ 5521 if (likely(!eth_type_vlan(skb->protocol))) 5522 return 0; 5523 5524 vlan_proto = skb->protocol; 5525 err = __skb_vlan_pop(skb, &vlan_tci); 5526 if (unlikely(err)) 5527 return err; 5528 5529 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 5530 return 0; 5531 } 5532 EXPORT_SYMBOL(skb_vlan_pop); 5533 5534 /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present). 5535 * Expects skb->data at mac header. 5536 */ 5537 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) 5538 { 5539 if (skb_vlan_tag_present(skb)) { 5540 int offset = skb->data - skb_mac_header(skb); 5541 int err; 5542 5543 if (WARN_ONCE(offset, 5544 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", 5545 offset)) { 5546 return -EINVAL; 5547 } 5548 5549 err = __vlan_insert_tag(skb, skb->vlan_proto, 5550 skb_vlan_tag_get(skb)); 5551 if (err) 5552 return err; 5553 5554 skb->protocol = skb->vlan_proto; 5555 skb->mac_len += VLAN_HLEN; 5556 5557 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 5558 } 5559 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 5560 return 0; 5561 } 5562 EXPORT_SYMBOL(skb_vlan_push); 5563 5564 /* Update the ethertype of hdr and the skb csum value if required. */ 5565 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, 5566 __be16 ethertype) 5567 { 5568 if (skb->ip_summed == CHECKSUM_COMPLETE) { 5569 __be16 diff[] = { ~hdr->h_proto, ethertype }; 5570 5571 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 5572 } 5573 5574 hdr->h_proto = ethertype; 5575 } 5576 5577 /** 5578 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of 5579 * the packet 5580 * 5581 * @skb: buffer 5582 * @mpls_lse: MPLS label stack entry to push 5583 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) 5584 * @mac_len: length of the MAC header 5585 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is 5586 * ethernet 5587 * 5588 * Expects skb->data at mac header. 5589 * 5590 * Returns 0 on success, -errno otherwise. 5591 */ 5592 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, 5593 int mac_len, bool ethernet) 5594 { 5595 struct mpls_shim_hdr *lse; 5596 int err; 5597 5598 if (unlikely(!eth_p_mpls(mpls_proto))) 5599 return -EINVAL; 5600 5601 /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. 
*/ 5602 if (skb->encapsulation) 5603 return -EINVAL; 5604 5605 err = skb_cow_head(skb, MPLS_HLEN); 5606 if (unlikely(err)) 5607 return err; 5608 5609 if (!skb->inner_protocol) { 5610 skb_set_inner_network_header(skb, skb_network_offset(skb)); 5611 skb_set_inner_protocol(skb, skb->protocol); 5612 } 5613 5614 skb_push(skb, MPLS_HLEN); 5615 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), 5616 mac_len); 5617 skb_reset_mac_header(skb); 5618 skb_set_network_header(skb, mac_len); 5619 skb_reset_mac_len(skb); 5620 5621 lse = mpls_hdr(skb); 5622 lse->label_stack_entry = mpls_lse; 5623 skb_postpush_rcsum(skb, lse, MPLS_HLEN); 5624 5625 if (ethernet && mac_len >= ETH_HLEN) 5626 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); 5627 skb->protocol = mpls_proto; 5628 5629 return 0; 5630 } 5631 EXPORT_SYMBOL_GPL(skb_mpls_push); 5632 5633 /** 5634 * skb_mpls_pop() - pop the outermost MPLS header 5635 * 5636 * @skb: buffer 5637 * @next_proto: ethertype of header after popped MPLS header 5638 * @mac_len: length of the MAC header 5639 * @ethernet: flag to indicate if the packet is ethernet 5640 * 5641 * Expects skb->data at mac header. 5642 * 5643 * Returns 0 on success, -errno otherwise. 5644 */ 5645 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, 5646 bool ethernet) 5647 { 5648 int err; 5649 5650 if (unlikely(!eth_p_mpls(skb->protocol))) 5651 return 0; 5652 5653 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); 5654 if (unlikely(err)) 5655 return err; 5656 5657 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); 5658 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), 5659 mac_len); 5660 5661 __skb_pull(skb, MPLS_HLEN); 5662 skb_reset_mac_header(skb); 5663 skb_set_network_header(skb, mac_len); 5664 5665 if (ethernet && mac_len >= ETH_HLEN) { 5666 struct ethhdr *hdr; 5667 5668 /* use mpls_hdr() to get ethertype to account for VLANs. */ 5669 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); 5670 skb_mod_eth_type(skb, hdr, next_proto); 5671 } 5672 skb->protocol = next_proto; 5673 5674 return 0; 5675 } 5676 EXPORT_SYMBOL_GPL(skb_mpls_pop); 5677 5678 /** 5679 * skb_mpls_update_lse() - modify outermost MPLS header and update csum 5680 * 5681 * @skb: buffer 5682 * @mpls_lse: new MPLS label stack entry to update to 5683 * 5684 * Expects skb->data at mac header. 5685 * 5686 * Returns 0 on success, -errno otherwise. 5687 */ 5688 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) 5689 { 5690 int err; 5691 5692 if (unlikely(!eth_p_mpls(skb->protocol))) 5693 return -EINVAL; 5694 5695 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); 5696 if (unlikely(err)) 5697 return err; 5698 5699 if (skb->ip_summed == CHECKSUM_COMPLETE) { 5700 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; 5701 5702 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 5703 } 5704 5705 mpls_hdr(skb)->label_stack_entry = mpls_lse; 5706 5707 return 0; 5708 } 5709 EXPORT_SYMBOL_GPL(skb_mpls_update_lse); 5710 5711 /** 5712 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header 5713 * 5714 * @skb: buffer 5715 * 5716 * Expects skb->data at mac header. 5717 * 5718 * Returns 0 on success, -errno otherwise. 
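 *
 * Example (illustrative; the drop label is hypothetical):
 *
 *	if (skb_mpls_dec_ttl(skb))
 *		goto drop;
 *
 * where a non-zero return means the TTL expired or the packet is not MPLS.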
5719 */ 5720 int skb_mpls_dec_ttl(struct sk_buff *skb) 5721 { 5722 u32 lse; 5723 u8 ttl; 5724 5725 if (unlikely(!eth_p_mpls(skb->protocol))) 5726 return -EINVAL; 5727 5728 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); 5729 ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; 5730 if (!--ttl) 5731 return -EINVAL; 5732 5733 lse &= ~MPLS_LS_TTL_MASK; 5734 lse |= ttl << MPLS_LS_TTL_SHIFT; 5735 5736 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); 5737 } 5738 EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl); 5739 5740 /** 5741 * alloc_skb_with_frags - allocate skb with page frags 5742 * 5743 * @header_len: size of linear part 5744 * @data_len: needed length in frags 5745 * @max_page_order: max page order desired. 5746 * @errcode: pointer to error code if any 5747 * @gfp_mask: allocation mask 5748 * 5749 * This can be used to allocate a paged skb, given a maximal order for frags. 5750 */ 5751 struct sk_buff *alloc_skb_with_frags(unsigned long header_len, 5752 unsigned long data_len, 5753 int max_page_order, 5754 int *errcode, 5755 gfp_t gfp_mask) 5756 { 5757 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 5758 unsigned long chunk; 5759 struct sk_buff *skb; 5760 struct page *page; 5761 int i; 5762 5763 *errcode = -EMSGSIZE; 5764 /* Note this test could be relaxed, if we succeed to allocate 5765 * high order pages... 5766 */ 5767 if (npages > MAX_SKB_FRAGS) 5768 return NULL; 5769 5770 *errcode = -ENOBUFS; 5771 skb = alloc_skb(header_len, gfp_mask); 5772 if (!skb) 5773 return NULL; 5774 5775 skb->truesize += npages << PAGE_SHIFT; 5776 5777 for (i = 0; npages > 0; i++) { 5778 int order = max_page_order; 5779 5780 while (order) { 5781 if (npages >= 1 << order) { 5782 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | 5783 __GFP_COMP | 5784 __GFP_NOWARN, 5785 order); 5786 if (page) 5787 goto fill_page; 5788 /* Do not retry other high order allocations */ 5789 order = 1; 5790 max_page_order = 0; 5791 } 5792 order--; 5793 } 5794 page = alloc_page(gfp_mask); 5795 if (!page) 5796 goto failure; 5797 fill_page: 5798 chunk = min_t(unsigned long, data_len, 5799 PAGE_SIZE << order); 5800 skb_fill_page_desc(skb, i, page, 0, chunk); 5801 data_len -= chunk; 5802 npages -= 1 << order; 5803 } 5804 return skb; 5805 5806 failure: 5807 kfree_skb(skb); 5808 return NULL; 5809 } 5810 EXPORT_SYMBOL(alloc_skb_with_frags); 5811 5812 /* carve out the first off bytes from skb when off < headlen */ 5813 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, 5814 const int headlen, gfp_t gfp_mask) 5815 { 5816 int i; 5817 int size = skb_end_offset(skb); 5818 int new_hlen = headlen - off; 5819 u8 *data; 5820 5821 size = SKB_DATA_ALIGN(size); 5822 5823 if (skb_pfmemalloc(skb)) 5824 gfp_mask |= __GFP_MEMALLOC; 5825 data = kmalloc_reserve(size + 5826 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 5827 gfp_mask, NUMA_NO_NODE, NULL); 5828 if (!data) 5829 return -ENOMEM; 5830 5831 size = SKB_WITH_OVERHEAD(ksize(data)); 5832 5833 /* Copy real data, and all frags */ 5834 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); 5835 skb->len -= off; 5836 5837 memcpy((struct skb_shared_info *)(data + size), 5838 skb_shinfo(skb), 5839 offsetof(struct skb_shared_info, 5840 frags[skb_shinfo(skb)->nr_frags])); 5841 if (skb_cloned(skb)) { 5842 /* drop the old head gracefully */ 5843 if (skb_orphan_frags(skb, gfp_mask)) { 5844 kfree(data); 5845 return -ENOMEM; 5846 } 5847 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 5848 skb_frag_ref(skb, i); 5849 if (skb_has_frag_list(skb)) 5850 skb_clone_fraglist(skb); 5851 
skb_release_data(skb); 5852 } else { 5853 /* we can reuse existing recount- all we did was 5854 * relocate values 5855 */ 5856 skb_free_head(skb); 5857 } 5858 5859 skb->head = data; 5860 skb->data = data; 5861 skb->head_frag = 0; 5862 #ifdef NET_SKBUFF_DATA_USES_OFFSET 5863 skb->end = size; 5864 #else 5865 skb->end = skb->head + size; 5866 #endif 5867 skb_set_tail_pointer(skb, skb_headlen(skb)); 5868 skb_headers_offset_update(skb, 0); 5869 skb->cloned = 0; 5870 skb->hdr_len = 0; 5871 skb->nohdr = 0; 5872 atomic_set(&skb_shinfo(skb)->dataref, 1); 5873 5874 return 0; 5875 } 5876 5877 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp); 5878 5879 /* carve out the first eat bytes from skb's frag_list. May recurse into 5880 * pskb_carve() 5881 */ 5882 static int pskb_carve_frag_list(struct sk_buff *skb, 5883 struct skb_shared_info *shinfo, int eat, 5884 gfp_t gfp_mask) 5885 { 5886 struct sk_buff *list = shinfo->frag_list; 5887 struct sk_buff *clone = NULL; 5888 struct sk_buff *insp = NULL; 5889 5890 do { 5891 if (!list) { 5892 pr_err("Not enough bytes to eat. Want %d\n", eat); 5893 return -EFAULT; 5894 } 5895 if (list->len <= eat) { 5896 /* Eaten as whole. */ 5897 eat -= list->len; 5898 list = list->next; 5899 insp = list; 5900 } else { 5901 /* Eaten partially. */ 5902 if (skb_shared(list)) { 5903 clone = skb_clone(list, gfp_mask); 5904 if (!clone) 5905 return -ENOMEM; 5906 insp = list->next; 5907 list = clone; 5908 } else { 5909 /* This may be pulled without problems. */ 5910 insp = list; 5911 } 5912 if (pskb_carve(list, eat, gfp_mask) < 0) { 5913 kfree_skb(clone); 5914 return -ENOMEM; 5915 } 5916 break; 5917 } 5918 } while (eat); 5919 5920 /* Free pulled out fragments. */ 5921 while ((list = shinfo->frag_list) != insp) { 5922 shinfo->frag_list = list->next; 5923 kfree_skb(list); 5924 } 5925 /* And insert new clone at head. */ 5926 if (clone) { 5927 clone->next = list; 5928 shinfo->frag_list = clone; 5929 } 5930 return 0; 5931 } 5932 5933 /* carve off first len bytes from skb. Split line (off) is in the 5934 * non-linear part of skb 5935 */ 5936 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, 5937 int pos, gfp_t gfp_mask) 5938 { 5939 int i, k = 0; 5940 int size = skb_end_offset(skb); 5941 u8 *data; 5942 const int nfrags = skb_shinfo(skb)->nr_frags; 5943 struct skb_shared_info *shinfo; 5944 5945 size = SKB_DATA_ALIGN(size); 5946 5947 if (skb_pfmemalloc(skb)) 5948 gfp_mask |= __GFP_MEMALLOC; 5949 data = kmalloc_reserve(size + 5950 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 5951 gfp_mask, NUMA_NO_NODE, NULL); 5952 if (!data) 5953 return -ENOMEM; 5954 5955 size = SKB_WITH_OVERHEAD(ksize(data)); 5956 5957 memcpy((struct skb_shared_info *)(data + size), 5958 skb_shinfo(skb), offsetof(struct skb_shared_info, 5959 frags[skb_shinfo(skb)->nr_frags])); 5960 if (skb_orphan_frags(skb, gfp_mask)) { 5961 kfree(data); 5962 return -ENOMEM; 5963 } 5964 shinfo = (struct skb_shared_info *)(data + size); 5965 for (i = 0; i < nfrags; i++) { 5966 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); 5967 5968 if (pos + fsize > off) { 5969 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; 5970 5971 if (pos < off) { 5972 /* Split frag. 5973 * We have two variants in this case: 5974 * 1. Move all the frag to the second 5975 * part, if it is possible. F.e. 5976 * this approach is mandatory for TUX, 5977 * where splitting is expensive. 5978 * 2. Split is accurately. We make this. 
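			 * (variant 2 is what the code below does: the first
			 * kept frag is trimmed so that exactly @off bytes are
			 * carved away).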
5979 */ 5980 skb_frag_off_add(&shinfo->frags[0], off - pos); 5981 skb_frag_size_sub(&shinfo->frags[0], off - pos); 5982 } 5983 skb_frag_ref(skb, i); 5984 k++; 5985 } 5986 pos += fsize; 5987 } 5988 shinfo->nr_frags = k; 5989 if (skb_has_frag_list(skb)) 5990 skb_clone_fraglist(skb); 5991 5992 /* split line is in frag list */ 5993 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { 5994 /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ 5995 if (skb_has_frag_list(skb)) 5996 kfree_skb_list(skb_shinfo(skb)->frag_list); 5997 kfree(data); 5998 return -ENOMEM; 5999 } 6000 skb_release_data(skb); 6001 6002 skb->head = data; 6003 skb->head_frag = 0; 6004 skb->data = data; 6005 #ifdef NET_SKBUFF_DATA_USES_OFFSET 6006 skb->end = size; 6007 #else 6008 skb->end = skb->head + size; 6009 #endif 6010 skb_reset_tail_pointer(skb); 6011 skb_headers_offset_update(skb, 0); 6012 skb->cloned = 0; 6013 skb->hdr_len = 0; 6014 skb->nohdr = 0; 6015 skb->len -= off; 6016 skb->data_len = skb->len; 6017 atomic_set(&skb_shinfo(skb)->dataref, 1); 6018 return 0; 6019 } 6020 6021 /* remove len bytes from the beginning of the skb */ 6022 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) 6023 { 6024 int headlen = skb_headlen(skb); 6025 6026 if (len < headlen) 6027 return pskb_carve_inside_header(skb, len, headlen, gfp); 6028 else 6029 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); 6030 } 6031 6032 /* Extract to_copy bytes starting at off from skb, and return this in 6033 * a new skb 6034 */ 6035 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, 6036 int to_copy, gfp_t gfp) 6037 { 6038 struct sk_buff *clone = skb_clone(skb, gfp); 6039 6040 if (!clone) 6041 return NULL; 6042 6043 if (pskb_carve(clone, off, gfp) < 0 || 6044 pskb_trim(clone, to_copy)) { 6045 kfree_skb(clone); 6046 return NULL; 6047 } 6048 return clone; 6049 } 6050 EXPORT_SYMBOL(pskb_extract); 6051 6052 /** 6053 * skb_condense - try to get rid of fragments/frag_list if possible 6054 * @skb: buffer 6055 * 6056 * Can be used to save memory before skb is added to a busy queue. 6057 * If packet has bytes in frags and enough tail room in skb->head, 6058 * pull all of them, so that we can free the frags right now and adjust 6059 * truesize. 6060 * Notes: 6061 * We do not reallocate skb->head thus can not fail. 6062 * Caller must re-evaluate skb->truesize if needed. 6063 */ 6064 void skb_condense(struct sk_buff *skb) 6065 { 6066 if (skb->data_len) { 6067 if (skb->data_len > skb->end - skb->tail || 6068 skb_cloned(skb)) 6069 return; 6070 6071 /* Nice, we can free page frag(s) right now */ 6072 __pskb_pull_tail(skb, skb->data_len); 6073 } 6074 /* At this point, skb->truesize might be over estimated, 6075 * because skb had a fragment, and fragments do not tell 6076 * their truesize. 6077 * When we pulled its content into skb->head, fragment 6078 * was freed, but __pskb_pull_tail() could not possibly 6079 * adjust skb->truesize, not knowing the frag truesize. 6080 */ 6081 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 6082 } 6083 6084 #ifdef CONFIG_SKB_EXTENSIONS 6085 static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id) 6086 { 6087 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE); 6088 } 6089 6090 /** 6091 * __skb_ext_alloc - allocate a new skb extensions storage 6092 * 6093 * @flags: See kmalloc(). 6094 * 6095 * Returns the newly allocated pointer. The pointer can later attached to a 6096 * skb via __skb_ext_set(). 
6097 * Note: caller must handle the skb_ext as an opaque data. 6098 */ 6099 struct skb_ext *__skb_ext_alloc(gfp_t flags) 6100 { 6101 struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags); 6102 6103 if (new) { 6104 memset(new->offset, 0, sizeof(new->offset)); 6105 refcount_set(&new->refcnt, 1); 6106 } 6107 6108 return new; 6109 } 6110 6111 static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old, 6112 unsigned int old_active) 6113 { 6114 struct skb_ext *new; 6115 6116 if (refcount_read(&old->refcnt) == 1) 6117 return old; 6118 6119 new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC); 6120 if (!new) 6121 return NULL; 6122 6123 memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE); 6124 refcount_set(&new->refcnt, 1); 6125 6126 #ifdef CONFIG_XFRM 6127 if (old_active & (1 << SKB_EXT_SEC_PATH)) { 6128 struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH); 6129 unsigned int i; 6130 6131 for (i = 0; i < sp->len; i++) 6132 xfrm_state_hold(sp->xvec[i]); 6133 } 6134 #endif 6135 __skb_ext_put(old); 6136 return new; 6137 } 6138 6139 /** 6140 * __skb_ext_set - attach the specified extension storage to this skb 6141 * @skb: buffer 6142 * @id: extension id 6143 * @ext: extension storage previously allocated via __skb_ext_alloc() 6144 * 6145 * Existing extensions, if any, are cleared. 6146 * 6147 * Returns the pointer to the extension. 6148 */ 6149 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, 6150 struct skb_ext *ext) 6151 { 6152 unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext); 6153 6154 skb_ext_put(skb); 6155 newlen = newoff + skb_ext_type_len[id]; 6156 ext->chunks = newlen; 6157 ext->offset[id] = newoff; 6158 skb->extensions = ext; 6159 skb->active_extensions = 1 << id; 6160 return skb_ext_get_ptr(ext, id); 6161 } 6162 6163 /** 6164 * skb_ext_add - allocate space for given extension, COW if needed 6165 * @skb: buffer 6166 * @id: extension to allocate space for 6167 * 6168 * Allocates enough space for the given extension. 6169 * If the extension is already present, a pointer to that extension 6170 * is returned. 6171 * 6172 * If the skb was cloned, COW applies and the returned memory can be 6173 * modified without changing the extension space of clones buffers. 6174 * 6175 * Returns pointer to the extension or NULL on allocation failure. 
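 *
 * Example (illustrative; TC_SKB_EXT requires CONFIG_NET_TC_SKB_EXT):
 *
 *	struct tc_skb_ext *tc_ext = skb_ext_add(skb, TC_SKB_EXT);
 *
 *	if (!tc_ext)
 *		return -ENOMEM;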
6176 */ 6177 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) 6178 { 6179 struct skb_ext *new, *old = NULL; 6180 unsigned int newlen, newoff; 6181 6182 if (skb->active_extensions) { 6183 old = skb->extensions; 6184 6185 new = skb_ext_maybe_cow(old, skb->active_extensions); 6186 if (!new) 6187 return NULL; 6188 6189 if (__skb_ext_exist(new, id)) 6190 goto set_active; 6191 6192 newoff = new->chunks; 6193 } else { 6194 newoff = SKB_EXT_CHUNKSIZEOF(*new); 6195 6196 new = __skb_ext_alloc(GFP_ATOMIC); 6197 if (!new) 6198 return NULL; 6199 } 6200 6201 newlen = newoff + skb_ext_type_len[id]; 6202 new->chunks = newlen; 6203 new->offset[id] = newoff; 6204 set_active: 6205 skb->extensions = new; 6206 skb->active_extensions |= 1 << id; 6207 return skb_ext_get_ptr(new, id); 6208 } 6209 EXPORT_SYMBOL(skb_ext_add); 6210 6211 #ifdef CONFIG_XFRM 6212 static void skb_ext_put_sp(struct sec_path *sp) 6213 { 6214 unsigned int i; 6215 6216 for (i = 0; i < sp->len; i++) 6217 xfrm_state_put(sp->xvec[i]); 6218 } 6219 #endif 6220 6221 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) 6222 { 6223 struct skb_ext *ext = skb->extensions; 6224 6225 skb->active_extensions &= ~(1 << id); 6226 if (skb->active_extensions == 0) { 6227 skb->extensions = NULL; 6228 __skb_ext_put(ext); 6229 #ifdef CONFIG_XFRM 6230 } else if (id == SKB_EXT_SEC_PATH && 6231 refcount_read(&ext->refcnt) == 1) { 6232 struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH); 6233 6234 skb_ext_put_sp(sp); 6235 sp->len = 0; 6236 #endif 6237 } 6238 } 6239 EXPORT_SYMBOL(__skb_ext_del); 6240 6241 void __skb_ext_put(struct skb_ext *ext) 6242 { 6243 /* If this is last clone, nothing can increment 6244 * it after check passes. Avoids one atomic op. 6245 */ 6246 if (refcount_read(&ext->refcnt) == 1) 6247 goto free_now; 6248 6249 if (!refcount_dec_and_test(&ext->refcnt)) 6250 return; 6251 free_now: 6252 #ifdef CONFIG_XFRM 6253 if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH)) 6254 skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH)); 6255 #endif 6256 6257 kmem_cache_free(skbuff_ext_cache, ext); 6258 } 6259 EXPORT_SYMBOL(__skb_ext_put); 6260 #endif /* CONFIG_SKB_EXTENSIONS */ 6261