// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>

#include "datagram.h"

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}
/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
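/* Illustrative usage sketch (editorial addition, not upstream code): a
 * typical caller reserves headroom with skb_reserve() right after the
 * allocation and only then appends data with skb_put(). The names "hlen",
 * "dlen" and "payload" below are hypothetical.
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);			// leave room for headers
 *	skb_put_data(skb, payload, dlen);	// append payload to linear area
 */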
/* Caller must provide SKB that is memset cleared */
static struct sk_buff *__build_skb_around(struct sk_buff *skb,
					  void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	unsigned int size = frag_size ? : ksize(data);

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	return skb;
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc()
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contains data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __build_skb_around(skb, data, frag_size);
}

/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head and skb->pfmemalloc
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc()
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
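/* Illustrative driver RX sketch (editorial addition, hedged): the buffer is
 * sized as the kerneldoc above describes, the NIC writes the frame into it,
 * and build_skb() wraps it afterwards. "rx_headroom" and "frame_len" are
 * hypothetical names.
 *
 *	unsigned int truesize = SKB_DATA_ALIGN(rx_headroom + frame_len) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *buf = napi_alloc_frag(truesize);
 *	...				// hardware DMAs frame to buf + rx_headroom
 *	skb = build_skb(buf, truesize);
 *	if (skb) {
 *		skb_reserve(skb, rx_headroom);
 *		skb_put(skb, frame_len);
 *	}
 */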
/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provide by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	skb = __build_skb_around(skb, data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

#define NAPI_SKB_CACHE_SIZE	64

struct napi_alloc_cache {
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
}

void *napi_alloc_frag(unsigned int fragsz)
{
	fragsz = SKB_DATA_ALIGN(fragsz);

	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(napi_alloc_frag);

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	struct page_frag_cache *nc;
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);
	if (in_irq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);
	} else {
		local_bh_disable();
		data = __napi_alloc_frag(fragsz, GFP_ATOMIC);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(netdev_alloc_frag);

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_irq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
	} else {
		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	/* use OR instead of assignment to avoid clearing of bits in mask */
	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
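/* Illustrative usage sketch (editorial addition, hedged): drivers usually go
 * through the netdev_alloc_skb() wrapper, which supplies GFP_ATOMIC, and then
 * reserve only their own extra headroom on top of the built-in NET_SKB_PAD
 * described above. "ndev" and "pkt_len" are hypothetical.
 *
 *	skb = netdev_alloc_skb(ndev, pkt_len + NET_IP_ALIGN);
 *	if (!skb)
 *		return NULL;
 *	skb_reserve(skb, NET_IP_ALIGN);	// align the IP header in the frame
 *	skb_put(skb, pkt_len);
 */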
/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation.  By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	/* use OR instead of assignment to avoid clearing of bits in mask */
	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag)
		skb_free_frag(head);
	else
		kfree(head);
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_zcopy_clear(skb, true);
	skb_free_head(skb);
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);

/* Dump skb information and contents.
 *
 * Must only be called from net_ratelimit()-ed paths.
 *
 * Dumps up to can_dump_full whole packets if full_pkt, headers otherwise.
 */
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
	static atomic_t can_dump_full = ATOMIC_INIT(5);
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct net_device *dev = skb->dev;
	struct sock *sk = skb->sk;
	struct sk_buff *list_skb;
	bool has_mac, has_trans;
	int headroom, tailroom;
	int i, len, seg_len;

	if (full_pkt)
		full_pkt = atomic_dec_if_positive(&can_dump_full) >= 0;

	if (full_pkt)
		len = skb->len;
	else
		len = min_t(int, skb->len, MAX_HEADER + 128);

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	has_mac = skb_mac_header_was_set(skb);
	has_trans = skb_transport_header_was_set(skb);

	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
	       "mac=(%d,%d) net=(%d,%d) trans=%d\n"
	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
	       "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
	       level, skb->len, headroom, skb_headlen(skb), tailroom,
	       has_mac ? skb->mac_header : -1,
	       has_mac ? skb_mac_header_len(skb) : -1,
	       skb->network_header,
	       has_trans ? skb_network_header_len(skb) : -1,
	       has_trans ? skb->transport_header : -1,
	       sh->tx_flags, sh->nr_frags,
	       sh->gso_size, sh->gso_type, sh->gso_segs,
	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
	       skb->csum_valid, skb->csum_level,
	       skb->hash, skb->sw_hash, skb->l4_hash,
	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);

	if (dev)
		printk("%sdev name=%s feat=0x%pNF\n",
		       level, dev->name, &dev->features);
	if (sk)
		printk("%ssk family=%hu type=%u proto=%u\n",
		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);

	if (full_pkt && headroom)
		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->head, headroom, false);

	seg_len = min_t(int, skb_headlen(skb), len);
	if (seg_len)
		print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->data, seg_len, false);
	len -= seg_len;

	if (full_pkt && tailroom)
		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb_tail_pointer(skb), tailroom, false);

	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(frag, frag->page_offset,
				      skb_frag_size(frag), p, p_off, p_len,
				      copied) {
			seg_len = min_t(int, p_len, len);
			vaddr = kmap_atomic(p);
			print_hex_dump(level, "skb frag: ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, vaddr + p_off, seg_len, false);
			kunmap_atomic(vaddr);
			len -= seg_len;
			if (!len)
				break;
		}
	}

	if (full_pkt && skb_has_frag_list(skb)) {
		printk("skb fraglist:\n");
		skb_walk_frags(skb, list_skb)
			skb_dump(level, list_skb, true);
	}
}
EXPORT_SYMBOL(skb_dump);
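/* Illustrative usage sketch (editorial addition, hedged): callers are
 * expected to rate limit, as the comment above skb_dump() says. A driver
 * debugging a malformed frame might do:
 *
 *	if (net_ratelimit())
 *		skb_dump(KERN_ERR, skb, false);	// headers only
 */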
/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	skb_zcopy_clear(skb, true);
}
EXPORT_SYMBOL(skb_tx_error);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero
 *	Functions identically to kfree_skb, but kfree_skb assumes that the frame
 *	is being dropped after a failure and notes that
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);

/**
 *	consume_stateless_skb - free an skbuff, assuming it is stateless
 *	@skb: buffer to free
 *
 *	Alike consume_skb(), but this variant assumes that this is the last
 *	skb reference and all the head states have been already dropped
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb);
	skb_release_data(skb);
	kfree_skbmem(skb);
}

void __kfree_skb_flush(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* flush skb_cache if containing objects */
	if (nc->skb_count) {
		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}

static inline void _kfree_skb_defer(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* drop skb->head and call any destructors for packet */
	skb_release_all(skb);

	/* record skb to CPU local list */
	nc->skb_cache[nc->skb_count++] = skb;

#ifdef CONFIG_SLUB
	/* SLUB writes into objects when freeing */
	prefetchw(skb);
#endif

	/* flush skb_cache if it is filled */
	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}
void __kfree_skb_defer(struct sk_buff *skb)
{
	_kfree_skb_defer(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	if (unlikely(!skb))
		return;

	/* Zero budget indicate non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb);

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	_kfree_skb_defer(skb);
}
EXPORT_SYMBOL(napi_consume_skb);
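/* Illustrative TX-completion sketch (editorial addition, hedged; the ring and
 * helper names are hypothetical): a NAPI poll handler passes its budget
 * through so the deferred-free cache above is only used from genuine NAPI
 * context, while budget==0 callers fall back to dev_consume_skb_any().
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = my_next_completed_tx(ring)))
 *			napi_consume_skb(skb, budget);
 *		...
 *	}
 */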
/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp		= old->tstamp;
	/* We do not copy old->sk */
	new->dev		= old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
	__skb_ext_copy(new, old);
	__nf_copy(new, old, false);

	/* Note : this field could be in headers_start/headers_end section
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif

}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->peeked = 0;
	C(pfmemalloc);
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	refcount_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
 * @first: first sk_buff of the msg
 */
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
{
	struct sk_buff *n;

	n = alloc_skb(0, GFP_ATOMIC);
	if (!n)
		return NULL;

	n->len = first->len;
	n->data_len = first->len;
	n->truesize = first->truesize;

	skb_shinfo(n)->frag_list = first;

	__copy_skb_header(n, first);
	n->destructor = NULL;

	return n;
}
EXPORT_SYMBOL_GPL(alloc_skb_for_msg);

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
{
	unsigned long max_pg, num_pg, new_pg, old_pg;
	struct user_struct *user;

	if (capable(CAP_IPC_LOCK) || !size)
		return 0;

	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
	max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	user = mmp->user ? : current_user();
	do {
		old_pg = atomic_long_read(&user->locked_vm);
		new_pg = old_pg + num_pg;
		if (new_pg > max_pg)
			return -ENOBUFS;
	} while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
		 old_pg);

	if (!mmp->user) {
		mmp->user = get_uid(user);
		mmp->num_pg = num_pg;
	} else {
		mmp->num_pg += num_pg;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mm_account_pinned_pages);

void mm_unaccount_pinned_pages(struct mmpin *mmp)
{
	if (mmp->user) {
		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
		free_uid(mmp->user);
	}
}
EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);

struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
{
	struct ubuf_info *uarg;
	struct sk_buff *skb;

	WARN_ON_ONCE(!in_task());

	skb = sock_omalloc(sk, 0, GFP_KERNEL);
	if (!skb)
		return NULL;

	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
	uarg = (void *)skb->cb;
	uarg->mmp.user = NULL;

	if (mm_account_pinned_pages(&uarg->mmp, size)) {
		kfree_skb(skb);
		return NULL;
	}

	uarg->callback = sock_zerocopy_callback;
	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
	uarg->len = 1;
	uarg->bytelen = size;
	uarg->zerocopy = 1;
	refcount_set(&uarg->refcnt, 1);
	sock_hold(sk);

	return uarg;
}
EXPORT_SYMBOL_GPL(sock_zerocopy_alloc);

static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
{
	return container_of((void *)uarg, struct sk_buff, cb);
}

struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
					struct ubuf_info *uarg)
{
	if (uarg) {
		const u32 byte_limit = 1 << 19;		/* limit to a few TSO */
		u32 bytelen, next;

		/* realloc only when socket is locked (TCP, UDP cork),
		 * so uarg->len and sk_zckey access is serialized
		 */
		if (!sock_owned_by_user(sk)) {
			WARN_ON_ONCE(1);
			return NULL;
		}

		bytelen = uarg->bytelen + size;
		if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
			/* TCP can create new skb to attach new uarg */
			if (sk->sk_type == SOCK_STREAM)
				goto new_alloc;
			return NULL;
		}

		next = (u32)atomic_read(&sk->sk_zckey);
		if ((u32)(uarg->id + uarg->len) == next) {
			if (mm_account_pinned_pages(&uarg->mmp, size))
				return NULL;
			uarg->len++;
			uarg->bytelen = bytelen;
			atomic_set(&sk->sk_zckey, ++next);

			/* no extra ref when appending to datagram (MSG_MORE) */
			if (sk->sk_type == SOCK_STREAM)
				sock_zerocopy_get(uarg);

			return uarg;
		}
	}

new_alloc:
	return sock_zerocopy_alloc(sk, size);
}
EXPORT_SYMBOL_GPL(sock_zerocopy_realloc);

static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	u32 old_lo, old_hi;
	u64 sum_len;

	old_lo = serr->ee.ee_info;
	old_hi = serr->ee.ee_data;
	sum_len = old_hi - old_lo + 1ULL + len;

	if (sum_len >= (1ULL << 32))
		return false;

	if (lo != old_hi + 1)
		return false;

	serr->ee.ee_data += len;
	return true;
}

void sock_zerocopy_callback(struct ubuf_info *uarg, bool success)
{
	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
	struct sock_exterr_skb *serr;
	struct sock *sk = skb->sk;
	struct sk_buff_head *q;
	unsigned long flags;
	u32 lo, hi;
	u16 len;

	mm_unaccount_pinned_pages(&uarg->mmp);

	/* if !len, there was only 1 call, and it was aborted
	 * so do not queue a completion notification
	 */
	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
		goto release;

	len = uarg->len;
	lo = uarg->id;
	hi = uarg->id + len - 1;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = 0;
	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
	serr->ee.ee_data = hi;
	serr->ee.ee_info = lo;
	if (!success)
		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;

	q = &sk->sk_error_queue;
	spin_lock_irqsave(&q->lock, flags);
	tail = skb_peek_tail(q);
	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
	    !skb_zerocopy_notify_extend(tail, lo, len)) {
		__skb_queue_tail(q, skb);
		skb = NULL;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	sk->sk_error_report(sk);

release:
	consume_skb(skb);
	sock_put(sk);
}
EXPORT_SYMBOL_GPL(sock_zerocopy_callback);

void sock_zerocopy_put(struct ubuf_info *uarg)
{
	if (uarg && refcount_dec_and_test(&uarg->refcnt)) {
		if (uarg->callback)
			uarg->callback(uarg, uarg->zerocopy);
		else
			consume_skb(skb_from_uarg(uarg));
	}
}
EXPORT_SYMBOL_GPL(sock_zerocopy_put);

void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
	if (uarg) {
		struct sock *sk = skb_from_uarg(uarg)->sk;

		atomic_dec(&sk->sk_zckey);
		uarg->len--;

		if (have_uref)
			sock_zerocopy_put(uarg);
	}
}
EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);

int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
{
	return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);

int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg)
{
	struct ubuf_info *orig_uarg = skb_zcopy(skb);
	struct iov_iter orig_iter = msg->msg_iter;
	int err, orig_len = skb->len;

	/* An skb can only point to one uarg. This edge case happens when
	 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
	 */
	if (orig_uarg && uarg != orig_uarg)
		return -EEXIST;

	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
		struct sock *save_sk = skb->sk;

		/* Streams do not free skb on error. Reset to prev state. */
		msg->msg_iter = orig_iter;
		skb->sk = sk;
		___pskb_trim(skb, orig_len);
		skb->sk = save_sk;
		return err;
	}

	skb_zcopy_set(skb, uarg, NULL);
	return skb->len - orig_len;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);

static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
			      gfp_t gfp_mask)
{
	if (skb_zcopy(orig)) {
		if (skb_zcopy(nskb)) {
			/* !gfp_mask callers are verified to !skb_zcopy(nskb) */
			if (!gfp_mask) {
				WARN_ON_ONCE(1);
				return -ENOMEM;
			}
			if (skb_uarg(nskb) == skb_uarg(orig))
				return 0;
			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
				return -EIO;
		}
		skb_zcopy_set(nskb, skb_uarg(orig), NULL);
	}
	return 0;
}

/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	int i, new_frags;
	u32 d_off;

	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
		return -EINVAL;

	if (!num_frags)
		goto release;

	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < new_frags; i++) {
		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	page = head;
	d_off = 0;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(f, f->page_offset, skb_frag_size(f),
				      p, p_off, p_len, copied) {
			u32 copy, done = 0;
			vaddr = kmap_atomic(p);

			while (done < p_len) {
				if (d_off == PAGE_SIZE) {
					d_off = 0;
					page = (struct page *)page_private(page);
				}
				copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
				memcpy(page_address(page) + d_off,
				       vaddr + p_off + done, copy);
				done += copy;
				d_off += copy;
			}
			kunmap_atomic(vaddr);
		}
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	/* skb frags point to kernel buffers */
	for (i = 0; i < new_frags - 1; i++) {
		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
		head = (struct page *)page_private(head);
	}
	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
	skb_shinfo(skb)->nr_frags = new_frags;

release:
	skb_zcopy_clear(skb, false);
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff_fclones *fclones = container_of(skb,
						       struct sk_buff_fclones,
						       skb1);
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	if (skb->fclone == SKB_FCLONE_ORIG &&
	    refcount_read(&fclones->fclone_ref) == 1) {
		n = &fclones->skb2;
		refcount_set(&fclones->fclone_ref, 2);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
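/* Illustrative sketch (editorial addition, hedged): a clone shares the data
 * buffer, so it suits read-only consumers; anyone who will edit payload wants
 * skb_copy() (or pskb_copy() for header-only edits) instead. "deliver_copy"
 * is a hypothetical consumer.
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (nskb)
 *		deliver_copy(nskb);	// must not modify the shared packet data
 */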
void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header   += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}
EXPORT_SYMBOL(skb_headers_offset_update);

void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}
EXPORT_SYMBOL(skb_copy_header);

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As by-product this function converts non-linear &sk_buff to linear
 *	one, so that &sk_buff becomes completely private and caller is allowed
 *	to modify all the data of returned buffer. This means that this
 *	function is not recommended for use in circumstances when only
 *	header is going to be modified. Use pskb_copy() instead.
 */
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));

	skb_copy_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *	@fclone: if true allocate the copy of the skb from the fclone
 *	cache instead of the head cache; it is recommended to set this
 *	to true for the cases where the copy will likely be cloned
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone)
{
	unsigned int size = skb_headlen(skb) + headroom;
	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len  = skb->data_len;
	n->len	     = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask) ||
		    skb_zerocopy_clone(n, skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	skb_copy_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy_fclone);
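/* Illustrative sketch (editorial addition, hedged): the pskb_copy()/
 * __pskb_copy() wrappers in skbuff.h build on the function above when only
 * the header bytes need to be private; fragments stay shared and refcounted.
 * "hdr_room" is a hypothetical extra-headroom value.
 *
 *	struct sk_buff *n = __pskb_copy(skb, hdr_room, GFP_ATOMIC);
 *
 *	if (n)
 *		...			// the linear header of "n" may now be edited
 */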
/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1.  Returns zero in the case of success or error,
 *	if expansion failed. In the last case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i, osize = skb_end_offset(skb);
	int size = osize + nhead + ntail;
	long off;
	u8 *data;

	BUG_ON(nhead < 0);

	BUG_ON(skb_shared(skb));

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		if (skb_zcopy(skb))
			refcount_inc(&skb_uarg(skb)->refcnt);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head     = data;
	skb->head_frag = 0;
	skb->data    += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end      = size;
	off           = nhead;
#else
	skb->end      = skb->head + size;
#endif
	skb->tail	      += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned   = 0;
	skb->hdr_len  = 0;
	skb->nohdr    = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	skb_metadata_clear(skb);

	/* It is not generally safe to change skb->truesize.
	 * For the moment, we really care of rx path, or
	 * when skb is orphaned (not attached to a socket).
	 */
	if (!skb->sk || skb->destructor == sock_edemux)
		skb->truesize += size - osize;

	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
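/* Illustrative caller sketch (editorial addition, hedged): a common pattern
 * grows headroom before pushing an encapsulation header, then re-derives any
 * cached header pointers because the head may have been reallocated.
 * "needed" is a hypothetical header size.
 *
 *	if (skb_headroom(skb) < needed &&
 *	    pskb_expand_head(skb, SKB_DATA_ALIGN(needed - skb_headroom(skb)),
 *			     0, GFP_ATOMIC))
 *		goto drop;
 *	hdr = skb_push(skb, needed);	// pointers into the old head are stale now
 */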
/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
					gfp_mask, skb_alloc_rx_flag(skb),
					NUMA_NO_NODE);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			     skb->len + head_copy_len));

	skb_copy_header(n, skb);

	skb_headers_offset_update(n, newheadroom - oldheadroom);

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	__skb_pad		-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *	@free_on_error: free buffer on error
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error
 *	if @free_on_error is true.
 */

int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
{
	int err;
	int ntail;

	/* If the skbuff is non linear tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data+skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	if (free_on_error)
		kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(__skb_pad);

/**
 *	pskb_put - add data to the tail of a potentially fragmented buffer
 *	@skb: start of the buffer to use
 *	@tail: tail fragment of the buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the potentially
 *	fragmented buffer. @tail must be the last fragment of @skb -- or
 *	@skb itself. If this would exceed the total buffer size the kernel
 *	will panic. A pointer to the first byte of the extra data is
 *	returned.
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
void *skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
void *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
void *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);
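/* Illustrative sketch (editorial addition, hedged) tying the helpers above
 * together: payload is appended with skb_put(), headers are prepended with
 * skb_push(), and a receiver strips them again with skb_pull().
 * "struct my_hdr", "payload" and "payload_len" are hypothetical.
 *
 *	skb_reserve(skb, sizeof(struct my_hdr));	// headroom for the header
 *	skb_put_data(skb, payload, payload_len);	// tail grows
 *	hdr = skb_push(skb, sizeof(*hdr));		// data pointer moves back
 *	...
 *	skb_pull(skb, sizeof(*hdr));			// consumer skips the header
 */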
/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			consume_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len       = len;
	} else {
		skb->len       = len;
		skb->data_len  = 0;
		skb_set_tail_pointer(skb, len);
	}

	if (!skb->sk || skb->destructor == sock_edemux)
		skb_condense(skb);
	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/* Note : use pskb_trim_rcsum() instead of calling this directly
 */
int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		int delta = skb->len - len;

		skb->csum = csum_block_sub(skb->csum,
					   skb_checksum(skb, len, delta, 0),
					   len);
	}
	return __pskb_trim(skb, len);
}
EXPORT_SYMBOL(pskb_trim_rcsum_slow);

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes a sense only on a fragmented &sk_buff,
 *	it expands header moving its tail forward and copying necessary
 *	data from fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
void *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
2055 */ 2056 int i, k, eat = (skb->tail + delta) - skb->end; 2057 2058 if (eat > 0 || skb_cloned(skb)) { 2059 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 2060 GFP_ATOMIC)) 2061 return NULL; 2062 } 2063 2064 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), 2065 skb_tail_pointer(skb), delta)); 2066 2067 /* Optimization: no fragments, no reasons to preestimate 2068 * size of pulled pages. Superb. 2069 */ 2070 if (!skb_has_frag_list(skb)) 2071 goto pull_pages; 2072 2073 /* Estimate size of pulled pages. */ 2074 eat = delta; 2075 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2076 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2077 2078 if (size >= eat) 2079 goto pull_pages; 2080 eat -= size; 2081 } 2082 2083 /* If we need update frag list, we are in troubles. 2084 * Certainly, it is possible to add an offset to skb data, 2085 * but taking into account that pulling is expected to 2086 * be very rare operation, it is worth to fight against 2087 * further bloating skb head and crucify ourselves here instead. 2088 * Pure masohism, indeed. 8)8) 2089 */ 2090 if (eat) { 2091 struct sk_buff *list = skb_shinfo(skb)->frag_list; 2092 struct sk_buff *clone = NULL; 2093 struct sk_buff *insp = NULL; 2094 2095 do { 2096 if (list->len <= eat) { 2097 /* Eaten as whole. */ 2098 eat -= list->len; 2099 list = list->next; 2100 insp = list; 2101 } else { 2102 /* Eaten partially. */ 2103 2104 if (skb_shared(list)) { 2105 /* Sucks! We need to fork list. :-( */ 2106 clone = skb_clone(list, GFP_ATOMIC); 2107 if (!clone) 2108 return NULL; 2109 insp = list->next; 2110 list = clone; 2111 } else { 2112 /* This may be pulled without 2113 * problems. */ 2114 insp = list; 2115 } 2116 if (!pskb_pull(list, eat)) { 2117 kfree_skb(clone); 2118 return NULL; 2119 } 2120 break; 2121 } 2122 } while (eat); 2123 2124 /* Free pulled out fragments. */ 2125 while ((list = skb_shinfo(skb)->frag_list) != insp) { 2126 skb_shinfo(skb)->frag_list = list->next; 2127 kfree_skb(list); 2128 } 2129 /* And insert new clone at head. */ 2130 if (clone) { 2131 clone->next = list; 2132 skb_shinfo(skb)->frag_list = clone; 2133 } 2134 } 2135 /* Success! Now we may commit changes to skb data. */ 2136 2137 pull_pages: 2138 eat = delta; 2139 k = 0; 2140 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2141 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2142 2143 if (size <= eat) { 2144 skb_frag_unref(skb, i); 2145 eat -= size; 2146 } else { 2147 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 2148 if (eat) { 2149 skb_shinfo(skb)->frags[k].page_offset += eat; 2150 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); 2151 if (!i) 2152 goto end; 2153 eat = 0; 2154 } 2155 k++; 2156 } 2157 } 2158 skb_shinfo(skb)->nr_frags = k; 2159 2160 end: 2161 skb->tail += delta; 2162 skb->data_len -= delta; 2163 2164 if (!skb->data_len) 2165 skb_zcopy_clear(skb, false); 2166 2167 return skb_tail_pointer(skb); 2168 } 2169 EXPORT_SYMBOL(__pskb_pull_tail); 2170 2171 /** 2172 * skb_copy_bits - copy bits from skb to kernel buffer 2173 * @skb: source skb 2174 * @offset: offset in source 2175 * @to: destination buffer 2176 * @len: number of bytes to copy 2177 * 2178 * Copy the specified number of bytes from the source skb to the 2179 * destination buffer. 2180 * 2181 * CAUTION ! : 2182 * If its prototype is ever changed, 2183 * check arch/{*}/net/{*}.S files, 2184 * since it is called from BPF assembly code. 
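 *
 * A minimal usage sketch (illustrative only): copying a header that may
 * be spread across fragments into a buffer on the stack, assuming the
 * transport header offset has already been set up.
 *
 *	struct udphdr uh;
 *
 *	if (skb_copy_bits(skb, skb_transport_offset(skb),
 *			  &uh, sizeof(uh)) < 0)
 *		goto drop;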
2185 */ 2186 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 2187 { 2188 int start = skb_headlen(skb); 2189 struct sk_buff *frag_iter; 2190 int i, copy; 2191 2192 if (offset > (int)skb->len - len) 2193 goto fault; 2194 2195 /* Copy header. */ 2196 if ((copy = start - offset) > 0) { 2197 if (copy > len) 2198 copy = len; 2199 skb_copy_from_linear_data_offset(skb, offset, to, copy); 2200 if ((len -= copy) == 0) 2201 return 0; 2202 offset += copy; 2203 to += copy; 2204 } 2205 2206 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2207 int end; 2208 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 2209 2210 WARN_ON(start > offset + len); 2211 2212 end = start + skb_frag_size(f); 2213 if ((copy = end - offset) > 0) { 2214 u32 p_off, p_len, copied; 2215 struct page *p; 2216 u8 *vaddr; 2217 2218 if (copy > len) 2219 copy = len; 2220 2221 skb_frag_foreach_page(f, 2222 f->page_offset + offset - start, 2223 copy, p, p_off, p_len, copied) { 2224 vaddr = kmap_atomic(p); 2225 memcpy(to + copied, vaddr + p_off, p_len); 2226 kunmap_atomic(vaddr); 2227 } 2228 2229 if ((len -= copy) == 0) 2230 return 0; 2231 offset += copy; 2232 to += copy; 2233 } 2234 start = end; 2235 } 2236 2237 skb_walk_frags(skb, frag_iter) { 2238 int end; 2239 2240 WARN_ON(start > offset + len); 2241 2242 end = start + frag_iter->len; 2243 if ((copy = end - offset) > 0) { 2244 if (copy > len) 2245 copy = len; 2246 if (skb_copy_bits(frag_iter, offset - start, to, copy)) 2247 goto fault; 2248 if ((len -= copy) == 0) 2249 return 0; 2250 offset += copy; 2251 to += copy; 2252 } 2253 start = end; 2254 } 2255 2256 if (!len) 2257 return 0; 2258 2259 fault: 2260 return -EFAULT; 2261 } 2262 EXPORT_SYMBOL(skb_copy_bits); 2263 2264 /* 2265 * Callback from splice_to_pipe(), if we need to release some pages 2266 * at the end of the spd in case we error'ed out in filling the pipe. 2267 */ 2268 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 2269 { 2270 put_page(spd->pages[i]); 2271 } 2272 2273 static struct page *linear_to_page(struct page *page, unsigned int *len, 2274 unsigned int *offset, 2275 struct sock *sk) 2276 { 2277 struct page_frag *pfrag = sk_page_frag(sk); 2278 2279 if (!sk_page_frag_refill(sk, pfrag)) 2280 return NULL; 2281 2282 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 2283 2284 memcpy(page_address(pfrag->page) + pfrag->offset, 2285 page_address(page) + *offset, *len); 2286 *offset = pfrag->offset; 2287 pfrag->offset += *len; 2288 2289 return pfrag->page; 2290 } 2291 2292 static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 2293 struct page *page, 2294 unsigned int offset) 2295 { 2296 return spd->nr_pages && 2297 spd->pages[spd->nr_pages - 1] == page && 2298 (spd->partial[spd->nr_pages - 1].offset + 2299 spd->partial[spd->nr_pages - 1].len == offset); 2300 } 2301 2302 /* 2303 * Fill page/offset/length into spd, if it can hold more pages. 
2304 */ 2305 static bool spd_fill_page(struct splice_pipe_desc *spd, 2306 struct pipe_inode_info *pipe, struct page *page, 2307 unsigned int *len, unsigned int offset, 2308 bool linear, 2309 struct sock *sk) 2310 { 2311 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 2312 return true; 2313 2314 if (linear) { 2315 page = linear_to_page(page, len, &offset, sk); 2316 if (!page) 2317 return true; 2318 } 2319 if (spd_can_coalesce(spd, page, offset)) { 2320 spd->partial[spd->nr_pages - 1].len += *len; 2321 return false; 2322 } 2323 get_page(page); 2324 spd->pages[spd->nr_pages] = page; 2325 spd->partial[spd->nr_pages].len = *len; 2326 spd->partial[spd->nr_pages].offset = offset; 2327 spd->nr_pages++; 2328 2329 return false; 2330 } 2331 2332 static bool __splice_segment(struct page *page, unsigned int poff, 2333 unsigned int plen, unsigned int *off, 2334 unsigned int *len, 2335 struct splice_pipe_desc *spd, bool linear, 2336 struct sock *sk, 2337 struct pipe_inode_info *pipe) 2338 { 2339 if (!*len) 2340 return true; 2341 2342 /* skip this segment if already processed */ 2343 if (*off >= plen) { 2344 *off -= plen; 2345 return false; 2346 } 2347 2348 /* ignore any bits we already processed */ 2349 poff += *off; 2350 plen -= *off; 2351 *off = 0; 2352 2353 do { 2354 unsigned int flen = min(*len, plen); 2355 2356 if (spd_fill_page(spd, pipe, page, &flen, poff, 2357 linear, sk)) 2358 return true; 2359 poff += flen; 2360 plen -= flen; 2361 *len -= flen; 2362 } while (*len && plen); 2363 2364 return false; 2365 } 2366 2367 /* 2368 * Map linear and fragment data from the skb to spd. It reports true if the 2369 * pipe is full or if we already spliced the requested length. 2370 */ 2371 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 2372 unsigned int *offset, unsigned int *len, 2373 struct splice_pipe_desc *spd, struct sock *sk) 2374 { 2375 int seg; 2376 struct sk_buff *iter; 2377 2378 /* map the linear part : 2379 * If skb->head_frag is set, this 'linear' part is backed by a 2380 * fragment, and if the head is not shared with any clones then 2381 * we can avoid a copy since we own the head portion of this page. 2382 */ 2383 if (__splice_segment(virt_to_page(skb->data), 2384 (unsigned long) skb->data & (PAGE_SIZE - 1), 2385 skb_headlen(skb), 2386 offset, len, spd, 2387 skb_head_is_locked(skb), 2388 sk, pipe)) 2389 return true; 2390 2391 /* 2392 * then map the fragments 2393 */ 2394 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 2395 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 2396 2397 if (__splice_segment(skb_frag_page(f), 2398 f->page_offset, skb_frag_size(f), 2399 offset, len, spd, false, sk, pipe)) 2400 return true; 2401 } 2402 2403 skb_walk_frags(skb, iter) { 2404 if (*offset >= iter->len) { 2405 *offset -= iter->len; 2406 continue; 2407 } 2408 /* __skb_splice_bits() only fails if the output has no room 2409 * left, so no point in going over the frag_list for the error 2410 * case. 2411 */ 2412 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) 2413 return true; 2414 } 2415 2416 return false; 2417 } 2418 2419 /* 2420 * Map data from the skb to a pipe. Should handle both the linear part, 2421 * the fragments, and the frag list. 
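 *
 * Typical users are ->splice_read() style receive actors; a rough
 * call-site sketch (illustrative only, where offset, len, pipe and
 * flags come from the splice request and locking is elided):
 *
 *	spliced = skb_splice_bits(skb, skb->sk, offset, pipe, len, flags);
 *	if (spliced > 0)
 *		copied += spliced;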
2422 */ 2423 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 2424 struct pipe_inode_info *pipe, unsigned int tlen, 2425 unsigned int flags) 2426 { 2427 struct partial_page partial[MAX_SKB_FRAGS]; 2428 struct page *pages[MAX_SKB_FRAGS]; 2429 struct splice_pipe_desc spd = { 2430 .pages = pages, 2431 .partial = partial, 2432 .nr_pages_max = MAX_SKB_FRAGS, 2433 .ops = &nosteal_pipe_buf_ops, 2434 .spd_release = sock_spd_release, 2435 }; 2436 int ret = 0; 2437 2438 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); 2439 2440 if (spd.nr_pages) 2441 ret = splice_to_pipe(pipe, &spd); 2442 2443 return ret; 2444 } 2445 EXPORT_SYMBOL_GPL(skb_splice_bits); 2446 2447 /* Send skb data on a socket. Socket must be locked. */ 2448 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, 2449 int len) 2450 { 2451 unsigned int orig_len = len; 2452 struct sk_buff *head = skb; 2453 unsigned short fragidx; 2454 int slen, ret; 2455 2456 do_frag_list: 2457 2458 /* Deal with head data */ 2459 while (offset < skb_headlen(skb) && len) { 2460 struct kvec kv; 2461 struct msghdr msg; 2462 2463 slen = min_t(int, len, skb_headlen(skb) - offset); 2464 kv.iov_base = skb->data + offset; 2465 kv.iov_len = slen; 2466 memset(&msg, 0, sizeof(msg)); 2467 msg.msg_flags = MSG_DONTWAIT; 2468 2469 ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen); 2470 if (ret <= 0) 2471 goto error; 2472 2473 offset += ret; 2474 len -= ret; 2475 } 2476 2477 /* All the data was skb head? */ 2478 if (!len) 2479 goto out; 2480 2481 /* Make offset relative to start of frags */ 2482 offset -= skb_headlen(skb); 2483 2484 /* Find where we are in frag list */ 2485 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 2486 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 2487 2488 if (offset < frag->size) 2489 break; 2490 2491 offset -= frag->size; 2492 } 2493 2494 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 2495 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 2496 2497 slen = min_t(size_t, len, frag->size - offset); 2498 2499 while (slen) { 2500 ret = kernel_sendpage_locked(sk, frag->page.p, 2501 frag->page_offset + offset, 2502 slen, MSG_DONTWAIT); 2503 if (ret <= 0) 2504 goto error; 2505 2506 len -= ret; 2507 offset += ret; 2508 slen -= ret; 2509 } 2510 2511 offset = 0; 2512 } 2513 2514 if (len) { 2515 /* Process any frag lists */ 2516 2517 if (skb == head) { 2518 if (skb_has_frag_list(skb)) { 2519 skb = skb_shinfo(skb)->frag_list; 2520 goto do_frag_list; 2521 } 2522 } else if (skb->next) { 2523 skb = skb->next; 2524 goto do_frag_list; 2525 } 2526 } 2527 2528 out: 2529 return orig_len - len; 2530 2531 error: 2532 return orig_len == len ? ret : orig_len - len; 2533 } 2534 EXPORT_SYMBOL_GPL(skb_send_sock_locked); 2535 2536 /** 2537 * skb_store_bits - store bits from kernel buffer to skb 2538 * @skb: destination buffer 2539 * @offset: offset in destination 2540 * @from: source buffer 2541 * @len: number of bytes to copy 2542 * 2543 * Copy the specified number of bytes from the source buffer to the 2544 * destination skb. This function handles all the messy bits of 2545 * traversing fragment lists and such. 
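 *
 * A minimal usage sketch (illustrative only), assuming the caller has
 * already made sure it owns the data (e.g. via skb_unclone()) and that
 * offset + sizeof(val) does not exceed skb->len:
 *
 *	__be32 val = htonl(mark);
 *
 *	if (skb_store_bits(skb, offset, &val, sizeof(val)))
 *		goto drop;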
2546 */ 2547 2548 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 2549 { 2550 int start = skb_headlen(skb); 2551 struct sk_buff *frag_iter; 2552 int i, copy; 2553 2554 if (offset > (int)skb->len - len) 2555 goto fault; 2556 2557 if ((copy = start - offset) > 0) { 2558 if (copy > len) 2559 copy = len; 2560 skb_copy_to_linear_data_offset(skb, offset, from, copy); 2561 if ((len -= copy) == 0) 2562 return 0; 2563 offset += copy; 2564 from += copy; 2565 } 2566 2567 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2568 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2569 int end; 2570 2571 WARN_ON(start > offset + len); 2572 2573 end = start + skb_frag_size(frag); 2574 if ((copy = end - offset) > 0) { 2575 u32 p_off, p_len, copied; 2576 struct page *p; 2577 u8 *vaddr; 2578 2579 if (copy > len) 2580 copy = len; 2581 2582 skb_frag_foreach_page(frag, 2583 frag->page_offset + offset - start, 2584 copy, p, p_off, p_len, copied) { 2585 vaddr = kmap_atomic(p); 2586 memcpy(vaddr + p_off, from + copied, p_len); 2587 kunmap_atomic(vaddr); 2588 } 2589 2590 if ((len -= copy) == 0) 2591 return 0; 2592 offset += copy; 2593 from += copy; 2594 } 2595 start = end; 2596 } 2597 2598 skb_walk_frags(skb, frag_iter) { 2599 int end; 2600 2601 WARN_ON(start > offset + len); 2602 2603 end = start + frag_iter->len; 2604 if ((copy = end - offset) > 0) { 2605 if (copy > len) 2606 copy = len; 2607 if (skb_store_bits(frag_iter, offset - start, 2608 from, copy)) 2609 goto fault; 2610 if ((len -= copy) == 0) 2611 return 0; 2612 offset += copy; 2613 from += copy; 2614 } 2615 start = end; 2616 } 2617 if (!len) 2618 return 0; 2619 2620 fault: 2621 return -EFAULT; 2622 } 2623 EXPORT_SYMBOL(skb_store_bits); 2624 2625 /* Checksum skb data. */ 2626 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 2627 __wsum csum, const struct skb_checksum_ops *ops) 2628 { 2629 int start = skb_headlen(skb); 2630 int i, copy = start - offset; 2631 struct sk_buff *frag_iter; 2632 int pos = 0; 2633 2634 /* Checksum header. 
*/ 2635 if (copy > 0) { 2636 if (copy > len) 2637 copy = len; 2638 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, 2639 skb->data + offset, copy, csum); 2640 if ((len -= copy) == 0) 2641 return csum; 2642 offset += copy; 2643 pos = copy; 2644 } 2645 2646 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2647 int end; 2648 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2649 2650 WARN_ON(start > offset + len); 2651 2652 end = start + skb_frag_size(frag); 2653 if ((copy = end - offset) > 0) { 2654 u32 p_off, p_len, copied; 2655 struct page *p; 2656 __wsum csum2; 2657 u8 *vaddr; 2658 2659 if (copy > len) 2660 copy = len; 2661 2662 skb_frag_foreach_page(frag, 2663 frag->page_offset + offset - start, 2664 copy, p, p_off, p_len, copied) { 2665 vaddr = kmap_atomic(p); 2666 csum2 = INDIRECT_CALL_1(ops->update, 2667 csum_partial_ext, 2668 vaddr + p_off, p_len, 0); 2669 kunmap_atomic(vaddr); 2670 csum = INDIRECT_CALL_1(ops->combine, 2671 csum_block_add_ext, csum, 2672 csum2, pos, p_len); 2673 pos += p_len; 2674 } 2675 2676 if (!(len -= copy)) 2677 return csum; 2678 offset += copy; 2679 } 2680 start = end; 2681 } 2682 2683 skb_walk_frags(skb, frag_iter) { 2684 int end; 2685 2686 WARN_ON(start > offset + len); 2687 2688 end = start + frag_iter->len; 2689 if ((copy = end - offset) > 0) { 2690 __wsum csum2; 2691 if (copy > len) 2692 copy = len; 2693 csum2 = __skb_checksum(frag_iter, offset - start, 2694 copy, 0, ops); 2695 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, 2696 csum, csum2, pos, copy); 2697 if ((len -= copy) == 0) 2698 return csum; 2699 offset += copy; 2700 pos += copy; 2701 } 2702 start = end; 2703 } 2704 BUG_ON(len); 2705 2706 return csum; 2707 } 2708 EXPORT_SYMBOL(__skb_checksum); 2709 2710 __wsum skb_checksum(const struct sk_buff *skb, int offset, 2711 int len, __wsum csum) 2712 { 2713 const struct skb_checksum_ops ops = { 2714 .update = csum_partial_ext, 2715 .combine = csum_block_add_ext, 2716 }; 2717 2718 return __skb_checksum(skb, offset, len, csum, &ops); 2719 } 2720 EXPORT_SYMBOL(skb_checksum); 2721 2722 /* Both of above in one bottle. */ 2723 2724 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 2725 u8 *to, int len, __wsum csum) 2726 { 2727 int start = skb_headlen(skb); 2728 int i, copy = start - offset; 2729 struct sk_buff *frag_iter; 2730 int pos = 0; 2731 2732 /* Copy header. 
*/ 2733 if (copy > 0) { 2734 if (copy > len) 2735 copy = len; 2736 csum = csum_partial_copy_nocheck(skb->data + offset, to, 2737 copy, csum); 2738 if ((len -= copy) == 0) 2739 return csum; 2740 offset += copy; 2741 to += copy; 2742 pos = copy; 2743 } 2744 2745 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2746 int end; 2747 2748 WARN_ON(start > offset + len); 2749 2750 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2751 if ((copy = end - offset) > 0) { 2752 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2753 u32 p_off, p_len, copied; 2754 struct page *p; 2755 __wsum csum2; 2756 u8 *vaddr; 2757 2758 if (copy > len) 2759 copy = len; 2760 2761 skb_frag_foreach_page(frag, 2762 frag->page_offset + offset - start, 2763 copy, p, p_off, p_len, copied) { 2764 vaddr = kmap_atomic(p); 2765 csum2 = csum_partial_copy_nocheck(vaddr + p_off, 2766 to + copied, 2767 p_len, 0); 2768 kunmap_atomic(vaddr); 2769 csum = csum_block_add(csum, csum2, pos); 2770 pos += p_len; 2771 } 2772 2773 if (!(len -= copy)) 2774 return csum; 2775 offset += copy; 2776 to += copy; 2777 } 2778 start = end; 2779 } 2780 2781 skb_walk_frags(skb, frag_iter) { 2782 __wsum csum2; 2783 int end; 2784 2785 WARN_ON(start > offset + len); 2786 2787 end = start + frag_iter->len; 2788 if ((copy = end - offset) > 0) { 2789 if (copy > len) 2790 copy = len; 2791 csum2 = skb_copy_and_csum_bits(frag_iter, 2792 offset - start, 2793 to, copy, 0); 2794 csum = csum_block_add(csum, csum2, pos); 2795 if ((len -= copy) == 0) 2796 return csum; 2797 offset += copy; 2798 to += copy; 2799 pos += copy; 2800 } 2801 start = end; 2802 } 2803 BUG_ON(len); 2804 return csum; 2805 } 2806 EXPORT_SYMBOL(skb_copy_and_csum_bits); 2807 2808 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) 2809 { 2810 __sum16 sum; 2811 2812 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); 2813 /* See comments in __skb_checksum_complete(). */ 2814 if (likely(!sum)) { 2815 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 2816 !skb->csum_complete_sw) 2817 netdev_rx_csum_fault(skb->dev, skb); 2818 } 2819 if (!skb_shared(skb)) 2820 skb->csum_valid = !sum; 2821 return sum; 2822 } 2823 EXPORT_SYMBOL(__skb_checksum_complete_head); 2824 2825 /* This function assumes skb->csum already holds pseudo header's checksum, 2826 * which has been changed from the hardware checksum, for example, by 2827 * __skb_checksum_validate_complete(). And, the original skb->csum must 2828 * have been validated unsuccessfully for CHECKSUM_COMPLETE case. 2829 * 2830 * It returns non-zero if the recomputed checksum is still invalid, otherwise 2831 * zero. The new checksum is stored back into skb->csum unless the skb is 2832 * shared. 2833 */ 2834 __sum16 __skb_checksum_complete(struct sk_buff *skb) 2835 { 2836 __wsum csum; 2837 __sum16 sum; 2838 2839 csum = skb_checksum(skb, 0, skb->len, 0); 2840 2841 sum = csum_fold(csum_add(skb->csum, csum)); 2842 /* This check is inverted, because we already knew the hardware 2843 * checksum is invalid before calling this function. So, if the 2844 * re-computed checksum is valid instead, then we have a mismatch 2845 * between the original skb->csum and skb_checksum(). This means either 2846 * the original hardware checksum is incorrect or we screw up skb->csum 2847 * when moving skb->data around. 
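 *
 * (Callers normally reach this path through the skb_checksum_complete()
 * wrapper, typically as
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 *
 * on the receive path.)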
2848 */ 2849 if (likely(!sum)) { 2850 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 2851 !skb->csum_complete_sw) 2852 netdev_rx_csum_fault(skb->dev, skb); 2853 } 2854 2855 if (!skb_shared(skb)) { 2856 /* Save full packet checksum */ 2857 skb->csum = csum; 2858 skb->ip_summed = CHECKSUM_COMPLETE; 2859 skb->csum_complete_sw = 1; 2860 skb->csum_valid = !sum; 2861 } 2862 2863 return sum; 2864 } 2865 EXPORT_SYMBOL(__skb_checksum_complete); 2866 2867 static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) 2868 { 2869 net_warn_ratelimited( 2870 "%s: attempt to compute crc32c without libcrc32c.ko\n", 2871 __func__); 2872 return 0; 2873 } 2874 2875 static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, 2876 int offset, int len) 2877 { 2878 net_warn_ratelimited( 2879 "%s: attempt to compute crc32c without libcrc32c.ko\n", 2880 __func__); 2881 return 0; 2882 } 2883 2884 static const struct skb_checksum_ops default_crc32c_ops = { 2885 .update = warn_crc32c_csum_update, 2886 .combine = warn_crc32c_csum_combine, 2887 }; 2888 2889 const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = 2890 &default_crc32c_ops; 2891 EXPORT_SYMBOL(crc32c_csum_stub); 2892 2893 /** 2894 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 2895 * @from: source buffer 2896 * 2897 * Calculates the amount of linear headroom needed in the 'to' skb passed 2898 * into skb_zerocopy(). 2899 */ 2900 unsigned int 2901 skb_zerocopy_headlen(const struct sk_buff *from) 2902 { 2903 unsigned int hlen = 0; 2904 2905 if (!from->head_frag || 2906 skb_headlen(from) < L1_CACHE_BYTES || 2907 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 2908 hlen = skb_headlen(from); 2909 2910 if (skb_has_frag_list(from)) 2911 hlen = from->len; 2912 2913 return hlen; 2914 } 2915 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 2916 2917 /** 2918 * skb_zerocopy - Zero copy skb to skb 2919 * @to: destination buffer 2920 * @from: source buffer 2921 * @len: number of bytes to copy from source buffer 2922 * @hlen: size of linear headroom in destination buffer 2923 * 2924 * Copies up to `len` bytes from `from` to `to` by creating references 2925 * to the frags in the source buffer. 2926 * 2927 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 2928 * headroom in the `to` buffer. 
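 *
 * A minimal caller sketch (illustrative only, allocation failure and
 * cleanup elided):
 *
 *	unsigned int hlen = skb_zerocopy_headlen(from);
 *	struct sk_buff *to = alloc_skb(hlen, GFP_ATOMIC);
 *	int err = -ENOMEM;
 *
 *	if (to)
 *		err = skb_zerocopy(to, from, from->len, hlen);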
2929 * 2930 * Return value: 2931 * 0: everything is OK 2932 * -ENOMEM: couldn't orphan frags of @from due to lack of memory 2933 * -EFAULT: skb_copy_bits() found some problem with skb geometry 2934 */ 2935 int 2936 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 2937 { 2938 int i, j = 0; 2939 int plen = 0; /* length of skb->head fragment */ 2940 int ret; 2941 struct page *page; 2942 unsigned int offset; 2943 2944 BUG_ON(!from->head_frag && !hlen); 2945 2946 /* dont bother with small payloads */ 2947 if (len <= skb_tailroom(to)) 2948 return skb_copy_bits(from, 0, skb_put(to, len), len); 2949 2950 if (hlen) { 2951 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 2952 if (unlikely(ret)) 2953 return ret; 2954 len -= hlen; 2955 } else { 2956 plen = min_t(int, skb_headlen(from), len); 2957 if (plen) { 2958 page = virt_to_head_page(from->head); 2959 offset = from->data - (unsigned char *)page_address(page); 2960 __skb_fill_page_desc(to, 0, page, offset, plen); 2961 get_page(page); 2962 j = 1; 2963 len -= plen; 2964 } 2965 } 2966 2967 to->truesize += len + plen; 2968 to->len += len + plen; 2969 to->data_len += len + plen; 2970 2971 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 2972 skb_tx_error(from); 2973 return -ENOMEM; 2974 } 2975 skb_zerocopy_clone(to, from, GFP_ATOMIC); 2976 2977 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 2978 if (!len) 2979 break; 2980 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 2981 skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len); 2982 len -= skb_shinfo(to)->frags[j].size; 2983 skb_frag_ref(to, j); 2984 j++; 2985 } 2986 skb_shinfo(to)->nr_frags = j; 2987 2988 return 0; 2989 } 2990 EXPORT_SYMBOL_GPL(skb_zerocopy); 2991 2992 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 2993 { 2994 __wsum csum; 2995 long csstart; 2996 2997 if (skb->ip_summed == CHECKSUM_PARTIAL) 2998 csstart = skb_checksum_start_offset(skb); 2999 else 3000 csstart = skb_headlen(skb); 3001 3002 BUG_ON(csstart > skb_headlen(skb)); 3003 3004 skb_copy_from_linear_data(skb, to, csstart); 3005 3006 csum = 0; 3007 if (csstart != skb->len) 3008 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 3009 skb->len - csstart, 0); 3010 3011 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3012 long csstuff = csstart + skb->csum_offset; 3013 3014 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 3015 } 3016 } 3017 EXPORT_SYMBOL(skb_copy_and_csum_dev); 3018 3019 /** 3020 * skb_dequeue - remove from the head of the queue 3021 * @list: list to dequeue from 3022 * 3023 * Remove the head of the list. The list lock is taken so the function 3024 * may be used safely with other locking list functions. The head item is 3025 * returned or %NULL if the list is empty. 3026 */ 3027 3028 struct sk_buff *skb_dequeue(struct sk_buff_head *list) 3029 { 3030 unsigned long flags; 3031 struct sk_buff *result; 3032 3033 spin_lock_irqsave(&list->lock, flags); 3034 result = __skb_dequeue(list); 3035 spin_unlock_irqrestore(&list->lock, flags); 3036 return result; 3037 } 3038 EXPORT_SYMBOL(skb_dequeue); 3039 3040 /** 3041 * skb_dequeue_tail - remove from the tail of the queue 3042 * @list: list to dequeue from 3043 * 3044 * Remove the tail of the list. The list lock is taken so the function 3045 * may be used safely with other locking list functions. The tail item is 3046 * returned or %NULL if the list is empty. 
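 *
 * A minimal usage sketch of this locked-queue family (illustrative
 * only; some_skb stands for a buffer obtained elsewhere):
 *
 *	struct sk_buff_head q;
 *	struct sk_buff *skb;
 *
 *	skb_queue_head_init(&q);
 *	skb_queue_tail(&q, some_skb);
 *	...
 *	while ((skb = skb_dequeue_tail(&q)) != NULL)
 *		kfree_skb(skb);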
3047 */ 3048 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 3049 { 3050 unsigned long flags; 3051 struct sk_buff *result; 3052 3053 spin_lock_irqsave(&list->lock, flags); 3054 result = __skb_dequeue_tail(list); 3055 spin_unlock_irqrestore(&list->lock, flags); 3056 return result; 3057 } 3058 EXPORT_SYMBOL(skb_dequeue_tail); 3059 3060 /** 3061 * skb_queue_purge - empty a list 3062 * @list: list to empty 3063 * 3064 * Delete all buffers on an &sk_buff list. Each buffer is removed from 3065 * the list and one reference dropped. This function takes the list 3066 * lock and is atomic with respect to other list locking functions. 3067 */ 3068 void skb_queue_purge(struct sk_buff_head *list) 3069 { 3070 struct sk_buff *skb; 3071 while ((skb = skb_dequeue(list)) != NULL) 3072 kfree_skb(skb); 3073 } 3074 EXPORT_SYMBOL(skb_queue_purge); 3075 3076 /** 3077 * skb_rbtree_purge - empty a skb rbtree 3078 * @root: root of the rbtree to empty 3079 * Return value: the sum of truesizes of all purged skbs. 3080 * 3081 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from 3082 * the list and one reference dropped. This function does not take 3083 * any lock. Synchronization should be handled by the caller (e.g., TCP 3084 * out-of-order queue is protected by the socket lock). 3085 */ 3086 unsigned int skb_rbtree_purge(struct rb_root *root) 3087 { 3088 struct rb_node *p = rb_first(root); 3089 unsigned int sum = 0; 3090 3091 while (p) { 3092 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); 3093 3094 p = rb_next(p); 3095 rb_erase(&skb->rbnode, root); 3096 sum += skb->truesize; 3097 kfree_skb(skb); 3098 } 3099 return sum; 3100 } 3101 3102 /** 3103 * skb_queue_head - queue a buffer at the list head 3104 * @list: list to use 3105 * @newsk: buffer to queue 3106 * 3107 * Queue a buffer at the start of the list. This function takes the 3108 * list lock and can be used safely with other locking &sk_buff functions 3109 * safely. 3110 * 3111 * A buffer cannot be placed on two lists at the same time. 3112 */ 3113 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 3114 { 3115 unsigned long flags; 3116 3117 spin_lock_irqsave(&list->lock, flags); 3118 __skb_queue_head(list, newsk); 3119 spin_unlock_irqrestore(&list->lock, flags); 3120 } 3121 EXPORT_SYMBOL(skb_queue_head); 3122 3123 /** 3124 * skb_queue_tail - queue a buffer at the list tail 3125 * @list: list to use 3126 * @newsk: buffer to queue 3127 * 3128 * Queue a buffer at the tail of the list. This function takes the 3129 * list lock and can be used safely with other locking &sk_buff functions 3130 * safely. 3131 * 3132 * A buffer cannot be placed on two lists at the same time. 3133 */ 3134 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 3135 { 3136 unsigned long flags; 3137 3138 spin_lock_irqsave(&list->lock, flags); 3139 __skb_queue_tail(list, newsk); 3140 spin_unlock_irqrestore(&list->lock, flags); 3141 } 3142 EXPORT_SYMBOL(skb_queue_tail); 3143 3144 /** 3145 * skb_unlink - remove a buffer from a list 3146 * @skb: buffer to remove 3147 * @list: list to use 3148 * 3149 * Remove a packet from a list. The list locks are taken and this 3150 * function is atomic with respect to other list locked calls 3151 * 3152 * You must know what list the SKB is on. 
3153 */ 3154 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 3155 { 3156 unsigned long flags; 3157 3158 spin_lock_irqsave(&list->lock, flags); 3159 __skb_unlink(skb, list); 3160 spin_unlock_irqrestore(&list->lock, flags); 3161 } 3162 EXPORT_SYMBOL(skb_unlink); 3163 3164 /** 3165 * skb_append - append a buffer 3166 * @old: buffer to insert after 3167 * @newsk: buffer to insert 3168 * @list: list to use 3169 * 3170 * Place a packet after a given packet in a list. The list locks are taken 3171 * and this function is atomic with respect to other list locked calls. 3172 * A buffer cannot be placed on two lists at the same time. 3173 */ 3174 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 3175 { 3176 unsigned long flags; 3177 3178 spin_lock_irqsave(&list->lock, flags); 3179 __skb_queue_after(list, old, newsk); 3180 spin_unlock_irqrestore(&list->lock, flags); 3181 } 3182 EXPORT_SYMBOL(skb_append); 3183 3184 static inline void skb_split_inside_header(struct sk_buff *skb, 3185 struct sk_buff* skb1, 3186 const u32 len, const int pos) 3187 { 3188 int i; 3189 3190 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 3191 pos - len); 3192 /* And move data appendix as is. */ 3193 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 3194 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 3195 3196 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 3197 skb_shinfo(skb)->nr_frags = 0; 3198 skb1->data_len = skb->data_len; 3199 skb1->len += skb1->data_len; 3200 skb->data_len = 0; 3201 skb->len = len; 3202 skb_set_tail_pointer(skb, len); 3203 } 3204 3205 static inline void skb_split_no_header(struct sk_buff *skb, 3206 struct sk_buff* skb1, 3207 const u32 len, int pos) 3208 { 3209 int i, k = 0; 3210 const int nfrags = skb_shinfo(skb)->nr_frags; 3211 3212 skb_shinfo(skb)->nr_frags = 0; 3213 skb1->len = skb1->data_len = skb->len - len; 3214 skb->len = len; 3215 skb->data_len = len - pos; 3216 3217 for (i = 0; i < nfrags; i++) { 3218 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 3219 3220 if (pos + size > len) { 3221 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 3222 3223 if (pos < len) { 3224 /* Split frag. 3225 * We have two variants in this case: 3226 * 1. Move all the frag to the second 3227 * part, if it is possible. F.e. 3228 * this approach is mandatory for TUX, 3229 * where splitting is expensive. 3230 * 2. Split is accurately. We make this. 3231 */ 3232 skb_frag_ref(skb, i); 3233 skb_shinfo(skb1)->frags[0].page_offset += len - pos; 3234 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 3235 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 3236 skb_shinfo(skb)->nr_frags++; 3237 } 3238 k++; 3239 } else 3240 skb_shinfo(skb)->nr_frags++; 3241 pos += size; 3242 } 3243 skb_shinfo(skb1)->nr_frags = k; 3244 } 3245 3246 /** 3247 * skb_split - Split fragmented skb to two parts at length len. 3248 * @skb: the buffer to split 3249 * @skb1: the buffer to receive the second part 3250 * @len: new length for skb 3251 */ 3252 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 3253 { 3254 int pos = skb_headlen(skb); 3255 3256 skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags & 3257 SKBTX_SHARED_FRAG; 3258 skb_zerocopy_clone(skb1, skb, 0); 3259 if (len < pos) /* Split line is inside header. */ 3260 skb_split_inside_header(skb, skb1, len, pos); 3261 else /* Second chunk has no header, nothing to copy. 
 */
		skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);

/* Shifting from/to a cloned skb is a no-go.
 *
 * Caller cannot keep skb_shinfo related pointers past calling here!
 */
static int skb_prepare_for_shift(struct sk_buff *skb)
{
	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

/**
 * skb_shift - Shifts paged data partially from skb to another
 * @tgt: buffer into which tail data gets added
 * @skb: buffer from which the paged data comes
 * @shiftlen: shift up to this many bytes
 *
 * Attempts to shift up to @shiftlen worth of bytes, which may be less
 * than the length of the skb, from @skb to @tgt. Returns the number of
 * bytes shifted. It's up to the caller to free @skb if everything was
 * shifted.
 *
 * If @tgt runs out of frags, the whole operation is aborted.
 *
 * @skb may contain nothing but paged data, while @tgt is allowed to
 * have non-paged data as well.
 *
 * TODO: a full-sized shift could be optimized, but that would need a
 * specialized skb freer able to handle frags without an up-to-date
 * nr_frags.
 */
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
{
	int from, to, merge, todo;
	struct skb_frag_struct *fragfrom, *fragto;

	BUG_ON(shiftlen > skb->len);

	if (skb_headlen(skb))
		return 0;
	if (skb_zcopy(tgt) || skb_zcopy(skb))
		return 0;

	todo = shiftlen;
	from = 0;
	to = skb_shinfo(tgt)->nr_frags;
	fragfrom = &skb_shinfo(skb)->frags[from];

	/* The actual merge is delayed until we know we can commit it all,
	 * so that we don't have to undo partial changes.
	 */
	if (!to ||
	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
			      fragfrom->page_offset)) {
		merge = -1;
	} else {
		merge = to - 1;

		todo -= skb_frag_size(fragfrom);
		if (todo < 0) {
			if (skb_prepare_for_shift(skb) ||
			    skb_prepare_for_shift(tgt))
				return 0;

			/* All previous frag pointers might be stale!
*/ 3327 fragfrom = &skb_shinfo(skb)->frags[from]; 3328 fragto = &skb_shinfo(tgt)->frags[merge]; 3329 3330 skb_frag_size_add(fragto, shiftlen); 3331 skb_frag_size_sub(fragfrom, shiftlen); 3332 fragfrom->page_offset += shiftlen; 3333 3334 goto onlymerged; 3335 } 3336 3337 from++; 3338 } 3339 3340 /* Skip full, not-fitting skb to avoid expensive operations */ 3341 if ((shiftlen == skb->len) && 3342 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 3343 return 0; 3344 3345 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 3346 return 0; 3347 3348 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 3349 if (to == MAX_SKB_FRAGS) 3350 return 0; 3351 3352 fragfrom = &skb_shinfo(skb)->frags[from]; 3353 fragto = &skb_shinfo(tgt)->frags[to]; 3354 3355 if (todo >= skb_frag_size(fragfrom)) { 3356 *fragto = *fragfrom; 3357 todo -= skb_frag_size(fragfrom); 3358 from++; 3359 to++; 3360 3361 } else { 3362 __skb_frag_ref(fragfrom); 3363 fragto->page = fragfrom->page; 3364 fragto->page_offset = fragfrom->page_offset; 3365 skb_frag_size_set(fragto, todo); 3366 3367 fragfrom->page_offset += todo; 3368 skb_frag_size_sub(fragfrom, todo); 3369 todo = 0; 3370 3371 to++; 3372 break; 3373 } 3374 } 3375 3376 /* Ready to "commit" this state change to tgt */ 3377 skb_shinfo(tgt)->nr_frags = to; 3378 3379 if (merge >= 0) { 3380 fragfrom = &skb_shinfo(skb)->frags[0]; 3381 fragto = &skb_shinfo(tgt)->frags[merge]; 3382 3383 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 3384 __skb_frag_unref(fragfrom); 3385 } 3386 3387 /* Reposition in the original skb */ 3388 to = 0; 3389 while (from < skb_shinfo(skb)->nr_frags) 3390 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 3391 skb_shinfo(skb)->nr_frags = to; 3392 3393 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 3394 3395 onlymerged: 3396 /* Most likely the tgt won't ever need its checksum anymore, skb on 3397 * the other hand might need it if it needs to be resent 3398 */ 3399 tgt->ip_summed = CHECKSUM_PARTIAL; 3400 skb->ip_summed = CHECKSUM_PARTIAL; 3401 3402 /* Yak, is it really working this way? Some helper please? */ 3403 skb->len -= shiftlen; 3404 skb->data_len -= shiftlen; 3405 skb->truesize -= shiftlen; 3406 tgt->len += shiftlen; 3407 tgt->data_len += shiftlen; 3408 tgt->truesize += shiftlen; 3409 3410 return shiftlen; 3411 } 3412 3413 /** 3414 * skb_prepare_seq_read - Prepare a sequential read of skb data 3415 * @skb: the buffer to read 3416 * @from: lower offset of data to be read 3417 * @to: upper offset of data to be read 3418 * @st: state variable 3419 * 3420 * Initializes the specified state variable. Must be called before 3421 * invoking skb_seq_read() for the first time. 3422 */ 3423 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 3424 unsigned int to, struct skb_seq_state *st) 3425 { 3426 st->lower_offset = from; 3427 st->upper_offset = to; 3428 st->root_skb = st->cur_skb = skb; 3429 st->frag_idx = st->stepped_offset = 0; 3430 st->frag_data = NULL; 3431 } 3432 EXPORT_SYMBOL(skb_prepare_seq_read); 3433 3434 /** 3435 * skb_seq_read - Sequentially read skb data 3436 * @consumed: number of bytes consumed by the caller so far 3437 * @data: destination pointer for data to be returned 3438 * @st: state variable 3439 * 3440 * Reads a block of skb data at @consumed relative to the 3441 * lower offset specified to skb_prepare_seq_read(). Assigns 3442 * the head of the data block to @data and returns the length 3443 * of the block or 0 if the end of the skb data or the upper 3444 * offset has been reached. 
3445 * 3446 * The caller is not required to consume all of the data 3447 * returned, i.e. @consumed is typically set to the number 3448 * of bytes already consumed and the next call to 3449 * skb_seq_read() will return the remaining part of the block. 3450 * 3451 * Note 1: The size of each block of data returned can be arbitrary, 3452 * this limitation is the cost for zerocopy sequential 3453 * reads of potentially non linear data. 3454 * 3455 * Note 2: Fragment lists within fragments are not implemented 3456 * at the moment, state->root_skb could be replaced with 3457 * a stack for this purpose. 3458 */ 3459 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 3460 struct skb_seq_state *st) 3461 { 3462 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 3463 skb_frag_t *frag; 3464 3465 if (unlikely(abs_offset >= st->upper_offset)) { 3466 if (st->frag_data) { 3467 kunmap_atomic(st->frag_data); 3468 st->frag_data = NULL; 3469 } 3470 return 0; 3471 } 3472 3473 next_skb: 3474 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 3475 3476 if (abs_offset < block_limit && !st->frag_data) { 3477 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 3478 return block_limit - abs_offset; 3479 } 3480 3481 if (st->frag_idx == 0 && !st->frag_data) 3482 st->stepped_offset += skb_headlen(st->cur_skb); 3483 3484 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 3485 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 3486 block_limit = skb_frag_size(frag) + st->stepped_offset; 3487 3488 if (abs_offset < block_limit) { 3489 if (!st->frag_data) 3490 st->frag_data = kmap_atomic(skb_frag_page(frag)); 3491 3492 *data = (u8 *) st->frag_data + frag->page_offset + 3493 (abs_offset - st->stepped_offset); 3494 3495 return block_limit - abs_offset; 3496 } 3497 3498 if (st->frag_data) { 3499 kunmap_atomic(st->frag_data); 3500 st->frag_data = NULL; 3501 } 3502 3503 st->frag_idx++; 3504 st->stepped_offset += skb_frag_size(frag); 3505 } 3506 3507 if (st->frag_data) { 3508 kunmap_atomic(st->frag_data); 3509 st->frag_data = NULL; 3510 } 3511 3512 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 3513 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 3514 st->frag_idx = 0; 3515 goto next_skb; 3516 } else if (st->cur_skb->next) { 3517 st->cur_skb = st->cur_skb->next; 3518 st->frag_idx = 0; 3519 goto next_skb; 3520 } 3521 3522 return 0; 3523 } 3524 EXPORT_SYMBOL(skb_seq_read); 3525 3526 /** 3527 * skb_abort_seq_read - Abort a sequential read of skb data 3528 * @st: state variable 3529 * 3530 * Must be called if skb_seq_read() was not called until it 3531 * returned 0. 
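 *
 * A typical read loop looks roughly like this (illustrative sketch
 * only; want_more() is a stand-in for the caller's own logic):
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int consumed = 0, len;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		if (!want_more(data, len)) {
 *			skb_abort_seq_read(&st);
 *			break;
 *		}
 *		consumed += len;
 *	}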
3532 */ 3533 void skb_abort_seq_read(struct skb_seq_state *st) 3534 { 3535 if (st->frag_data) 3536 kunmap_atomic(st->frag_data); 3537 } 3538 EXPORT_SYMBOL(skb_abort_seq_read); 3539 3540 #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 3541 3542 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 3543 struct ts_config *conf, 3544 struct ts_state *state) 3545 { 3546 return skb_seq_read(offset, text, TS_SKB_CB(state)); 3547 } 3548 3549 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 3550 { 3551 skb_abort_seq_read(TS_SKB_CB(state)); 3552 } 3553 3554 /** 3555 * skb_find_text - Find a text pattern in skb data 3556 * @skb: the buffer to look in 3557 * @from: search offset 3558 * @to: search limit 3559 * @config: textsearch configuration 3560 * 3561 * Finds a pattern in the skb data according to the specified 3562 * textsearch configuration. Use textsearch_next() to retrieve 3563 * subsequent occurrences of the pattern. Returns the offset 3564 * to the first occurrence or UINT_MAX if no match was found. 3565 */ 3566 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 3567 unsigned int to, struct ts_config *config) 3568 { 3569 struct ts_state state; 3570 unsigned int ret; 3571 3572 config->get_next_block = skb_ts_get_next_block; 3573 config->finish = skb_ts_finish; 3574 3575 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 3576 3577 ret = textsearch_find(config, &state); 3578 return (ret <= to - from ? ret : UINT_MAX); 3579 } 3580 EXPORT_SYMBOL(skb_find_text); 3581 3582 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 3583 int offset, size_t size) 3584 { 3585 int i = skb_shinfo(skb)->nr_frags; 3586 3587 if (skb_can_coalesce(skb, i, page, offset)) { 3588 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); 3589 } else if (i < MAX_SKB_FRAGS) { 3590 get_page(page); 3591 skb_fill_page_desc(skb, i, page, offset, size); 3592 } else { 3593 return -EMSGSIZE; 3594 } 3595 3596 return 0; 3597 } 3598 EXPORT_SYMBOL_GPL(skb_append_pagefrags); 3599 3600 /** 3601 * skb_pull_rcsum - pull skb and update receive checksum 3602 * @skb: buffer to update 3603 * @len: length of data pulled 3604 * 3605 * This function performs an skb_pull on the packet and updates 3606 * the CHECKSUM_COMPLETE checksum. It should be used on 3607 * receive path processing instead of skb_pull unless you know 3608 * that the checksum difference is zero (e.g., a valid IP header) 3609 * or you are setting ip_summed to CHECKSUM_NONE. 3610 */ 3611 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 3612 { 3613 unsigned char *data = skb->data; 3614 3615 BUG_ON(len > skb->len); 3616 __skb_pull(skb, len); 3617 skb_postpull_rcsum(skb, data, len); 3618 return skb->data; 3619 } 3620 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 3621 3622 static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) 3623 { 3624 skb_frag_t head_frag; 3625 struct page *page; 3626 3627 page = virt_to_head_page(frag_skb->head); 3628 head_frag.page.p = page; 3629 head_frag.page_offset = frag_skb->data - 3630 (unsigned char *)page_address(page); 3631 head_frag.size = skb_headlen(frag_skb); 3632 return head_frag; 3633 } 3634 3635 /** 3636 * skb_segment - Perform protocol segmentation on skb. 3637 * @head_skb: buffer to segment 3638 * @features: features for the output path (see dev->features) 3639 * 3640 * This function performs segmentation on the given skb. It returns 3641 * a pointer to the first in a list of new skbs for the segments. 
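 *
 * Callers on the transmit path normally go through the
 * skb_gso_segment() wrapper; a simplified sketch of how the returned
 * list is consumed (illustrative only; xmit_one() is a stand-in for
 * whatever transmits each segment):
 *
 *	struct sk_buff *segs, *next;
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	consume_skb(skb);
 *	for (; segs; segs = next) {
 *		next = segs->next;
 *		segs->next = NULL;
 *		xmit_one(segs);
 *	}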
3642 * In case of error it returns ERR_PTR(err). 3643 */ 3644 struct sk_buff *skb_segment(struct sk_buff *head_skb, 3645 netdev_features_t features) 3646 { 3647 struct sk_buff *segs = NULL; 3648 struct sk_buff *tail = NULL; 3649 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 3650 skb_frag_t *frag = skb_shinfo(head_skb)->frags; 3651 unsigned int mss = skb_shinfo(head_skb)->gso_size; 3652 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 3653 struct sk_buff *frag_skb = head_skb; 3654 unsigned int offset = doffset; 3655 unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 3656 unsigned int partial_segs = 0; 3657 unsigned int headroom; 3658 unsigned int len = head_skb->len; 3659 __be16 proto; 3660 bool csum, sg; 3661 int nfrags = skb_shinfo(head_skb)->nr_frags; 3662 int err = -ENOMEM; 3663 int i = 0; 3664 int pos; 3665 int dummy; 3666 3667 __skb_push(head_skb, doffset); 3668 proto = skb_network_protocol(head_skb, &dummy); 3669 if (unlikely(!proto)) 3670 return ERR_PTR(-EINVAL); 3671 3672 sg = !!(features & NETIF_F_SG); 3673 csum = !!can_checksum_protocol(features, proto); 3674 3675 if (sg && csum && (mss != GSO_BY_FRAGS)) { 3676 if (!(features & NETIF_F_GSO_PARTIAL)) { 3677 struct sk_buff *iter; 3678 unsigned int frag_len; 3679 3680 if (!list_skb || 3681 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 3682 goto normal; 3683 3684 /* If we get here then all the required 3685 * GSO features except frag_list are supported. 3686 * Try to split the SKB to multiple GSO SKBs 3687 * with no frag_list. 3688 * Currently we can do that only when the buffers don't 3689 * have a linear part and all the buffers except 3690 * the last are of the same length. 3691 */ 3692 frag_len = list_skb->len; 3693 skb_walk_frags(head_skb, iter) { 3694 if (frag_len != iter->len && iter->next) 3695 goto normal; 3696 if (skb_headlen(iter) && !iter->head_frag) 3697 goto normal; 3698 3699 len -= iter->len; 3700 } 3701 3702 if (len != frag_len) 3703 goto normal; 3704 } 3705 3706 /* GSO partial only requires that we trim off any excess that 3707 * doesn't fit into an MSS sized block, so take care of that 3708 * now. 
3709 */ 3710 partial_segs = len / mss; 3711 if (partial_segs > 1) 3712 mss *= partial_segs; 3713 else 3714 partial_segs = 0; 3715 } 3716 3717 normal: 3718 headroom = skb_headroom(head_skb); 3719 pos = skb_headlen(head_skb); 3720 3721 do { 3722 struct sk_buff *nskb; 3723 skb_frag_t *nskb_frag; 3724 int hsize; 3725 int size; 3726 3727 if (unlikely(mss == GSO_BY_FRAGS)) { 3728 len = list_skb->len; 3729 } else { 3730 len = head_skb->len - offset; 3731 if (len > mss) 3732 len = mss; 3733 } 3734 3735 hsize = skb_headlen(head_skb) - offset; 3736 if (hsize < 0) 3737 hsize = 0; 3738 if (hsize > len || !sg) 3739 hsize = len; 3740 3741 if (!hsize && i >= nfrags && skb_headlen(list_skb) && 3742 (skb_headlen(list_skb) == len || sg)) { 3743 BUG_ON(skb_headlen(list_skb) > len); 3744 3745 i = 0; 3746 nfrags = skb_shinfo(list_skb)->nr_frags; 3747 frag = skb_shinfo(list_skb)->frags; 3748 frag_skb = list_skb; 3749 pos += skb_headlen(list_skb); 3750 3751 while (pos < offset + len) { 3752 BUG_ON(i >= nfrags); 3753 3754 size = skb_frag_size(frag); 3755 if (pos + size > offset + len) 3756 break; 3757 3758 i++; 3759 pos += size; 3760 frag++; 3761 } 3762 3763 nskb = skb_clone(list_skb, GFP_ATOMIC); 3764 list_skb = list_skb->next; 3765 3766 if (unlikely(!nskb)) 3767 goto err; 3768 3769 if (unlikely(pskb_trim(nskb, len))) { 3770 kfree_skb(nskb); 3771 goto err; 3772 } 3773 3774 hsize = skb_end_offset(nskb); 3775 if (skb_cow_head(nskb, doffset + headroom)) { 3776 kfree_skb(nskb); 3777 goto err; 3778 } 3779 3780 nskb->truesize += skb_end_offset(nskb) - hsize; 3781 skb_release_head_state(nskb); 3782 __skb_push(nskb, doffset); 3783 } else { 3784 nskb = __alloc_skb(hsize + doffset + headroom, 3785 GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 3786 NUMA_NO_NODE); 3787 3788 if (unlikely(!nskb)) 3789 goto err; 3790 3791 skb_reserve(nskb, headroom); 3792 __skb_put(nskb, doffset); 3793 } 3794 3795 if (segs) 3796 tail->next = nskb; 3797 else 3798 segs = nskb; 3799 tail = nskb; 3800 3801 __copy_skb_header(nskb, head_skb); 3802 3803 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 3804 skb_reset_mac_len(nskb); 3805 3806 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 3807 nskb->data - tnl_hlen, 3808 doffset + tnl_hlen); 3809 3810 if (nskb->len == len + doffset) 3811 goto perform_csum_check; 3812 3813 if (!sg) { 3814 if (!nskb->remcsum_offload) 3815 nskb->ip_summed = CHECKSUM_NONE; 3816 SKB_GSO_CB(nskb)->csum = 3817 skb_copy_and_csum_bits(head_skb, offset, 3818 skb_put(nskb, len), 3819 len, 0); 3820 SKB_GSO_CB(nskb)->csum_start = 3821 skb_headroom(nskb) + doffset; 3822 continue; 3823 } 3824 3825 nskb_frag = skb_shinfo(nskb)->frags; 3826 3827 skb_copy_from_linear_data_offset(head_skb, offset, 3828 skb_put(nskb, hsize), hsize); 3829 3830 skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags & 3831 SKBTX_SHARED_FRAG; 3832 3833 if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || 3834 skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) 3835 goto err; 3836 3837 while (pos < offset + len) { 3838 if (i >= nfrags) { 3839 i = 0; 3840 nfrags = skb_shinfo(list_skb)->nr_frags; 3841 frag = skb_shinfo(list_skb)->frags; 3842 frag_skb = list_skb; 3843 if (!skb_headlen(list_skb)) { 3844 BUG_ON(!nfrags); 3845 } else { 3846 BUG_ON(!list_skb->head_frag); 3847 3848 /* to make room for head_frag. 
*/ 3849 i--; 3850 frag--; 3851 } 3852 if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || 3853 skb_zerocopy_clone(nskb, frag_skb, 3854 GFP_ATOMIC)) 3855 goto err; 3856 3857 list_skb = list_skb->next; 3858 } 3859 3860 if (unlikely(skb_shinfo(nskb)->nr_frags >= 3861 MAX_SKB_FRAGS)) { 3862 net_warn_ratelimited( 3863 "skb_segment: too many frags: %u %u\n", 3864 pos, mss); 3865 err = -EINVAL; 3866 goto err; 3867 } 3868 3869 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; 3870 __skb_frag_ref(nskb_frag); 3871 size = skb_frag_size(nskb_frag); 3872 3873 if (pos < offset) { 3874 nskb_frag->page_offset += offset - pos; 3875 skb_frag_size_sub(nskb_frag, offset - pos); 3876 } 3877 3878 skb_shinfo(nskb)->nr_frags++; 3879 3880 if (pos + size <= offset + len) { 3881 i++; 3882 frag++; 3883 pos += size; 3884 } else { 3885 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); 3886 goto skip_fraglist; 3887 } 3888 3889 nskb_frag++; 3890 } 3891 3892 skip_fraglist: 3893 nskb->data_len = len - hsize; 3894 nskb->len += nskb->data_len; 3895 nskb->truesize += nskb->data_len; 3896 3897 perform_csum_check: 3898 if (!csum) { 3899 if (skb_has_shared_frag(nskb) && 3900 __skb_linearize(nskb)) 3901 goto err; 3902 3903 if (!nskb->remcsum_offload) 3904 nskb->ip_summed = CHECKSUM_NONE; 3905 SKB_GSO_CB(nskb)->csum = 3906 skb_checksum(nskb, doffset, 3907 nskb->len - doffset, 0); 3908 SKB_GSO_CB(nskb)->csum_start = 3909 skb_headroom(nskb) + doffset; 3910 } 3911 } while ((offset += len) < head_skb->len); 3912 3913 /* Some callers want to get the end of the list. 3914 * Put it in segs->prev to avoid walking the list. 3915 * (see validate_xmit_skb_list() for example) 3916 */ 3917 segs->prev = tail; 3918 3919 if (partial_segs) { 3920 struct sk_buff *iter; 3921 int type = skb_shinfo(head_skb)->gso_type; 3922 unsigned short gso_size = skb_shinfo(head_skb)->gso_size; 3923 3924 /* Update type to add partial and then remove dodgy if set */ 3925 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; 3926 type &= ~SKB_GSO_DODGY; 3927 3928 /* Update GSO info and prepare to start updating headers on 3929 * our way back down the stack of protocols. 3930 */ 3931 for (iter = segs; iter; iter = iter->next) { 3932 skb_shinfo(iter)->gso_size = gso_size; 3933 skb_shinfo(iter)->gso_segs = partial_segs; 3934 skb_shinfo(iter)->gso_type = type; 3935 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; 3936 } 3937 3938 if (tail->len - doffset <= gso_size) 3939 skb_shinfo(tail)->gso_size = 0; 3940 else if (tail != segs) 3941 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); 3942 } 3943 3944 /* Following permits correct backpressure, for protocols 3945 * using skb_set_owner_w(). 3946 * Idea is to tranfert ownership from head_skb to last segment. 
3947 */ 3948 if (head_skb->destructor == sock_wfree) { 3949 swap(tail->truesize, head_skb->truesize); 3950 swap(tail->destructor, head_skb->destructor); 3951 swap(tail->sk, head_skb->sk); 3952 } 3953 return segs; 3954 3955 err: 3956 kfree_skb_list(segs); 3957 return ERR_PTR(err); 3958 } 3959 EXPORT_SYMBOL_GPL(skb_segment); 3960 3961 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) 3962 { 3963 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); 3964 unsigned int offset = skb_gro_offset(skb); 3965 unsigned int headlen = skb_headlen(skb); 3966 unsigned int len = skb_gro_len(skb); 3967 unsigned int delta_truesize; 3968 struct sk_buff *lp; 3969 3970 if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) 3971 return -E2BIG; 3972 3973 lp = NAPI_GRO_CB(p)->last; 3974 pinfo = skb_shinfo(lp); 3975 3976 if (headlen <= offset) { 3977 skb_frag_t *frag; 3978 skb_frag_t *frag2; 3979 int i = skbinfo->nr_frags; 3980 int nr_frags = pinfo->nr_frags + i; 3981 3982 if (nr_frags > MAX_SKB_FRAGS) 3983 goto merge; 3984 3985 offset -= headlen; 3986 pinfo->nr_frags = nr_frags; 3987 skbinfo->nr_frags = 0; 3988 3989 frag = pinfo->frags + nr_frags; 3990 frag2 = skbinfo->frags + i; 3991 do { 3992 *--frag = *--frag2; 3993 } while (--i); 3994 3995 frag->page_offset += offset; 3996 skb_frag_size_sub(frag, offset); 3997 3998 /* all fragments truesize : remove (head size + sk_buff) */ 3999 delta_truesize = skb->truesize - 4000 SKB_TRUESIZE(skb_end_offset(skb)); 4001 4002 skb->truesize -= skb->data_len; 4003 skb->len -= skb->data_len; 4004 skb->data_len = 0; 4005 4006 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 4007 goto done; 4008 } else if (skb->head_frag) { 4009 int nr_frags = pinfo->nr_frags; 4010 skb_frag_t *frag = pinfo->frags + nr_frags; 4011 struct page *page = virt_to_head_page(skb->head); 4012 unsigned int first_size = headlen - offset; 4013 unsigned int first_offset; 4014 4015 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 4016 goto merge; 4017 4018 first_offset = skb->data - 4019 (unsigned char *)page_address(page) + 4020 offset; 4021 4022 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 4023 4024 frag->page.p = page; 4025 frag->page_offset = first_offset; 4026 skb_frag_size_set(frag, first_size); 4027 4028 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 4029 /* We dont need to clear skbinfo->nr_frags here */ 4030 4031 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 4032 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 4033 goto done; 4034 } 4035 4036 merge: 4037 delta_truesize = skb->truesize; 4038 if (offset > headlen) { 4039 unsigned int eat = offset - headlen; 4040 4041 skbinfo->frags[0].page_offset += eat; 4042 skb_frag_size_sub(&skbinfo->frags[0], eat); 4043 skb->data_len -= eat; 4044 skb->len -= eat; 4045 offset = headlen; 4046 } 4047 4048 __skb_pull(skb, offset); 4049 4050 if (NAPI_GRO_CB(p)->last == p) 4051 skb_shinfo(p)->frag_list = skb; 4052 else 4053 NAPI_GRO_CB(p)->last->next = skb; 4054 NAPI_GRO_CB(p)->last = skb; 4055 __skb_header_release(skb); 4056 lp = p; 4057 4058 done: 4059 NAPI_GRO_CB(p)->count++; 4060 p->data_len += len; 4061 p->truesize += delta_truesize; 4062 p->len += len; 4063 if (lp != p) { 4064 lp->data_len += len; 4065 lp->truesize += delta_truesize; 4066 lp->len += len; 4067 } 4068 NAPI_GRO_CB(skb)->same_flow = 1; 4069 return 0; 4070 } 4071 EXPORT_SYMBOL_GPL(skb_gro_receive); 4072 4073 #ifdef CONFIG_SKB_EXTENSIONS 4074 #define SKB_EXT_ALIGN_VALUE 8 4075 #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), 
SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) 4076 4077 static const u8 skb_ext_type_len[] = { 4078 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4079 [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), 4080 #endif 4081 #ifdef CONFIG_XFRM 4082 [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), 4083 #endif 4084 }; 4085 4086 static __always_inline unsigned int skb_ext_total_length(void) 4087 { 4088 return SKB_EXT_CHUNKSIZEOF(struct skb_ext) + 4089 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4090 skb_ext_type_len[SKB_EXT_BRIDGE_NF] + 4091 #endif 4092 #ifdef CONFIG_XFRM 4093 skb_ext_type_len[SKB_EXT_SEC_PATH] + 4094 #endif 4095 0; 4096 } 4097 4098 static void skb_extensions_init(void) 4099 { 4100 BUILD_BUG_ON(SKB_EXT_NUM >= 8); 4101 BUILD_BUG_ON(skb_ext_total_length() > 255); 4102 4103 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", 4104 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), 4105 0, 4106 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4107 NULL); 4108 } 4109 #else 4110 static void skb_extensions_init(void) {} 4111 #endif 4112 4113 void __init skb_init(void) 4114 { 4115 skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache", 4116 sizeof(struct sk_buff), 4117 0, 4118 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4119 offsetof(struct sk_buff, cb), 4120 sizeof_field(struct sk_buff, cb), 4121 NULL); 4122 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 4123 sizeof(struct sk_buff_fclones), 4124 0, 4125 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4126 NULL); 4127 skb_extensions_init(); 4128 } 4129 4130 static int 4131 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, 4132 unsigned int recursion_level) 4133 { 4134 int start = skb_headlen(skb); 4135 int i, copy = start - offset; 4136 struct sk_buff *frag_iter; 4137 int elt = 0; 4138 4139 if (unlikely(recursion_level >= 24)) 4140 return -EMSGSIZE; 4141 4142 if (copy > 0) { 4143 if (copy > len) 4144 copy = len; 4145 sg_set_buf(sg, skb->data + offset, copy); 4146 elt++; 4147 if ((len -= copy) == 0) 4148 return elt; 4149 offset += copy; 4150 } 4151 4152 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 4153 int end; 4154 4155 WARN_ON(start > offset + len); 4156 4157 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 4158 if ((copy = end - offset) > 0) { 4159 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4160 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 4161 return -EMSGSIZE; 4162 4163 if (copy > len) 4164 copy = len; 4165 sg_set_page(&sg[elt], skb_frag_page(frag), copy, 4166 frag->page_offset+offset-start); 4167 elt++; 4168 if (!(len -= copy)) 4169 return elt; 4170 offset += copy; 4171 } 4172 start = end; 4173 } 4174 4175 skb_walk_frags(skb, frag_iter) { 4176 int end, ret; 4177 4178 WARN_ON(start > offset + len); 4179 4180 end = start + frag_iter->len; 4181 if ((copy = end - offset) > 0) { 4182 if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 4183 return -EMSGSIZE; 4184 4185 if (copy > len) 4186 copy = len; 4187 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, 4188 copy, recursion_level + 1); 4189 if (unlikely(ret < 0)) 4190 return ret; 4191 elt += ret; 4192 if ((len -= copy) == 0) 4193 return elt; 4194 offset += copy; 4195 } 4196 start = end; 4197 } 4198 BUG_ON(len); 4199 return elt; 4200 } 4201 4202 /** 4203 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 4204 * @skb: Socket buffer containing the buffers to be mapped 4205 * @sg: The scatter-gather list to map into 4206 * @offset: The offset into the buffer's contents to start mapping 4207 * @len: Length of buffer space to be mapped 
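 *
 * A minimal usage sketch (illustrative only; assumes the buffer has no
 * frag list, so one entry for the head plus MAX_SKB_FRAGS entries are
 * enough):
 *
 *	struct scatterlist sg[MAX_SKB_FRAGS + 1];
 *	int nsg;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 *	if (nsg < 0)
 *		return nsg;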
4208  *
4209  * Fill the specified scatter-gather list with mappings/pointers into a
4210  * region of the buffer space attached to a socket buffer. Returns either
4211  * the number of scatterlist items used, or -EMSGSIZE if the contents
4212  * could not fit.
4213  */
4214 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
4215 {
4216 	int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
4217 
4218 	if (nsg <= 0)
4219 		return nsg;
4220 
4221 	sg_mark_end(&sg[nsg - 1]);
4222 
4223 	return nsg;
4224 }
4225 EXPORT_SYMBOL_GPL(skb_to_sgvec);
4226 
4227 /* Unlike skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the given
4228  * sglist without marking the sg entry that contains the last skb data as
4229  * the end. The caller can therefore manipulate the sg list at will when
4230  * appending new data after the first call, without calling sg_unmark_end
4231  * to extend the list.
4232  *
4233  * Scenario to use skb_to_sgvec_nomark:
4234  * 1. sg_init_table
4235  * 2. skb_to_sgvec_nomark(payload1)
4236  * 3. skb_to_sgvec_nomark(payload2)
4237  *
4238  * This is equivalent to:
4239  * 1. sg_init_table
4240  * 2. skb_to_sgvec(payload1)
4241  * 3. sg_unmark_end
4242  * 4. skb_to_sgvec(payload2)
4243  *
4244  * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
4245  * is preferable.
4246  */
4247 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
4248 			int offset, int len)
4249 {
4250 	return __skb_to_sgvec(skb, sg, offset, len, 0);
4251 }
4252 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
4253 
4254 
4255 /**
4256  * skb_cow_data - Check that a socket buffer's data buffers are writable
4257  * @skb: The socket buffer to check.
4258  * @tailbits: Amount of trailing space to be added
4259  * @trailer: Returned pointer to the skb where the @tailbits space begins
4260  *
4261  * Make sure that the data buffers attached to a socket buffer are
4262  * writable. If they are not, private copies are made of the data buffers
4263  * and the socket buffer is set to use these instead.
4264  *
4265  * If @tailbits is given, make sure that there is space to write @tailbits
4266  * bytes of data beyond current end of socket buffer. @trailer will be
4267  * set to point to the skb in which this space begins.
4268  *
4269  * The number of scatterlist elements required to completely map the
4270  * COW'd and extended socket buffer will be returned.
4271  */
4272 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
4273 {
4274 	int copyflag;
4275 	int elt;
4276 	struct sk_buff *skb1, **skb_p;
4277 
4278 	/* If skb is cloned or its head is paged, reallocate
4279 	 * head pulling out all the pages (pages are considered not writable
4280 	 * at the moment even if they are anonymous).
4281 	 */
4282 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
4283 	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
4284 		return -ENOMEM;
4285 
4286 	/* Easy case. Most packets will go this way. */
4287 	if (!skb_has_frag_list(skb)) {
4288 		/* A little trouble: not enough space for the trailer.
4289 		 * This should not happen when the stack is tuned to generate
4290 		 * good frames. On a miss we reallocate and reserve even more
4291 		 * space; 128 bytes is fair. */
4292 
4293 		if (skb_tailroom(skb) < tailbits &&
4294 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
4295 			return -ENOMEM;
4296 
4297 		/* Voila! */
4298 		*trailer = skb;
4299 		return 1;
4300 	}
4301 
4302 	/* Misery. We are in trouble and are going to mince the fragments...
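	 *
	 * The loop below walks the frag_list and replaces every fragment that
	 * is shared, cloned, still carries page frags, or lacks the requested
	 * tailroom with a private writable copy, counting one scatterlist
	 * element per resulting skb.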
*/ 4303 4304 elt = 1; 4305 skb_p = &skb_shinfo(skb)->frag_list; 4306 copyflag = 0; 4307 4308 while ((skb1 = *skb_p) != NULL) { 4309 int ntail = 0; 4310 4311 /* The fragment is partially pulled by someone, 4312 * this can happen on input. Copy it and everything 4313 * after it. */ 4314 4315 if (skb_shared(skb1)) 4316 copyflag = 1; 4317 4318 /* If the skb is the last, worry about trailer. */ 4319 4320 if (skb1->next == NULL && tailbits) { 4321 if (skb_shinfo(skb1)->nr_frags || 4322 skb_has_frag_list(skb1) || 4323 skb_tailroom(skb1) < tailbits) 4324 ntail = tailbits + 128; 4325 } 4326 4327 if (copyflag || 4328 skb_cloned(skb1) || 4329 ntail || 4330 skb_shinfo(skb1)->nr_frags || 4331 skb_has_frag_list(skb1)) { 4332 struct sk_buff *skb2; 4333 4334 /* Fuck, we are miserable poor guys... */ 4335 if (ntail == 0) 4336 skb2 = skb_copy(skb1, GFP_ATOMIC); 4337 else 4338 skb2 = skb_copy_expand(skb1, 4339 skb_headroom(skb1), 4340 ntail, 4341 GFP_ATOMIC); 4342 if (unlikely(skb2 == NULL)) 4343 return -ENOMEM; 4344 4345 if (skb1->sk) 4346 skb_set_owner_w(skb2, skb1->sk); 4347 4348 /* Looking around. Are we still alive? 4349 * OK, link new skb, drop old one */ 4350 4351 skb2->next = skb1->next; 4352 *skb_p = skb2; 4353 kfree_skb(skb1); 4354 skb1 = skb2; 4355 } 4356 elt++; 4357 *trailer = skb1; 4358 skb_p = &skb1->next; 4359 } 4360 4361 return elt; 4362 } 4363 EXPORT_SYMBOL_GPL(skb_cow_data); 4364 4365 static void sock_rmem_free(struct sk_buff *skb) 4366 { 4367 struct sock *sk = skb->sk; 4368 4369 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 4370 } 4371 4372 static void skb_set_err_queue(struct sk_buff *skb) 4373 { 4374 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. 4375 * So, it is safe to (mis)use it to mark skbs on the error queue. 4376 */ 4377 skb->pkt_type = PACKET_OUTGOING; 4378 BUILD_BUG_ON(PACKET_OUTGOING == 0); 4379 } 4380 4381 /* 4382 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 4383 */ 4384 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 4385 { 4386 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 4387 (unsigned int)sk->sk_rcvbuf) 4388 return -ENOMEM; 4389 4390 skb_orphan(skb); 4391 skb->sk = sk; 4392 skb->destructor = sock_rmem_free; 4393 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 4394 skb_set_err_queue(skb); 4395 4396 /* before exiting rcu section, make sure dst is refcounted */ 4397 skb_dst_force(skb); 4398 4399 skb_queue_tail(&sk->sk_error_queue, skb); 4400 if (!sock_flag(sk, SOCK_DEAD)) 4401 sk->sk_error_report(sk); 4402 return 0; 4403 } 4404 EXPORT_SYMBOL(sock_queue_err_skb); 4405 4406 static bool is_icmp_err_skb(const struct sk_buff *skb) 4407 { 4408 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || 4409 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); 4410 } 4411 4412 struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 4413 { 4414 struct sk_buff_head *q = &sk->sk_error_queue; 4415 struct sk_buff *skb, *skb_next = NULL; 4416 bool icmp_next = false; 4417 unsigned long flags; 4418 4419 spin_lock_irqsave(&q->lock, flags); 4420 skb = __skb_dequeue(q); 4421 if (skb && (skb_next = skb_peek(q))) { 4422 icmp_next = is_icmp_err_skb(skb_next); 4423 if (icmp_next) 4424 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin; 4425 } 4426 spin_unlock_irqrestore(&q->lock, flags); 4427 4428 if (is_icmp_err_skb(skb) && !icmp_next) 4429 sk->sk_err = 0; 4430 4431 if (skb_next) 4432 sk->sk_error_report(sk); 4433 4434 return skb; 4435 } 4436 EXPORT_SYMBOL(sock_dequeue_err_skb); 4437 4438 /** 4439 * skb_clone_sk - 
create clone of skb, and take reference to socket 4440 * @skb: the skb to clone 4441 * 4442 * This function creates a clone of a buffer that holds a reference on 4443 * sk_refcnt. Buffers created via this function are meant to be 4444 * returned using sock_queue_err_skb, or free via kfree_skb. 4445 * 4446 * When passing buffers allocated with this function to sock_queue_err_skb 4447 * it is necessary to wrap the call with sock_hold/sock_put in order to 4448 * prevent the socket from being released prior to being enqueued on 4449 * the sk_error_queue. 4450 */ 4451 struct sk_buff *skb_clone_sk(struct sk_buff *skb) 4452 { 4453 struct sock *sk = skb->sk; 4454 struct sk_buff *clone; 4455 4456 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 4457 return NULL; 4458 4459 clone = skb_clone(skb, GFP_ATOMIC); 4460 if (!clone) { 4461 sock_put(sk); 4462 return NULL; 4463 } 4464 4465 clone->sk = sk; 4466 clone->destructor = sock_efree; 4467 4468 return clone; 4469 } 4470 EXPORT_SYMBOL(skb_clone_sk); 4471 4472 static void __skb_complete_tx_timestamp(struct sk_buff *skb, 4473 struct sock *sk, 4474 int tstype, 4475 bool opt_stats) 4476 { 4477 struct sock_exterr_skb *serr; 4478 int err; 4479 4480 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); 4481 4482 serr = SKB_EXT_ERR(skb); 4483 memset(serr, 0, sizeof(*serr)); 4484 serr->ee.ee_errno = ENOMSG; 4485 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 4486 serr->ee.ee_info = tstype; 4487 serr->opt_stats = opt_stats; 4488 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; 4489 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 4490 serr->ee.ee_data = skb_shinfo(skb)->tskey; 4491 if (sk->sk_protocol == IPPROTO_TCP && 4492 sk->sk_type == SOCK_STREAM) 4493 serr->ee.ee_data -= sk->sk_tskey; 4494 } 4495 4496 err = sock_queue_err_skb(sk, skb); 4497 4498 if (err) 4499 kfree_skb(skb); 4500 } 4501 4502 static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 4503 { 4504 bool ret; 4505 4506 if (likely(sysctl_tstamp_allow_data || tsonly)) 4507 return true; 4508 4509 read_lock_bh(&sk->sk_callback_lock); 4510 ret = sk->sk_socket && sk->sk_socket->file && 4511 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 4512 read_unlock_bh(&sk->sk_callback_lock); 4513 return ret; 4514 } 4515 4516 void skb_complete_tx_timestamp(struct sk_buff *skb, 4517 struct skb_shared_hwtstamps *hwtstamps) 4518 { 4519 struct sock *sk = skb->sk; 4520 4521 if (!skb_may_tx_timestamp(sk, false)) 4522 goto err; 4523 4524 /* Take a reference to prevent skb_orphan() from freeing the socket, 4525 * but only if the socket refcount is not zero. 
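	 *
	 * If the refcount has already dropped to zero the socket is being
	 * destroyed, so there is nowhere to deliver the timestamp and the
	 * skb is simply freed below.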
4526 */ 4527 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 4528 *skb_hwtstamps(skb) = *hwtstamps; 4529 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 4530 sock_put(sk); 4531 return; 4532 } 4533 4534 err: 4535 kfree_skb(skb); 4536 } 4537 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 4538 4539 void __skb_tstamp_tx(struct sk_buff *orig_skb, 4540 struct skb_shared_hwtstamps *hwtstamps, 4541 struct sock *sk, int tstype) 4542 { 4543 struct sk_buff *skb; 4544 bool tsonly, opt_stats = false; 4545 4546 if (!sk) 4547 return; 4548 4549 if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && 4550 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) 4551 return; 4552 4553 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 4554 if (!skb_may_tx_timestamp(sk, tsonly)) 4555 return; 4556 4557 if (tsonly) { 4558 #ifdef CONFIG_INET 4559 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && 4560 sk->sk_protocol == IPPROTO_TCP && 4561 sk->sk_type == SOCK_STREAM) { 4562 skb = tcp_get_timestamping_opt_stats(sk); 4563 opt_stats = true; 4564 } else 4565 #endif 4566 skb = alloc_skb(0, GFP_ATOMIC); 4567 } else { 4568 skb = skb_clone(orig_skb, GFP_ATOMIC); 4569 } 4570 if (!skb) 4571 return; 4572 4573 if (tsonly) { 4574 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & 4575 SKBTX_ANY_TSTAMP; 4576 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 4577 } 4578 4579 if (hwtstamps) 4580 *skb_hwtstamps(skb) = *hwtstamps; 4581 else 4582 skb->tstamp = ktime_get_real(); 4583 4584 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); 4585 } 4586 EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 4587 4588 void skb_tstamp_tx(struct sk_buff *orig_skb, 4589 struct skb_shared_hwtstamps *hwtstamps) 4590 { 4591 return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk, 4592 SCM_TSTAMP_SND); 4593 } 4594 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 4595 4596 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 4597 { 4598 struct sock *sk = skb->sk; 4599 struct sock_exterr_skb *serr; 4600 int err = 1; 4601 4602 skb->wifi_acked_valid = 1; 4603 skb->wifi_acked = acked; 4604 4605 serr = SKB_EXT_ERR(skb); 4606 memset(serr, 0, sizeof(*serr)); 4607 serr->ee.ee_errno = ENOMSG; 4608 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 4609 4610 /* Take a reference to prevent skb_orphan() from freeing the socket, 4611 * but only if the socket refcount is not zero. 4612 */ 4613 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 4614 err = sock_queue_err_skb(sk, skb); 4615 sock_put(sk); 4616 } 4617 if (err) 4618 kfree_skb(skb); 4619 } 4620 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 4621 4622 /** 4623 * skb_partial_csum_set - set up and verify partial csum values for packet 4624 * @skb: the skb to set 4625 * @start: the number of bytes after skb->data to start checksumming. 4626 * @off: the offset from start to place the checksum. 4627 * 4628 * For untrusted partially-checksummed packets, we need to make sure the values 4629 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 4630 * 4631 * This function checks and sets those values and skb->ip_summed: if this 4632 * returns false you should drop the packet. 
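 *
 * For example (an illustrative sketch only; csum_start and csum_offset
 * stand for values supplied by an untrusted device or guest header), a
 * caller could validate such a request with:
 *
 *	if (!skb_partial_csum_set(skb, csum_start, csum_offset))
 *		goto drop;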
4633 */ 4634 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 4635 { 4636 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); 4637 u32 csum_start = skb_headroom(skb) + (u32)start; 4638 4639 if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) { 4640 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", 4641 start, off, skb_headroom(skb), skb_headlen(skb)); 4642 return false; 4643 } 4644 skb->ip_summed = CHECKSUM_PARTIAL; 4645 skb->csum_start = csum_start; 4646 skb->csum_offset = off; 4647 skb_set_transport_header(skb, start); 4648 return true; 4649 } 4650 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 4651 4652 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 4653 unsigned int max) 4654 { 4655 if (skb_headlen(skb) >= len) 4656 return 0; 4657 4658 /* If we need to pullup then pullup to the max, so we 4659 * won't need to do it again. 4660 */ 4661 if (max > skb->len) 4662 max = skb->len; 4663 4664 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 4665 return -ENOMEM; 4666 4667 if (skb_headlen(skb) < len) 4668 return -EPROTO; 4669 4670 return 0; 4671 } 4672 4673 #define MAX_TCP_HDR_LEN (15 * 4) 4674 4675 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 4676 typeof(IPPROTO_IP) proto, 4677 unsigned int off) 4678 { 4679 switch (proto) { 4680 int err; 4681 4682 case IPPROTO_TCP: 4683 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 4684 off + MAX_TCP_HDR_LEN); 4685 if (!err && !skb_partial_csum_set(skb, off, 4686 offsetof(struct tcphdr, 4687 check))) 4688 err = -EPROTO; 4689 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 4690 4691 case IPPROTO_UDP: 4692 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 4693 off + sizeof(struct udphdr)); 4694 if (!err && !skb_partial_csum_set(skb, off, 4695 offsetof(struct udphdr, 4696 check))) 4697 err = -EPROTO; 4698 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 4699 } 4700 4701 return ERR_PTR(-EPROTO); 4702 } 4703 4704 /* This value should be large enough to cover a tagged ethernet header plus 4705 * maximally sized IP and TCP or UDP headers. 4706 */ 4707 #define MAX_IP_HDR_LEN 128 4708 4709 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 4710 { 4711 unsigned int off; 4712 bool fragment; 4713 __sum16 *csum; 4714 int err; 4715 4716 fragment = false; 4717 4718 err = skb_maybe_pull_tail(skb, 4719 sizeof(struct iphdr), 4720 MAX_IP_HDR_LEN); 4721 if (err < 0) 4722 goto out; 4723 4724 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) 4725 fragment = true; 4726 4727 off = ip_hdrlen(skb); 4728 4729 err = -EPROTO; 4730 4731 if (fragment) 4732 goto out; 4733 4734 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 4735 if (IS_ERR(csum)) 4736 return PTR_ERR(csum); 4737 4738 if (recalculate) 4739 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 4740 ip_hdr(skb)->daddr, 4741 skb->len - off, 4742 ip_hdr(skb)->protocol, 0); 4743 err = 0; 4744 4745 out: 4746 return err; 4747 } 4748 4749 /* This value should be large enough to cover a tagged ethernet header plus 4750 * an IPv6 header, all options, and a maximal TCP or UDP header. 
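 *
 * As a rough illustration (these numbers are not computed anywhere in this
 * file): an 18 byte VLAN-tagged ethernet header, a 40 byte IPv6 header and
 * a 60 byte TCP header with options add up to 118 bytes, leaving well over
 * 100 bytes of the 256 byte budget below for extension headers.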
4751 */ 4752 #define MAX_IPV6_HDR_LEN 256 4753 4754 #define OPT_HDR(type, skb, off) \ 4755 (type *)(skb_network_header(skb) + (off)) 4756 4757 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 4758 { 4759 int err; 4760 u8 nexthdr; 4761 unsigned int off; 4762 unsigned int len; 4763 bool fragment; 4764 bool done; 4765 __sum16 *csum; 4766 4767 fragment = false; 4768 done = false; 4769 4770 off = sizeof(struct ipv6hdr); 4771 4772 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 4773 if (err < 0) 4774 goto out; 4775 4776 nexthdr = ipv6_hdr(skb)->nexthdr; 4777 4778 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 4779 while (off <= len && !done) { 4780 switch (nexthdr) { 4781 case IPPROTO_DSTOPTS: 4782 case IPPROTO_HOPOPTS: 4783 case IPPROTO_ROUTING: { 4784 struct ipv6_opt_hdr *hp; 4785 4786 err = skb_maybe_pull_tail(skb, 4787 off + 4788 sizeof(struct ipv6_opt_hdr), 4789 MAX_IPV6_HDR_LEN); 4790 if (err < 0) 4791 goto out; 4792 4793 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 4794 nexthdr = hp->nexthdr; 4795 off += ipv6_optlen(hp); 4796 break; 4797 } 4798 case IPPROTO_AH: { 4799 struct ip_auth_hdr *hp; 4800 4801 err = skb_maybe_pull_tail(skb, 4802 off + 4803 sizeof(struct ip_auth_hdr), 4804 MAX_IPV6_HDR_LEN); 4805 if (err < 0) 4806 goto out; 4807 4808 hp = OPT_HDR(struct ip_auth_hdr, skb, off); 4809 nexthdr = hp->nexthdr; 4810 off += ipv6_authlen(hp); 4811 break; 4812 } 4813 case IPPROTO_FRAGMENT: { 4814 struct frag_hdr *hp; 4815 4816 err = skb_maybe_pull_tail(skb, 4817 off + 4818 sizeof(struct frag_hdr), 4819 MAX_IPV6_HDR_LEN); 4820 if (err < 0) 4821 goto out; 4822 4823 hp = OPT_HDR(struct frag_hdr, skb, off); 4824 4825 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 4826 fragment = true; 4827 4828 nexthdr = hp->nexthdr; 4829 off += sizeof(struct frag_hdr); 4830 break; 4831 } 4832 default: 4833 done = true; 4834 break; 4835 } 4836 } 4837 4838 err = -EPROTO; 4839 4840 if (!done || fragment) 4841 goto out; 4842 4843 csum = skb_checksum_setup_ip(skb, nexthdr, off); 4844 if (IS_ERR(csum)) 4845 return PTR_ERR(csum); 4846 4847 if (recalculate) 4848 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 4849 &ipv6_hdr(skb)->daddr, 4850 skb->len - off, nexthdr, 0); 4851 err = 0; 4852 4853 out: 4854 return err; 4855 } 4856 4857 /** 4858 * skb_checksum_setup - set up partial checksum offset 4859 * @skb: the skb to set up 4860 * @recalculate: if true the pseudo-header checksum will be recalculated 4861 */ 4862 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) 4863 { 4864 int err; 4865 4866 switch (skb->protocol) { 4867 case htons(ETH_P_IP): 4868 err = skb_checksum_setup_ipv4(skb, recalculate); 4869 break; 4870 4871 case htons(ETH_P_IPV6): 4872 err = skb_checksum_setup_ipv6(skb, recalculate); 4873 break; 4874 4875 default: 4876 err = -EPROTO; 4877 break; 4878 } 4879 4880 return err; 4881 } 4882 EXPORT_SYMBOL(skb_checksum_setup); 4883 4884 /** 4885 * skb_checksum_maybe_trim - maybe trims the given skb 4886 * @skb: the skb to check 4887 * @transport_len: the data length beyond the network header 4888 * 4889 * Checks whether the given skb has data beyond the given transport length. 4890 * If so, returns a cloned skb trimmed to this transport length. 4891 * Otherwise returns the provided skb. Returns NULL in error cases 4892 * (e.g. transport_len exceeds skb length or out-of-memory). 4893 * 4894 * Caller needs to set the skb transport header and free any returned skb if it 4895 * differs from the provided skb. 
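 *
 * skb_checksum_trimmed() below is the only caller; it also frees the
 * trimmed clone again if the subsequent checksum validation fails.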
4896 */ 4897 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 4898 unsigned int transport_len) 4899 { 4900 struct sk_buff *skb_chk; 4901 unsigned int len = skb_transport_offset(skb) + transport_len; 4902 int ret; 4903 4904 if (skb->len < len) 4905 return NULL; 4906 else if (skb->len == len) 4907 return skb; 4908 4909 skb_chk = skb_clone(skb, GFP_ATOMIC); 4910 if (!skb_chk) 4911 return NULL; 4912 4913 ret = pskb_trim_rcsum(skb_chk, len); 4914 if (ret) { 4915 kfree_skb(skb_chk); 4916 return NULL; 4917 } 4918 4919 return skb_chk; 4920 } 4921 4922 /** 4923 * skb_checksum_trimmed - validate checksum of an skb 4924 * @skb: the skb to check 4925 * @transport_len: the data length beyond the network header 4926 * @skb_chkf: checksum function to use 4927 * 4928 * Applies the given checksum function skb_chkf to the provided skb. 4929 * Returns a checked and maybe trimmed skb. Returns NULL on error. 4930 * 4931 * If the skb has data beyond the given transport length, then a 4932 * trimmed & cloned skb is checked and returned. 4933 * 4934 * Caller needs to set the skb transport header and free any returned skb if it 4935 * differs from the provided skb. 4936 */ 4937 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 4938 unsigned int transport_len, 4939 __sum16(*skb_chkf)(struct sk_buff *skb)) 4940 { 4941 struct sk_buff *skb_chk; 4942 unsigned int offset = skb_transport_offset(skb); 4943 __sum16 ret; 4944 4945 skb_chk = skb_checksum_maybe_trim(skb, transport_len); 4946 if (!skb_chk) 4947 goto err; 4948 4949 if (!pskb_may_pull(skb_chk, offset)) 4950 goto err; 4951 4952 skb_pull_rcsum(skb_chk, offset); 4953 ret = skb_chkf(skb_chk); 4954 skb_push_rcsum(skb_chk, offset); 4955 4956 if (ret) 4957 goto err; 4958 4959 return skb_chk; 4960 4961 err: 4962 if (skb_chk && skb_chk != skb) 4963 kfree_skb(skb_chk); 4964 4965 return NULL; 4966 4967 } 4968 EXPORT_SYMBOL(skb_checksum_trimmed); 4969 4970 void __skb_warn_lro_forwarding(const struct sk_buff *skb) 4971 { 4972 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 4973 skb->dev->name); 4974 } 4975 EXPORT_SYMBOL(__skb_warn_lro_forwarding); 4976 4977 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 4978 { 4979 if (head_stolen) { 4980 skb_release_head_state(skb); 4981 kmem_cache_free(skbuff_head_cache, skb); 4982 } else { 4983 __kfree_skb(skb); 4984 } 4985 } 4986 EXPORT_SYMBOL(kfree_skb_partial); 4987 4988 /** 4989 * skb_try_coalesce - try to merge skb to prior one 4990 * @to: prior buffer 4991 * @from: buffer to add 4992 * @fragstolen: pointer to boolean 4993 * @delta_truesize: how much more was allocated than was requested 4994 */ 4995 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 4996 bool *fragstolen, int *delta_truesize) 4997 { 4998 struct skb_shared_info *to_shinfo, *from_shinfo; 4999 int i, delta, len = from->len; 5000 5001 *fragstolen = false; 5002 5003 if (skb_cloned(to)) 5004 return false; 5005 5006 if (len <= skb_tailroom(to)) { 5007 if (len) 5008 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 5009 *delta_truesize = 0; 5010 return true; 5011 } 5012 5013 to_shinfo = skb_shinfo(to); 5014 from_shinfo = skb_shinfo(from); 5015 if (to_shinfo->frag_list || from_shinfo->frag_list) 5016 return false; 5017 if (skb_zcopy(to) || skb_zcopy(from)) 5018 return false; 5019 5020 if (skb_headlen(from) != 0) { 5021 struct page *page; 5022 unsigned int offset; 5023 5024 if (to_shinfo->nr_frags + 5025 from_shinfo->nr_frags >= MAX_SKB_FRAGS) 5026 return false; 5027 5028 if 
(skb_head_is_locked(from))
5029 			return false;
5030 
5031 		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
5032 
5033 		page = virt_to_head_page(from->head);
5034 		offset = from->data - (unsigned char *)page_address(page);
5035 
5036 		skb_fill_page_desc(to, to_shinfo->nr_frags,
5037 				   page, offset, skb_headlen(from));
5038 		*fragstolen = true;
5039 	} else {
5040 		if (to_shinfo->nr_frags +
5041 		    from_shinfo->nr_frags > MAX_SKB_FRAGS)
5042 			return false;
5043 
5044 		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
5045 	}
5046 
5047 	WARN_ON_ONCE(delta < len);
5048 
5049 	memcpy(to_shinfo->frags + to_shinfo->nr_frags,
5050 	       from_shinfo->frags,
5051 	       from_shinfo->nr_frags * sizeof(skb_frag_t));
5052 	to_shinfo->nr_frags += from_shinfo->nr_frags;
5053 
5054 	if (!skb_cloned(from))
5055 		from_shinfo->nr_frags = 0;
5056 
5057 	/* If the skb is not cloned this does nothing
5058 	 * since we set nr_frags to 0.
5059 	 */
5060 	for (i = 0; i < from_shinfo->nr_frags; i++)
5061 		__skb_frag_ref(&from_shinfo->frags[i]);
5062 
5063 	to->truesize += delta;
5064 	to->len += len;
5065 	to->data_len += len;
5066 
5067 	*delta_truesize = delta;
5068 	return true;
5069 }
5070 EXPORT_SYMBOL(skb_try_coalesce);
5071 
5072 /**
5073  * skb_scrub_packet - scrub an skb
5074  *
5075  * @skb: buffer to clean
5076  * @xnet: packet is crossing netns
5077  *
5078  * skb_scrub_packet can be used after encapsulating or decapsulating a packet
5079  * into/from a tunnel. Some information has to be cleared during these
5080  * operations.
5081  * skb_scrub_packet can also be used to clean an skb before injecting it into
5082  * another namespace (@xnet == true). We have to clear all information in the
5083  * skb that could impact namespace isolation.
5084  */
5085 void skb_scrub_packet(struct sk_buff *skb, bool xnet)
5086 {
5087 	skb->pkt_type = PACKET_HOST;
5088 	skb->skb_iif = 0;
5089 	skb->ignore_df = 0;
5090 	skb_dst_drop(skb);
5091 	secpath_reset(skb);
5092 	nf_reset(skb);
5093 	nf_reset_trace(skb);
5094 
5095 #ifdef CONFIG_NET_SWITCHDEV
5096 	skb->offload_fwd_mark = 0;
5097 	skb->offload_l3_fwd_mark = 0;
5098 #endif
5099 
5100 	if (!xnet)
5101 		return;
5102 
5103 	ipvs_reset(skb);
5104 	skb->mark = 0;
5105 	skb->tstamp = 0;
5106 }
5107 EXPORT_SYMBOL_GPL(skb_scrub_packet);
5108 
5109 /**
5110  * skb_gso_transport_seglen - Return length of individual segments of a gso packet
5111  *
5112  * @skb: GSO skb
5113  *
5114  * skb_gso_transport_seglen is used to determine the real size of the
5115  * individual segments, including Layer4 headers (TCP/UDP).
5116  *
5117  * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
5118  */
5119 static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
5120 {
5121 	const struct skb_shared_info *shinfo = skb_shinfo(skb);
5122 	unsigned int thlen = 0;
5123 
5124 	if (skb->encapsulation) {
5125 		thlen = skb_inner_transport_header(skb) -
5126 			skb_transport_header(skb);
5127 
5128 		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
5129 			thlen += inner_tcp_hdrlen(skb);
5130 	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
5131 		thlen = tcp_hdrlen(skb);
5132 	} else if (unlikely(skb_is_gso_sctp(skb))) {
5133 		thlen = sizeof(struct sctphdr);
5134 	} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
5135 		thlen = sizeof(struct udphdr);
5136 	}
5137 	/* UFO sets gso_size to the size of the fragmentation
5138 	 * payload, i.e. the size of the L4 (UDP) header is already
5139 	 * accounted for.
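	 *
	 * For a plain TCP GSO packet with, say, a 32 byte TCP header and a
	 * gso_size of 1448, the value returned below would be 1480 (an
	 * illustrative example, not a value computed anywhere in this file).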
5140 */ 5141 return thlen + shinfo->gso_size; 5142 } 5143 5144 /** 5145 * skb_gso_network_seglen - Return length of individual segments of a gso packet 5146 * 5147 * @skb: GSO skb 5148 * 5149 * skb_gso_network_seglen is used to determine the real size of the 5150 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). 5151 * 5152 * The MAC/L2 header is not accounted for. 5153 */ 5154 static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) 5155 { 5156 unsigned int hdr_len = skb_transport_header(skb) - 5157 skb_network_header(skb); 5158 5159 return hdr_len + skb_gso_transport_seglen(skb); 5160 } 5161 5162 /** 5163 * skb_gso_mac_seglen - Return length of individual segments of a gso packet 5164 * 5165 * @skb: GSO skb 5166 * 5167 * skb_gso_mac_seglen is used to determine the real size of the 5168 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 5169 * headers (TCP/UDP). 5170 */ 5171 static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) 5172 { 5173 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 5174 5175 return hdr_len + skb_gso_transport_seglen(skb); 5176 } 5177 5178 /** 5179 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS 5180 * 5181 * There are a couple of instances where we have a GSO skb, and we 5182 * want to determine what size it would be after it is segmented. 5183 * 5184 * We might want to check: 5185 * - L3+L4+payload size (e.g. IP forwarding) 5186 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver) 5187 * 5188 * This is a helper to do that correctly considering GSO_BY_FRAGS. 5189 * 5190 * @skb: GSO skb 5191 * 5192 * @seg_len: The segmented length (from skb_gso_*_seglen). In the 5193 * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS]. 5194 * 5195 * @max_len: The maximum permissible length. 5196 * 5197 * Returns true if the segmented length <= max length. 5198 */ 5199 static inline bool skb_gso_size_check(const struct sk_buff *skb, 5200 unsigned int seg_len, 5201 unsigned int max_len) { 5202 const struct skb_shared_info *shinfo = skb_shinfo(skb); 5203 const struct sk_buff *iter; 5204 5205 if (shinfo->gso_size != GSO_BY_FRAGS) 5206 return seg_len <= max_len; 5207 5208 /* Undo this so we can re-use header sizes */ 5209 seg_len -= GSO_BY_FRAGS; 5210 5211 skb_walk_frags(skb, iter) { 5212 if (seg_len + skb_headlen(iter) > max_len) 5213 return false; 5214 } 5215 5216 return true; 5217 } 5218 5219 /** 5220 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU? 5221 * 5222 * @skb: GSO skb 5223 * @mtu: MTU to validate against 5224 * 5225 * skb_gso_validate_network_len validates if a given skb will fit a 5226 * wanted MTU once split. It considers L3 headers, L4 headers, and the 5227 * payload. 5228 */ 5229 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) 5230 { 5231 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); 5232 } 5233 EXPORT_SYMBOL_GPL(skb_gso_validate_network_len); 5234 5235 /** 5236 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? 5237 * 5238 * @skb: GSO skb 5239 * @len: length to validate against 5240 * 5241 * skb_gso_validate_mac_len validates if a given skb will fit a wanted 5242 * length once split, including L2, L3 and L4 headers and the payload. 
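 *
 * A hypothetical caller with a hard per-frame limit could use it as:
 *
 *	if (skb_is_gso(skb) && !skb_gso_validate_mac_len(skb, max_frame_len))
 *		goto drop;
 *
 * where max_frame_len stands for whatever L2 frame size the device or
 * policy allows.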
5243 */ 5244 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len) 5245 { 5246 return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len); 5247 } 5248 EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len); 5249 5250 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 5251 { 5252 int mac_len, meta_len; 5253 void *meta; 5254 5255 if (skb_cow(skb, skb_headroom(skb)) < 0) { 5256 kfree_skb(skb); 5257 return NULL; 5258 } 5259 5260 mac_len = skb->data - skb_mac_header(skb); 5261 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { 5262 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), 5263 mac_len - VLAN_HLEN - ETH_TLEN); 5264 } 5265 5266 meta_len = skb_metadata_len(skb); 5267 if (meta_len) { 5268 meta = skb_metadata_end(skb) - meta_len; 5269 memmove(meta + VLAN_HLEN, meta, meta_len); 5270 } 5271 5272 skb->mac_header += VLAN_HLEN; 5273 return skb; 5274 } 5275 5276 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) 5277 { 5278 struct vlan_hdr *vhdr; 5279 u16 vlan_tci; 5280 5281 if (unlikely(skb_vlan_tag_present(skb))) { 5282 /* vlan_tci is already set-up so leave this for another time */ 5283 return skb; 5284 } 5285 5286 skb = skb_share_check(skb, GFP_ATOMIC); 5287 if (unlikely(!skb)) 5288 goto err_free; 5289 5290 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) 5291 goto err_free; 5292 5293 vhdr = (struct vlan_hdr *)skb->data; 5294 vlan_tci = ntohs(vhdr->h_vlan_TCI); 5295 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); 5296 5297 skb_pull_rcsum(skb, VLAN_HLEN); 5298 vlan_set_encap_proto(skb, vhdr); 5299 5300 skb = skb_reorder_vlan_header(skb); 5301 if (unlikely(!skb)) 5302 goto err_free; 5303 5304 skb_reset_network_header(skb); 5305 skb_reset_transport_header(skb); 5306 skb_reset_mac_len(skb); 5307 5308 return skb; 5309 5310 err_free: 5311 kfree_skb(skb); 5312 return NULL; 5313 } 5314 EXPORT_SYMBOL(skb_vlan_untag); 5315 5316 int skb_ensure_writable(struct sk_buff *skb, int write_len) 5317 { 5318 if (!pskb_may_pull(skb, write_len)) 5319 return -ENOMEM; 5320 5321 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) 5322 return 0; 5323 5324 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 5325 } 5326 EXPORT_SYMBOL(skb_ensure_writable); 5327 5328 /* remove VLAN header from packet and update csum accordingly. 5329 * expects a non skb_vlan_tag_present skb with a vlan tag payload 5330 */ 5331 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) 5332 { 5333 struct vlan_hdr *vhdr; 5334 int offset = skb->data - skb_mac_header(skb); 5335 int err; 5336 5337 if (WARN_ONCE(offset, 5338 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", 5339 offset)) { 5340 return -EINVAL; 5341 } 5342 5343 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); 5344 if (unlikely(err)) 5345 return err; 5346 5347 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 5348 5349 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); 5350 *vlan_tci = ntohs(vhdr->h_vlan_TCI); 5351 5352 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); 5353 __skb_pull(skb, VLAN_HLEN); 5354 5355 vlan_set_encap_proto(skb, vhdr); 5356 skb->mac_header += VLAN_HLEN; 5357 5358 if (skb_network_offset(skb) < ETH_HLEN) 5359 skb_set_network_header(skb, ETH_HLEN); 5360 5361 skb_reset_mac_len(skb); 5362 5363 return err; 5364 } 5365 EXPORT_SYMBOL(__skb_vlan_pop); 5366 5367 /* Pop a vlan tag either from hwaccel or from payload. 5368 * Expects skb->data at mac header. 
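 *
 * If a hardware-accelerated tag is present it is simply cleared; otherwise
 * the outermost tag is stripped from the packet data. If the remaining
 * packet still starts with a VLAN ethertype, that next tag is then moved
 * into the hardware-accelerated slot.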
5369 */ 5370 int skb_vlan_pop(struct sk_buff *skb) 5371 { 5372 u16 vlan_tci; 5373 __be16 vlan_proto; 5374 int err; 5375 5376 if (likely(skb_vlan_tag_present(skb))) { 5377 __vlan_hwaccel_clear_tag(skb); 5378 } else { 5379 if (unlikely(!eth_type_vlan(skb->protocol))) 5380 return 0; 5381 5382 err = __skb_vlan_pop(skb, &vlan_tci); 5383 if (err) 5384 return err; 5385 } 5386 /* move next vlan tag to hw accel tag */ 5387 if (likely(!eth_type_vlan(skb->protocol))) 5388 return 0; 5389 5390 vlan_proto = skb->protocol; 5391 err = __skb_vlan_pop(skb, &vlan_tci); 5392 if (unlikely(err)) 5393 return err; 5394 5395 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 5396 return 0; 5397 } 5398 EXPORT_SYMBOL(skb_vlan_pop); 5399 5400 /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present). 5401 * Expects skb->data at mac header. 5402 */ 5403 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) 5404 { 5405 if (skb_vlan_tag_present(skb)) { 5406 int offset = skb->data - skb_mac_header(skb); 5407 int err; 5408 5409 if (WARN_ONCE(offset, 5410 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", 5411 offset)) { 5412 return -EINVAL; 5413 } 5414 5415 err = __vlan_insert_tag(skb, skb->vlan_proto, 5416 skb_vlan_tag_get(skb)); 5417 if (err) 5418 return err; 5419 5420 skb->protocol = skb->vlan_proto; 5421 skb->mac_len += VLAN_HLEN; 5422 5423 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 5424 } 5425 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 5426 return 0; 5427 } 5428 EXPORT_SYMBOL(skb_vlan_push); 5429 5430 /* Update the ethertype of hdr and the skb csum value if required. */ 5431 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, 5432 __be16 ethertype) 5433 { 5434 if (skb->ip_summed == CHECKSUM_COMPLETE) { 5435 __be16 diff[] = { ~hdr->h_proto, ethertype }; 5436 5437 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 5438 } 5439 5440 hdr->h_proto = ethertype; 5441 } 5442 5443 /** 5444 * skb_mpls_push() - push a new MPLS header after the mac header 5445 * 5446 * @skb: buffer 5447 * @mpls_lse: MPLS label stack entry to push 5448 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) 5449 * 5450 * Expects skb->data at mac header. 5451 * 5452 * Returns 0 on success, -errno otherwise. 5453 */ 5454 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto) 5455 { 5456 struct mpls_shim_hdr *lse; 5457 int err; 5458 5459 if (unlikely(!eth_p_mpls(mpls_proto))) 5460 return -EINVAL; 5461 5462 /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. 
*/ 5463 if (skb->encapsulation) 5464 return -EINVAL; 5465 5466 err = skb_cow_head(skb, MPLS_HLEN); 5467 if (unlikely(err)) 5468 return err; 5469 5470 if (!skb->inner_protocol) { 5471 skb_set_inner_network_header(skb, skb->mac_len); 5472 skb_set_inner_protocol(skb, skb->protocol); 5473 } 5474 5475 skb_push(skb, MPLS_HLEN); 5476 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), 5477 skb->mac_len); 5478 skb_reset_mac_header(skb); 5479 skb_set_network_header(skb, skb->mac_len); 5480 5481 lse = mpls_hdr(skb); 5482 lse->label_stack_entry = mpls_lse; 5483 skb_postpush_rcsum(skb, lse, MPLS_HLEN); 5484 5485 if (skb->dev && skb->dev->type == ARPHRD_ETHER) 5486 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); 5487 skb->protocol = mpls_proto; 5488 5489 return 0; 5490 } 5491 EXPORT_SYMBOL_GPL(skb_mpls_push); 5492 5493 /** 5494 * skb_mpls_pop() - pop the outermost MPLS header 5495 * 5496 * @skb: buffer 5497 * @next_proto: ethertype of header after popped MPLS header 5498 * 5499 * Expects skb->data at mac header. 5500 * 5501 * Returns 0 on success, -errno otherwise. 5502 */ 5503 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto) 5504 { 5505 int err; 5506 5507 if (unlikely(!eth_p_mpls(skb->protocol))) 5508 return -EINVAL; 5509 5510 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); 5511 if (unlikely(err)) 5512 return err; 5513 5514 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); 5515 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), 5516 skb->mac_len); 5517 5518 __skb_pull(skb, MPLS_HLEN); 5519 skb_reset_mac_header(skb); 5520 skb_set_network_header(skb, skb->mac_len); 5521 5522 if (skb->dev && skb->dev->type == ARPHRD_ETHER) { 5523 struct ethhdr *hdr; 5524 5525 /* use mpls_hdr() to get ethertype to account for VLANs. */ 5526 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); 5527 skb_mod_eth_type(skb, hdr, next_proto); 5528 } 5529 skb->protocol = next_proto; 5530 5531 return 0; 5532 } 5533 EXPORT_SYMBOL_GPL(skb_mpls_pop); 5534 5535 /** 5536 * skb_mpls_update_lse() - modify outermost MPLS header and update csum 5537 * 5538 * @skb: buffer 5539 * @mpls_lse: new MPLS label stack entry to update to 5540 * 5541 * Expects skb->data at mac header. 5542 * 5543 * Returns 0 on success, -errno otherwise. 5544 */ 5545 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) 5546 { 5547 int err; 5548 5549 if (unlikely(!eth_p_mpls(skb->protocol))) 5550 return -EINVAL; 5551 5552 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); 5553 if (unlikely(err)) 5554 return err; 5555 5556 if (skb->ip_summed == CHECKSUM_COMPLETE) { 5557 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; 5558 5559 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 5560 } 5561 5562 mpls_hdr(skb)->label_stack_entry = mpls_lse; 5563 5564 return 0; 5565 } 5566 EXPORT_SYMBOL_GPL(skb_mpls_update_lse); 5567 5568 /** 5569 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header 5570 * 5571 * @skb: buffer 5572 * 5573 * Expects skb->data at mac header. 5574 * 5575 * Returns 0 on success, -errno otherwise. 
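 *
 * Also returns -EINVAL when the decremented TTL reaches zero, so a caller
 * would normally drop the packet rather than forward it in that case.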
5576 */ 5577 int skb_mpls_dec_ttl(struct sk_buff *skb) 5578 { 5579 u32 lse; 5580 u8 ttl; 5581 5582 if (unlikely(!eth_p_mpls(skb->protocol))) 5583 return -EINVAL; 5584 5585 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); 5586 ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; 5587 if (!--ttl) 5588 return -EINVAL; 5589 5590 lse &= ~MPLS_LS_TTL_MASK; 5591 lse |= ttl << MPLS_LS_TTL_SHIFT; 5592 5593 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); 5594 } 5595 EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl); 5596 5597 /** 5598 * alloc_skb_with_frags - allocate skb with page frags 5599 * 5600 * @header_len: size of linear part 5601 * @data_len: needed length in frags 5602 * @max_page_order: max page order desired. 5603 * @errcode: pointer to error code if any 5604 * @gfp_mask: allocation mask 5605 * 5606 * This can be used to allocate a paged skb, given a maximal order for frags. 5607 */ 5608 struct sk_buff *alloc_skb_with_frags(unsigned long header_len, 5609 unsigned long data_len, 5610 int max_page_order, 5611 int *errcode, 5612 gfp_t gfp_mask) 5613 { 5614 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 5615 unsigned long chunk; 5616 struct sk_buff *skb; 5617 struct page *page; 5618 int i; 5619 5620 *errcode = -EMSGSIZE; 5621 /* Note this test could be relaxed, if we succeed to allocate 5622 * high order pages... 5623 */ 5624 if (npages > MAX_SKB_FRAGS) 5625 return NULL; 5626 5627 *errcode = -ENOBUFS; 5628 skb = alloc_skb(header_len, gfp_mask); 5629 if (!skb) 5630 return NULL; 5631 5632 skb->truesize += npages << PAGE_SHIFT; 5633 5634 for (i = 0; npages > 0; i++) { 5635 int order = max_page_order; 5636 5637 while (order) { 5638 if (npages >= 1 << order) { 5639 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | 5640 __GFP_COMP | 5641 __GFP_NOWARN, 5642 order); 5643 if (page) 5644 goto fill_page; 5645 /* Do not retry other high order allocations */ 5646 order = 1; 5647 max_page_order = 0; 5648 } 5649 order--; 5650 } 5651 page = alloc_page(gfp_mask); 5652 if (!page) 5653 goto failure; 5654 fill_page: 5655 chunk = min_t(unsigned long, data_len, 5656 PAGE_SIZE << order); 5657 skb_fill_page_desc(skb, i, page, 0, chunk); 5658 data_len -= chunk; 5659 npages -= 1 << order; 5660 } 5661 return skb; 5662 5663 failure: 5664 kfree_skb(skb); 5665 return NULL; 5666 } 5667 EXPORT_SYMBOL(alloc_skb_with_frags); 5668 5669 /* carve out the first off bytes from skb when off < headlen */ 5670 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, 5671 const int headlen, gfp_t gfp_mask) 5672 { 5673 int i; 5674 int size = skb_end_offset(skb); 5675 int new_hlen = headlen - off; 5676 u8 *data; 5677 5678 size = SKB_DATA_ALIGN(size); 5679 5680 if (skb_pfmemalloc(skb)) 5681 gfp_mask |= __GFP_MEMALLOC; 5682 data = kmalloc_reserve(size + 5683 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 5684 gfp_mask, NUMA_NO_NODE, NULL); 5685 if (!data) 5686 return -ENOMEM; 5687 5688 size = SKB_WITH_OVERHEAD(ksize(data)); 5689 5690 /* Copy real data, and all frags */ 5691 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); 5692 skb->len -= off; 5693 5694 memcpy((struct skb_shared_info *)(data + size), 5695 skb_shinfo(skb), 5696 offsetof(struct skb_shared_info, 5697 frags[skb_shinfo(skb)->nr_frags])); 5698 if (skb_cloned(skb)) { 5699 /* drop the old head gracefully */ 5700 if (skb_orphan_frags(skb, gfp_mask)) { 5701 kfree(data); 5702 return -ENOMEM; 5703 } 5704 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 5705 skb_frag_ref(skb, i); 5706 if (skb_has_frag_list(skb)) 5707 skb_clone_fraglist(skb); 5708 
skb_release_data(skb);
5709 	} else {
5710 		/* We can reuse the existing refcount - all we did was
5711 		 * relocate values
5712 		 */
5713 		skb_free_head(skb);
5714 	}
5715 
5716 	skb->head = data;
5717 	skb->data = data;
5718 	skb->head_frag = 0;
5719 #ifdef NET_SKBUFF_DATA_USES_OFFSET
5720 	skb->end = size;
5721 #else
5722 	skb->end = skb->head + size;
5723 #endif
5724 	skb_set_tail_pointer(skb, skb_headlen(skb));
5725 	skb_headers_offset_update(skb, 0);
5726 	skb->cloned = 0;
5727 	skb->hdr_len = 0;
5728 	skb->nohdr = 0;
5729 	atomic_set(&skb_shinfo(skb)->dataref, 1);
5730 
5731 	return 0;
5732 }
5733 
5734 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
5735 
5736 /* carve out the first eat bytes from skb's frag_list. May recurse into
5737  * pskb_carve()
5738  */
5739 static int pskb_carve_frag_list(struct sk_buff *skb,
5740 				struct skb_shared_info *shinfo, int eat,
5741 				gfp_t gfp_mask)
5742 {
5743 	struct sk_buff *list = shinfo->frag_list;
5744 	struct sk_buff *clone = NULL;
5745 	struct sk_buff *insp = NULL;
5746 
5747 	do {
5748 		if (!list) {
5749 			pr_err("Not enough bytes to eat. Want %d\n", eat);
5750 			return -EFAULT;
5751 		}
5752 		if (list->len <= eat) {
5753 			/* Eaten as whole. */
5754 			eat -= list->len;
5755 			list = list->next;
5756 			insp = list;
5757 		} else {
5758 			/* Eaten partially. */
5759 			if (skb_shared(list)) {
5760 				clone = skb_clone(list, gfp_mask);
5761 				if (!clone)
5762 					return -ENOMEM;
5763 				insp = list->next;
5764 				list = clone;
5765 			} else {
5766 				/* This may be pulled without problems. */
5767 				insp = list;
5768 			}
5769 			if (pskb_carve(list, eat, gfp_mask) < 0) {
5770 				kfree_skb(clone);
5771 				return -ENOMEM;
5772 			}
5773 			break;
5774 		}
5775 	} while (eat);
5776 
5777 	/* Free pulled out fragments. */
5778 	while ((list = shinfo->frag_list) != insp) {
5779 		shinfo->frag_list = list->next;
5780 		kfree_skb(list);
5781 	}
5782 	/* And insert new clone at head. */
5783 	if (clone) {
5784 		clone->next = list;
5785 		shinfo->frag_list = clone;
5786 	}
5787 	return 0;
5788 }
5789 
5790 /* carve off first len bytes from skb. Split line (off) is in the
5791  * non-linear part of skb
5792  */
5793 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
5794 				       int pos, gfp_t gfp_mask)
5795 {
5796 	int i, k = 0;
5797 	int size = skb_end_offset(skb);
5798 	u8 *data;
5799 	const int nfrags = skb_shinfo(skb)->nr_frags;
5800 	struct skb_shared_info *shinfo;
5801 
5802 	size = SKB_DATA_ALIGN(size);
5803 
5804 	if (skb_pfmemalloc(skb))
5805 		gfp_mask |= __GFP_MEMALLOC;
5806 	data = kmalloc_reserve(size +
5807 			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5808 			       gfp_mask, NUMA_NO_NODE, NULL);
5809 	if (!data)
5810 		return -ENOMEM;
5811 
5812 	size = SKB_WITH_OVERHEAD(ksize(data));
5813 
5814 	memcpy((struct skb_shared_info *)(data + size),
5815 	       skb_shinfo(skb), offsetof(struct skb_shared_info,
5816 					 frags[skb_shinfo(skb)->nr_frags]));
5817 	if (skb_orphan_frags(skb, gfp_mask)) {
5818 		kfree(data);
5819 		return -ENOMEM;
5820 	}
5821 	shinfo = (struct skb_shared_info *)(data + size);
5822 	for (i = 0; i < nfrags; i++) {
5823 		int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
5824 
5825 		if (pos + fsize > off) {
5826 			shinfo->frags[k] = skb_shinfo(skb)->frags[i];
5827 
5828 			if (pos < off) {
5829 				/* Split frag.
5830 				 * We have two variants in this case:
5831 				 * 1. Move the whole frag to the second
5832 				 *    part, if it is possible. E.g.
5833 				 *    this approach is mandatory for TUX,
5834 				 *    where splitting is expensive.
5835 				 * 2. Split the frag at the split point. This is what we do here.
5836 */ 5837 shinfo->frags[0].page_offset += off - pos; 5838 skb_frag_size_sub(&shinfo->frags[0], off - pos); 5839 } 5840 skb_frag_ref(skb, i); 5841 k++; 5842 } 5843 pos += fsize; 5844 } 5845 shinfo->nr_frags = k; 5846 if (skb_has_frag_list(skb)) 5847 skb_clone_fraglist(skb); 5848 5849 if (k == 0) { 5850 /* split line is in frag list */ 5851 pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask); 5852 } 5853 skb_release_data(skb); 5854 5855 skb->head = data; 5856 skb->head_frag = 0; 5857 skb->data = data; 5858 #ifdef NET_SKBUFF_DATA_USES_OFFSET 5859 skb->end = size; 5860 #else 5861 skb->end = skb->head + size; 5862 #endif 5863 skb_reset_tail_pointer(skb); 5864 skb_headers_offset_update(skb, 0); 5865 skb->cloned = 0; 5866 skb->hdr_len = 0; 5867 skb->nohdr = 0; 5868 skb->len -= off; 5869 skb->data_len = skb->len; 5870 atomic_set(&skb_shinfo(skb)->dataref, 1); 5871 return 0; 5872 } 5873 5874 /* remove len bytes from the beginning of the skb */ 5875 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) 5876 { 5877 int headlen = skb_headlen(skb); 5878 5879 if (len < headlen) 5880 return pskb_carve_inside_header(skb, len, headlen, gfp); 5881 else 5882 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); 5883 } 5884 5885 /* Extract to_copy bytes starting at off from skb, and return this in 5886 * a new skb 5887 */ 5888 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, 5889 int to_copy, gfp_t gfp) 5890 { 5891 struct sk_buff *clone = skb_clone(skb, gfp); 5892 5893 if (!clone) 5894 return NULL; 5895 5896 if (pskb_carve(clone, off, gfp) < 0 || 5897 pskb_trim(clone, to_copy)) { 5898 kfree_skb(clone); 5899 return NULL; 5900 } 5901 return clone; 5902 } 5903 EXPORT_SYMBOL(pskb_extract); 5904 5905 /** 5906 * skb_condense - try to get rid of fragments/frag_list if possible 5907 * @skb: buffer 5908 * 5909 * Can be used to save memory before skb is added to a busy queue. 5910 * If packet has bytes in frags and enough tail room in skb->head, 5911 * pull all of them, so that we can free the frags right now and adjust 5912 * truesize. 5913 * Notes: 5914 * We do not reallocate skb->head thus can not fail. 5915 * Caller must re-evaluate skb->truesize if needed. 5916 */ 5917 void skb_condense(struct sk_buff *skb) 5918 { 5919 if (skb->data_len) { 5920 if (skb->data_len > skb->end - skb->tail || 5921 skb_cloned(skb)) 5922 return; 5923 5924 /* Nice, we can free page frag(s) right now */ 5925 __pskb_pull_tail(skb, skb->data_len); 5926 } 5927 /* At this point, skb->truesize might be over estimated, 5928 * because skb had a fragment, and fragments do not tell 5929 * their truesize. 5930 * When we pulled its content into skb->head, fragment 5931 * was freed, but __pskb_pull_tail() could not possibly 5932 * adjust skb->truesize, not knowing the frag truesize. 
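	 *
	 * The assignment below therefore resets truesize to an estimate based
	 * on the linear buffer alone; as noted in the function comment above,
	 * the caller must re-evaluate skb->truesize if it matters.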
5933 */ 5934 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 5935 } 5936 5937 #ifdef CONFIG_SKB_EXTENSIONS 5938 static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id) 5939 { 5940 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE); 5941 } 5942 5943 static struct skb_ext *skb_ext_alloc(void) 5944 { 5945 struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC); 5946 5947 if (new) { 5948 memset(new->offset, 0, sizeof(new->offset)); 5949 refcount_set(&new->refcnt, 1); 5950 } 5951 5952 return new; 5953 } 5954 5955 static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old, 5956 unsigned int old_active) 5957 { 5958 struct skb_ext *new; 5959 5960 if (refcount_read(&old->refcnt) == 1) 5961 return old; 5962 5963 new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC); 5964 if (!new) 5965 return NULL; 5966 5967 memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE); 5968 refcount_set(&new->refcnt, 1); 5969 5970 #ifdef CONFIG_XFRM 5971 if (old_active & (1 << SKB_EXT_SEC_PATH)) { 5972 struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH); 5973 unsigned int i; 5974 5975 for (i = 0; i < sp->len; i++) 5976 xfrm_state_hold(sp->xvec[i]); 5977 } 5978 #endif 5979 __skb_ext_put(old); 5980 return new; 5981 } 5982 5983 /** 5984 * skb_ext_add - allocate space for given extension, COW if needed 5985 * @skb: buffer 5986 * @id: extension to allocate space for 5987 * 5988 * Allocates enough space for the given extension. 5989 * If the extension is already present, a pointer to that extension 5990 * is returned. 5991 * 5992 * If the skb was cloned, COW applies and the returned memory can be 5993 * modified without changing the extension space of clones buffers. 5994 * 5995 * Returns pointer to the extension or NULL on allocation failure. 5996 */ 5997 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) 5998 { 5999 struct skb_ext *new, *old = NULL; 6000 unsigned int newlen, newoff; 6001 6002 if (skb->active_extensions) { 6003 old = skb->extensions; 6004 6005 new = skb_ext_maybe_cow(old, skb->active_extensions); 6006 if (!new) 6007 return NULL; 6008 6009 if (__skb_ext_exist(new, id)) 6010 goto set_active; 6011 6012 newoff = new->chunks; 6013 } else { 6014 newoff = SKB_EXT_CHUNKSIZEOF(*new); 6015 6016 new = skb_ext_alloc(); 6017 if (!new) 6018 return NULL; 6019 } 6020 6021 newlen = newoff + skb_ext_type_len[id]; 6022 new->chunks = newlen; 6023 new->offset[id] = newoff; 6024 set_active: 6025 skb->extensions = new; 6026 skb->active_extensions |= 1 << id; 6027 return skb_ext_get_ptr(new, id); 6028 } 6029 EXPORT_SYMBOL(skb_ext_add); 6030 6031 #ifdef CONFIG_XFRM 6032 static void skb_ext_put_sp(struct sec_path *sp) 6033 { 6034 unsigned int i; 6035 6036 for (i = 0; i < sp->len; i++) 6037 xfrm_state_put(sp->xvec[i]); 6038 } 6039 #endif 6040 6041 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) 6042 { 6043 struct skb_ext *ext = skb->extensions; 6044 6045 skb->active_extensions &= ~(1 << id); 6046 if (skb->active_extensions == 0) { 6047 skb->extensions = NULL; 6048 __skb_ext_put(ext); 6049 #ifdef CONFIG_XFRM 6050 } else if (id == SKB_EXT_SEC_PATH && 6051 refcount_read(&ext->refcnt) == 1) { 6052 struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH); 6053 6054 skb_ext_put_sp(sp); 6055 sp->len = 0; 6056 #endif 6057 } 6058 } 6059 EXPORT_SYMBOL(__skb_ext_del); 6060 6061 void __skb_ext_put(struct skb_ext *ext) 6062 { 6063 /* If this is last clone, nothing can increment 6064 * it after check passes. Avoids one atomic op. 
6065 */ 6066 if (refcount_read(&ext->refcnt) == 1) 6067 goto free_now; 6068 6069 if (!refcount_dec_and_test(&ext->refcnt)) 6070 return; 6071 free_now: 6072 #ifdef CONFIG_XFRM 6073 if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH)) 6074 skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH)); 6075 #endif 6076 6077 kmem_cache_free(skbuff_ext_cache, ext); 6078 } 6079 EXPORT_SYMBOL(__skb_ext_put); 6080 #endif /* CONFIG_SKB_EXTENSIONS */ 6081
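/* Usage sketch for the skb extension API above (illustrative only, not part
 * of the build; assumes CONFIG_XFRM so that SKB_EXT_SEC_PATH exists):
 *
 *	struct sec_path *sp;
 *
 *	sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
 *	if (!sp)
 *		return -ENOMEM;
 *
 * and, once the secpath is no longer needed:
 *
 *	skb_ext_del(skb, SKB_EXT_SEC_PATH);
 */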