/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation; if that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
{
	struct sk_buff *skb;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(skbuff_head_cache,
				    gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->head = NULL;
	skb->truesize = sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);

	skb->mac_header = (typeof(skb->mac_header))~0U;
out:
	return skb;
}

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;
	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
		fclones->skb2.pfmemalloc = pfmemalloc;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
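/*
 * Example (an illustrative sketch, not part of this file): typical callers
 * use the alloc_skb() wrapper rather than __alloc_skb() directly, then
 * partition the buffer with skb_reserve() and skb_put().  "hdr_len",
 * "payload" and "payload_len" below are assumed, made-up caller values.
 *
 *	struct sk_buff *skb = alloc_skb(hdr_len + payload_len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hdr_len);
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 */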
/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of fragment, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator.
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only the data buffer where the NIC puts
 *  the incoming frame.
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	skb->head_frag = frag_size != 0;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}
EXPORT_SYMBOL(build_skb);
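/*
 * Example (hypothetical driver RX path, for illustration only): the driver
 * posts a fragment sized per the note above, lets the NIC DMA a frame into
 * it, and only then wraps it with build_skb().  "buf", "len", "fragsz" and
 * the surrounding error handling are assumed driver-local details.
 *
 *	fragsz = SKB_DATA_ALIGN(NET_SKB_PAD + len) +
 *		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	buf = netdev_alloc_frag(fragsz);
 *	... NIC writes the frame at buf + NET_SKB_PAD ...
 *	skb = build_skb(buf, fragsz);
 *	if (unlikely(!skb)) {
 *		put_page(virt_to_head_page(buf));
 *		return NULL;
 *	}
 *	skb_reserve(skb, NET_SKB_PAD);
 *	skb_put(skb, len);
 */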
struct netdev_alloc_cache {
	struct page_frag	frag;
	/* we maintain a pagecount bias, so that we don't dirty the cache
	 * line containing page->_count every time we allocate a fragment.
	 */
	unsigned int		pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache);

static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
				       gfp_t gfp_mask)
{
	const unsigned int order = NETDEV_FRAG_PAGE_MAX_ORDER;
	struct page *page = NULL;
	gfp_t gfp = gfp_mask;

	if (order) {
		gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
		page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
		nc->frag.size = PAGE_SIZE << (page ? order : 0);
	}

	if (unlikely(!page))
		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);

	nc->frag.page = page;

	return page;
}

static void *__alloc_page_frag(struct netdev_alloc_cache __percpu *cache,
			       unsigned int fragsz, gfp_t gfp_mask)
{
	struct netdev_alloc_cache *nc = this_cpu_ptr(cache);
	struct page *page = nc->frag.page;
	unsigned int size;
	int offset;

	if (unlikely(!page)) {
refill:
		page = __page_frag_refill(nc, gfp_mask);
		if (!page)
			return NULL;

		/* if size can vary use frag.size else just use PAGE_SIZE */
		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;

		/* Even if we own the page, we do not use atomic_set().
		 * This would break get_page_unless_zero() users.
		 */
		atomic_add(size - 1, &page->_count);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = size;
		nc->frag.offset = size;
	}

	offset = nc->frag.offset - fragsz;
	if (unlikely(offset < 0)) {
		if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
			goto refill;

		/* if size can vary use frag.size else just use PAGE_SIZE */
		size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;

		/* OK, page count is 0, we can safely set it */
		atomic_set(&page->_count, size);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = size;
		offset = size - fragsz;
	}

	nc->pagecnt_bias--;
	nc->frag.offset = offset;

	return page_address(page) + offset;
}

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	unsigned long flags;
	void *data;

	local_irq_save(flags);
	data = __alloc_page_frag(&netdev_alloc_cache, fragsz, gfp_mask);
	local_irq_restore(flags);
	return data;
}

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for a receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(netdev_alloc_frag);

static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	return __alloc_page_frag(&napi_alloc_cache, fragsz, gfp_mask);
}

void *napi_alloc_frag(unsigned int fragsz)
{
	return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(napi_alloc_frag);
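/*
 * Example (sketch): a fragment returned here is just refcounted page
 * memory, so a caller that ends up not using it releases it through the
 * page allocator, exactly as __alloc_rx_skb() below does when build_skb()
 * fails.  "fragsz" and "some_error" are assumed caller-side names:
 *
 *	void *data = netdev_alloc_frag(fragsz);
 *
 *	if (data && some_error)
 *		put_page(virt_to_head_page(data));
 */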
/**
 *	__alloc_rx_skb - allocate an skbuff for rx
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *	@flags:	If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case we have to fall back to __alloc_skb()
 *		If SKB_ALLOC_NAPI is set, page fragment will be allocated
 *		from napi_cache instead of netdev_cache.
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static struct sk_buff *__alloc_rx_skb(unsigned int length, gfp_t gfp_mask,
				      int flags)
{
	struct sk_buff *skb = NULL;
	unsigned int fragsz = SKB_DATA_ALIGN(length) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
		void *data;

		if (sk_memalloc_socks())
			gfp_mask |= __GFP_MEMALLOC;

		data = (flags & SKB_ALLOC_NAPI) ?
			__napi_alloc_frag(fragsz, gfp_mask) :
			__netdev_alloc_frag(fragsz, gfp_mask);

		if (likely(data)) {
			skb = build_skb(data, fragsz);
			if (unlikely(!skb))
				put_page(virt_to_head_page(data));
		}
	} else {
		skb = __alloc_skb(length, gfp_mask,
				  SKB_ALLOC_RX, NUMA_NO_NODE);
	}
	return skb;
}

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	length += NET_SKB_PAD;
	skb = __alloc_rx_skb(length, gfp_mask, 0);

	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}

	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation.  By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	length += NET_SKB_PAD + NET_IP_ALIGN;
	skb = __alloc_rx_skb(length, gfp_mask, SKB_ALLOC_NAPI);

	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
		skb->dev = napi->dev;
	}

	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
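/*
 * Example (illustrative): most drivers go through the netdev_alloc_skb()
 * wrapper, which supplies GFP_ATOMIC, and only reserve their own alignment
 * on top of the built-in NET_SKB_PAD headroom.  "rx_len" is an assumed
 * driver-chosen size:
 *
 *	skb = netdev_alloc_skb(dev, rx_len + NET_IP_ALIGN);
 *	if (unlikely(!skb))
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_IP_ALIGN);
 */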
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	if (skb->head_frag)
		put_page(virt_to_head_page(skb->head));
	else
		kfree(skb->head);
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	/*
	 * If the skb buf is from userspace, we need to notify the caller
	 * that the lower device DMA is done;
	 */
	if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = shinfo->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, true);
	}

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_free_head(skb);
}

/*
 *	Free the memory of an skbuff without cleaning its state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before the
		 * original skb. This test would have no chance to be true
		 * for the clone, while here, branch prediction will be good.
		 */
		if (atomic_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!atomic_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);
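/*
 * Example (sketch): kfree_skb_list() walks the ->next chain, so it suits
 * privately built segment lists (e.g. GSO output) but must not be used on
 * skbs still queued on an sk_buff_head.  "segs" and "err" are assumed
 * caller-local names:
 *
 *	if (unlikely(err)) {
 *		kfree_skb_list(segs);
 *		return err;
 *	}
 */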
/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = skb_shinfo(skb)->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, false);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	}
}
EXPORT_SYMBOL(skb_tx_error);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb(), but kfree_skb() assumes that
 *	the frame is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);

/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\

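/*
 * The macro above is a compile-time layout assertion: if a field ever moves
 * outside the headers_start/headers_end window, the BUILD_BUG_ON()s break
 * the build instead of letting the memcpy() in __copy_skb_header() silently
 * miss it.  A minimal sketch of the same idiom, using one real field:
 *
 *	BUILD_BUG_ON(offsetof(struct sk_buff, mark) >
 *		     offsetof(struct sk_buff, headers_end));
 */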
static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	/* We do not copy old->sk */
	new->dev = old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	__nf_copy(new, old, false);

	/* Note : this field could be in headers_start/headers_end section.
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#ifdef CONFIG_NET_CLS_ACT
	CHECK_SKB_FIELD(tc_verd);
#endif
#endif

}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_atomic(skb_frag_page(f));
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_atomic(vaddr);
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg, false);

	/* skb frags point to kernel buffers */
	for (i = num_frags - 1; i >= 0; i--) {
		__skb_fill_page_desc(skb, i, head, 0,
				     skb_shinfo(skb)->frags[i].size);
		head = (struct page *)page_private(head);
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);
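/*
 * Example (sketch): a path about to modify the paged data of a zerocopy skb
 * first detaches the userspace buffers; skb_orphan_frags() performs exactly
 * this check-and-copy internally:
 *
 *	if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
 *	    skb_copy_ubufs(skb, GFP_ATOMIC))
 *		goto drop;
 */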
/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff_fclones *fclones = container_of(skb,
						       struct sk_buff_fclones,
						       skb1);
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	if (skb->fclone == SKB_FCLONE_ORIG &&
	    atomic_read(&fclones->fclone_ref) == 1) {
		n = &fclones->skb2;
		atomic_set(&fclones->fclone_ref, 2);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

static void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product, this function converts a non-linear &sk_buff into a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
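/*
 * Example (illustrative): choosing a copy flavour.  When only the headers
 * will be rewritten, pskb_copy() (a wrapper around __pskb_copy_fclone()
 * below) keeps the possibly large paged data shared instead of copying it:
 *
 *	struct sk_buff *nskb = pskb_copy(skb, GFP_ATOMIC);
 *
 *	if (!nskb)
 *		return -ENOMEM;
 */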
/**
 *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *	@fclone: if true allocate the copy of the skb from the fclone
 *	    cache instead of the head cache; it is recommended to set this
 *	    to true for the cases where the copy will likely be cloned
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in the header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only the header of the &sk_buff and
 *	needs a private copy of the header to alter. Returns %NULL on
 *	failure or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone)
{
	unsigned int size = skb_headlen(skb) + headroom;
	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy_fclone);

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero on success, or a negative error
 *	code if expansion failed. In the latter case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + skb_end_offset(skb) + ntail;
	long off;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be, since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		/* copy this zero copy skb frags */
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->head_frag = 0;
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	skb->tail += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
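/*
 * Example (sketch): the common "make room for one more header" pattern is
 * usually written with the skb_cow_head() helper, which only falls back to
 * pskb_expand_head() when headroom is short or the header is shared:
 *
 *	if (skb_cow_head(skb, VLAN_HLEN))
 *		goto drop;
 *	... it is now safe to skb_push(skb, VLAN_HLEN) ...
 */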
/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
					gfp_mask, skb_alloc_rx_flag(skb),
					NUMA_NO_NODE);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	skb_headers_offset_update(n, newheadroom - oldheadroom);

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, tailroom is always zero. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data+skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);

/**
 *	pskb_put - add data to the tail of a potentially fragmented buffer
 *	@skb: start of the buffer to use
 *	@tail: tail fragment of the buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the potentially
 *	fragmented buffer. @tail must be the last fragment of @skb -- or
 *	@skb itself. If this would exceed the total buffer size the kernel
 *	will panic. A pointer to the first byte of the extra data is
 *	returned.
 */

unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);
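/*
 * Example (illustrative): composing a frame in a fresh skb.  Payload is
 * appended with skb_put(), then headers are prepended with skb_push()
 * (defined below), consuming headroom set aside earlier by skb_reserve().
 * The names "hdr", "data" and the sizes are assumed:
 *
 *	skb_reserve(skb, hdr_len);
 *	memcpy(skb_put(skb, data_len), data, data_len);
 *	memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
 */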
/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled, future pushes will
 *	overwrite the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			consume_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);
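/*
 * Example (sketch): stripping a trailer (say, a hardware checksum or
 * padding) from a possibly non-linear buffer goes through pskb_trim(),
 * which wraps ___pskb_trim() above; plain skb_trim() is only valid for
 * linear skbs.  "trailer_len" is an assumed value:
 *
 *	if (pskb_trim(skb, skb->len - trailer_len))
 *		goto drop;
 */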
/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff;
 *	it expands the header, moving its tail forward and copying necessary
 *	data from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb does not have enough free space at the tail, get a new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size >= eat)
			goto pull_pages;
		eat -= size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but, taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */
pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

/**
 *	skb_copy_bits - copy bits from skb to kernel buffer
 *	@skb: source skb
 *	@offset: offset in source
 *	@to: destination buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source skb to the
 *	destination buffer.
 *
 *	CAUTION ! :
 *		If its prototype is ever changed,
 *		check arch/{*}/net/{*}.S files,
 *		since it is called from BPF assembly code.
 */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(f);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_atomic(skb_frag_page(f));
			memcpy(to,
			       vaddr + f->page_offset + offset - start,
			       copy);
			kunmap_atomic(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);
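/*
 * Example (illustrative): pulling a header that may span fragments into a
 * stack buffer.  A negative @offset, as used by skb_copy() above, would
 * include headroom; here a plain transport offset is used:
 *
 *	struct udphdr uh;
 *
 *	if (skb_copy_bits(skb, skb_transport_offset(skb), &uh, sizeof(uh)))
 *		goto drop;
 */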
/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static struct page *linear_to_page(struct page *page, unsigned int *len,
				   unsigned int *offset,
				   struct sock *sk)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return NULL;

	*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);

	memcpy(page_address(pfrag->page) + pfrag->offset,
	       page_address(page) + *offset, *len);
	*offset = pfrag->offset;
	pfrag->offset += *len;

	return pfrag->page;
}

static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
			     struct page *page,
			     unsigned int offset)
{
	return	spd->nr_pages &&
		spd->pages[spd->nr_pages - 1] == page &&
		(spd->partial[spd->nr_pages - 1].offset +
		 spd->partial[spd->nr_pages - 1].len == offset);
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static bool spd_fill_page(struct splice_pipe_desc *spd,
			  struct pipe_inode_info *pipe, struct page *page,
			  unsigned int *len, unsigned int offset,
			  bool linear,
			  struct sock *sk)
{
	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
		return true;

	if (linear) {
		page = linear_to_page(page, len, &offset, sk);
		if (!page)
			return true;
	}
	if (spd_can_coalesce(spd, page, offset)) {
		spd->partial[spd->nr_pages - 1].len += *len;
		return false;
	}
	get_page(page);
	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return false;
}

static bool __splice_segment(struct page *page, unsigned int poff,
			     unsigned int plen, unsigned int *off,
			     unsigned int *len,
			     struct splice_pipe_desc *spd, bool linear,
			     struct sock *sk,
			     struct pipe_inode_info *pipe)
{
	if (!*len)
		return true;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return false;
	}

	/* ignore any bits we already processed */
	poff += *off;
	plen -= *off;
	*off = 0;

	do {
		unsigned int flen = min(*len, plen);

		if (spd_fill_page(spd, pipe, page, &flen, poff,
				  linear, sk))
			return true;
		poff += flen;
		plen -= flen;
		*len -= flen;
	} while (*len && plen);

	return false;
}

/*
 * Map linear and fragment data from the skb to spd. It reports true if the
 * pipe is full or if we already spliced the requested length.
 */
static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
			      unsigned int *offset, unsigned int *len,
			      struct splice_pipe_desc *spd, struct sock *sk)
{
	int seg;

	/* map the linear part:
	 * If skb->head_frag is set, this 'linear' part is backed by a
	 * fragment, and if the head is not shared with any clones then
	 * we can avoid a copy since we own the head portion of this page.
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, spd,
			     skb_head_is_locked(skb),
			     sk, pipe))
		return true;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(skb_frag_page(f),
				     f->page_offset, skb_frag_size(f),
				     offset, len, spd, false, sk, pipe))
			return true;
	}

	return false;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
 */
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[MAX_SKB_FRAGS];
	struct page *pages[MAX_SKB_FRAGS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = MAX_SKB_FRAGS,
		.flags = flags,
		.ops = &nosteal_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	struct sk_buff *frag_iter;
	struct sock *sk = skb->sk;
	int ret = 0;

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	skb_walk_frags(skb, frag_iter) {
		if (!tlen)
			break;
		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
			break;
	}

done:
	if (spd.nr_pages) {
		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(sk);
	}

	return ret;
}
1975 */ 1976 1977 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 1978 { 1979 int start = skb_headlen(skb); 1980 struct sk_buff *frag_iter; 1981 int i, copy; 1982 1983 if (offset > (int)skb->len - len) 1984 goto fault; 1985 1986 if ((copy = start - offset) > 0) { 1987 if (copy > len) 1988 copy = len; 1989 skb_copy_to_linear_data_offset(skb, offset, from, copy); 1990 if ((len -= copy) == 0) 1991 return 0; 1992 offset += copy; 1993 from += copy; 1994 } 1995 1996 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1997 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1998 int end; 1999 2000 WARN_ON(start > offset + len); 2001 2002 end = start + skb_frag_size(frag); 2003 if ((copy = end - offset) > 0) { 2004 u8 *vaddr; 2005 2006 if (copy > len) 2007 copy = len; 2008 2009 vaddr = kmap_atomic(skb_frag_page(frag)); 2010 memcpy(vaddr + frag->page_offset + offset - start, 2011 from, copy); 2012 kunmap_atomic(vaddr); 2013 2014 if ((len -= copy) == 0) 2015 return 0; 2016 offset += copy; 2017 from += copy; 2018 } 2019 start = end; 2020 } 2021 2022 skb_walk_frags(skb, frag_iter) { 2023 int end; 2024 2025 WARN_ON(start > offset + len); 2026 2027 end = start + frag_iter->len; 2028 if ((copy = end - offset) > 0) { 2029 if (copy > len) 2030 copy = len; 2031 if (skb_store_bits(frag_iter, offset - start, 2032 from, copy)) 2033 goto fault; 2034 if ((len -= copy) == 0) 2035 return 0; 2036 offset += copy; 2037 from += copy; 2038 } 2039 start = end; 2040 } 2041 if (!len) 2042 return 0; 2043 2044 fault: 2045 return -EFAULT; 2046 } 2047 EXPORT_SYMBOL(skb_store_bits); 2048 2049 /* Checksum skb data. */ 2050 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 2051 __wsum csum, const struct skb_checksum_ops *ops) 2052 { 2053 int start = skb_headlen(skb); 2054 int i, copy = start - offset; 2055 struct sk_buff *frag_iter; 2056 int pos = 0; 2057 2058 /* Checksum header. 
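 * The ops indirection lets the same walk drive both the standard Internet
 * checksum (csum_partial_ext/csum_block_add_ext, see skb_checksum() below)
 * and other folds such as the CRC32c used by SCTP, without duplicating the
 * frag and frag_list traversal.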
*/ 2059 if (copy > 0) { 2060 if (copy > len) 2061 copy = len; 2062 csum = ops->update(skb->data + offset, copy, csum); 2063 if ((len -= copy) == 0) 2064 return csum; 2065 offset += copy; 2066 pos = copy; 2067 } 2068 2069 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2070 int end; 2071 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2072 2073 WARN_ON(start > offset + len); 2074 2075 end = start + skb_frag_size(frag); 2076 if ((copy = end - offset) > 0) { 2077 __wsum csum2; 2078 u8 *vaddr; 2079 2080 if (copy > len) 2081 copy = len; 2082 vaddr = kmap_atomic(skb_frag_page(frag)); 2083 csum2 = ops->update(vaddr + frag->page_offset + 2084 offset - start, copy, 0); 2085 kunmap_atomic(vaddr); 2086 csum = ops->combine(csum, csum2, pos, copy); 2087 if (!(len -= copy)) 2088 return csum; 2089 offset += copy; 2090 pos += copy; 2091 } 2092 start = end; 2093 } 2094 2095 skb_walk_frags(skb, frag_iter) { 2096 int end; 2097 2098 WARN_ON(start > offset + len); 2099 2100 end = start + frag_iter->len; 2101 if ((copy = end - offset) > 0) { 2102 __wsum csum2; 2103 if (copy > len) 2104 copy = len; 2105 csum2 = __skb_checksum(frag_iter, offset - start, 2106 copy, 0, ops); 2107 csum = ops->combine(csum, csum2, pos, copy); 2108 if ((len -= copy) == 0) 2109 return csum; 2110 offset += copy; 2111 pos += copy; 2112 } 2113 start = end; 2114 } 2115 BUG_ON(len); 2116 2117 return csum; 2118 } 2119 EXPORT_SYMBOL(__skb_checksum); 2120 2121 __wsum skb_checksum(const struct sk_buff *skb, int offset, 2122 int len, __wsum csum) 2123 { 2124 const struct skb_checksum_ops ops = { 2125 .update = csum_partial_ext, 2126 .combine = csum_block_add_ext, 2127 }; 2128 2129 return __skb_checksum(skb, offset, len, csum, &ops); 2130 } 2131 EXPORT_SYMBOL(skb_checksum); 2132 2133 /* Both of above in one bottle. */ 2134 2135 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 2136 u8 *to, int len, __wsum csum) 2137 { 2138 int start = skb_headlen(skb); 2139 int i, copy = start - offset; 2140 struct sk_buff *frag_iter; 2141 int pos = 0; 2142 2143 /* Copy header. 
*/ 2144 if (copy > 0) { 2145 if (copy > len) 2146 copy = len; 2147 csum = csum_partial_copy_nocheck(skb->data + offset, to, 2148 copy, csum); 2149 if ((len -= copy) == 0) 2150 return csum; 2151 offset += copy; 2152 to += copy; 2153 pos = copy; 2154 } 2155 2156 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2157 int end; 2158 2159 WARN_ON(start > offset + len); 2160 2161 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2162 if ((copy = end - offset) > 0) { 2163 __wsum csum2; 2164 u8 *vaddr; 2165 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2166 2167 if (copy > len) 2168 copy = len; 2169 vaddr = kmap_atomic(skb_frag_page(frag)); 2170 csum2 = csum_partial_copy_nocheck(vaddr + 2171 frag->page_offset + 2172 offset - start, to, 2173 copy, 0); 2174 kunmap_atomic(vaddr); 2175 csum = csum_block_add(csum, csum2, pos); 2176 if (!(len -= copy)) 2177 return csum; 2178 offset += copy; 2179 to += copy; 2180 pos += copy; 2181 } 2182 start = end; 2183 } 2184 2185 skb_walk_frags(skb, frag_iter) { 2186 __wsum csum2; 2187 int end; 2188 2189 WARN_ON(start > offset + len); 2190 2191 end = start + frag_iter->len; 2192 if ((copy = end - offset) > 0) { 2193 if (copy > len) 2194 copy = len; 2195 csum2 = skb_copy_and_csum_bits(frag_iter, 2196 offset - start, 2197 to, copy, 0); 2198 csum = csum_block_add(csum, csum2, pos); 2199 if ((len -= copy) == 0) 2200 return csum; 2201 offset += copy; 2202 to += copy; 2203 pos += copy; 2204 } 2205 start = end; 2206 } 2207 BUG_ON(len); 2208 return csum; 2209 } 2210 EXPORT_SYMBOL(skb_copy_and_csum_bits); 2211 2212 /** 2213 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 2214 * @from: source buffer 2215 * 2216 * Calculates the amount of linear headroom needed in the 'to' skb passed 2217 * into skb_zerocopy(). 2218 */ 2219 unsigned int 2220 skb_zerocopy_headlen(const struct sk_buff *from) 2221 { 2222 unsigned int hlen = 0; 2223 2224 if (!from->head_frag || 2225 skb_headlen(from) < L1_CACHE_BYTES || 2226 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 2227 hlen = skb_headlen(from); 2228 2229 if (skb_has_frag_list(from)) 2230 hlen = from->len; 2231 2232 return hlen; 2233 } 2234 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 2235 2236 /** 2237 * skb_zerocopy - Zero copy skb to skb 2238 * @to: destination buffer 2239 * @from: source buffer 2240 * @len: number of bytes to copy from source buffer 2241 * @hlen: size of linear headroom in destination buffer 2242 * 2243 * Copies up to `len` bytes from `from` to `to` by creating references 2244 * to the frags in the source buffer. 2245 * 2246 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 2247 * headroom in the `to` buffer. 
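 *
 * A typical call sequence (an illustrative sketch; the sizing names are
 * hypothetical) is:
 *
 *	hlen = skb_zerocopy_headlen(from);
 *	to = alloc_skb(hlen + reserve, GFP_ATOMIC);
 *	if (to)
 *		err = skb_zerocopy(to, from, len, hlen);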
2248 * 2249 * Return value: 2250 * 0: everything is OK 2251 * -ENOMEM: couldn't orphan frags of @from due to lack of memory 2252 * -EFAULT: skb_copy_bits() found some problem with skb geometry 2253 */ 2254 int 2255 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 2256 { 2257 int i, j = 0; 2258 int plen = 0; /* length of skb->head fragment */ 2259 int ret; 2260 struct page *page; 2261 unsigned int offset; 2262 2263 BUG_ON(!from->head_frag && !hlen); 2264 2265 /* dont bother with small payloads */ 2266 if (len <= skb_tailroom(to)) 2267 return skb_copy_bits(from, 0, skb_put(to, len), len); 2268 2269 if (hlen) { 2270 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 2271 if (unlikely(ret)) 2272 return ret; 2273 len -= hlen; 2274 } else { 2275 plen = min_t(int, skb_headlen(from), len); 2276 if (plen) { 2277 page = virt_to_head_page(from->head); 2278 offset = from->data - (unsigned char *)page_address(page); 2279 __skb_fill_page_desc(to, 0, page, offset, plen); 2280 get_page(page); 2281 j = 1; 2282 len -= plen; 2283 } 2284 } 2285 2286 to->truesize += len + plen; 2287 to->len += len + plen; 2288 to->data_len += len + plen; 2289 2290 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 2291 skb_tx_error(from); 2292 return -ENOMEM; 2293 } 2294 2295 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 2296 if (!len) 2297 break; 2298 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 2299 skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len); 2300 len -= skb_shinfo(to)->frags[j].size; 2301 skb_frag_ref(to, j); 2302 j++; 2303 } 2304 skb_shinfo(to)->nr_frags = j; 2305 2306 return 0; 2307 } 2308 EXPORT_SYMBOL_GPL(skb_zerocopy); 2309 2310 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 2311 { 2312 __wsum csum; 2313 long csstart; 2314 2315 if (skb->ip_summed == CHECKSUM_PARTIAL) 2316 csstart = skb_checksum_start_offset(skb); 2317 else 2318 csstart = skb_headlen(skb); 2319 2320 BUG_ON(csstart > skb_headlen(skb)); 2321 2322 skb_copy_from_linear_data(skb, to, csstart); 2323 2324 csum = 0; 2325 if (csstart != skb->len) 2326 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 2327 skb->len - csstart, 0); 2328 2329 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2330 long csstuff = csstart + skb->csum_offset; 2331 2332 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 2333 } 2334 } 2335 EXPORT_SYMBOL(skb_copy_and_csum_dev); 2336 2337 /** 2338 * skb_dequeue - remove from the head of the queue 2339 * @list: list to dequeue from 2340 * 2341 * Remove the head of the list. The list lock is taken so the function 2342 * may be used safely with other locking list functions. The head item is 2343 * returned or %NULL if the list is empty. 2344 */ 2345 2346 struct sk_buff *skb_dequeue(struct sk_buff_head *list) 2347 { 2348 unsigned long flags; 2349 struct sk_buff *result; 2350 2351 spin_lock_irqsave(&list->lock, flags); 2352 result = __skb_dequeue(list); 2353 spin_unlock_irqrestore(&list->lock, flags); 2354 return result; 2355 } 2356 EXPORT_SYMBOL(skb_dequeue); 2357 2358 /** 2359 * skb_dequeue_tail - remove from the tail of the queue 2360 * @list: list to dequeue from 2361 * 2362 * Remove the tail of the list. The list lock is taken so the function 2363 * may be used safely with other locking list functions. The tail item is 2364 * returned or %NULL if the list is empty. 
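 *
 * A common pattern (sketch only) is draining a private queue from its
 * newest entry backwards:
 *
 *	while ((skb = skb_dequeue_tail(&queue)) != NULL)
 *		kfree_skb(skb);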
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);

/**
 * skb_queue_purge - empty a list
 * @list: list to empty
 *
 * Delete all buffers on an &sk_buff list. Each buffer is removed from
 * the list and one reference dropped. This function takes the list
 * lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL(skb_queue_purge);

/**
 * skb_queue_head - queue a buffer at the list head
 * @list: list to use
 * @newsk: buffer to queue
 *
 * Queue a buffer at the start of the list. This function takes the
 * list lock and can be used safely with other locking &sk_buff functions.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);

/**
 * skb_queue_tail - queue a buffer at the list tail
 * @list: list to use
 * @newsk: buffer to queue
 *
 * Queue a buffer at the tail of the list. This function takes the
 * list lock and can be used safely with other locking &sk_buff functions.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);

/**
 * skb_unlink - remove a buffer from a list
 * @skb: buffer to remove
 * @list: list to use
 *
 * Remove a packet from a list. The list locks are taken and this
 * function is atomic with respect to other list locked calls.
 *
 * You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);

/**
 * skb_append - append a buffer
 * @old: buffer to insert after
 * @newsk: buffer to insert
 * @list: list to use
 *
 * Place a packet after a given packet in a list. The list locks are taken
 * and this function is atomic with respect to other list locked calls.
 * A buffer cannot be placed on two lists at the same time.
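 *
 * For example (sketch only; the names are hypothetical), keeping a
 * reassembly list ordered by inserting each new fragment right after the
 * one it follows:
 *
 *	skb_append(prev_frag, new_frag, &reasm_queue);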
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_append);

/**
 * skb_insert - insert a buffer
 * @old: buffer to insert before
 * @newsk: buffer to insert
 * @list: list to use
 *
 * Place a packet before a given packet in a list. The list locks are
 * taken and this function is atomic with respect to other list locked
 * calls.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_insert);

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff *skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move the paged-data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags = 0;
	skb1->data_len = skb->data_len;
	skb1->len += skb1->data_len;
	skb->data_len = 0;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff *skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len = skb1->data_len = skb->len - len;
	skb->len = len;
	skb->data_len = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move the whole frag to the second
				 *    part, if that is possible. E.g. this
				 *    approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split the frag accurately at the
				 *    boundary. This is what we do here.
				 */
				skb_frag_ref(skb, i);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy.
 */
		skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);

/* Shifting from/to a cloned skb is a no-go.
 *
 * Caller cannot keep skb_shinfo related pointers past calling here!
 */
static int skb_prepare_for_shift(struct sk_buff *skb)
{
	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

/**
 * skb_shift - Shifts paged data partially from skb to another
 * @tgt: buffer into which tail data gets added
 * @skb: buffer from which the paged data comes from
 * @shiftlen: shift up to this many bytes
 *
 * Attempts to shift up to shiftlen worth of bytes, which may be less than
 * the length of the skb, from skb to tgt. Returns the number of bytes
 * shifted. It is up to the caller to free skb if everything was shifted.
 *
 * If @tgt runs out of frags, the whole operation is aborted.
 *
 * The skb may contain nothing but paged data, while tgt is allowed to
 * contain non-paged data as well.
 *
 * TODO: a full-sized shift could be optimized, but that would need a
 * specialized skb freeing routine to handle frags without an up-to-date
 * nr_frags.
 */
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
{
	int from, to, merge, todo;
	struct skb_frag_struct *fragfrom, *fragto;

	BUG_ON(shiftlen > skb->len);
	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */

	todo = shiftlen;
	from = 0;
	to = skb_shinfo(tgt)->nr_frags;
	fragfrom = &skb_shinfo(skb)->frags[from];

	/* Actual merge is delayed until the point when we know we can
	 * commit all, so that we don't have to undo partial changes
	 */
	if (!to ||
	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
			      fragfrom->page_offset)) {
		merge = -1;
	} else {
		merge = to - 1;

		todo -= skb_frag_size(fragfrom);
		if (todo < 0) {
			if (skb_prepare_for_shift(skb) ||
			    skb_prepare_for_shift(tgt))
				return 0;

			/* All previous frag pointers might be stale!
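			 * (skb_prepare_for_shift() may have called
			 * pskb_expand_head(), which reallocates the head and
			 * the shared info block, so fragfrom/fragto are
			 * reloaded below before being dereferenced.)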
*/ 2635 fragfrom = &skb_shinfo(skb)->frags[from]; 2636 fragto = &skb_shinfo(tgt)->frags[merge]; 2637 2638 skb_frag_size_add(fragto, shiftlen); 2639 skb_frag_size_sub(fragfrom, shiftlen); 2640 fragfrom->page_offset += shiftlen; 2641 2642 goto onlymerged; 2643 } 2644 2645 from++; 2646 } 2647 2648 /* Skip full, not-fitting skb to avoid expensive operations */ 2649 if ((shiftlen == skb->len) && 2650 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2651 return 0; 2652 2653 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2654 return 0; 2655 2656 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2657 if (to == MAX_SKB_FRAGS) 2658 return 0; 2659 2660 fragfrom = &skb_shinfo(skb)->frags[from]; 2661 fragto = &skb_shinfo(tgt)->frags[to]; 2662 2663 if (todo >= skb_frag_size(fragfrom)) { 2664 *fragto = *fragfrom; 2665 todo -= skb_frag_size(fragfrom); 2666 from++; 2667 to++; 2668 2669 } else { 2670 __skb_frag_ref(fragfrom); 2671 fragto->page = fragfrom->page; 2672 fragto->page_offset = fragfrom->page_offset; 2673 skb_frag_size_set(fragto, todo); 2674 2675 fragfrom->page_offset += todo; 2676 skb_frag_size_sub(fragfrom, todo); 2677 todo = 0; 2678 2679 to++; 2680 break; 2681 } 2682 } 2683 2684 /* Ready to "commit" this state change to tgt */ 2685 skb_shinfo(tgt)->nr_frags = to; 2686 2687 if (merge >= 0) { 2688 fragfrom = &skb_shinfo(skb)->frags[0]; 2689 fragto = &skb_shinfo(tgt)->frags[merge]; 2690 2691 skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 2692 __skb_frag_unref(fragfrom); 2693 } 2694 2695 /* Reposition in the original skb */ 2696 to = 0; 2697 while (from < skb_shinfo(skb)->nr_frags) 2698 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2699 skb_shinfo(skb)->nr_frags = to; 2700 2701 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2702 2703 onlymerged: 2704 /* Most likely the tgt won't ever need its checksum anymore, skb on 2705 * the other hand might need it if it needs to be resent 2706 */ 2707 tgt->ip_summed = CHECKSUM_PARTIAL; 2708 skb->ip_summed = CHECKSUM_PARTIAL; 2709 2710 /* Yak, is it really working this way? Some helper please? */ 2711 skb->len -= shiftlen; 2712 skb->data_len -= shiftlen; 2713 skb->truesize -= shiftlen; 2714 tgt->len += shiftlen; 2715 tgt->data_len += shiftlen; 2716 tgt->truesize += shiftlen; 2717 2718 return shiftlen; 2719 } 2720 2721 /** 2722 * skb_prepare_seq_read - Prepare a sequential read of skb data 2723 * @skb: the buffer to read 2724 * @from: lower offset of data to be read 2725 * @to: upper offset of data to be read 2726 * @st: state variable 2727 * 2728 * Initializes the specified state variable. Must be called before 2729 * invoking skb_seq_read() for the first time. 2730 */ 2731 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 2732 unsigned int to, struct skb_seq_state *st) 2733 { 2734 st->lower_offset = from; 2735 st->upper_offset = to; 2736 st->root_skb = st->cur_skb = skb; 2737 st->frag_idx = st->stepped_offset = 0; 2738 st->frag_data = NULL; 2739 } 2740 EXPORT_SYMBOL(skb_prepare_seq_read); 2741 2742 /** 2743 * skb_seq_read - Sequentially read skb data 2744 * @consumed: number of bytes consumed by the caller so far 2745 * @data: destination pointer for data to be returned 2746 * @st: state variable 2747 * 2748 * Reads a block of skb data at @consumed relative to the 2749 * lower offset specified to skb_prepare_seq_read(). Assigns 2750 * the head of the data block to @data and returns the length 2751 * of the block or 0 if the end of the skb data or the upper 2752 * offset has been reached. 
2753 * 2754 * The caller is not required to consume all of the data 2755 * returned, i.e. @consumed is typically set to the number 2756 * of bytes already consumed and the next call to 2757 * skb_seq_read() will return the remaining part of the block. 2758 * 2759 * Note 1: The size of each block of data returned can be arbitrary, 2760 * this limitation is the cost for zerocopy sequential 2761 * reads of potentially non linear data. 2762 * 2763 * Note 2: Fragment lists within fragments are not implemented 2764 * at the moment, state->root_skb could be replaced with 2765 * a stack for this purpose. 2766 */ 2767 unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 2768 struct skb_seq_state *st) 2769 { 2770 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 2771 skb_frag_t *frag; 2772 2773 if (unlikely(abs_offset >= st->upper_offset)) { 2774 if (st->frag_data) { 2775 kunmap_atomic(st->frag_data); 2776 st->frag_data = NULL; 2777 } 2778 return 0; 2779 } 2780 2781 next_skb: 2782 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 2783 2784 if (abs_offset < block_limit && !st->frag_data) { 2785 *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 2786 return block_limit - abs_offset; 2787 } 2788 2789 if (st->frag_idx == 0 && !st->frag_data) 2790 st->stepped_offset += skb_headlen(st->cur_skb); 2791 2792 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2793 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 2794 block_limit = skb_frag_size(frag) + st->stepped_offset; 2795 2796 if (abs_offset < block_limit) { 2797 if (!st->frag_data) 2798 st->frag_data = kmap_atomic(skb_frag_page(frag)); 2799 2800 *data = (u8 *) st->frag_data + frag->page_offset + 2801 (abs_offset - st->stepped_offset); 2802 2803 return block_limit - abs_offset; 2804 } 2805 2806 if (st->frag_data) { 2807 kunmap_atomic(st->frag_data); 2808 st->frag_data = NULL; 2809 } 2810 2811 st->frag_idx++; 2812 st->stepped_offset += skb_frag_size(frag); 2813 } 2814 2815 if (st->frag_data) { 2816 kunmap_atomic(st->frag_data); 2817 st->frag_data = NULL; 2818 } 2819 2820 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 2821 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 2822 st->frag_idx = 0; 2823 goto next_skb; 2824 } else if (st->cur_skb->next) { 2825 st->cur_skb = st->cur_skb->next; 2826 st->frag_idx = 0; 2827 goto next_skb; 2828 } 2829 2830 return 0; 2831 } 2832 EXPORT_SYMBOL(skb_seq_read); 2833 2834 /** 2835 * skb_abort_seq_read - Abort a sequential read of skb data 2836 * @st: state variable 2837 * 2838 * Must be called if skb_seq_read() was not called until it 2839 * returned 0. 
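 *
 * Putting the three calls together (an illustrative sketch; process()
 * is a hypothetical consumer, not part of this file):
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int len, consumed = 0;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		if (process(data, len) < 0) {
 *			skb_abort_seq_read(&st);
 *			break;
 *		}
 *		consumed += len;
 *	}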
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_atomic(st->frag_data);
}
EXPORT_SYMBOL(skb_abort_seq_read);

#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}

/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config)
{
	struct ts_state state;
	unsigned int ret;

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));

	ret = textsearch_find(config, &state);
	return (ret <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);

/**
 * skb_append_datato_frags - append the user data to an skb
 * @sk: sock structure
 * @skb: skb structure to be appended with user data.
 * @getfrag: callback function to be used for getting the user data
 * @from: pointer to user message iov
 * @length: length of the iov message
 *
 * Description: This procedure appends the user data to the fragment part
 * of the skb. If any page allocation fails, this procedure returns -ENOMEM.
 */
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			    int (*getfrag)(void *from, char *to, int offset,
					   int len, int odd, struct sk_buff *skb),
			    void *from, int length)
{
	int frg_cnt = skb_shinfo(skb)->nr_frags;
	int copy;
	int offset = 0;
	int ret;
	struct page_frag *pfrag = &current->task_frag;

	do {
		/* Return error if we don't have space for new frag */
		if (frg_cnt >= MAX_SKB_FRAGS)
			return -EMSGSIZE;

		if (!sk_page_frag_refill(sk, pfrag))
			return -ENOMEM;

		/* copy the user data to page */
		copy = min_t(int, length, pfrag->size - pfrag->offset);

		ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
			      offset, copy, 0, skb);
		if (ret < 0)
			return -EFAULT;

		/* copy was successful so update the size parameters */
		skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
				   copy);
		frg_cnt++;
		pfrag->offset += copy;
		get_page(pfrag->page);

		skb->truesize += copy;
		atomic_add(copy, &sk->sk_wmem_alloc);
		skb->len += copy;
		skb->data_len += copy;
		offset += copy;
		length -= copy;

	} while (length > 0);

	return 0;
}
EXPORT_SYMBOL(skb_append_datato_frags);

/**
 * skb_pull_rcsum - pull skb and update receive checksum
 * @skb: buffer to update
 * @len: length of data pulled
 *
 * This function performs an skb_pull on the packet and
updates 2954 * the CHECKSUM_COMPLETE checksum. It should be used on 2955 * receive path processing instead of skb_pull unless you know 2956 * that the checksum difference is zero (e.g., a valid IP header) 2957 * or you are setting ip_summed to CHECKSUM_NONE. 2958 */ 2959 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 2960 { 2961 BUG_ON(len > skb->len); 2962 skb->len -= len; 2963 BUG_ON(skb->len < skb->data_len); 2964 skb_postpull_rcsum(skb, skb->data, len); 2965 return skb->data += len; 2966 } 2967 EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2968 2969 /** 2970 * skb_segment - Perform protocol segmentation on skb. 2971 * @head_skb: buffer to segment 2972 * @features: features for the output path (see dev->features) 2973 * 2974 * This function performs segmentation on the given skb. It returns 2975 * a pointer to the first in a list of new skbs for the segments. 2976 * In case of error it returns ERR_PTR(err). 2977 */ 2978 struct sk_buff *skb_segment(struct sk_buff *head_skb, 2979 netdev_features_t features) 2980 { 2981 struct sk_buff *segs = NULL; 2982 struct sk_buff *tail = NULL; 2983 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 2984 skb_frag_t *frag = skb_shinfo(head_skb)->frags; 2985 unsigned int mss = skb_shinfo(head_skb)->gso_size; 2986 unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 2987 struct sk_buff *frag_skb = head_skb; 2988 unsigned int offset = doffset; 2989 unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 2990 unsigned int headroom; 2991 unsigned int len; 2992 __be16 proto; 2993 bool csum; 2994 int sg = !!(features & NETIF_F_SG); 2995 int nfrags = skb_shinfo(head_skb)->nr_frags; 2996 int err = -ENOMEM; 2997 int i = 0; 2998 int pos; 2999 int dummy; 3000 3001 __skb_push(head_skb, doffset); 3002 proto = skb_network_protocol(head_skb, &dummy); 3003 if (unlikely(!proto)) 3004 return ERR_PTR(-EINVAL); 3005 3006 csum = !head_skb->encap_hdr_csum && 3007 !!can_checksum_protocol(features, proto); 3008 3009 headroom = skb_headroom(head_skb); 3010 pos = skb_headlen(head_skb); 3011 3012 do { 3013 struct sk_buff *nskb; 3014 skb_frag_t *nskb_frag; 3015 int hsize; 3016 int size; 3017 3018 len = head_skb->len - offset; 3019 if (len > mss) 3020 len = mss; 3021 3022 hsize = skb_headlen(head_skb) - offset; 3023 if (hsize < 0) 3024 hsize = 0; 3025 if (hsize > len || !sg) 3026 hsize = len; 3027 3028 if (!hsize && i >= nfrags && skb_headlen(list_skb) && 3029 (skb_headlen(list_skb) == len || sg)) { 3030 BUG_ON(skb_headlen(list_skb) > len); 3031 3032 i = 0; 3033 nfrags = skb_shinfo(list_skb)->nr_frags; 3034 frag = skb_shinfo(list_skb)->frags; 3035 frag_skb = list_skb; 3036 pos += skb_headlen(list_skb); 3037 3038 while (pos < offset + len) { 3039 BUG_ON(i >= nfrags); 3040 3041 size = skb_frag_size(frag); 3042 if (pos + size > offset + len) 3043 break; 3044 3045 i++; 3046 pos += size; 3047 frag++; 3048 } 3049 3050 nskb = skb_clone(list_skb, GFP_ATOMIC); 3051 list_skb = list_skb->next; 3052 3053 if (unlikely(!nskb)) 3054 goto err; 3055 3056 if (unlikely(pskb_trim(nskb, len))) { 3057 kfree_skb(nskb); 3058 goto err; 3059 } 3060 3061 hsize = skb_end_offset(nskb); 3062 if (skb_cow_head(nskb, doffset + headroom)) { 3063 kfree_skb(nskb); 3064 goto err; 3065 } 3066 3067 nskb->truesize += skb_end_offset(nskb) - hsize; 3068 skb_release_head_state(nskb); 3069 __skb_push(nskb, doffset); 3070 } else { 3071 nskb = __alloc_skb(hsize + doffset + headroom, 3072 GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 3073 NUMA_NO_NODE); 3074 3075 if (unlikely(!nskb)) 3076 goto err; 3077 
			skb_reserve(nskb, headroom);
			__skb_put(nskb, doffset);
		}

		if (segs)
			tail->next = nskb;
		else
			segs = nskb;
		tail = nskb;

		__copy_skb_header(nskb, head_skb);

		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
		skb_reset_mac_len(nskb);

		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
						 nskb->data - tnl_hlen,
						 doffset + tnl_hlen);

		if (nskb->len == len + doffset)
			goto perform_csum_check;

		if (!sg && !nskb->remcsum_offload) {
			nskb->ip_summed = CHECKSUM_NONE;
			nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
							    skb_put(nskb, len),
							    len, 0);
			SKB_GSO_CB(nskb)->csum_start =
			    skb_headroom(nskb) + doffset;
			continue;
		}

		nskb_frag = skb_shinfo(nskb)->frags;

		skb_copy_from_linear_data_offset(head_skb, offset,
						 skb_put(nskb, hsize), hsize);

		skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
			SKBTX_SHARED_FRAG;

		while (pos < offset + len) {
			if (i >= nfrags) {
				BUG_ON(skb_headlen(list_skb));

				i = 0;
				nfrags = skb_shinfo(list_skb)->nr_frags;
				frag = skb_shinfo(list_skb)->frags;
				frag_skb = list_skb;

				BUG_ON(!nfrags);

				list_skb = list_skb->next;
			}

			if (unlikely(skb_shinfo(nskb)->nr_frags >=
				     MAX_SKB_FRAGS)) {
				net_warn_ratelimited(
					"skb_segment: too many frags: %u %u\n",
					pos, mss);
				goto err;
			}

			if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
				goto err;

			*nskb_frag = *frag;
			__skb_frag_ref(nskb_frag);
			size = skb_frag_size(nskb_frag);

			if (pos < offset) {
				nskb_frag->page_offset += offset - pos;
				skb_frag_size_sub(nskb_frag, offset - pos);
			}

			skb_shinfo(nskb)->nr_frags++;

			if (pos + size <= offset + len) {
				i++;
				frag++;
				pos += size;
			} else {
				skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
				goto skip_fraglist;
			}

			nskb_frag++;
		}

skip_fraglist:
		nskb->data_len = len - hsize;
		nskb->len += nskb->data_len;
		nskb->truesize += nskb->data_len;

perform_csum_check:
		if (!csum && !nskb->remcsum_offload) {
			nskb->csum = skb_checksum(nskb, doffset,
						  nskb->len - doffset, 0);
			nskb->ip_summed = CHECKSUM_NONE;
			SKB_GSO_CB(nskb)->csum_start =
			    skb_headroom(nskb) + doffset;
		}
	} while ((offset += len) < head_skb->len);

	/* Some callers want to get the end of the list.
	 * Put it in segs->prev to avoid walking the list.
	 * (see validate_xmit_skb_list() for example)
	 */
	segs->prev = tail;

	/* The following permits correct backpressure for protocols
	 * using skb_set_owner_w().
	 * The idea is to transfer ownership from head_skb to the last segment.
3190 */ 3191 if (head_skb->destructor == sock_wfree) { 3192 swap(tail->truesize, head_skb->truesize); 3193 swap(tail->destructor, head_skb->destructor); 3194 swap(tail->sk, head_skb->sk); 3195 } 3196 return segs; 3197 3198 err: 3199 kfree_skb_list(segs); 3200 return ERR_PTR(err); 3201 } 3202 EXPORT_SYMBOL_GPL(skb_segment); 3203 3204 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 3205 { 3206 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); 3207 unsigned int offset = skb_gro_offset(skb); 3208 unsigned int headlen = skb_headlen(skb); 3209 unsigned int len = skb_gro_len(skb); 3210 struct sk_buff *lp, *p = *head; 3211 unsigned int delta_truesize; 3212 3213 if (unlikely(p->len + len >= 65536)) 3214 return -E2BIG; 3215 3216 lp = NAPI_GRO_CB(p)->last; 3217 pinfo = skb_shinfo(lp); 3218 3219 if (headlen <= offset) { 3220 skb_frag_t *frag; 3221 skb_frag_t *frag2; 3222 int i = skbinfo->nr_frags; 3223 int nr_frags = pinfo->nr_frags + i; 3224 3225 if (nr_frags > MAX_SKB_FRAGS) 3226 goto merge; 3227 3228 offset -= headlen; 3229 pinfo->nr_frags = nr_frags; 3230 skbinfo->nr_frags = 0; 3231 3232 frag = pinfo->frags + nr_frags; 3233 frag2 = skbinfo->frags + i; 3234 do { 3235 *--frag = *--frag2; 3236 } while (--i); 3237 3238 frag->page_offset += offset; 3239 skb_frag_size_sub(frag, offset); 3240 3241 /* all fragments truesize : remove (head size + sk_buff) */ 3242 delta_truesize = skb->truesize - 3243 SKB_TRUESIZE(skb_end_offset(skb)); 3244 3245 skb->truesize -= skb->data_len; 3246 skb->len -= skb->data_len; 3247 skb->data_len = 0; 3248 3249 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 3250 goto done; 3251 } else if (skb->head_frag) { 3252 int nr_frags = pinfo->nr_frags; 3253 skb_frag_t *frag = pinfo->frags + nr_frags; 3254 struct page *page = virt_to_head_page(skb->head); 3255 unsigned int first_size = headlen - offset; 3256 unsigned int first_offset; 3257 3258 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 3259 goto merge; 3260 3261 first_offset = skb->data - 3262 (unsigned char *)page_address(page) + 3263 offset; 3264 3265 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 3266 3267 frag->page.p = page; 3268 frag->page_offset = first_offset; 3269 skb_frag_size_set(frag, first_size); 3270 3271 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 3272 /* We dont need to clear skbinfo->nr_frags here */ 3273 3274 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 3275 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 3276 goto done; 3277 } 3278 3279 merge: 3280 delta_truesize = skb->truesize; 3281 if (offset > headlen) { 3282 unsigned int eat = offset - headlen; 3283 3284 skbinfo->frags[0].page_offset += eat; 3285 skb_frag_size_sub(&skbinfo->frags[0], eat); 3286 skb->data_len -= eat; 3287 skb->len -= eat; 3288 offset = headlen; 3289 } 3290 3291 __skb_pull(skb, offset); 3292 3293 if (NAPI_GRO_CB(p)->last == p) 3294 skb_shinfo(p)->frag_list = skb; 3295 else 3296 NAPI_GRO_CB(p)->last->next = skb; 3297 NAPI_GRO_CB(p)->last = skb; 3298 __skb_header_release(skb); 3299 lp = p; 3300 3301 done: 3302 NAPI_GRO_CB(p)->count++; 3303 p->data_len += len; 3304 p->truesize += delta_truesize; 3305 p->len += len; 3306 if (lp != p) { 3307 lp->data_len += len; 3308 lp->truesize += delta_truesize; 3309 lp->len += len; 3310 } 3311 NAPI_GRO_CB(skb)->same_flow = 1; 3312 return 0; 3313 } 3314 3315 void __init skb_init(void) 3316 { 3317 skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 3318 sizeof(struct sk_buff), 3319 0, 3320 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 
					      NULL);
	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
						sizeof(struct sk_buff_fclones),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						NULL);
}

/**
 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
 * @skb: Socket buffer containing the buffers to be mapped
 * @sg: The scatter-gather list to map into
 * @offset: The offset into the buffer's contents to start mapping
 * @len: Length of buffer space to be mapped
 *
 * Fill the specified scatter-gather list with mappings/pointers into a
 * region of the buffer space attached to a socket buffer.
 */
static int
__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg_set_buf(sg, skb->data + offset, copy);
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
				    frag->page_offset+offset-start);
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
					      copy);
			if ((len -= copy) == 0)
				return elt;
			offset += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return elt;
}

/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to
 * the given sglist without marking the sg entry that contains the last skb
 * data as the end. So the caller can manipulate the sg list at will when
 * padding new data after the first call, without calling sg_unmark_end to
 * extend the sg list.
 *
 * Scenario to use skb_to_sgvec_nomark:
 * 1. sg_init_table
 * 2. skb_to_sgvec_nomark(payload1)
 * 3. skb_to_sgvec_nomark(payload2)
 *
 * This is equivalent to:
 * 1. sg_init_table
 * 2. skb_to_sgvec(payload1)
 * 3. sg_unmark_end
 * 4. skb_to_sgvec(payload2)
 *
 * When conditionally mapping multiple payloads, skb_to_sgvec_nomark is
 * preferable.
 */
int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
			int offset, int len)
{
	return __skb_to_sgvec(skb, sg, offset, len);
}
EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);

int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int nsg = __skb_to_sgvec(skb, sg, offset, len);

	sg_mark_end(&sg[nsg - 1]);

	return nsg;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);

/**
 * skb_cow_data - Check that a socket buffer's data buffers are writable
 * @skb: The socket buffer to check.
 * @tailbits: Amount of trailing space to be added
 * @trailer: Returned pointer to the skb where the @tailbits space begins
 *
 * Make sure that the data buffers attached to a socket buffer are
 * writable. If they are not, private copies are made of the data buffers
 * and the socket buffer is set to use these instead.
 *
 * If @tailbits is given, make sure that there is space to write @tailbits
 * bytes of data beyond current end of socket buffer. @trailer will be
 * set to point to the skb in which this space begins.
 *
 * The number of scatterlist elements required to completely map the
 * COW'd and extended socket buffer will be returned.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most of packets will go this way. */
	if (!skb_has_frag_list(skb)) {
		/* A little bit of trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames. OK, on a miss we reallocate and reserve even
		 * more space, 128 bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery. We are in trouble, going to mince the fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_has_frag_list(skb1) ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_has_frag_list(skb1)) {
			struct sk_buff *skb2;

			/* Fuck, we are miserable poor guys... */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
3529 * OK, link new skb, drop old one */ 3530 3531 skb2->next = skb1->next; 3532 *skb_p = skb2; 3533 kfree_skb(skb1); 3534 skb1 = skb2; 3535 } 3536 elt++; 3537 *trailer = skb1; 3538 skb_p = &skb1->next; 3539 } 3540 3541 return elt; 3542 } 3543 EXPORT_SYMBOL_GPL(skb_cow_data); 3544 3545 static void sock_rmem_free(struct sk_buff *skb) 3546 { 3547 struct sock *sk = skb->sk; 3548 3549 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3550 } 3551 3552 /* 3553 * Note: We dont mem charge error packets (no sk_forward_alloc changes) 3554 */ 3555 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3556 { 3557 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 3558 (unsigned int)sk->sk_rcvbuf) 3559 return -ENOMEM; 3560 3561 skb_orphan(skb); 3562 skb->sk = sk; 3563 skb->destructor = sock_rmem_free; 3564 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3565 3566 /* before exiting rcu section, make sure dst is refcounted */ 3567 skb_dst_force(skb); 3568 3569 skb_queue_tail(&sk->sk_error_queue, skb); 3570 if (!sock_flag(sk, SOCK_DEAD)) 3571 sk->sk_data_ready(sk); 3572 return 0; 3573 } 3574 EXPORT_SYMBOL(sock_queue_err_skb); 3575 3576 struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 3577 { 3578 struct sk_buff_head *q = &sk->sk_error_queue; 3579 struct sk_buff *skb, *skb_next; 3580 unsigned long flags; 3581 int err = 0; 3582 3583 spin_lock_irqsave(&q->lock, flags); 3584 skb = __skb_dequeue(q); 3585 if (skb && (skb_next = skb_peek(q))) 3586 err = SKB_EXT_ERR(skb_next)->ee.ee_errno; 3587 spin_unlock_irqrestore(&q->lock, flags); 3588 3589 sk->sk_err = err; 3590 if (err) 3591 sk->sk_error_report(sk); 3592 3593 return skb; 3594 } 3595 EXPORT_SYMBOL(sock_dequeue_err_skb); 3596 3597 /** 3598 * skb_clone_sk - create clone of skb, and take reference to socket 3599 * @skb: the skb to clone 3600 * 3601 * This function creates a clone of a buffer that holds a reference on 3602 * sk_refcnt. Buffers created via this function are meant to be 3603 * returned using sock_queue_err_skb, or free via kfree_skb. 3604 * 3605 * When passing buffers allocated with this function to sock_queue_err_skb 3606 * it is necessary to wrap the call with sock_hold/sock_put in order to 3607 * prevent the socket from being released prior to being enqueued on 3608 * the sk_error_queue. 
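 *
 * That is (a sketch, mirroring what skb_complete_wifi_ack() below does):
 *
 *	sock_hold(sk);
 *	err = sock_queue_err_skb(sk, clone);
 *	if (err)
 *		kfree_skb(clone);
 *	sock_put(sk);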
3609 */ 3610 struct sk_buff *skb_clone_sk(struct sk_buff *skb) 3611 { 3612 struct sock *sk = skb->sk; 3613 struct sk_buff *clone; 3614 3615 if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) 3616 return NULL; 3617 3618 clone = skb_clone(skb, GFP_ATOMIC); 3619 if (!clone) { 3620 sock_put(sk); 3621 return NULL; 3622 } 3623 3624 clone->sk = sk; 3625 clone->destructor = sock_efree; 3626 3627 return clone; 3628 } 3629 EXPORT_SYMBOL(skb_clone_sk); 3630 3631 static void __skb_complete_tx_timestamp(struct sk_buff *skb, 3632 struct sock *sk, 3633 int tstype) 3634 { 3635 struct sock_exterr_skb *serr; 3636 int err; 3637 3638 serr = SKB_EXT_ERR(skb); 3639 memset(serr, 0, sizeof(*serr)); 3640 serr->ee.ee_errno = ENOMSG; 3641 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 3642 serr->ee.ee_info = tstype; 3643 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 3644 serr->ee.ee_data = skb_shinfo(skb)->tskey; 3645 if (sk->sk_protocol == IPPROTO_TCP) 3646 serr->ee.ee_data -= sk->sk_tskey; 3647 } 3648 3649 err = sock_queue_err_skb(sk, skb); 3650 3651 if (err) 3652 kfree_skb(skb); 3653 } 3654 3655 static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 3656 { 3657 bool ret; 3658 3659 if (likely(sysctl_tstamp_allow_data || tsonly)) 3660 return true; 3661 3662 read_lock_bh(&sk->sk_callback_lock); 3663 ret = sk->sk_socket && sk->sk_socket->file && 3664 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 3665 read_unlock_bh(&sk->sk_callback_lock); 3666 return ret; 3667 } 3668 3669 void skb_complete_tx_timestamp(struct sk_buff *skb, 3670 struct skb_shared_hwtstamps *hwtstamps) 3671 { 3672 struct sock *sk = skb->sk; 3673 3674 if (!skb_may_tx_timestamp(sk, false)) 3675 return; 3676 3677 /* take a reference to prevent skb_orphan() from freeing the socket */ 3678 sock_hold(sk); 3679 3680 *skb_hwtstamps(skb) = *hwtstamps; 3681 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); 3682 3683 sock_put(sk); 3684 } 3685 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 3686 3687 void __skb_tstamp_tx(struct sk_buff *orig_skb, 3688 struct skb_shared_hwtstamps *hwtstamps, 3689 struct sock *sk, int tstype) 3690 { 3691 struct sk_buff *skb; 3692 bool tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 3693 3694 if (!sk || !skb_may_tx_timestamp(sk, tsonly)) 3695 return; 3696 3697 if (tsonly) 3698 skb = alloc_skb(0, GFP_ATOMIC); 3699 else 3700 skb = skb_clone(orig_skb, GFP_ATOMIC); 3701 if (!skb) 3702 return; 3703 3704 if (tsonly) { 3705 skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags; 3706 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 3707 } 3708 3709 if (hwtstamps) 3710 *skb_hwtstamps(skb) = *hwtstamps; 3711 else 3712 skb->tstamp = ktime_get_real(); 3713 3714 __skb_complete_tx_timestamp(skb, sk, tstype); 3715 } 3716 EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 3717 3718 void skb_tstamp_tx(struct sk_buff *orig_skb, 3719 struct skb_shared_hwtstamps *hwtstamps) 3720 { 3721 return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk, 3722 SCM_TSTAMP_SND); 3723 } 3724 EXPORT_SYMBOL_GPL(skb_tstamp_tx); 3725 3726 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 3727 { 3728 struct sock *sk = skb->sk; 3729 struct sock_exterr_skb *serr; 3730 int err; 3731 3732 skb->wifi_acked_valid = 1; 3733 skb->wifi_acked = acked; 3734 3735 serr = SKB_EXT_ERR(skb); 3736 memset(serr, 0, sizeof(*serr)); 3737 serr->ee.ee_errno = ENOMSG; 3738 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 3739 3740 /* take a reference to prevent skb_orphan() from freeing the socket */ 3741 sock_hold(sk); 3742 3743 err = sock_queue_err_skb(sk, 
skb); 3744 if (err) 3745 kfree_skb(skb); 3746 3747 sock_put(sk); 3748 } 3749 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 3750 3751 3752 /** 3753 * skb_partial_csum_set - set up and verify partial csum values for packet 3754 * @skb: the skb to set 3755 * @start: the number of bytes after skb->data to start checksumming. 3756 * @off: the offset from start to place the checksum. 3757 * 3758 * For untrusted partially-checksummed packets, we need to make sure the values 3759 * for skb->csum_start and skb->csum_offset are valid so we don't oops. 3760 * 3761 * This function checks and sets those values and skb->ip_summed: if this 3762 * returns false you should drop the packet. 3763 */ 3764 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 3765 { 3766 if (unlikely(start > skb_headlen(skb)) || 3767 unlikely((int)start + off > skb_headlen(skb) - 2)) { 3768 net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", 3769 start, off, skb_headlen(skb)); 3770 return false; 3771 } 3772 skb->ip_summed = CHECKSUM_PARTIAL; 3773 skb->csum_start = skb_headroom(skb) + start; 3774 skb->csum_offset = off; 3775 skb_set_transport_header(skb, start); 3776 return true; 3777 } 3778 EXPORT_SYMBOL_GPL(skb_partial_csum_set); 3779 3780 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 3781 unsigned int max) 3782 { 3783 if (skb_headlen(skb) >= len) 3784 return 0; 3785 3786 /* If we need to pullup then pullup to the max, so we 3787 * won't need to do it again. 3788 */ 3789 if (max > skb->len) 3790 max = skb->len; 3791 3792 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 3793 return -ENOMEM; 3794 3795 if (skb_headlen(skb) < len) 3796 return -EPROTO; 3797 3798 return 0; 3799 } 3800 3801 #define MAX_TCP_HDR_LEN (15 * 4) 3802 3803 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 3804 typeof(IPPROTO_IP) proto, 3805 unsigned int off) 3806 { 3807 switch (proto) { 3808 int err; 3809 3810 case IPPROTO_TCP: 3811 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 3812 off + MAX_TCP_HDR_LEN); 3813 if (!err && !skb_partial_csum_set(skb, off, 3814 offsetof(struct tcphdr, 3815 check))) 3816 err = -EPROTO; 3817 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 3818 3819 case IPPROTO_UDP: 3820 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 3821 off + sizeof(struct udphdr)); 3822 if (!err && !skb_partial_csum_set(skb, off, 3823 offsetof(struct udphdr, 3824 check))) 3825 err = -EPROTO; 3826 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 3827 } 3828 3829 return ERR_PTR(-EPROTO); 3830 } 3831 3832 /* This value should be large enough to cover a tagged ethernet header plus 3833 * maximally sized IP and TCP or UDP headers. 
3834 */ 3835 #define MAX_IP_HDR_LEN 128 3836 3837 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 3838 { 3839 unsigned int off; 3840 bool fragment; 3841 __sum16 *csum; 3842 int err; 3843 3844 fragment = false; 3845 3846 err = skb_maybe_pull_tail(skb, 3847 sizeof(struct iphdr), 3848 MAX_IP_HDR_LEN); 3849 if (err < 0) 3850 goto out; 3851 3852 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) 3853 fragment = true; 3854 3855 off = ip_hdrlen(skb); 3856 3857 err = -EPROTO; 3858 3859 if (fragment) 3860 goto out; 3861 3862 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 3863 if (IS_ERR(csum)) 3864 return PTR_ERR(csum); 3865 3866 if (recalculate) 3867 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 3868 ip_hdr(skb)->daddr, 3869 skb->len - off, 3870 ip_hdr(skb)->protocol, 0); 3871 err = 0; 3872 3873 out: 3874 return err; 3875 } 3876 3877 /* This value should be large enough to cover a tagged ethernet header plus 3878 * an IPv6 header, all options, and a maximal TCP or UDP header. 3879 */ 3880 #define MAX_IPV6_HDR_LEN 256 3881 3882 #define OPT_HDR(type, skb, off) \ 3883 (type *)(skb_network_header(skb) + (off)) 3884 3885 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 3886 { 3887 int err; 3888 u8 nexthdr; 3889 unsigned int off; 3890 unsigned int len; 3891 bool fragment; 3892 bool done; 3893 __sum16 *csum; 3894 3895 fragment = false; 3896 done = false; 3897 3898 off = sizeof(struct ipv6hdr); 3899 3900 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 3901 if (err < 0) 3902 goto out; 3903 3904 nexthdr = ipv6_hdr(skb)->nexthdr; 3905 3906 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 3907 while (off <= len && !done) { 3908 switch (nexthdr) { 3909 case IPPROTO_DSTOPTS: 3910 case IPPROTO_HOPOPTS: 3911 case IPPROTO_ROUTING: { 3912 struct ipv6_opt_hdr *hp; 3913 3914 err = skb_maybe_pull_tail(skb, 3915 off + 3916 sizeof(struct ipv6_opt_hdr), 3917 MAX_IPV6_HDR_LEN); 3918 if (err < 0) 3919 goto out; 3920 3921 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 3922 nexthdr = hp->nexthdr; 3923 off += ipv6_optlen(hp); 3924 break; 3925 } 3926 case IPPROTO_AH: { 3927 struct ip_auth_hdr *hp; 3928 3929 err = skb_maybe_pull_tail(skb, 3930 off + 3931 sizeof(struct ip_auth_hdr), 3932 MAX_IPV6_HDR_LEN); 3933 if (err < 0) 3934 goto out; 3935 3936 hp = OPT_HDR(struct ip_auth_hdr, skb, off); 3937 nexthdr = hp->nexthdr; 3938 off += ipv6_authlen(hp); 3939 break; 3940 } 3941 case IPPROTO_FRAGMENT: { 3942 struct frag_hdr *hp; 3943 3944 err = skb_maybe_pull_tail(skb, 3945 off + 3946 sizeof(struct frag_hdr), 3947 MAX_IPV6_HDR_LEN); 3948 if (err < 0) 3949 goto out; 3950 3951 hp = OPT_HDR(struct frag_hdr, skb, off); 3952 3953 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 3954 fragment = true; 3955 3956 nexthdr = hp->nexthdr; 3957 off += sizeof(struct frag_hdr); 3958 break; 3959 } 3960 default: 3961 done = true; 3962 break; 3963 } 3964 } 3965 3966 err = -EPROTO; 3967 3968 if (!done || fragment) 3969 goto out; 3970 3971 csum = skb_checksum_setup_ip(skb, nexthdr, off); 3972 if (IS_ERR(csum)) 3973 return PTR_ERR(csum); 3974 3975 if (recalculate) 3976 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 3977 &ipv6_hdr(skb)->daddr, 3978 skb->len - off, nexthdr, 0); 3979 err = 0; 3980 3981 out: 3982 return err; 3983 } 3984 3985 /** 3986 * skb_checksum_setup - set up partial checksum offset 3987 * @skb: the skb to set up 3988 * @recalculate: if true the pseudo-header checksum will be recalculated 3989 */ 3990 int skb_checksum_setup(struct sk_buff *skb, bool 
recalculate)
{
	int err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		err = skb_checksum_setup_ipv4(skb, recalculate);
		break;

	case htons(ETH_P_IPV6):
		err = skb_checksum_setup_ipv6(skb, recalculate);
		break;

	default:
		err = -EPROTO;
		break;
	}

	return err;
}
EXPORT_SYMBOL(skb_checksum_setup);

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
			     skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
	if (head_stolen) {
		skb_release_head_state(skb);
		kmem_cache_free(skbuff_head_cache, skb);
	} else {
		__kfree_skb(skb);
	}
}
EXPORT_SYMBOL(kfree_skb_partial);

/**
 * skb_try_coalesce - try to merge skb to prior one
 * @to: prior buffer
 * @from: buffer to add
 * @fragstolen: pointer to boolean
 * @delta_truesize: how much more was allocated than was requested
 */
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize)
{
	int i, delta, len = from->len;

	*fragstolen = false;

	if (skb_cloned(to))
		return false;

	if (len <= skb_tailroom(to)) {
		if (len)
			BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
		*delta_truesize = 0;
		return true;
	}

	if (skb_has_frag_list(to) || skb_has_frag_list(from))
		return false;

	if (skb_headlen(from) != 0) {
		struct page *page;
		unsigned int offset;

		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
			return false;

		if (skb_head_is_locked(from))
			return false;

		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));

		page = virt_to_head_page(from->head);
		offset = from->data - (unsigned char *)page_address(page);

		skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
				   page, offset, skb_headlen(from));
		*fragstolen = true;
	} else {
		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
			return false;

		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
	}

	WARN_ON_ONCE(delta < len);

	memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
	       skb_shinfo(from)->frags,
	       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
	skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;

	if (!skb_cloned(from))
		skb_shinfo(from)->nr_frags = 0;

	/* if the skb is not cloned this does nothing
	 * since we set nr_frags to 0.
	 */
	for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
		skb_frag_ref(from, i);

	to->truesize += delta;
	to->len += len;
	to->data_len += len;

	*delta_truesize = delta;
	return true;
}
EXPORT_SYMBOL(skb_try_coalesce);

/**
 * skb_scrub_packet - scrub an skb
 *
 * @skb: buffer to clean
 * @xnet: packet is crossing netns
 *
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information has to be cleared during these
 * operations.
 * skb_scrub_packet can also be used to clean an skb before injecting it in
 * another namespace (@xnet == true).
/**
 * skb_scrub_packet - scrub an skb
 *
 * @skb: buffer to clean
 * @xnet: packet is crossing netns
 *
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information has to be cleared during these
 * operations.
 * skb_scrub_packet can also be used to clean a skb before injecting it into
 * another namespace (@xnet == true). We have to clear all information in the
 * skb that could impact namespace isolation.
 */
void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
	if (xnet)
		skb_orphan(skb);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb->ignore_df = 0;
	skb_dst_drop(skb);
	skb->mark = 0;
	skb->sender_cpu = 0;
	skb_init_secmark(skb);
	secpath_reset(skb);
	nf_reset(skb);
	nf_reset_trace(skb);
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);

/**
 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_transport_seglen is used to determine the real size of the
 * individual segments, including Layer4 headers (TCP/UDP).
 *
 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
 */
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int thlen = 0;

	if (skb->encapsulation) {
		thlen = skb_inner_transport_header(skb) -
			skb_transport_header(skb);

		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			thlen += inner_tcp_hdrlen(skb);
	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
		thlen = tcp_hdrlen(skb);
	}
	/* UFO sets gso_size to the size of the fragmentation
	 * payload, i.e. the size of the L4 (UDP) header is already
	 * accounted for.
	 */
	return thlen + shinfo->gso_size;
}
EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);

static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0) {
		kfree_skb(skb);
		return NULL;
	}

	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	return skb;
}

struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(skb_vlan_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = skb_reorder_vlan_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(skb_vlan_untag);

int skb_ensure_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
EXPORT_SYMBOL(skb_ensure_writable);
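/* Illustrative caller (a sketch under the hypothetical name
 * example_set_eth_dest, modelled on how packet-mangling code uses
 * skb_ensure_writable() before rewriting header bytes). It assumes
 * skb->data points at the Ethernet header; a complete implementation
 * would also adjust skb->csum on CHECKSUM_COMPLETE packets if the
 * rewritten bytes are covered by the checksum.
 */
static int example_set_eth_dest(struct sk_buff *skb, const u8 *addr)
{
	int err;

	/* Make the first ETH_HLEN bytes linear and private to us */
	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	ether_addr_copy(eth_hdr(skb)->h_dest, addr);
	return 0;
}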
/* remove VLAN header from packet and update csum accordingly */
static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_hdr *vhdr;
	unsigned int offset = skb->data - skb_mac_header(skb);
	int err;

	__skb_push(skb, offset);
	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		goto pull;

	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*vlan_tci = ntohs(vhdr->h_vlan_TCI);

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;

	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);

	skb_reset_mac_len(skb);
pull:
	__skb_pull(skb, offset);

	return err;
}

int skb_vlan_pop(struct sk_buff *skb)
{
	u16 vlan_tci;
	__be16 vlan_proto;
	int err;

	if (likely(skb_vlan_tag_present(skb))) {
		skb->vlan_tci = 0;
	} else {
		if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
			      skb->protocol != htons(ETH_P_8021AD)) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __skb_vlan_pop(skb, &vlan_tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely((skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	vlan_proto = skb->protocol;
	err = __skb_vlan_pop(skb, &vlan_tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_pop);

int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		unsigned int offset = skb->data - skb_mac_header(skb);
		int err;

		/* __vlan_insert_tag expects skb->data to point at the mac
		 * header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		__skb_push(skb, offset);
		err = __vlan_insert_tag(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
		if (err)
			return err;
		skb->protocol = skb->vlan_proto;
		skb->mac_len += VLAN_HLEN;
		__skb_pull(skb, offset);

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
	}
	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_push);
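/* Illustrative sequence (a sketch; example_vlan_retag is hypothetical):
 * actions such as those in Open vSwitch combine the two exported helpers
 * above to replace an existing tag, whether it currently sits in the
 * hw-accel field or inline in the frame.
 */
static int example_vlan_retag(struct sk_buff *skb, u16 new_tci)
{
	int err;

	err = skb_vlan_pop(skb);
	if (err)
		return err;

	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_tci);
}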
/**
 * alloc_skb_with_frags - allocate skb with page frags
 *
 * @header_len: size of linear part
 * @data_len: needed length in frags
 * @max_page_order: max page order desired.
 * @errcode: pointer to error code if any
 * @gfp_mask: allocation mask
 *
 * This can be used to allocate a paged skb, given a maximal order for frags.
 */
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask)
{
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	unsigned long chunk;
	struct sk_buff *skb;
	struct page *page;
	gfp_t gfp_head;
	int i;

	*errcode = -EMSGSIZE;
	/* Note: this test could be relaxed if we succeeded in allocating
	 * high-order pages...
	 */
	if (npages > MAX_SKB_FRAGS)
		return NULL;

	gfp_head = gfp_mask;
	if (gfp_head & __GFP_WAIT)
		gfp_head |= __GFP_REPEAT;

	*errcode = -ENOBUFS;
	skb = alloc_skb(header_len, gfp_head);
	if (!skb)
		return NULL;

	skb->truesize += npages << PAGE_SHIFT;

	for (i = 0; npages > 0; i++) {
		int order = max_page_order;

		while (order) {
			if (npages >= 1 << order) {
				page = alloc_pages(gfp_mask |
						   __GFP_COMP |
						   __GFP_NOWARN |
						   __GFP_NORETRY,
						   order);
				if (page)
					goto fill_page;
				/* Do not retry other high order allocations */
				order = 1;
				max_page_order = 0;
			}
			order--;
		}
		page = alloc_page(gfp_mask);
		if (!page)
			goto failure;
fill_page:
		chunk = min_t(unsigned long, data_len,
			      PAGE_SIZE << order);
		skb_fill_page_desc(skb, i, page, 0, chunk);
		data_len -= chunk;
		npages -= 1 << order;
	}
	return skb;

failure:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(alloc_skb_with_frags);
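/* Illustrative caller (a sketch; example_alloc_paged is hypothetical,
 * modelled on how datagram senders use this helper): note that
 * alloc_skb_with_frags() attaches the frag pages but does not account
 * their bytes in skb->len or skb->data_len, so the caller claims the
 * paged length itself after reserving the linear part.
 */
static struct sk_buff *example_alloc_paged(unsigned long linear,
					   unsigned long paged)
{
	struct sk_buff *skb;
	int errcode;

	skb = alloc_skb_with_frags(linear, paged, PAGE_ALLOC_COSTLY_ORDER,
				   &errcode, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_put(skb, linear);	/* linear part, to be filled by the caller */
	skb->data_len = paged;
	skb->len += paged;
	return skb;
}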