/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
{
	struct sk_buff *skb;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(skbuff_head_cache,
				    gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->head = NULL;
	skb->truesize = sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);

	skb->mac_header = (typeof(skb->mac_header))~0U;
out:
	return skb;
}

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
		child->pfmemalloc = pfmemalloc;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
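
/*
 * Illustrative sketch, not part of the original file: a typical sender
 * allocates linear space for headers plus payload with alloc_skb() (a thin
 * wrapper around __alloc_skb()), reserves headroom, then appends the payload.
 * "payload" and "payload_len" are hypothetical names used only for this
 * example.
 *
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb(MAX_HEADER + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOBUFS;
 *	skb_reserve(skb, MAX_HEADER);
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 */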
323b2b5ce9dSEric Dumazet */ 324d3836f21SEric Dumazet struct sk_buff *build_skb(void *data, unsigned int frag_size) 325b2b5ce9dSEric Dumazet { 326b2b5ce9dSEric Dumazet struct skb_shared_info *shinfo; 327b2b5ce9dSEric Dumazet struct sk_buff *skb; 328d3836f21SEric Dumazet unsigned int size = frag_size ? : ksize(data); 329b2b5ce9dSEric Dumazet 330b2b5ce9dSEric Dumazet skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); 331b2b5ce9dSEric Dumazet if (!skb) 332b2b5ce9dSEric Dumazet return NULL; 333b2b5ce9dSEric Dumazet 334d3836f21SEric Dumazet size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 335b2b5ce9dSEric Dumazet 336b2b5ce9dSEric Dumazet memset(skb, 0, offsetof(struct sk_buff, tail)); 337b2b5ce9dSEric Dumazet skb->truesize = SKB_TRUESIZE(size); 338d3836f21SEric Dumazet skb->head_frag = frag_size != 0; 339b2b5ce9dSEric Dumazet atomic_set(&skb->users, 1); 340b2b5ce9dSEric Dumazet skb->head = data; 341b2b5ce9dSEric Dumazet skb->data = data; 342b2b5ce9dSEric Dumazet skb_reset_tail_pointer(skb); 343b2b5ce9dSEric Dumazet skb->end = skb->tail + size; 34435d04610SCong Wang skb->mac_header = (typeof(skb->mac_header))~0U; 34535d04610SCong Wang skb->transport_header = (typeof(skb->transport_header))~0U; 346b2b5ce9dSEric Dumazet 347b2b5ce9dSEric Dumazet /* make sure we initialize shinfo sequentially */ 348b2b5ce9dSEric Dumazet shinfo = skb_shinfo(skb); 349b2b5ce9dSEric Dumazet memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); 350b2b5ce9dSEric Dumazet atomic_set(&shinfo->dataref, 1); 351b2b5ce9dSEric Dumazet kmemcheck_annotate_variable(shinfo->destructor_arg); 352b2b5ce9dSEric Dumazet 353b2b5ce9dSEric Dumazet return skb; 354b2b5ce9dSEric Dumazet } 355b2b5ce9dSEric Dumazet EXPORT_SYMBOL(build_skb); 356b2b5ce9dSEric Dumazet 357a1c7fff7SEric Dumazet struct netdev_alloc_cache { 35869b08f62SEric Dumazet struct page_frag frag; 35969b08f62SEric Dumazet /* we maintain a pagecount bias, so that we dont dirty cache line 36069b08f62SEric Dumazet * containing page->_count every time we allocate a fragment. 
36169b08f62SEric Dumazet */ 362540eb7bfSAlexander Duyck unsigned int pagecnt_bias; 363a1c7fff7SEric Dumazet }; 364a1c7fff7SEric Dumazet static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); 365a1c7fff7SEric Dumazet 366c93bdd0eSMel Gorman static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) 3676f532612SEric Dumazet { 3686f532612SEric Dumazet struct netdev_alloc_cache *nc; 3696f532612SEric Dumazet void *data = NULL; 37069b08f62SEric Dumazet int order; 3716f532612SEric Dumazet unsigned long flags; 3726f532612SEric Dumazet 3736f532612SEric Dumazet local_irq_save(flags); 3746f532612SEric Dumazet nc = &__get_cpu_var(netdev_alloc_cache); 37569b08f62SEric Dumazet if (unlikely(!nc->frag.page)) { 3766f532612SEric Dumazet refill: 37769b08f62SEric Dumazet for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) { 37869b08f62SEric Dumazet gfp_t gfp = gfp_mask; 37969b08f62SEric Dumazet 38069b08f62SEric Dumazet if (order) 38169b08f62SEric Dumazet gfp |= __GFP_COMP | __GFP_NOWARN; 38269b08f62SEric Dumazet nc->frag.page = alloc_pages(gfp, order); 38369b08f62SEric Dumazet if (likely(nc->frag.page)) 38469b08f62SEric Dumazet break; 38569b08f62SEric Dumazet if (--order < 0) 386540eb7bfSAlexander Duyck goto end; 38769b08f62SEric Dumazet } 38869b08f62SEric Dumazet nc->frag.size = PAGE_SIZE << order; 389540eb7bfSAlexander Duyck recycle: 39069b08f62SEric Dumazet atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS); 39169b08f62SEric Dumazet nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS; 39269b08f62SEric Dumazet nc->frag.offset = 0; 3936f532612SEric Dumazet } 394540eb7bfSAlexander Duyck 39569b08f62SEric Dumazet if (nc->frag.offset + fragsz > nc->frag.size) { 396540eb7bfSAlexander Duyck /* avoid unnecessary locked operations if possible */ 39769b08f62SEric Dumazet if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) || 39869b08f62SEric Dumazet atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count)) 399540eb7bfSAlexander Duyck goto recycle; 4006f532612SEric Dumazet goto refill; 4016f532612SEric Dumazet } 402540eb7bfSAlexander Duyck 40369b08f62SEric Dumazet data = page_address(nc->frag.page) + nc->frag.offset; 40469b08f62SEric Dumazet nc->frag.offset += fragsz; 405540eb7bfSAlexander Duyck nc->pagecnt_bias--; 406540eb7bfSAlexander Duyck end: 4076f532612SEric Dumazet local_irq_restore(flags); 4086f532612SEric Dumazet return data; 4096f532612SEric Dumazet } 410c93bdd0eSMel Gorman 411c93bdd0eSMel Gorman /** 412c93bdd0eSMel Gorman * netdev_alloc_frag - allocate a page fragment 413c93bdd0eSMel Gorman * @fragsz: fragment size 414c93bdd0eSMel Gorman * 415c93bdd0eSMel Gorman * Allocates a frag from a page for receive buffer. 416c93bdd0eSMel Gorman * Uses GFP_ATOMIC allocations. 417c93bdd0eSMel Gorman */ 418c93bdd0eSMel Gorman void *netdev_alloc_frag(unsigned int fragsz) 419c93bdd0eSMel Gorman { 420c93bdd0eSMel Gorman return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD); 421c93bdd0eSMel Gorman } 4226f532612SEric Dumazet EXPORT_SYMBOL(netdev_alloc_frag); 4236f532612SEric Dumazet 4246f532612SEric Dumazet /** 4258af27456SChristoph Hellwig * __netdev_alloc_skb - allocate an skbuff for rx on a specific device 4268af27456SChristoph Hellwig * @dev: network device to receive on 4278af27456SChristoph Hellwig * @length: length to allocate 4288af27456SChristoph Hellwig * @gfp_mask: get_free_pages mask, passed to alloc_skb 4298af27456SChristoph Hellwig * 4308af27456SChristoph Hellwig * Allocate a new &sk_buff and assign it a usage count of one. 
The 4318af27456SChristoph Hellwig * buffer has unspecified headroom built in. Users should allocate 4328af27456SChristoph Hellwig * the headroom they think they need without accounting for the 4338af27456SChristoph Hellwig * built in space. The built in space is used for optimisations. 4348af27456SChristoph Hellwig * 4358af27456SChristoph Hellwig * %NULL is returned if there is no free memory. 4368af27456SChristoph Hellwig */ 4378af27456SChristoph Hellwig struct sk_buff *__netdev_alloc_skb(struct net_device *dev, 4388af27456SChristoph Hellwig unsigned int length, gfp_t gfp_mask) 4398af27456SChristoph Hellwig { 4406f532612SEric Dumazet struct sk_buff *skb = NULL; 441a1c7fff7SEric Dumazet unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) + 442a1c7fff7SEric Dumazet SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4438af27456SChristoph Hellwig 444310e158cSEric Dumazet if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) { 445c93bdd0eSMel Gorman void *data; 446c93bdd0eSMel Gorman 447c93bdd0eSMel Gorman if (sk_memalloc_socks()) 448c93bdd0eSMel Gorman gfp_mask |= __GFP_MEMALLOC; 449c93bdd0eSMel Gorman 450c93bdd0eSMel Gorman data = __netdev_alloc_frag(fragsz, gfp_mask); 451a1c7fff7SEric Dumazet 4526f532612SEric Dumazet if (likely(data)) { 4536f532612SEric Dumazet skb = build_skb(data, fragsz); 4546f532612SEric Dumazet if (unlikely(!skb)) 4556f532612SEric Dumazet put_page(virt_to_head_page(data)); 456a1c7fff7SEric Dumazet } 457a1c7fff7SEric Dumazet } else { 458c93bdd0eSMel Gorman skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 459c93bdd0eSMel Gorman SKB_ALLOC_RX, NUMA_NO_NODE); 460a1c7fff7SEric Dumazet } 4617b2e497aSChristoph Hellwig if (likely(skb)) { 4628af27456SChristoph Hellwig skb_reserve(skb, NET_SKB_PAD); 4637b2e497aSChristoph Hellwig skb->dev = dev; 4647b2e497aSChristoph Hellwig } 4658af27456SChristoph Hellwig return skb; 4668af27456SChristoph Hellwig } 467b4ac530fSDavid S. 
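
/*
 * Illustrative sketch, not part of the original file: a driver RX path
 * usually pairs netdev_alloc_skb() (which calls __netdev_alloc_skb() with
 * GFP_ATOMIC) with skb_put() once the hardware has written the frame.
 * "rx_len" is a hypothetical length reported by the device.
 *
 *	skb = netdev_alloc_skb(dev, rx_len);
 *	if (!skb) {
 *		dev->stats.rx_dropped++;
 *		return;
 *	}
 *	skb_put(skb, rx_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */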

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	if (skb->head_frag)
		put_page(virt_to_head_page(skb->head));
	else
		kfree(skb->head);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				skb_frag_unref(skb, i);
		}

		/*
		 * If skb buf is from userspace, we need to notify the caller
		 * the lower device DMA has done;
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			struct ubuf_info *uarg;

			uarg = skb_shinfo(skb)->destructor_arg;
			if (uarg->callback)
				uarg->callback(uarg, true);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		skb_free_head(skb);
	}
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);

/**
 * skb_tx_error - report an sk_buff xmit error
 * @skb: buffer that triggered an error
 *
 * Report xmit error if a device callback is tracking this skb.
 * skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = skb_shinfo(skb)->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, false);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	}
}
EXPORT_SYMBOL(skb_tx_error);
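
/*
 * Illustrative note, not part of the original file: kfree_skb() above is
 * intended for buffers dropped on an error path, while consume_skb() below
 * is for buffers that reached the end of a successful path. Both free the
 * skb; they differ only in the tracepoint emitted (trace_kfree_skb vs
 * trace_consume_skb), which lets drop monitors ignore normal traffic.
 * A minimal sketch, with "bad_checksum" as a hypothetical condition:
 *
 *	if (unlikely(bad_checksum))
 *		kfree_skb(skb);
 *	else
 *		consume_skb(skb);
 */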

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero
 *	Functions identically to kfree_skb, but kfree_skb assumes that the frame
 *	is being dropped after a failure and notes that
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	new->inner_protocol = old->inner_protocol;
	new->inner_transport_header = old->inner_transport_header;
	new->inner_network_header = old->inner_network_header;
	new->inner_mac_header = old->inner_mac_header;
	skb_dst_copy(new, old);
	new->rxhash = old->rxhash;
	new->ooo_okay = old->ooo_okay;
	new->l4_rxhash = old->l4_rxhash;
	new->no_fcs = old->no_fcs;
	new->encapsulation = old->encapsulation;
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum = old->csum;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
#if IS_ENABLED(CONFIG_IP_VS)
	new->ipvs_property = old->ipvs_property;
#endif
	new->pfmemalloc = old->pfmemalloc;
	new->protocol = old->protocol;
	new->mark = old->mark;
	new->skb_iif = old->skb_iif;
	__nf_copy(new, old);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	new->vlan_proto = old->vlan_proto;
	new->vlan_tci = old->vlan_tci;

	skb_copy_secmark(new, old);

#ifdef CONFIG_NET_RX_BUSY_POLL
	new->napi_id = old->napi_id;
#endif
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_atomic(skb_frag_page(f));
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_atomic(vaddr);
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg, false);

	/* skb frags point to kernel buffers */
	for (i = num_frags - 1; i >= 0; i--) {
		__skb_fill_page_desc(skb, i, head, 0,
				     skb_shinfo(skb)->frags[i].size);
		head = (struct page *)page_private(head);
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

static void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As by-product this function converts non-linear &sk_buff to linear
 *	one, so that &sk_buff becomes completely private and caller is allowed
 *	to modify all the data of returned buffer. This means that this
 *	function is not recommended for use in circumstances when only
 *	header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
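
/*
 * Illustrative sketch, not part of the original file: skb_clone() above
 * shares the packet data between the two sk_buffs, so the clone must not
 * be written to; skb_copy() makes the data private (and linear) at the
 * cost of copying it. A caller that needs to modify a possibly shared
 * buffer would do something like:
 *
 *	struct sk_buff *nskb;
 *
 *	nskb = skb_copy(skb, GFP_ATOMIC);
 *	if (!nskb)
 *		return -ENOMEM;
 *	... modify nskb freely, the original skb is untouched ...
 */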

/**
 *	__pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
{
	unsigned int size = skb_headlen(skb) + headroom;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy);

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success or error,
 *	if expansion failed. In the last case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + skb_end_offset(skb) + ntail;
	long off;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		/* copy this zero copy skb frags */
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->head_frag = 0;
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	skb->tail += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
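
/*
 * Illustrative sketch, not part of the original file: a tunnel or
 * encapsulation path that must push an extra header can use
 * skb_realloc_headroom() above (or pskb_expand_head() directly on a
 * non-shared skb) to guarantee the headroom before skb_push().
 * "encap_len" is a hypothetical header length used only for this example.
 *
 *	if (skb_headroom(skb) < encap_len) {
 *		struct sk_buff *nskb = skb_realloc_headroom(skb, encap_len);
 *
 *		if (!nskb)
 *			goto drop;
 *		kfree_skb(skb);
 *		skb = nskb;
 *	}
 *	skb_push(skb, encap_len);
 */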
11721da177e4SLinus Torvalds * 11731da177e4SLinus Torvalds * You must pass %GFP_ATOMIC as the allocation priority if this function 11741da177e4SLinus Torvalds * is called from an interrupt. 11751da177e4SLinus Torvalds */ 11761da177e4SLinus Torvalds struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 117786a76cafSVictor Fusco int newheadroom, int newtailroom, 1178dd0fc66fSAl Viro gfp_t gfp_mask) 11791da177e4SLinus Torvalds { 11801da177e4SLinus Torvalds /* 11811da177e4SLinus Torvalds * Allocate the copy buffer 11821da177e4SLinus Torvalds */ 1183c93bdd0eSMel Gorman struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, 1184c93bdd0eSMel Gorman gfp_mask, skb_alloc_rx_flag(skb), 1185c93bdd0eSMel Gorman NUMA_NO_NODE); 1186efd1e8d5SPatrick McHardy int oldheadroom = skb_headroom(skb); 11871da177e4SLinus Torvalds int head_copy_len, head_copy_off; 11881da177e4SLinus Torvalds 11891da177e4SLinus Torvalds if (!n) 11901da177e4SLinus Torvalds return NULL; 11911da177e4SLinus Torvalds 11921da177e4SLinus Torvalds skb_reserve(n, newheadroom); 11931da177e4SLinus Torvalds 11941da177e4SLinus Torvalds /* Set the tail pointer and length */ 11951da177e4SLinus Torvalds skb_put(n, skb->len); 11961da177e4SLinus Torvalds 1197efd1e8d5SPatrick McHardy head_copy_len = oldheadroom; 11981da177e4SLinus Torvalds head_copy_off = 0; 11991da177e4SLinus Torvalds if (newheadroom <= head_copy_len) 12001da177e4SLinus Torvalds head_copy_len = newheadroom; 12011da177e4SLinus Torvalds else 12021da177e4SLinus Torvalds head_copy_off = newheadroom - head_copy_len; 12031da177e4SLinus Torvalds 12041da177e4SLinus Torvalds /* Copy the linear header and data. */ 12051da177e4SLinus Torvalds if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 12061da177e4SLinus Torvalds skb->len + head_copy_len)) 12071da177e4SLinus Torvalds BUG(); 12081da177e4SLinus Torvalds 12091da177e4SLinus Torvalds copy_skb_header(n, skb); 12101da177e4SLinus Torvalds 1211030737bcSEric Dumazet skb_headers_offset_update(n, newheadroom - oldheadroom); 1212efd1e8d5SPatrick McHardy 12131da177e4SLinus Torvalds return n; 12141da177e4SLinus Torvalds } 1215b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_expand); 12161da177e4SLinus Torvalds 12171da177e4SLinus Torvalds /** 12181da177e4SLinus Torvalds * skb_pad - zero pad the tail of an skb 12191da177e4SLinus Torvalds * @skb: buffer to pad 12201da177e4SLinus Torvalds * @pad: space to pad 12211da177e4SLinus Torvalds * 12221da177e4SLinus Torvalds * Ensure that a buffer is followed by a padding area that is zero 12231da177e4SLinus Torvalds * filled. Used by network drivers which may DMA or transfer data 12241da177e4SLinus Torvalds * beyond the buffer end onto the wire. 12251da177e4SLinus Torvalds * 12265b057c6bSHerbert Xu * May return error in out of memory cases. The skb is freed on error. 12271da177e4SLinus Torvalds */ 12281da177e4SLinus Torvalds 12295b057c6bSHerbert Xu int skb_pad(struct sk_buff *skb, int pad) 12301da177e4SLinus Torvalds { 12315b057c6bSHerbert Xu int err; 12325b057c6bSHerbert Xu int ntail; 12331da177e4SLinus Torvalds 12341da177e4SLinus Torvalds /* If the skbuff is non linear tailroom is always zero.. 
*/ 12355b057c6bSHerbert Xu if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 12361da177e4SLinus Torvalds memset(skb->data+skb->len, 0, pad); 12375b057c6bSHerbert Xu return 0; 12381da177e4SLinus Torvalds } 12391da177e4SLinus Torvalds 12404305b541SArnaldo Carvalho de Melo ntail = skb->data_len + pad - (skb->end - skb->tail); 12415b057c6bSHerbert Xu if (likely(skb_cloned(skb) || ntail > 0)) { 12425b057c6bSHerbert Xu err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 12435b057c6bSHerbert Xu if (unlikely(err)) 12445b057c6bSHerbert Xu goto free_skb; 12455b057c6bSHerbert Xu } 12465b057c6bSHerbert Xu 12475b057c6bSHerbert Xu /* FIXME: The use of this function with non-linear skb's really needs 12485b057c6bSHerbert Xu * to be audited. 12495b057c6bSHerbert Xu */ 12505b057c6bSHerbert Xu err = skb_linearize(skb); 12515b057c6bSHerbert Xu if (unlikely(err)) 12525b057c6bSHerbert Xu goto free_skb; 12535b057c6bSHerbert Xu 12545b057c6bSHerbert Xu memset(skb->data + skb->len, 0, pad); 12555b057c6bSHerbert Xu return 0; 12565b057c6bSHerbert Xu 12575b057c6bSHerbert Xu free_skb: 12581da177e4SLinus Torvalds kfree_skb(skb); 12595b057c6bSHerbert Xu return err; 12601da177e4SLinus Torvalds } 1261b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_pad); 12621da177e4SLinus Torvalds 12630dde3e16SIlpo Järvinen /** 12640c7ddf36SMathias Krause * pskb_put - add data to the tail of a potentially fragmented buffer 12650c7ddf36SMathias Krause * @skb: start of the buffer to use 12660c7ddf36SMathias Krause * @tail: tail fragment of the buffer to use 12670c7ddf36SMathias Krause * @len: amount of data to add 12680c7ddf36SMathias Krause * 12690c7ddf36SMathias Krause * This function extends the used data area of the potentially 12700c7ddf36SMathias Krause * fragmented buffer. @tail must be the last fragment of @skb -- or 12710c7ddf36SMathias Krause * @skb itself. If this would exceed the total buffer size the kernel 12720c7ddf36SMathias Krause * will panic. A pointer to the first byte of the extra data is 12730c7ddf36SMathias Krause * returned. 12740c7ddf36SMathias Krause */ 12750c7ddf36SMathias Krause 12760c7ddf36SMathias Krause unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) 12770c7ddf36SMathias Krause { 12780c7ddf36SMathias Krause if (tail != skb) { 12790c7ddf36SMathias Krause skb->data_len += len; 12800c7ddf36SMathias Krause skb->len += len; 12810c7ddf36SMathias Krause } 12820c7ddf36SMathias Krause return skb_put(tail, len); 12830c7ddf36SMathias Krause } 12840c7ddf36SMathias Krause EXPORT_SYMBOL_GPL(pskb_put); 12850c7ddf36SMathias Krause 12860c7ddf36SMathias Krause /** 12870dde3e16SIlpo Järvinen * skb_put - add data to a buffer 12880dde3e16SIlpo Järvinen * @skb: buffer to use 12890dde3e16SIlpo Järvinen * @len: amount of data to add 12900dde3e16SIlpo Järvinen * 12910dde3e16SIlpo Järvinen * This function extends the used data area of the buffer. If this would 12920dde3e16SIlpo Järvinen * exceed the total buffer size the kernel will panic. A pointer to the 12930dde3e16SIlpo Järvinen * first byte of the extra data is returned. 
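 *
 * A minimal usage sketch; the buffer size and the hypothetical @hdr
 * structure being appended are illustrative only:
 *
 *	struct sk_buff *skb = alloc_skb(128, GFP_ATOMIC);
 *
 *	if (skb)
 *		memcpy(skb_put(skb, sizeof(hdr)), &hdr, sizeof(hdr));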
12940dde3e16SIlpo Järvinen */ 12950dde3e16SIlpo Järvinen unsigned char *skb_put(struct sk_buff *skb, unsigned int len) 12960dde3e16SIlpo Järvinen { 12970dde3e16SIlpo Järvinen unsigned char *tmp = skb_tail_pointer(skb); 12980dde3e16SIlpo Järvinen SKB_LINEAR_ASSERT(skb); 12990dde3e16SIlpo Järvinen skb->tail += len; 13000dde3e16SIlpo Järvinen skb->len += len; 13010dde3e16SIlpo Järvinen if (unlikely(skb->tail > skb->end)) 13020dde3e16SIlpo Järvinen skb_over_panic(skb, len, __builtin_return_address(0)); 13030dde3e16SIlpo Järvinen return tmp; 13040dde3e16SIlpo Järvinen } 13050dde3e16SIlpo Järvinen EXPORT_SYMBOL(skb_put); 13060dde3e16SIlpo Järvinen 13076be8ac2fSIlpo Järvinen /** 1308c2aa270aSIlpo Järvinen * skb_push - add data to the start of a buffer 1309c2aa270aSIlpo Järvinen * @skb: buffer to use 1310c2aa270aSIlpo Järvinen * @len: amount of data to add 1311c2aa270aSIlpo Järvinen * 1312c2aa270aSIlpo Järvinen * This function extends the used data area of the buffer at the buffer 1313c2aa270aSIlpo Järvinen * start. If this would exceed the total buffer headroom the kernel will 1314c2aa270aSIlpo Järvinen * panic. A pointer to the first byte of the extra data is returned. 1315c2aa270aSIlpo Järvinen */ 1316c2aa270aSIlpo Järvinen unsigned char *skb_push(struct sk_buff *skb, unsigned int len) 1317c2aa270aSIlpo Järvinen { 1318c2aa270aSIlpo Järvinen skb->data -= len; 1319c2aa270aSIlpo Järvinen skb->len += len; 1320c2aa270aSIlpo Järvinen if (unlikely(skb->data<skb->head)) 1321c2aa270aSIlpo Järvinen skb_under_panic(skb, len, __builtin_return_address(0)); 1322c2aa270aSIlpo Järvinen return skb->data; 1323c2aa270aSIlpo Järvinen } 1324c2aa270aSIlpo Järvinen EXPORT_SYMBOL(skb_push); 1325c2aa270aSIlpo Järvinen 1326c2aa270aSIlpo Järvinen /** 13276be8ac2fSIlpo Järvinen * skb_pull - remove data from the start of a buffer 13286be8ac2fSIlpo Järvinen * @skb: buffer to use 13296be8ac2fSIlpo Järvinen * @len: amount of data to remove 13306be8ac2fSIlpo Järvinen * 13316be8ac2fSIlpo Järvinen * This function removes data from the start of a buffer, returning 13326be8ac2fSIlpo Järvinen * the memory to the headroom. A pointer to the next data in the buffer 13336be8ac2fSIlpo Järvinen * is returned. Once the data has been pulled future pushes will overwrite 13346be8ac2fSIlpo Järvinen * the old data. 13356be8ac2fSIlpo Järvinen */ 13366be8ac2fSIlpo Järvinen unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) 13376be8ac2fSIlpo Järvinen { 133847d29646SDavid S. Miller return skb_pull_inline(skb, len); 13396be8ac2fSIlpo Järvinen } 13406be8ac2fSIlpo Järvinen EXPORT_SYMBOL(skb_pull); 13416be8ac2fSIlpo Järvinen 1342419ae74eSIlpo Järvinen /** 1343419ae74eSIlpo Järvinen * skb_trim - remove end from a buffer 1344419ae74eSIlpo Järvinen * @skb: buffer to alter 1345419ae74eSIlpo Järvinen * @len: new length 1346419ae74eSIlpo Järvinen * 1347419ae74eSIlpo Järvinen * Cut the length of a buffer down by removing data from the tail. If 1348419ae74eSIlpo Järvinen * the buffer is already under the length specified it is not modified. 1349419ae74eSIlpo Järvinen * The skb must be linear. 1350419ae74eSIlpo Järvinen */ 1351419ae74eSIlpo Järvinen void skb_trim(struct sk_buff *skb, unsigned int len) 1352419ae74eSIlpo Järvinen { 1353419ae74eSIlpo Järvinen if (skb->len > len) 1354419ae74eSIlpo Järvinen __skb_trim(skb, len); 1355419ae74eSIlpo Järvinen } 1356419ae74eSIlpo Järvinen EXPORT_SYMBOL(skb_trim); 1357419ae74eSIlpo Järvinen 13583cc0e873SHerbert Xu /* Trims skb to length len. It can change skb pointers. 
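 *
 * Callers normally reach this through pskb_trim(); a minimal sketch,
 * with an arbitrary 60 byte target length:
 *
 *	if (pskb_trim(skb, 60))
 *		goto drop;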
13591da177e4SLinus Torvalds */ 13601da177e4SLinus Torvalds 13613cc0e873SHerbert Xu int ___pskb_trim(struct sk_buff *skb, unsigned int len) 13621da177e4SLinus Torvalds { 136327b437c8SHerbert Xu struct sk_buff **fragp; 136427b437c8SHerbert Xu struct sk_buff *frag; 13651da177e4SLinus Torvalds int offset = skb_headlen(skb); 13661da177e4SLinus Torvalds int nfrags = skb_shinfo(skb)->nr_frags; 13671da177e4SLinus Torvalds int i; 136827b437c8SHerbert Xu int err; 136927b437c8SHerbert Xu 137027b437c8SHerbert Xu if (skb_cloned(skb) && 137127b437c8SHerbert Xu unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 137227b437c8SHerbert Xu return err; 13731da177e4SLinus Torvalds 1374f4d26fb3SHerbert Xu i = 0; 1375f4d26fb3SHerbert Xu if (offset >= len) 1376f4d26fb3SHerbert Xu goto drop_pages; 1377f4d26fb3SHerbert Xu 1378f4d26fb3SHerbert Xu for (; i < nfrags; i++) { 13799e903e08SEric Dumazet int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 138027b437c8SHerbert Xu 138127b437c8SHerbert Xu if (end < len) { 13821da177e4SLinus Torvalds offset = end; 138327b437c8SHerbert Xu continue; 13841da177e4SLinus Torvalds } 13851da177e4SLinus Torvalds 13869e903e08SEric Dumazet skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 138727b437c8SHerbert Xu 1388f4d26fb3SHerbert Xu drop_pages: 138927b437c8SHerbert Xu skb_shinfo(skb)->nr_frags = i; 139027b437c8SHerbert Xu 139127b437c8SHerbert Xu for (; i < nfrags; i++) 1392ea2ab693SIan Campbell skb_frag_unref(skb, i); 139327b437c8SHerbert Xu 139421dc3301SDavid S. Miller if (skb_has_frag_list(skb)) 139527b437c8SHerbert Xu skb_drop_fraglist(skb); 1396f4d26fb3SHerbert Xu goto done; 139727b437c8SHerbert Xu } 139827b437c8SHerbert Xu 139927b437c8SHerbert Xu for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 140027b437c8SHerbert Xu fragp = &frag->next) { 140127b437c8SHerbert Xu int end = offset + frag->len; 140227b437c8SHerbert Xu 140327b437c8SHerbert Xu if (skb_shared(frag)) { 140427b437c8SHerbert Xu struct sk_buff *nfrag; 140527b437c8SHerbert Xu 140627b437c8SHerbert Xu nfrag = skb_clone(frag, GFP_ATOMIC); 140727b437c8SHerbert Xu if (unlikely(!nfrag)) 140827b437c8SHerbert Xu return -ENOMEM; 140927b437c8SHerbert Xu 141027b437c8SHerbert Xu nfrag->next = frag->next; 141185bb2a60SEric Dumazet consume_skb(frag); 141227b437c8SHerbert Xu frag = nfrag; 141327b437c8SHerbert Xu *fragp = frag; 141427b437c8SHerbert Xu } 141527b437c8SHerbert Xu 141627b437c8SHerbert Xu if (end < len) { 141727b437c8SHerbert Xu offset = end; 141827b437c8SHerbert Xu continue; 141927b437c8SHerbert Xu } 142027b437c8SHerbert Xu 142127b437c8SHerbert Xu if (end > len && 142227b437c8SHerbert Xu unlikely((err = pskb_trim(frag, len - offset)))) 142327b437c8SHerbert Xu return err; 142427b437c8SHerbert Xu 142527b437c8SHerbert Xu if (frag->next) 142627b437c8SHerbert Xu skb_drop_list(&frag->next); 142727b437c8SHerbert Xu break; 142827b437c8SHerbert Xu } 142927b437c8SHerbert Xu 1430f4d26fb3SHerbert Xu done: 143127b437c8SHerbert Xu if (len > skb_headlen(skb)) { 14321da177e4SLinus Torvalds skb->data_len -= skb->len - len; 14331da177e4SLinus Torvalds skb->len = len; 14341da177e4SLinus Torvalds } else { 14351da177e4SLinus Torvalds skb->len = len; 14361da177e4SLinus Torvalds skb->data_len = 0; 143727a884dcSArnaldo Carvalho de Melo skb_set_tail_pointer(skb, len); 14381da177e4SLinus Torvalds } 14391da177e4SLinus Torvalds 14401da177e4SLinus Torvalds return 0; 14411da177e4SLinus Torvalds } 1442b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(___pskb_trim); 14431da177e4SLinus Torvalds 14441da177e4SLinus Torvalds /** 14451da177e4SLinus Torvalds * __pskb_pull_tail - advance tail of skb header 14461da177e4SLinus Torvalds * @skb: buffer to reallocate 14471da177e4SLinus Torvalds * @delta: number of bytes to advance tail 14481da177e4SLinus Torvalds * 14491da177e4SLinus Torvalds * This function makes sense only on a fragmented &sk_buff; it expands 14501da177e4SLinus Torvalds * the header, moving its tail forward and copying the necessary 14511da177e4SLinus Torvalds * data from the fragmented part. 14521da177e4SLinus Torvalds * 14531da177e4SLinus Torvalds * The &sk_buff MUST have a reference count of 1. 14541da177e4SLinus Torvalds * 14551da177e4SLinus Torvalds * Returns %NULL (and the &sk_buff does not change) if the pull failed, 14561da177e4SLinus Torvalds * or the value of the new tail of the skb on success. 14571da177e4SLinus Torvalds * 14581da177e4SLinus Torvalds * All the pointers pointing into the skb header may change and must be 14591da177e4SLinus Torvalds * reloaded after a call to this function. 14601da177e4SLinus Torvalds */ 14611da177e4SLinus Torvalds 14621da177e4SLinus Torvalds /* Moves the tail of the skb head forward, copying data from the fragmented 14631da177e4SLinus Torvalds * part when necessary. 14641da177e4SLinus Torvalds * 1. It may fail due to malloc failure. 14651da177e4SLinus Torvalds * 2. It may change skb pointers. 14661da177e4SLinus Torvalds * 14671da177e4SLinus Torvalds * It is pretty complicated. Luckily, it is called only in exceptional cases. 14681da177e4SLinus Torvalds */ 14691da177e4SLinus Torvalds unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) 14701da177e4SLinus Torvalds { 14711da177e4SLinus Torvalds /* If the skb does not have enough free space at the tail, get a new one 14721da177e4SLinus Torvalds * plus 128 bytes for future expansions. If we have enough 14731da177e4SLinus Torvalds * room at the tail, reallocate without expansion only if the skb is cloned. 14741da177e4SLinus Torvalds */ 14754305b541SArnaldo Carvalho de Melo int i, k, eat = (skb->tail + delta) - skb->end; 14761da177e4SLinus Torvalds 14771da177e4SLinus Torvalds if (eat > 0 || skb_cloned(skb)) { 14781da177e4SLinus Torvalds if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 14791da177e4SLinus Torvalds GFP_ATOMIC)) 14801da177e4SLinus Torvalds return NULL; 14811da177e4SLinus Torvalds } 14821da177e4SLinus Torvalds 148327a884dcSArnaldo Carvalho de Melo if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)) 14841da177e4SLinus Torvalds BUG(); 14851da177e4SLinus Torvalds 14861da177e4SLinus Torvalds /* Optimization: no fragments, no reason to pre-estimate the 14871da177e4SLinus Torvalds * size of the pulled pages. Superb. 14881da177e4SLinus Torvalds */ 148921dc3301SDavid S. Miller if (!skb_has_frag_list(skb)) 14901da177e4SLinus Torvalds goto pull_pages; 14911da177e4SLinus Torvalds 14921da177e4SLinus Torvalds /* Estimate size of pulled pages. */ 14931da177e4SLinus Torvalds eat = delta; 14941da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 14959e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 14969e903e08SEric Dumazet 14979e903e08SEric Dumazet if (size >= eat) 14981da177e4SLinus Torvalds goto pull_pages; 14999e903e08SEric Dumazet eat -= size; 15001da177e4SLinus Torvalds } 15011da177e4SLinus Torvalds 15021da177e4SLinus Torvalds /* If we need to update the frag list, we are in trouble.
15031da177e4SLinus Torvalds * Certainly, it is possible to add an offset to the skb data, 15041da177e4SLinus Torvalds * but taking into account that pulling is expected to 15051da177e4SLinus Torvalds * be a very rare operation, it is worth fighting against 15061da177e4SLinus Torvalds * further bloating of the skb head and crucifying ourselves here instead. 15071da177e4SLinus Torvalds * Pure masochism, indeed. 8)8) 15081da177e4SLinus Torvalds */ 15091da177e4SLinus Torvalds if (eat) { 15101da177e4SLinus Torvalds struct sk_buff *list = skb_shinfo(skb)->frag_list; 15111da177e4SLinus Torvalds struct sk_buff *clone = NULL; 15121da177e4SLinus Torvalds struct sk_buff *insp = NULL; 15131da177e4SLinus Torvalds 15141da177e4SLinus Torvalds do { 151509a62660SKris Katterjohn BUG_ON(!list); 15161da177e4SLinus Torvalds 15171da177e4SLinus Torvalds if (list->len <= eat) { 15181da177e4SLinus Torvalds /* Eaten as a whole. */ 15191da177e4SLinus Torvalds eat -= list->len; 15201da177e4SLinus Torvalds list = list->next; 15211da177e4SLinus Torvalds insp = list; 15221da177e4SLinus Torvalds } else { 15231da177e4SLinus Torvalds /* Eaten partially. */ 15241da177e4SLinus Torvalds 15251da177e4SLinus Torvalds if (skb_shared(list)) { 15261da177e4SLinus Torvalds /* Sucks! We need to fork list. :-( */ 15271da177e4SLinus Torvalds clone = skb_clone(list, GFP_ATOMIC); 15281da177e4SLinus Torvalds if (!clone) 15291da177e4SLinus Torvalds return NULL; 15301da177e4SLinus Torvalds insp = list->next; 15311da177e4SLinus Torvalds list = clone; 15321da177e4SLinus Torvalds } else { 15331da177e4SLinus Torvalds /* This may be pulled without 15341da177e4SLinus Torvalds * problems. */ 15351da177e4SLinus Torvalds insp = list; 15361da177e4SLinus Torvalds } 15371da177e4SLinus Torvalds if (!pskb_pull(list, eat)) { 15381da177e4SLinus Torvalds kfree_skb(clone); 15391da177e4SLinus Torvalds return NULL; 15401da177e4SLinus Torvalds } 15411da177e4SLinus Torvalds break; 15421da177e4SLinus Torvalds } 15431da177e4SLinus Torvalds } while (eat); 15441da177e4SLinus Torvalds 15451da177e4SLinus Torvalds /* Free pulled out fragments. */ 15461da177e4SLinus Torvalds while ((list = skb_shinfo(skb)->frag_list) != insp) { 15471da177e4SLinus Torvalds skb_shinfo(skb)->frag_list = list->next; 15481da177e4SLinus Torvalds kfree_skb(list); 15491da177e4SLinus Torvalds } 15501da177e4SLinus Torvalds /* And insert new clone at head. */ 15511da177e4SLinus Torvalds if (clone) { 15521da177e4SLinus Torvalds clone->next = list; 15531da177e4SLinus Torvalds skb_shinfo(skb)->frag_list = clone; 15541da177e4SLinus Torvalds } 15551da177e4SLinus Torvalds } 15561da177e4SLinus Torvalds /* Success! Now we may commit changes to skb data.
*/ 15571da177e4SLinus Torvalds 15581da177e4SLinus Torvalds pull_pages: 15591da177e4SLinus Torvalds eat = delta; 15601da177e4SLinus Torvalds k = 0; 15611da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 15629e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 15639e903e08SEric Dumazet 15649e903e08SEric Dumazet if (size <= eat) { 1565ea2ab693SIan Campbell skb_frag_unref(skb, i); 15669e903e08SEric Dumazet eat -= size; 15671da177e4SLinus Torvalds } else { 15681da177e4SLinus Torvalds skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 15691da177e4SLinus Torvalds if (eat) { 15701da177e4SLinus Torvalds skb_shinfo(skb)->frags[k].page_offset += eat; 15719e903e08SEric Dumazet skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); 15721da177e4SLinus Torvalds eat = 0; 15731da177e4SLinus Torvalds } 15741da177e4SLinus Torvalds k++; 15751da177e4SLinus Torvalds } 15761da177e4SLinus Torvalds } 15771da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = k; 15781da177e4SLinus Torvalds 15791da177e4SLinus Torvalds skb->tail += delta; 15801da177e4SLinus Torvalds skb->data_len -= delta; 15811da177e4SLinus Torvalds 158227a884dcSArnaldo Carvalho de Melo return skb_tail_pointer(skb); 15831da177e4SLinus Torvalds } 1584b4ac530fSDavid S. Miller EXPORT_SYMBOL(__pskb_pull_tail); 15851da177e4SLinus Torvalds 158622019b17SEric Dumazet /** 158722019b17SEric Dumazet * skb_copy_bits - copy bits from skb to kernel buffer 158822019b17SEric Dumazet * @skb: source skb 158922019b17SEric Dumazet * @offset: offset in source 159022019b17SEric Dumazet * @to: destination buffer 159122019b17SEric Dumazet * @len: number of bytes to copy 159222019b17SEric Dumazet * 159322019b17SEric Dumazet * Copy the specified number of bytes from the source skb to the 159422019b17SEric Dumazet * destination buffer. 159522019b17SEric Dumazet * 159622019b17SEric Dumazet * CAUTION ! : 159722019b17SEric Dumazet * If its prototype is ever changed, 159822019b17SEric Dumazet * check arch/{*}/net/{*}.S files, 159922019b17SEric Dumazet * since it is called from BPF assembly code. 160022019b17SEric Dumazet */ 16011da177e4SLinus Torvalds int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 16021da177e4SLinus Torvalds { 16031a028e50SDavid S. Miller int start = skb_headlen(skb); 1604fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 1605fbb398a8SDavid S. Miller int i, copy; 16061da177e4SLinus Torvalds 16071da177e4SLinus Torvalds if (offset > (int)skb->len - len) 16081da177e4SLinus Torvalds goto fault; 16091da177e4SLinus Torvalds 16101da177e4SLinus Torvalds /* Copy header. */ 16111a028e50SDavid S. Miller if ((copy = start - offset) > 0) { 16121da177e4SLinus Torvalds if (copy > len) 16131da177e4SLinus Torvalds copy = len; 1614d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, offset, to, copy); 16151da177e4SLinus Torvalds if ((len -= copy) == 0) 16161da177e4SLinus Torvalds return 0; 16171da177e4SLinus Torvalds offset += copy; 16181da177e4SLinus Torvalds to += copy; 16191da177e4SLinus Torvalds } 16201da177e4SLinus Torvalds 16211da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 16221a028e50SDavid S. Miller int end; 162351c56b00SEric Dumazet skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 16241da177e4SLinus Torvalds 1625547b792cSIlpo Järvinen WARN_ON(start > offset + len); 16261a028e50SDavid S. 
Miller 162751c56b00SEric Dumazet end = start + skb_frag_size(f); 16281da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 16291da177e4SLinus Torvalds u8 *vaddr; 16301da177e4SLinus Torvalds 16311da177e4SLinus Torvalds if (copy > len) 16321da177e4SLinus Torvalds copy = len; 16331da177e4SLinus Torvalds 163451c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(f)); 16351da177e4SLinus Torvalds memcpy(to, 163651c56b00SEric Dumazet vaddr + f->page_offset + offset - start, 163751c56b00SEric Dumazet copy); 163851c56b00SEric Dumazet kunmap_atomic(vaddr); 16391da177e4SLinus Torvalds 16401da177e4SLinus Torvalds if ((len -= copy) == 0) 16411da177e4SLinus Torvalds return 0; 16421da177e4SLinus Torvalds offset += copy; 16431da177e4SLinus Torvalds to += copy; 16441da177e4SLinus Torvalds } 16451a028e50SDavid S. Miller start = end; 16461da177e4SLinus Torvalds } 16471da177e4SLinus Torvalds 1648fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 16491a028e50SDavid S. Miller int end; 16501da177e4SLinus Torvalds 1651547b792cSIlpo Järvinen WARN_ON(start > offset + len); 16521a028e50SDavid S. Miller 1653fbb398a8SDavid S. Miller end = start + frag_iter->len; 16541da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 16551da177e4SLinus Torvalds if (copy > len) 16561da177e4SLinus Torvalds copy = len; 1657fbb398a8SDavid S. Miller if (skb_copy_bits(frag_iter, offset - start, to, copy)) 16581da177e4SLinus Torvalds goto fault; 16591da177e4SLinus Torvalds if ((len -= copy) == 0) 16601da177e4SLinus Torvalds return 0; 16611da177e4SLinus Torvalds offset += copy; 16621da177e4SLinus Torvalds to += copy; 16631da177e4SLinus Torvalds } 16641a028e50SDavid S. Miller start = end; 16651da177e4SLinus Torvalds } 1666a6686f2fSShirley Ma 16671da177e4SLinus Torvalds if (!len) 16681da177e4SLinus Torvalds return 0; 16691da177e4SLinus Torvalds 16701da177e4SLinus Torvalds fault: 16711da177e4SLinus Torvalds return -EFAULT; 16721da177e4SLinus Torvalds } 1673b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_bits); 16741da177e4SLinus Torvalds 16759c55e01cSJens Axboe /* 16769c55e01cSJens Axboe * Callback from splice_to_pipe(), if we need to release some pages 16779c55e01cSJens Axboe * at the end of the spd in case we error'ed out in filling the pipe. 16789c55e01cSJens Axboe */ 16799c55e01cSJens Axboe static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 16809c55e01cSJens Axboe { 16818b9d3728SJarek Poplawski put_page(spd->pages[i]); 16828b9d3728SJarek Poplawski } 16839c55e01cSJens Axboe 1684a108d5f3SDavid S. 
Miller static struct page *linear_to_page(struct page *page, unsigned int *len, 16854fb66994SJarek Poplawski unsigned int *offset, 168618aafc62SEric Dumazet struct sock *sk) 16878b9d3728SJarek Poplawski { 16885640f768SEric Dumazet struct page_frag *pfrag = sk_page_frag(sk); 16898b9d3728SJarek Poplawski 16905640f768SEric Dumazet if (!sk_page_frag_refill(sk, pfrag)) 16918b9d3728SJarek Poplawski return NULL; 16924fb66994SJarek Poplawski 16935640f768SEric Dumazet *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 16944fb66994SJarek Poplawski 16955640f768SEric Dumazet memcpy(page_address(pfrag->page) + pfrag->offset, 16965640f768SEric Dumazet page_address(page) + *offset, *len); 16975640f768SEric Dumazet *offset = pfrag->offset; 16985640f768SEric Dumazet pfrag->offset += *len; 16994fb66994SJarek Poplawski 17005640f768SEric Dumazet return pfrag->page; 17019c55e01cSJens Axboe } 17029c55e01cSJens Axboe 170341c73a0dSEric Dumazet static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 170441c73a0dSEric Dumazet struct page *page, 170541c73a0dSEric Dumazet unsigned int offset) 170641c73a0dSEric Dumazet { 170741c73a0dSEric Dumazet return spd->nr_pages && 170841c73a0dSEric Dumazet spd->pages[spd->nr_pages - 1] == page && 170941c73a0dSEric Dumazet (spd->partial[spd->nr_pages - 1].offset + 171041c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len == offset); 171141c73a0dSEric Dumazet } 171241c73a0dSEric Dumazet 17139c55e01cSJens Axboe /* 17149c55e01cSJens Axboe * Fill page/offset/length into spd, if it can hold more pages. 17159c55e01cSJens Axboe */ 1716a108d5f3SDavid S. Miller static bool spd_fill_page(struct splice_pipe_desc *spd, 171735f3d14dSJens Axboe struct pipe_inode_info *pipe, struct page *page, 17184fb66994SJarek Poplawski unsigned int *len, unsigned int offset, 171918aafc62SEric Dumazet bool linear, 17207a67e56fSJarek Poplawski struct sock *sk) 17219c55e01cSJens Axboe { 172241c73a0dSEric Dumazet if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 1723a108d5f3SDavid S. Miller return true; 17249c55e01cSJens Axboe 17258b9d3728SJarek Poplawski if (linear) { 172618aafc62SEric Dumazet page = linear_to_page(page, len, &offset, sk); 17278b9d3728SJarek Poplawski if (!page) 1728a108d5f3SDavid S. Miller return true; 172941c73a0dSEric Dumazet } 173041c73a0dSEric Dumazet if (spd_can_coalesce(spd, page, offset)) { 173141c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len += *len; 1732a108d5f3SDavid S. Miller return false; 173341c73a0dSEric Dumazet } 17348b9d3728SJarek Poplawski get_page(page); 17359c55e01cSJens Axboe spd->pages[spd->nr_pages] = page; 17364fb66994SJarek Poplawski spd->partial[spd->nr_pages].len = *len; 17379c55e01cSJens Axboe spd->partial[spd->nr_pages].offset = offset; 17389c55e01cSJens Axboe spd->nr_pages++; 17398b9d3728SJarek Poplawski 1740a108d5f3SDavid S. Miller return false; 17419c55e01cSJens Axboe } 17429c55e01cSJens Axboe 1743a108d5f3SDavid S. Miller static bool __splice_segment(struct page *page, unsigned int poff, 17442870c43dSOctavian Purdila unsigned int plen, unsigned int *off, 174518aafc62SEric Dumazet unsigned int *len, 1746d7ccf7c0SEric Dumazet struct splice_pipe_desc *spd, bool linear, 174735f3d14dSJens Axboe struct sock *sk, 174835f3d14dSJens Axboe struct pipe_inode_info *pipe) 17499c55e01cSJens Axboe { 17502870c43dSOctavian Purdila if (!*len) 1751a108d5f3SDavid S. 
Miller return true; 17529c55e01cSJens Axboe 17532870c43dSOctavian Purdila /* skip this segment if already processed */ 17542870c43dSOctavian Purdila if (*off >= plen) { 17552870c43dSOctavian Purdila *off -= plen; 1756a108d5f3SDavid S. Miller return false; 17572870c43dSOctavian Purdila } 17582870c43dSOctavian Purdila 17592870c43dSOctavian Purdila /* ignore any bits we already processed */ 17609ca1b22dSEric Dumazet poff += *off; 17619ca1b22dSEric Dumazet plen -= *off; 17622870c43dSOctavian Purdila *off = 0; 17632870c43dSOctavian Purdila 176418aafc62SEric Dumazet do { 176518aafc62SEric Dumazet unsigned int flen = min(*len, plen); 17662870c43dSOctavian Purdila 176718aafc62SEric Dumazet if (spd_fill_page(spd, pipe, page, &flen, poff, 176818aafc62SEric Dumazet linear, sk)) 1769a108d5f3SDavid S. Miller return true; 177018aafc62SEric Dumazet poff += flen; 177118aafc62SEric Dumazet plen -= flen; 17722870c43dSOctavian Purdila *len -= flen; 177318aafc62SEric Dumazet } while (*len && plen); 17742870c43dSOctavian Purdila 1775a108d5f3SDavid S. Miller return false; 1776db43a282SOctavian Purdila } 17779c55e01cSJens Axboe 17789c55e01cSJens Axboe /* 1779a108d5f3SDavid S. Miller * Map linear and fragment data from the skb to spd. It reports true if the 17802870c43dSOctavian Purdila * pipe is full or if we already spliced the requested length. 17819c55e01cSJens Axboe */ 1782a108d5f3SDavid S. Miller static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 178335f3d14dSJens Axboe unsigned int *offset, unsigned int *len, 178435f3d14dSJens Axboe struct splice_pipe_desc *spd, struct sock *sk) 17852870c43dSOctavian Purdila { 17862870c43dSOctavian Purdila int seg; 17879c55e01cSJens Axboe 17881d0c0b32SEric Dumazet /* map the linear part : 17892996d31fSAlexander Duyck * If skb->head_frag is set, this 'linear' part is backed by a 17902996d31fSAlexander Duyck * fragment, and if the head is not shared with any clones then 17912996d31fSAlexander Duyck * we can avoid a copy since we own the head portion of this page. 17929c55e01cSJens Axboe */ 17932870c43dSOctavian Purdila if (__splice_segment(virt_to_page(skb->data), 17942870c43dSOctavian Purdila (unsigned long) skb->data & (PAGE_SIZE - 1), 17952870c43dSOctavian Purdila skb_headlen(skb), 179618aafc62SEric Dumazet offset, len, spd, 17973a7c1ee4SAlexander Duyck skb_head_is_locked(skb), 17981d0c0b32SEric Dumazet sk, pipe)) 1799a108d5f3SDavid S. Miller return true; 18009c55e01cSJens Axboe 18019c55e01cSJens Axboe /* 18029c55e01cSJens Axboe * then map the fragments 18039c55e01cSJens Axboe */ 18049c55e01cSJens Axboe for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 18059c55e01cSJens Axboe const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 18069c55e01cSJens Axboe 1807ea2ab693SIan Campbell if (__splice_segment(skb_frag_page(f), 18089e903e08SEric Dumazet f->page_offset, skb_frag_size(f), 180918aafc62SEric Dumazet offset, len, spd, false, sk, pipe)) 1810a108d5f3SDavid S. Miller return true; 18119c55e01cSJens Axboe } 18129c55e01cSJens Axboe 1813a108d5f3SDavid S. Miller return false; 18149c55e01cSJens Axboe } 18159c55e01cSJens Axboe 18169c55e01cSJens Axboe /* 18179c55e01cSJens Axboe * Map data from the skb to a pipe. Should handle both the linear part, 18189c55e01cSJens Axboe * the fragments, and the frag list. It does NOT handle frag lists within 18199c55e01cSJens Axboe * the frag list, if such a thing exists. We'd probably need to recurse to 18209c55e01cSJens Axboe * handle that cleanly. 
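 *
 * A minimal sketch of a caller, loosely modelled on the tcp_splice_read()
 * path; the local variable names are illustrative only:
 *
 *	int ret = skb_splice_bits(skb, offset, pipe, want, flags);
 *
 *	if (ret > 0)
 *		spliced += ret;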
18219c55e01cSJens Axboe */ 18228b9d3728SJarek Poplawski int skb_splice_bits(struct sk_buff *skb, unsigned int offset, 18239c55e01cSJens Axboe struct pipe_inode_info *pipe, unsigned int tlen, 18249c55e01cSJens Axboe unsigned int flags) 18259c55e01cSJens Axboe { 182641c73a0dSEric Dumazet struct partial_page partial[MAX_SKB_FRAGS]; 182741c73a0dSEric Dumazet struct page *pages[MAX_SKB_FRAGS]; 18289c55e01cSJens Axboe struct splice_pipe_desc spd = { 18299c55e01cSJens Axboe .pages = pages, 18309c55e01cSJens Axboe .partial = partial, 1831047fe360SEric Dumazet .nr_pages_max = MAX_SKB_FRAGS, 18329c55e01cSJens Axboe .flags = flags, 18339c55e01cSJens Axboe .ops = &sock_pipe_buf_ops, 18349c55e01cSJens Axboe .spd_release = sock_spd_release, 18359c55e01cSJens Axboe }; 1836fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 18377a67e56fSJarek Poplawski struct sock *sk = skb->sk; 183835f3d14dSJens Axboe int ret = 0; 183935f3d14dSJens Axboe 18409c55e01cSJens Axboe /* 18419c55e01cSJens Axboe * __skb_splice_bits() only fails if the output has no room left, 18429c55e01cSJens Axboe * so no point in going over the frag_list for the error case. 18439c55e01cSJens Axboe */ 184435f3d14dSJens Axboe if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) 18459c55e01cSJens Axboe goto done; 18469c55e01cSJens Axboe else if (!tlen) 18479c55e01cSJens Axboe goto done; 18489c55e01cSJens Axboe 18499c55e01cSJens Axboe /* 18509c55e01cSJens Axboe * now see if we have a frag_list to map 18519c55e01cSJens Axboe */ 1852fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 1853fbb398a8SDavid S. Miller if (!tlen) 18549c55e01cSJens Axboe break; 185535f3d14dSJens Axboe if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk)) 1856fbb398a8SDavid S. Miller break; 18579c55e01cSJens Axboe } 18589c55e01cSJens Axboe 18599c55e01cSJens Axboe done: 18609c55e01cSJens Axboe if (spd.nr_pages) { 18619c55e01cSJens Axboe /* 18629c55e01cSJens Axboe * Drop the socket lock, otherwise we have reverse 18639c55e01cSJens Axboe * locking dependencies between sk_lock and i_mutex 18649c55e01cSJens Axboe * here as compared to sendfile(). We enter here 18659c55e01cSJens Axboe * with the socket lock held, and splice_to_pipe() will 18669c55e01cSJens Axboe * grab the pipe inode lock. For sendfile() emulation, 18679c55e01cSJens Axboe * we call into ->sendpage() with the i_mutex lock held 18689c55e01cSJens Axboe * and networking will grab the socket lock. 18699c55e01cSJens Axboe */ 1870293ad604SOctavian Purdila release_sock(sk); 18719c55e01cSJens Axboe ret = splice_to_pipe(pipe, &spd); 1872293ad604SOctavian Purdila lock_sock(sk); 18739c55e01cSJens Axboe } 18749c55e01cSJens Axboe 187535f3d14dSJens Axboe return ret; 18769c55e01cSJens Axboe } 18779c55e01cSJens Axboe 1878357b40a1SHerbert Xu /** 1879357b40a1SHerbert Xu * skb_store_bits - store bits from kernel buffer to skb 1880357b40a1SHerbert Xu * @skb: destination buffer 1881357b40a1SHerbert Xu * @offset: offset in destination 1882357b40a1SHerbert Xu * @from: source buffer 1883357b40a1SHerbert Xu * @len: number of bytes to copy 1884357b40a1SHerbert Xu * 1885357b40a1SHerbert Xu * Copy the specified number of bytes from the source buffer to the 1886357b40a1SHerbert Xu * destination skb. This function handles all the messy bits of 1887357b40a1SHerbert Xu * traversing fragment lists and such. 1888357b40a1SHerbert Xu */ 1889357b40a1SHerbert Xu 18900c6fcc8aSStephen Hemminger int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 1891357b40a1SHerbert Xu { 18921a028e50SDavid S. 
Miller int start = skb_headlen(skb); 1893fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 1894fbb398a8SDavid S. Miller int i, copy; 1895357b40a1SHerbert Xu 1896357b40a1SHerbert Xu if (offset > (int)skb->len - len) 1897357b40a1SHerbert Xu goto fault; 1898357b40a1SHerbert Xu 18991a028e50SDavid S. Miller if ((copy = start - offset) > 0) { 1900357b40a1SHerbert Xu if (copy > len) 1901357b40a1SHerbert Xu copy = len; 190227d7ff46SArnaldo Carvalho de Melo skb_copy_to_linear_data_offset(skb, offset, from, copy); 1903357b40a1SHerbert Xu if ((len -= copy) == 0) 1904357b40a1SHerbert Xu return 0; 1905357b40a1SHerbert Xu offset += copy; 1906357b40a1SHerbert Xu from += copy; 1907357b40a1SHerbert Xu } 1908357b40a1SHerbert Xu 1909357b40a1SHerbert Xu for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1910357b40a1SHerbert Xu skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 19111a028e50SDavid S. Miller int end; 1912357b40a1SHerbert Xu 1913547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19141a028e50SDavid S. Miller 19159e903e08SEric Dumazet end = start + skb_frag_size(frag); 1916357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 1917357b40a1SHerbert Xu u8 *vaddr; 1918357b40a1SHerbert Xu 1919357b40a1SHerbert Xu if (copy > len) 1920357b40a1SHerbert Xu copy = len; 1921357b40a1SHerbert Xu 192251c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(frag)); 19231a028e50SDavid S. Miller memcpy(vaddr + frag->page_offset + offset - start, 19241a028e50SDavid S. Miller from, copy); 192551c56b00SEric Dumazet kunmap_atomic(vaddr); 1926357b40a1SHerbert Xu 1927357b40a1SHerbert Xu if ((len -= copy) == 0) 1928357b40a1SHerbert Xu return 0; 1929357b40a1SHerbert Xu offset += copy; 1930357b40a1SHerbert Xu from += copy; 1931357b40a1SHerbert Xu } 19321a028e50SDavid S. Miller start = end; 1933357b40a1SHerbert Xu } 1934357b40a1SHerbert Xu 1935fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 19361a028e50SDavid S. Miller int end; 1937357b40a1SHerbert Xu 1938547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19391a028e50SDavid S. Miller 1940fbb398a8SDavid S. Miller end = start + frag_iter->len; 1941357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 1942357b40a1SHerbert Xu if (copy > len) 1943357b40a1SHerbert Xu copy = len; 1944fbb398a8SDavid S. Miller if (skb_store_bits(frag_iter, offset - start, 19451a028e50SDavid S. Miller from, copy)) 1946357b40a1SHerbert Xu goto fault; 1947357b40a1SHerbert Xu if ((len -= copy) == 0) 1948357b40a1SHerbert Xu return 0; 1949357b40a1SHerbert Xu offset += copy; 1950357b40a1SHerbert Xu from += copy; 1951357b40a1SHerbert Xu } 19521a028e50SDavid S. Miller start = end; 1953357b40a1SHerbert Xu } 1954357b40a1SHerbert Xu if (!len) 1955357b40a1SHerbert Xu return 0; 1956357b40a1SHerbert Xu 1957357b40a1SHerbert Xu fault: 1958357b40a1SHerbert Xu return -EFAULT; 1959357b40a1SHerbert Xu } 1960357b40a1SHerbert Xu EXPORT_SYMBOL(skb_store_bits); 1961357b40a1SHerbert Xu 19621da177e4SLinus Torvalds /* Checksum skb data. */ 19632817a336SDaniel Borkmann __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 19642817a336SDaniel Borkmann __wsum csum, const struct skb_checksum_ops *ops) 19651da177e4SLinus Torvalds { 19661a028e50SDavid S. Miller int start = skb_headlen(skb); 19671a028e50SDavid S. Miller int i, copy = start - offset; 1968fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 19691da177e4SLinus Torvalds int pos = 0; 19701da177e4SLinus Torvalds 19711da177e4SLinus Torvalds /* Checksum header. 
*/ 19721da177e4SLinus Torvalds if (copy > 0) { 19731da177e4SLinus Torvalds if (copy > len) 19741da177e4SLinus Torvalds copy = len; 19752817a336SDaniel Borkmann csum = ops->update(skb->data + offset, copy, csum); 19761da177e4SLinus Torvalds if ((len -= copy) == 0) 19771da177e4SLinus Torvalds return csum; 19781da177e4SLinus Torvalds offset += copy; 19791da177e4SLinus Torvalds pos = copy; 19801da177e4SLinus Torvalds } 19811da177e4SLinus Torvalds 19821da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 19831a028e50SDavid S. Miller int end; 198451c56b00SEric Dumazet skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 19851da177e4SLinus Torvalds 1986547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19871a028e50SDavid S. Miller 198851c56b00SEric Dumazet end = start + skb_frag_size(frag); 19891da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 199044bb9363SAl Viro __wsum csum2; 19911da177e4SLinus Torvalds u8 *vaddr; 19921da177e4SLinus Torvalds 19931da177e4SLinus Torvalds if (copy > len) 19941da177e4SLinus Torvalds copy = len; 199551c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(frag)); 19962817a336SDaniel Borkmann csum2 = ops->update(vaddr + frag->page_offset + 19971a028e50SDavid S. Miller offset - start, copy, 0); 199851c56b00SEric Dumazet kunmap_atomic(vaddr); 19992817a336SDaniel Borkmann csum = ops->combine(csum, csum2, pos, copy); 20001da177e4SLinus Torvalds if (!(len -= copy)) 20011da177e4SLinus Torvalds return csum; 20021da177e4SLinus Torvalds offset += copy; 20031da177e4SLinus Torvalds pos += copy; 20041da177e4SLinus Torvalds } 20051a028e50SDavid S. Miller start = end; 20061da177e4SLinus Torvalds } 20071da177e4SLinus Torvalds 2008fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 20091a028e50SDavid S. Miller int end; 20101da177e4SLinus Torvalds 2011547b792cSIlpo Järvinen WARN_ON(start > offset + len); 20121a028e50SDavid S. Miller 2013fbb398a8SDavid S. Miller end = start + frag_iter->len; 20141da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 20155f92a738SAl Viro __wsum csum2; 20161da177e4SLinus Torvalds if (copy > len) 20171da177e4SLinus Torvalds copy = len; 20182817a336SDaniel Borkmann csum2 = __skb_checksum(frag_iter, offset - start, 20192817a336SDaniel Borkmann copy, 0, ops); 20202817a336SDaniel Borkmann csum = ops->combine(csum, csum2, pos, copy); 20211da177e4SLinus Torvalds if ((len -= copy) == 0) 20221da177e4SLinus Torvalds return csum; 20231da177e4SLinus Torvalds offset += copy; 20241da177e4SLinus Torvalds pos += copy; 20251da177e4SLinus Torvalds } 20261a028e50SDavid S. Miller start = end; 20271da177e4SLinus Torvalds } 202809a62660SKris Katterjohn BUG_ON(len); 20291da177e4SLinus Torvalds 20301da177e4SLinus Torvalds return csum; 20311da177e4SLinus Torvalds } 20322817a336SDaniel Borkmann EXPORT_SYMBOL(__skb_checksum); 20332817a336SDaniel Borkmann 20342817a336SDaniel Borkmann __wsum skb_checksum(const struct sk_buff *skb, int offset, 20352817a336SDaniel Borkmann int len, __wsum csum) 20362817a336SDaniel Borkmann { 20372817a336SDaniel Borkmann const struct skb_checksum_ops ops = { 2038cea80ea8SDaniel Borkmann .update = csum_partial_ext, 20392817a336SDaniel Borkmann .combine = csum_block_add_ext, 20402817a336SDaniel Borkmann }; 20412817a336SDaniel Borkmann 20422817a336SDaniel Borkmann return __skb_checksum(skb, offset, len, csum, &ops); 20432817a336SDaniel Borkmann } 2044b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_checksum); 20451da177e4SLinus Torvalds 20461da177e4SLinus Torvalds /* Both of above in one bottle. 
*/ 20471da177e4SLinus Torvalds 204881d77662SAl Viro __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 204981d77662SAl Viro u8 *to, int len, __wsum csum) 20501da177e4SLinus Torvalds { 20511a028e50SDavid S. Miller int start = skb_headlen(skb); 20521a028e50SDavid S. Miller int i, copy = start - offset; 2053fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 20541da177e4SLinus Torvalds int pos = 0; 20551da177e4SLinus Torvalds 20561da177e4SLinus Torvalds /* Copy header. */ 20571da177e4SLinus Torvalds if (copy > 0) { 20581da177e4SLinus Torvalds if (copy > len) 20591da177e4SLinus Torvalds copy = len; 20601da177e4SLinus Torvalds csum = csum_partial_copy_nocheck(skb->data + offset, to, 20611da177e4SLinus Torvalds copy, csum); 20621da177e4SLinus Torvalds if ((len -= copy) == 0) 20631da177e4SLinus Torvalds return csum; 20641da177e4SLinus Torvalds offset += copy; 20651da177e4SLinus Torvalds to += copy; 20661da177e4SLinus Torvalds pos = copy; 20671da177e4SLinus Torvalds } 20681da177e4SLinus Torvalds 20691da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 20701a028e50SDavid S. Miller int end; 20711da177e4SLinus Torvalds 2072547b792cSIlpo Järvinen WARN_ON(start > offset + len); 20731a028e50SDavid S. Miller 20749e903e08SEric Dumazet end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 20751da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 20765084205fSAl Viro __wsum csum2; 20771da177e4SLinus Torvalds u8 *vaddr; 20781da177e4SLinus Torvalds skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 20791da177e4SLinus Torvalds 20801da177e4SLinus Torvalds if (copy > len) 20811da177e4SLinus Torvalds copy = len; 208251c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(frag)); 20831da177e4SLinus Torvalds csum2 = csum_partial_copy_nocheck(vaddr + 20841a028e50SDavid S. Miller frag->page_offset + 20851a028e50SDavid S. Miller offset - start, to, 20861a028e50SDavid S. Miller copy, 0); 208751c56b00SEric Dumazet kunmap_atomic(vaddr); 20881da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 20891da177e4SLinus Torvalds if (!(len -= copy)) 20901da177e4SLinus Torvalds return csum; 20911da177e4SLinus Torvalds offset += copy; 20921da177e4SLinus Torvalds to += copy; 20931da177e4SLinus Torvalds pos += copy; 20941da177e4SLinus Torvalds } 20951a028e50SDavid S. Miller start = end; 20961da177e4SLinus Torvalds } 20971da177e4SLinus Torvalds 2098fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 209981d77662SAl Viro __wsum csum2; 21001a028e50SDavid S. Miller int end; 21011da177e4SLinus Torvalds 2102547b792cSIlpo Järvinen WARN_ON(start > offset + len); 21031a028e50SDavid S. Miller 2104fbb398a8SDavid S. Miller end = start + frag_iter->len; 21051da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 21061da177e4SLinus Torvalds if (copy > len) 21071da177e4SLinus Torvalds copy = len; 2108fbb398a8SDavid S. Miller csum2 = skb_copy_and_csum_bits(frag_iter, 21091a028e50SDavid S. Miller offset - start, 21101da177e4SLinus Torvalds to, copy, 0); 21111da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 21121da177e4SLinus Torvalds if ((len -= copy) == 0) 21131da177e4SLinus Torvalds return csum; 21141da177e4SLinus Torvalds offset += copy; 21151da177e4SLinus Torvalds to += copy; 21161da177e4SLinus Torvalds pos += copy; 21171da177e4SLinus Torvalds } 21181a028e50SDavid S. Miller start = end; 21191da177e4SLinus Torvalds } 212009a62660SKris Katterjohn BUG_ON(len); 21211da177e4SLinus Torvalds return csum; 21221da177e4SLinus Torvalds } 2123b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_copy_and_csum_bits); 21241da177e4SLinus Torvalds 2125af2806f8SThomas Graf /** 2126af2806f8SThomas Graf * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 2127af2806f8SThomas Graf * @from: source buffer 2128af2806f8SThomas Graf * 2129af2806f8SThomas Graf * Calculates the amount of linear headroom needed in the 'to' skb passed 2130af2806f8SThomas Graf * into skb_zerocopy(). 2131af2806f8SThomas Graf */ 2132af2806f8SThomas Graf unsigned int 2133af2806f8SThomas Graf skb_zerocopy_headlen(const struct sk_buff *from) 2134af2806f8SThomas Graf { 2135af2806f8SThomas Graf unsigned int hlen = 0; 2136af2806f8SThomas Graf 2137af2806f8SThomas Graf if (!from->head_frag || 2138af2806f8SThomas Graf skb_headlen(from) < L1_CACHE_BYTES || 2139af2806f8SThomas Graf skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 2140af2806f8SThomas Graf hlen = skb_headlen(from); 2141af2806f8SThomas Graf 2142af2806f8SThomas Graf if (skb_has_frag_list(from)) 2143af2806f8SThomas Graf hlen = from->len; 2144af2806f8SThomas Graf 2145af2806f8SThomas Graf return hlen; 2146af2806f8SThomas Graf } 2147af2806f8SThomas Graf EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 2148af2806f8SThomas Graf 2149af2806f8SThomas Graf /** 2150af2806f8SThomas Graf * skb_zerocopy - Zero copy skb to skb 2151af2806f8SThomas Graf * @to: destination buffer 2152af2806f8SThomas Graf * @source: source buffer 2153af2806f8SThomas Graf * @len: number of bytes to copy from source buffer 2154af2806f8SThomas Graf * @hlen: size of linear headroom in destination buffer 2155af2806f8SThomas Graf * 2156af2806f8SThomas Graf * Copies up to `len` bytes from `from` to `to` by creating references 2157af2806f8SThomas Graf * to the frags in the source buffer. 2158af2806f8SThomas Graf * 2159af2806f8SThomas Graf * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 2160af2806f8SThomas Graf * headroom in the `to` buffer. 
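 *
 * A minimal sketch of the intended pairing; the destination allocation
 * and the omitted error handling are illustrative only:
 *
 *	unsigned int hlen = skb_zerocopy_headlen(from);
 *	struct sk_buff *to = alloc_skb(hlen, GFP_ATOMIC);
 *
 *	if (to)
 *		skb_zerocopy(to, from, from->len, hlen);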
2161af2806f8SThomas Graf */ 2162af2806f8SThomas Graf void 2163af2806f8SThomas Graf skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen) 2164af2806f8SThomas Graf { 2165af2806f8SThomas Graf int i, j = 0; 2166af2806f8SThomas Graf int plen = 0; /* length of skb->head fragment */ 2167af2806f8SThomas Graf struct page *page; 2168af2806f8SThomas Graf unsigned int offset; 2169af2806f8SThomas Graf 2170af2806f8SThomas Graf BUG_ON(!from->head_frag && !hlen); 2171af2806f8SThomas Graf 2172af2806f8SThomas Graf /* dont bother with small payloads */ 2173af2806f8SThomas Graf if (len <= skb_tailroom(to)) { 2174af2806f8SThomas Graf skb_copy_bits(from, 0, skb_put(to, len), len); 2175af2806f8SThomas Graf return; 2176af2806f8SThomas Graf } 2177af2806f8SThomas Graf 2178af2806f8SThomas Graf if (hlen) { 2179af2806f8SThomas Graf skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 2180af2806f8SThomas Graf len -= hlen; 2181af2806f8SThomas Graf } else { 2182af2806f8SThomas Graf plen = min_t(int, skb_headlen(from), len); 2183af2806f8SThomas Graf if (plen) { 2184af2806f8SThomas Graf page = virt_to_head_page(from->head); 2185af2806f8SThomas Graf offset = from->data - (unsigned char *)page_address(page); 2186af2806f8SThomas Graf __skb_fill_page_desc(to, 0, page, offset, plen); 2187af2806f8SThomas Graf get_page(page); 2188af2806f8SThomas Graf j = 1; 2189af2806f8SThomas Graf len -= plen; 2190af2806f8SThomas Graf } 2191af2806f8SThomas Graf } 2192af2806f8SThomas Graf 2193af2806f8SThomas Graf to->truesize += len + plen; 2194af2806f8SThomas Graf to->len += len + plen; 2195af2806f8SThomas Graf to->data_len += len + plen; 2196af2806f8SThomas Graf 2197af2806f8SThomas Graf for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 2198af2806f8SThomas Graf if (!len) 2199af2806f8SThomas Graf break; 2200af2806f8SThomas Graf skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 2201af2806f8SThomas Graf skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len); 2202af2806f8SThomas Graf len -= skb_shinfo(to)->frags[j].size; 2203af2806f8SThomas Graf skb_frag_ref(to, j); 2204af2806f8SThomas Graf j++; 2205af2806f8SThomas Graf } 2206af2806f8SThomas Graf skb_shinfo(to)->nr_frags = j; 2207af2806f8SThomas Graf } 2208af2806f8SThomas Graf EXPORT_SYMBOL_GPL(skb_zerocopy); 2209af2806f8SThomas Graf 22101da177e4SLinus Torvalds void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 22111da177e4SLinus Torvalds { 2212d3bc23e7SAl Viro __wsum csum; 22131da177e4SLinus Torvalds long csstart; 22141da177e4SLinus Torvalds 221584fa7933SPatrick McHardy if (skb->ip_summed == CHECKSUM_PARTIAL) 221655508d60SMichał Mirosław csstart = skb_checksum_start_offset(skb); 22171da177e4SLinus Torvalds else 22181da177e4SLinus Torvalds csstart = skb_headlen(skb); 22191da177e4SLinus Torvalds 222009a62660SKris Katterjohn BUG_ON(csstart > skb_headlen(skb)); 22211da177e4SLinus Torvalds 2222d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data(skb, to, csstart); 22231da177e4SLinus Torvalds 22241da177e4SLinus Torvalds csum = 0; 22251da177e4SLinus Torvalds if (csstart != skb->len) 22261da177e4SLinus Torvalds csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 22271da177e4SLinus Torvalds skb->len - csstart, 0); 22281da177e4SLinus Torvalds 222984fa7933SPatrick McHardy if (skb->ip_summed == CHECKSUM_PARTIAL) { 2230ff1dcadbSAl Viro long csstuff = csstart + skb->csum_offset; 22311da177e4SLinus Torvalds 2232d3bc23e7SAl Viro *((__sum16 *)(to + csstuff)) = csum_fold(csum); 22331da177e4SLinus Torvalds } 22341da177e4SLinus Torvalds } 
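
/* A minimal sketch of how a driver might use skb_copy_and_csum_dev() when
 * copying a frame into a linear transmit buffer; tx_buf and TX_BUF_LEN are
 * hypothetical names, not part of this file:
 *
 *	if (skb->len <= TX_BUF_LEN)
 *		skb_copy_and_csum_dev(skb, tx_buf);
 */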
2235b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_dev); 22361da177e4SLinus Torvalds 22371da177e4SLinus Torvalds /** 22381da177e4SLinus Torvalds * skb_dequeue - remove from the head of the queue 22391da177e4SLinus Torvalds * @list: list to dequeue from 22401da177e4SLinus Torvalds * 22411da177e4SLinus Torvalds * Remove the head of the list. The list lock is taken so the function 22421da177e4SLinus Torvalds * may be used safely with other locking list functions. The head item is 22431da177e4SLinus Torvalds * returned or %NULL if the list is empty. 22441da177e4SLinus Torvalds */ 22451da177e4SLinus Torvalds 22461da177e4SLinus Torvalds struct sk_buff *skb_dequeue(struct sk_buff_head *list) 22471da177e4SLinus Torvalds { 22481da177e4SLinus Torvalds unsigned long flags; 22491da177e4SLinus Torvalds struct sk_buff *result; 22501da177e4SLinus Torvalds 22511da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 22521da177e4SLinus Torvalds result = __skb_dequeue(list); 22531da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 22541da177e4SLinus Torvalds return result; 22551da177e4SLinus Torvalds } 2256b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue); 22571da177e4SLinus Torvalds 22581da177e4SLinus Torvalds /** 22591da177e4SLinus Torvalds * skb_dequeue_tail - remove from the tail of the queue 22601da177e4SLinus Torvalds * @list: list to dequeue from 22611da177e4SLinus Torvalds * 22621da177e4SLinus Torvalds * Remove the tail of the list. The list lock is taken so the function 22631da177e4SLinus Torvalds * may be used safely with other locking list functions. The tail item is 22641da177e4SLinus Torvalds * returned or %NULL if the list is empty. 22651da177e4SLinus Torvalds */ 22661da177e4SLinus Torvalds struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 22671da177e4SLinus Torvalds { 22681da177e4SLinus Torvalds unsigned long flags; 22691da177e4SLinus Torvalds struct sk_buff *result; 22701da177e4SLinus Torvalds 22711da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 22721da177e4SLinus Torvalds result = __skb_dequeue_tail(list); 22731da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 22741da177e4SLinus Torvalds return result; 22751da177e4SLinus Torvalds } 2276b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue_tail); 22771da177e4SLinus Torvalds 22781da177e4SLinus Torvalds /** 22791da177e4SLinus Torvalds * skb_queue_purge - empty a list 22801da177e4SLinus Torvalds * @list: list to empty 22811da177e4SLinus Torvalds * 22821da177e4SLinus Torvalds * Delete all buffers on an &sk_buff list. Each buffer is removed from 22831da177e4SLinus Torvalds * the list and one reference dropped. This function takes the list 22841da177e4SLinus Torvalds * lock and is atomic with respect to other list locking functions. 22851da177e4SLinus Torvalds */ 22861da177e4SLinus Torvalds void skb_queue_purge(struct sk_buff_head *list) 22871da177e4SLinus Torvalds { 22881da177e4SLinus Torvalds struct sk_buff *skb; 22891da177e4SLinus Torvalds while ((skb = skb_dequeue(list)) != NULL) 22901da177e4SLinus Torvalds kfree_skb(skb); 22911da177e4SLinus Torvalds } 2292b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_purge); 22931da177e4SLinus Torvalds 22941da177e4SLinus Torvalds /** 22951da177e4SLinus Torvalds * skb_queue_head - queue a buffer at the list head 22961da177e4SLinus Torvalds * @list: list to use 22971da177e4SLinus Torvalds * @newsk: buffer to queue 22981da177e4SLinus Torvalds * 22991da177e4SLinus Torvalds * Queue a buffer at the start of the list. 
This function takes the 23001da177e4SLinus Torvalds * list lock and can be used safely with other locking &sk_buff 23011da177e4SLinus Torvalds * functions. 23021da177e4SLinus Torvalds * 23031da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 23041da177e4SLinus Torvalds */ 23051da177e4SLinus Torvalds void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 23061da177e4SLinus Torvalds { 23071da177e4SLinus Torvalds unsigned long flags; 23081da177e4SLinus Torvalds 23091da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 23101da177e4SLinus Torvalds __skb_queue_head(list, newsk); 23111da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 23121da177e4SLinus Torvalds } 2313b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_head); 23141da177e4SLinus Torvalds 23151da177e4SLinus Torvalds /** 23161da177e4SLinus Torvalds * skb_queue_tail - queue a buffer at the list tail 23171da177e4SLinus Torvalds * @list: list to use 23181da177e4SLinus Torvalds * @newsk: buffer to queue 23191da177e4SLinus Torvalds * 23201da177e4SLinus Torvalds * Queue a buffer at the tail of the list. This function takes the 23211da177e4SLinus Torvalds * list lock and can be used safely with other locking &sk_buff 23221da177e4SLinus Torvalds * functions. 23231da177e4SLinus Torvalds * 23241da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 23251da177e4SLinus Torvalds */ 23261da177e4SLinus Torvalds void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 23271da177e4SLinus Torvalds { 23281da177e4SLinus Torvalds unsigned long flags; 23291da177e4SLinus Torvalds 23301da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 23311da177e4SLinus Torvalds __skb_queue_tail(list, newsk); 23321da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 23331da177e4SLinus Torvalds } 2334b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_tail); 23358728b834SDavid S. Miller 23361da177e4SLinus Torvalds /** 23371da177e4SLinus Torvalds * skb_unlink - remove a buffer from a list 23381da177e4SLinus Torvalds * @skb: buffer to remove 23398728b834SDavid S. Miller * @list: list to use 23401da177e4SLinus Torvalds * 23418728b834SDavid S. Miller * Remove a packet from a list. The list locks are taken and this 23428728b834SDavid S. Miller * function is atomic with respect to other list locked calls. 23431da177e4SLinus Torvalds * 23448728b834SDavid S. Miller * You must know what list the SKB is on. 23451da177e4SLinus Torvalds */ 23468728b834SDavid S. Miller void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 23471da177e4SLinus Torvalds { 23481da177e4SLinus Torvalds unsigned long flags; 23491da177e4SLinus Torvalds 23501da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 23518728b834SDavid S. Miller __skb_unlink(skb, list); 23521da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 23531da177e4SLinus Torvalds } 2354b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_unlink); 23551da177e4SLinus Torvalds 23561da177e4SLinus Torvalds /** 23571da177e4SLinus Torvalds * skb_append - append a buffer 23581da177e4SLinus Torvalds * @old: buffer to insert after 23591da177e4SLinus Torvalds * @newsk: buffer to insert 23608728b834SDavid S. Miller * @list: list to use 23611da177e4SLinus Torvalds * 23621da177e4SLinus Torvalds * Place a packet after a given packet in a list. The list locks are taken 23631da177e4SLinus Torvalds * and this function is atomic with respect to other list locked calls.
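 *
 * A minimal sketch; the receive queue is an illustrative choice of list:
 *
 *	skb_append(old, newsk, &sk->sk_receive_queue);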
23641da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 23651da177e4SLinus Torvalds */ 23668728b834SDavid S. Miller void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 23671da177e4SLinus Torvalds { 23681da177e4SLinus Torvalds unsigned long flags; 23691da177e4SLinus Torvalds 23708728b834SDavid S. Miller spin_lock_irqsave(&list->lock, flags); 23717de6c033SGerrit Renker __skb_queue_after(list, old, newsk); 23728728b834SDavid S. Miller spin_unlock_irqrestore(&list->lock, flags); 23731da177e4SLinus Torvalds } 2374b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append); 23751da177e4SLinus Torvalds 23761da177e4SLinus Torvalds /** 23771da177e4SLinus Torvalds * skb_insert - insert a buffer 23781da177e4SLinus Torvalds * @old: buffer to insert before 23791da177e4SLinus Torvalds * @newsk: buffer to insert 23808728b834SDavid S. Miller * @list: list to use 23811da177e4SLinus Torvalds * 23828728b834SDavid S. Miller * Place a packet before a given packet in a list. The list locks are 23838728b834SDavid S. Miller * taken and this function is atomic with respect to other list locked 23848728b834SDavid S. Miller * calls. 23858728b834SDavid S. Miller * 23861da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 23871da177e4SLinus Torvalds */ 23888728b834SDavid S. Miller void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 23891da177e4SLinus Torvalds { 23901da177e4SLinus Torvalds unsigned long flags; 23911da177e4SLinus Torvalds 23928728b834SDavid S. Miller spin_lock_irqsave(&list->lock, flags); 23938728b834SDavid S. Miller __skb_insert(newsk, old->prev, old, list); 23948728b834SDavid S. Miller spin_unlock_irqrestore(&list->lock, flags); 23951da177e4SLinus Torvalds } 2396b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_insert); 23971da177e4SLinus Torvalds 23981da177e4SLinus Torvalds static inline void skb_split_inside_header(struct sk_buff *skb, 23991da177e4SLinus Torvalds struct sk_buff* skb1, 24001da177e4SLinus Torvalds const u32 len, const int pos) 24011da177e4SLinus Torvalds { 24021da177e4SLinus Torvalds int i; 24031da177e4SLinus Torvalds 2404d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 2405d626f62bSArnaldo Carvalho de Melo pos - len); 24061da177e4SLinus Torvalds /* And move data appendix as is. 
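 * The fragment descriptors are handed over to skb1 as-is, so no page
 * references need to be taken or dropped here.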
*/ 24071da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 24081da177e4SLinus Torvalds skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 24091da177e4SLinus Torvalds 24101da177e4SLinus Torvalds skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 24111da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = 0; 24121da177e4SLinus Torvalds skb1->data_len = skb->data_len; 24131da177e4SLinus Torvalds skb1->len += skb1->data_len; 24141da177e4SLinus Torvalds skb->data_len = 0; 24151da177e4SLinus Torvalds skb->len = len; 241627a884dcSArnaldo Carvalho de Melo skb_set_tail_pointer(skb, len); 24171da177e4SLinus Torvalds } 24181da177e4SLinus Torvalds 24191da177e4SLinus Torvalds static inline void skb_split_no_header(struct sk_buff *skb, 24201da177e4SLinus Torvalds struct sk_buff* skb1, 24211da177e4SLinus Torvalds const u32 len, int pos) 24221da177e4SLinus Torvalds { 24231da177e4SLinus Torvalds int i, k = 0; 24241da177e4SLinus Torvalds const int nfrags = skb_shinfo(skb)->nr_frags; 24251da177e4SLinus Torvalds 24261da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = 0; 24271da177e4SLinus Torvalds skb1->len = skb1->data_len = skb->len - len; 24281da177e4SLinus Torvalds skb->len = len; 24291da177e4SLinus Torvalds skb->data_len = len - pos; 24301da177e4SLinus Torvalds 24311da177e4SLinus Torvalds for (i = 0; i < nfrags; i++) { 24329e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 24331da177e4SLinus Torvalds 24341da177e4SLinus Torvalds if (pos + size > len) { 24351da177e4SLinus Torvalds skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 24361da177e4SLinus Torvalds 24371da177e4SLinus Torvalds if (pos < len) { 24381da177e4SLinus Torvalds /* Split frag. 24391da177e4SLinus Torvalds * We have two variants in this case: 24401da177e4SLinus Torvalds * 1. Move all the frag to the second 24411da177e4SLinus Torvalds * part, if it is possible. F.e. 24421da177e4SLinus Torvalds * this approach is mandatory for TUX, 24431da177e4SLinus Torvalds * where splitting is expensive. 24441da177e4SLinus Torvalds * 2. Split is accurately. We make this. 24451da177e4SLinus Torvalds */ 2446ea2ab693SIan Campbell skb_frag_ref(skb, i); 24471da177e4SLinus Torvalds skb_shinfo(skb1)->frags[0].page_offset += len - pos; 24489e903e08SEric Dumazet skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 24499e903e08SEric Dumazet skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 24501da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags++; 24511da177e4SLinus Torvalds } 24521da177e4SLinus Torvalds k++; 24531da177e4SLinus Torvalds } else 24541da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags++; 24551da177e4SLinus Torvalds pos += size; 24561da177e4SLinus Torvalds } 24571da177e4SLinus Torvalds skb_shinfo(skb1)->nr_frags = k; 24581da177e4SLinus Torvalds } 24591da177e4SLinus Torvalds 24601da177e4SLinus Torvalds /** 24611da177e4SLinus Torvalds * skb_split - Split fragmented skb to two parts at length len. 
24621da177e4SLinus Torvalds * @skb: the buffer to split 24631da177e4SLinus Torvalds * @skb1: the buffer to receive the second part 24641da177e4SLinus Torvalds * @len: new length for skb 24651da177e4SLinus Torvalds */ 24661da177e4SLinus Torvalds void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 24671da177e4SLinus Torvalds { 24681da177e4SLinus Torvalds int pos = skb_headlen(skb); 24691da177e4SLinus Torvalds 247068534c68SAmerigo Wang skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; 24711da177e4SLinus Torvalds if (len < pos) /* Split line is inside header. */ 24721da177e4SLinus Torvalds skb_split_inside_header(skb, skb1, len, pos); 24731da177e4SLinus Torvalds else /* Second chunk has no header, nothing to copy. */ 24741da177e4SLinus Torvalds skb_split_no_header(skb, skb1, len, pos); 24751da177e4SLinus Torvalds } 2476b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_split); 24771da177e4SLinus Torvalds 24789f782db3SIlpo Järvinen /* Shifting from/to a cloned skb is a no-go. 24799f782db3SIlpo Järvinen * 24809f782db3SIlpo Järvinen * Caller cannot keep skb_shinfo related pointers past calling here! 24819f782db3SIlpo Järvinen */ 2482832d11c5SIlpo Järvinen static int skb_prepare_for_shift(struct sk_buff *skb) 2483832d11c5SIlpo Järvinen { 24840ace2856SIlpo Järvinen return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2485832d11c5SIlpo Järvinen } 2486832d11c5SIlpo Järvinen 2487832d11c5SIlpo Järvinen /** 2488832d11c5SIlpo Järvinen * skb_shift - Shifts paged data partially from skb to another 2489832d11c5SIlpo Järvinen * @tgt: buffer into which tail data gets added 2490832d11c5SIlpo Järvinen * @skb: buffer from which the paged data comes 2491832d11c5SIlpo Järvinen * @shiftlen: shift up to this many bytes 2492832d11c5SIlpo Järvinen * 2493832d11c5SIlpo Järvinen * Attempts to shift up to shiftlen worth of bytes, which may be less than 249420e994a0SFeng King * the length of the skb, from skb to tgt. Returns the number of bytes shifted. 2495832d11c5SIlpo Järvinen * It's up to the caller to free skb if everything was shifted. 2496832d11c5SIlpo Järvinen * 2497832d11c5SIlpo Järvinen * If @tgt runs out of frags, the whole operation is aborted. 2498832d11c5SIlpo Järvinen * 2499832d11c5SIlpo Järvinen * Skb cannot include anything but paged data, while tgt is allowed 2500832d11c5SIlpo Järvinen * to have non-paged data as well. 2501832d11c5SIlpo Järvinen * 2502832d11c5SIlpo Järvinen * TODO: full sized shift could be optimized but that would need 2503832d11c5SIlpo Järvinen * a specialized skb free'er to handle frags without up-to-date nr_frags.
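 *
 * A minimal caller sketch (illustrative only, not taken from this file;
 * real callers such as the TCP SACK collapsing code also unlink @skb
 * from its queue before freeing it):
 *
 *	int shifted = skb_shift(tgt, skb, shiftlen);
 *
 *	if (shifted == shiftlen && !skb->len)
 *		kfree_skb(skb);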
2504832d11c5SIlpo Järvinen */ 2505832d11c5SIlpo Järvinen int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 2506832d11c5SIlpo Järvinen { 2507832d11c5SIlpo Järvinen int from, to, merge, todo; 2508832d11c5SIlpo Järvinen struct skb_frag_struct *fragfrom, *fragto; 2509832d11c5SIlpo Järvinen 2510832d11c5SIlpo Järvinen BUG_ON(shiftlen > skb->len); 2511832d11c5SIlpo Järvinen BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ 2512832d11c5SIlpo Järvinen 2513832d11c5SIlpo Järvinen todo = shiftlen; 2514832d11c5SIlpo Järvinen from = 0; 2515832d11c5SIlpo Järvinen to = skb_shinfo(tgt)->nr_frags; 2516832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2517832d11c5SIlpo Järvinen 2518832d11c5SIlpo Järvinen /* Actual merge is delayed until the point when we know we can 2519832d11c5SIlpo Järvinen * commit all, so that we don't have to undo partial changes 2520832d11c5SIlpo Järvinen */ 2521832d11c5SIlpo Järvinen if (!to || 2522ea2ab693SIan Campbell !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 2523ea2ab693SIan Campbell fragfrom->page_offset)) { 2524832d11c5SIlpo Järvinen merge = -1; 2525832d11c5SIlpo Järvinen } else { 2526832d11c5SIlpo Järvinen merge = to - 1; 2527832d11c5SIlpo Järvinen 25289e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 2529832d11c5SIlpo Järvinen if (todo < 0) { 2530832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || 2531832d11c5SIlpo Järvinen skb_prepare_for_shift(tgt)) 2532832d11c5SIlpo Järvinen return 0; 2533832d11c5SIlpo Järvinen 25349f782db3SIlpo Järvinen /* All previous frag pointers might be stale! */ 25359f782db3SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2536832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 2537832d11c5SIlpo Järvinen 25389e903e08SEric Dumazet skb_frag_size_add(fragto, shiftlen); 25399e903e08SEric Dumazet skb_frag_size_sub(fragfrom, shiftlen); 2540832d11c5SIlpo Järvinen fragfrom->page_offset += shiftlen; 2541832d11c5SIlpo Järvinen 2542832d11c5SIlpo Järvinen goto onlymerged; 2543832d11c5SIlpo Järvinen } 2544832d11c5SIlpo Järvinen 2545832d11c5SIlpo Järvinen from++; 2546832d11c5SIlpo Järvinen } 2547832d11c5SIlpo Järvinen 2548832d11c5SIlpo Järvinen /* Skip full, not-fitting skb to avoid expensive operations */ 2549832d11c5SIlpo Järvinen if ((shiftlen == skb->len) && 2550832d11c5SIlpo Järvinen (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2551832d11c5SIlpo Järvinen return 0; 2552832d11c5SIlpo Järvinen 2553832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2554832d11c5SIlpo Järvinen return 0; 2555832d11c5SIlpo Järvinen 2556832d11c5SIlpo Järvinen while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2557832d11c5SIlpo Järvinen if (to == MAX_SKB_FRAGS) 2558832d11c5SIlpo Järvinen return 0; 2559832d11c5SIlpo Järvinen 2560832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2561832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[to]; 2562832d11c5SIlpo Järvinen 25639e903e08SEric Dumazet if (todo >= skb_frag_size(fragfrom)) { 2564832d11c5SIlpo Järvinen *fragto = *fragfrom; 25659e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 2566832d11c5SIlpo Järvinen from++; 2567832d11c5SIlpo Järvinen to++; 2568832d11c5SIlpo Järvinen 2569832d11c5SIlpo Järvinen } else { 2570ea2ab693SIan Campbell __skb_frag_ref(fragfrom); 2571832d11c5SIlpo Järvinen fragto->page = fragfrom->page; 2572832d11c5SIlpo Järvinen fragto->page_offset = fragfrom->page_offset; 25739e903e08SEric Dumazet skb_frag_size_set(fragto, todo); 2574832d11c5SIlpo Järvinen 
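			/* Only part of fragfrom fits into tgt: the bytes just
			 * copied now live in tgt's last fragment, so advance
			 * the source fragment past them and stop.
			 */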
2575832d11c5SIlpo Järvinen fragfrom->page_offset += todo; 25769e903e08SEric Dumazet skb_frag_size_sub(fragfrom, todo); 2577832d11c5SIlpo Järvinen todo = 0; 2578832d11c5SIlpo Järvinen 2579832d11c5SIlpo Järvinen to++; 2580832d11c5SIlpo Järvinen break; 2581832d11c5SIlpo Järvinen } 2582832d11c5SIlpo Järvinen } 2583832d11c5SIlpo Järvinen 2584832d11c5SIlpo Järvinen /* Ready to "commit" this state change to tgt */ 2585832d11c5SIlpo Järvinen skb_shinfo(tgt)->nr_frags = to; 2586832d11c5SIlpo Järvinen 2587832d11c5SIlpo Järvinen if (merge >= 0) { 2588832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[0]; 2589832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 2590832d11c5SIlpo Järvinen 25919e903e08SEric Dumazet skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 2592ea2ab693SIan Campbell __skb_frag_unref(fragfrom); 2593832d11c5SIlpo Järvinen } 2594832d11c5SIlpo Järvinen 2595832d11c5SIlpo Järvinen /* Reposition in the original skb */ 2596832d11c5SIlpo Järvinen to = 0; 2597832d11c5SIlpo Järvinen while (from < skb_shinfo(skb)->nr_frags) 2598832d11c5SIlpo Järvinen skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2599832d11c5SIlpo Järvinen skb_shinfo(skb)->nr_frags = to; 2600832d11c5SIlpo Järvinen 2601832d11c5SIlpo Järvinen BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2602832d11c5SIlpo Järvinen 2603832d11c5SIlpo Järvinen onlymerged: 2604832d11c5SIlpo Järvinen /* Most likely the tgt won't ever need its checksum anymore, skb on 2605832d11c5SIlpo Järvinen * the other hand might need it if it needs to be resent 2606832d11c5SIlpo Järvinen */ 2607832d11c5SIlpo Järvinen tgt->ip_summed = CHECKSUM_PARTIAL; 2608832d11c5SIlpo Järvinen skb->ip_summed = CHECKSUM_PARTIAL; 2609832d11c5SIlpo Järvinen 2610832d11c5SIlpo Järvinen /* Yak, is it really working this way? Some helper please? */ 2611832d11c5SIlpo Järvinen skb->len -= shiftlen; 2612832d11c5SIlpo Järvinen skb->data_len -= shiftlen; 2613832d11c5SIlpo Järvinen skb->truesize -= shiftlen; 2614832d11c5SIlpo Järvinen tgt->len += shiftlen; 2615832d11c5SIlpo Järvinen tgt->data_len += shiftlen; 2616832d11c5SIlpo Järvinen tgt->truesize += shiftlen; 2617832d11c5SIlpo Järvinen 2618832d11c5SIlpo Järvinen return shiftlen; 2619832d11c5SIlpo Järvinen } 2620832d11c5SIlpo Järvinen 2621677e90edSThomas Graf /** 2622677e90edSThomas Graf * skb_prepare_seq_read - Prepare a sequential read of skb data 2623677e90edSThomas Graf * @skb: the buffer to read 2624677e90edSThomas Graf * @from: lower offset of data to be read 2625677e90edSThomas Graf * @to: upper offset of data to be read 2626677e90edSThomas Graf * @st: state variable 2627677e90edSThomas Graf * 2628677e90edSThomas Graf * Initializes the specified state variable. Must be called before 2629677e90edSThomas Graf * invoking skb_seq_read() for the first time. 2630677e90edSThomas Graf */ 2631677e90edSThomas Graf void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 2632677e90edSThomas Graf unsigned int to, struct skb_seq_state *st) 2633677e90edSThomas Graf { 2634677e90edSThomas Graf st->lower_offset = from; 2635677e90edSThomas Graf st->upper_offset = to; 2636677e90edSThomas Graf st->root_skb = st->cur_skb = skb; 2637677e90edSThomas Graf st->frag_idx = st->stepped_offset = 0; 2638677e90edSThomas Graf st->frag_data = NULL; 2639677e90edSThomas Graf } 2640b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_prepare_seq_read); 2641677e90edSThomas Graf 2642677e90edSThomas Graf /** 2643677e90edSThomas Graf * skb_seq_read - Sequentially read skb data 2644677e90edSThomas Graf * @consumed: number of bytes consumed by the caller so far 2645677e90edSThomas Graf * @data: destination pointer for data to be returned 2646677e90edSThomas Graf * @st: state variable 2647677e90edSThomas Graf * 2648bc32383cSMathias Krause * Reads a block of skb data at @consumed relative to the 2649677e90edSThomas Graf * lower offset specified to skb_prepare_seq_read(). Assigns 2650bc32383cSMathias Krause * the head of the data block to @data and returns the length 2651677e90edSThomas Graf * of the block or 0 if the end of the skb data or the upper 2652677e90edSThomas Graf * offset has been reached. 2653677e90edSThomas Graf * 2654677e90edSThomas Graf * The caller is not required to consume all of the data 2655bc32383cSMathias Krause * returned, i.e. @consumed is typically set to the number 2656677e90edSThomas Graf * of bytes already consumed and the next call to 2657677e90edSThomas Graf * skb_seq_read() will return the remaining part of the block. 2658677e90edSThomas Graf * 265925985edcSLucas De Marchi * Note 1: The size of each block of data returned can be arbitrary; 2660677e90edSThomas Graf * this limitation is the cost for zerocopy sequential 2661677e90edSThomas Graf * reads of potentially non-linear data. 2662677e90edSThomas Graf * 2663bc2cda1eSRandy Dunlap * Note 2: Fragment lists within fragments are not implemented 2664677e90edSThomas Graf * at the moment; state->root_skb could be replaced with 2665677e90edSThomas Graf * a stack for this purpose. 2666677e90edSThomas Graf */ 2667677e90edSThomas Graf unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 2668677e90edSThomas Graf struct skb_seq_state *st) 2669677e90edSThomas Graf { 2670677e90edSThomas Graf unsigned int block_limit, abs_offset = consumed + st->lower_offset; 2671677e90edSThomas Graf skb_frag_t *frag; 2672677e90edSThomas Graf 2673aeb193eaSWedson Almeida Filho if (unlikely(abs_offset >= st->upper_offset)) { 2674aeb193eaSWedson Almeida Filho if (st->frag_data) { 2675aeb193eaSWedson Almeida Filho kunmap_atomic(st->frag_data); 2676aeb193eaSWedson Almeida Filho st->frag_data = NULL; 2677aeb193eaSWedson Almeida Filho } 2678677e90edSThomas Graf return 0; 2679aeb193eaSWedson Almeida Filho } 2680677e90edSThomas Graf 2681677e90edSThomas Graf next_skb: 268295e3b24cSHerbert Xu block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 2683677e90edSThomas Graf 2684995b3379SThomas Chenault if (abs_offset < block_limit && !st->frag_data) { 268595e3b24cSHerbert Xu *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 2686677e90edSThomas Graf return block_limit - abs_offset; 2687677e90edSThomas Graf } 2688677e90edSThomas Graf 2689677e90edSThomas Graf if (st->frag_idx == 0 && !st->frag_data) 2690677e90edSThomas Graf st->stepped_offset += skb_headlen(st->cur_skb); 2691677e90edSThomas Graf 2692677e90edSThomas Graf while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2693677e90edSThomas Graf frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 26949e903e08SEric Dumazet block_limit = skb_frag_size(frag) + st->stepped_offset; 2695677e90edSThomas Graf 2696677e90edSThomas Graf if (abs_offset < block_limit) { 2697677e90edSThomas Graf if (!st->frag_data) 269851c56b00SEric Dumazet st->frag_data = kmap_atomic(skb_frag_page(frag)); 2699677e90edSThomas Graf 2700677e90edSThomas Graf *data = (u8 *) st->frag_data + frag->page_offset +
2701677e90edSThomas Graf (abs_offset - st->stepped_offset); 2702677e90edSThomas Graf 2703677e90edSThomas Graf return block_limit - abs_offset; 2704677e90edSThomas Graf } 2705677e90edSThomas Graf 2706677e90edSThomas Graf if (st->frag_data) { 270751c56b00SEric Dumazet kunmap_atomic(st->frag_data); 2708677e90edSThomas Graf st->frag_data = NULL; 2709677e90edSThomas Graf } 2710677e90edSThomas Graf 2711677e90edSThomas Graf st->frag_idx++; 27129e903e08SEric Dumazet st->stepped_offset += skb_frag_size(frag); 2713677e90edSThomas Graf } 2714677e90edSThomas Graf 27155b5a60daSOlaf Kirch if (st->frag_data) { 271651c56b00SEric Dumazet kunmap_atomic(st->frag_data); 27175b5a60daSOlaf Kirch st->frag_data = NULL; 27185b5a60daSOlaf Kirch } 27195b5a60daSOlaf Kirch 272021dc3301SDavid S. Miller if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 2721677e90edSThomas Graf st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 272295e3b24cSHerbert Xu st->frag_idx = 0; 2723677e90edSThomas Graf goto next_skb; 272471b3346dSShyam Iyer } else if (st->cur_skb->next) { 272571b3346dSShyam Iyer st->cur_skb = st->cur_skb->next; 272671b3346dSShyam Iyer st->frag_idx = 0; 2727677e90edSThomas Graf goto next_skb; 2728677e90edSThomas Graf } 2729677e90edSThomas Graf 2730677e90edSThomas Graf return 0; 2731677e90edSThomas Graf } 2732b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_seq_read); 2733677e90edSThomas Graf 2734677e90edSThomas Graf /** 2735677e90edSThomas Graf * skb_abort_seq_read - Abort a sequential read of skb data 2736677e90edSThomas Graf * @st: state variable 2737677e90edSThomas Graf * 2738677e90edSThomas Graf * Must be called if skb_seq_read() was not called until it 2739677e90edSThomas Graf * returned 0. 2740677e90edSThomas Graf */ 2741677e90edSThomas Graf void skb_abort_seq_read(struct skb_seq_state *st) 2742677e90edSThomas Graf { 2743677e90edSThomas Graf if (st->frag_data) 274451c56b00SEric Dumazet kunmap_atomic(st->frag_data); 2745677e90edSThomas Graf } 2746b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_abort_seq_read); 2747677e90edSThomas Graf 27483fc7e8a6SThomas Graf #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 27493fc7e8a6SThomas Graf 27503fc7e8a6SThomas Graf static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 27513fc7e8a6SThomas Graf struct ts_config *conf, 27523fc7e8a6SThomas Graf struct ts_state *state) 27533fc7e8a6SThomas Graf { 27543fc7e8a6SThomas Graf return skb_seq_read(offset, text, TS_SKB_CB(state)); 27553fc7e8a6SThomas Graf } 27563fc7e8a6SThomas Graf 27573fc7e8a6SThomas Graf static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 27583fc7e8a6SThomas Graf { 27593fc7e8a6SThomas Graf skb_abort_seq_read(TS_SKB_CB(state)); 27603fc7e8a6SThomas Graf } 27613fc7e8a6SThomas Graf 27623fc7e8a6SThomas Graf /** 27633fc7e8a6SThomas Graf * skb_find_text - Find a text pattern in skb data 27643fc7e8a6SThomas Graf * @skb: the buffer to look in 27653fc7e8a6SThomas Graf * @from: search offset 27663fc7e8a6SThomas Graf * @to: search limit 27673fc7e8a6SThomas Graf * @config: textsearch configuration 27683fc7e8a6SThomas Graf * @state: uninitialized textsearch state variable 27693fc7e8a6SThomas Graf * 27703fc7e8a6SThomas Graf * Finds a pattern in the skb data according to the specified 27713fc7e8a6SThomas Graf * textsearch configuration. Use textsearch_next() to retrieve 27723fc7e8a6SThomas Graf * subsequent occurrences of the pattern. Returns the offset 27733fc7e8a6SThomas Graf * to the first occurrence or UINT_MAX if no match was found. 
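 *
 * A minimal usage sketch (illustrative only; the "kmp" algorithm, the
 * pattern and the error handling below are assumptions of this example,
 * not requirements of the API):
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL, TS_AUTOLOAD);
 *	if (!IS_ERR(conf)) {
 *		pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *		textsearch_destroy(conf);
 *	}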
27743fc7e8a6SThomas Graf */ 27753fc7e8a6SThomas Graf unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 27763fc7e8a6SThomas Graf unsigned int to, struct ts_config *config, 27773fc7e8a6SThomas Graf struct ts_state *state) 27783fc7e8a6SThomas Graf { 2779f72b948dSPhil Oester unsigned int ret; 2780f72b948dSPhil Oester 27813fc7e8a6SThomas Graf config->get_next_block = skb_ts_get_next_block; 27823fc7e8a6SThomas Graf config->finish = skb_ts_finish; 27833fc7e8a6SThomas Graf 27843fc7e8a6SThomas Graf skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); 27853fc7e8a6SThomas Graf 2786f72b948dSPhil Oester ret = textsearch_find(config, state); 2787f72b948dSPhil Oester return (ret <= to - from ? ret : UINT_MAX); 27883fc7e8a6SThomas Graf } 2789b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_find_text); 27903fc7e8a6SThomas Graf 2791e89e9cf5SAnanda Raju /** 27922c53040fSBen Hutchings * skb_append_datato_frags - append the user data to a skb 2793e89e9cf5SAnanda Raju * @sk: sock structure 2794e89e9cf5SAnanda Raju * @skb: skb structure to be appended with user data. 2795e89e9cf5SAnanda Raju * @getfrag: call back function to be used for getting the user data 2796e89e9cf5SAnanda Raju * @from: pointer to user message iov 2797e89e9cf5SAnanda Raju * @length: length of the iov message 2798e89e9cf5SAnanda Raju * 2799e89e9cf5SAnanda Raju * Description: This procedure appends the user data in the fragment part 2800e89e9cf5SAnanda Raju * of the skb. If any page alloc fails, this procedure returns -ENOMEM 2801e89e9cf5SAnanda Raju */ 2802e89e9cf5SAnanda Raju int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 2803dab9630fSMartin Waitz int (*getfrag)(void *from, char *to, int offset, 2804e89e9cf5SAnanda Raju int len, int odd, struct sk_buff *skb), 2805e89e9cf5SAnanda Raju void *from, int length) 2806e89e9cf5SAnanda Raju { 2807b2111724SEric Dumazet int frg_cnt = skb_shinfo(skb)->nr_frags; 2808b2111724SEric Dumazet int copy; 2809e89e9cf5SAnanda Raju int offset = 0; 2810e89e9cf5SAnanda Raju int ret; 2811b2111724SEric Dumazet struct page_frag *pfrag = &current->task_frag; 2812e89e9cf5SAnanda Raju 2813e89e9cf5SAnanda Raju do { 2814e89e9cf5SAnanda Raju /* Return error if we don't have space for new frag */ 2815e89e9cf5SAnanda Raju if (frg_cnt >= MAX_SKB_FRAGS) 2816b2111724SEric Dumazet return -EMSGSIZE; 2817e89e9cf5SAnanda Raju 2818b2111724SEric Dumazet if (!sk_page_frag_refill(sk, pfrag)) 2819e89e9cf5SAnanda Raju return -ENOMEM; 2820e89e9cf5SAnanda Raju 2821e89e9cf5SAnanda Raju /* copy the user data to page */ 2822b2111724SEric Dumazet copy = min_t(int, length, pfrag->size - pfrag->offset); 2823e89e9cf5SAnanda Raju 2824b2111724SEric Dumazet ret = getfrag(from, page_address(pfrag->page) + pfrag->offset, 2825e89e9cf5SAnanda Raju offset, copy, 0, skb); 2826e89e9cf5SAnanda Raju if (ret < 0) 2827e89e9cf5SAnanda Raju return -EFAULT; 2828e89e9cf5SAnanda Raju 2829e89e9cf5SAnanda Raju /* copy was successful so update the size parameters */ 2830b2111724SEric Dumazet skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset, 2831b2111724SEric Dumazet copy); 2832b2111724SEric Dumazet frg_cnt++; 2833b2111724SEric Dumazet pfrag->offset += copy; 2834b2111724SEric Dumazet get_page(pfrag->page); 2835b2111724SEric Dumazet 2836b2111724SEric Dumazet skb->truesize += copy; 2837b2111724SEric Dumazet atomic_add(copy, &sk->sk_wmem_alloc); 2838e89e9cf5SAnanda Raju skb->len += copy; 2839e89e9cf5SAnanda Raju skb->data_len += copy; 2840e89e9cf5SAnanda Raju offset += copy; 2841e89e9cf5SAnanda Raju length -= copy; 2842e89e9cf5SAnanda
Raju 2843e89e9cf5SAnanda Raju } while (length > 0); 2844e89e9cf5SAnanda Raju 2845e89e9cf5SAnanda Raju return 0; 2846e89e9cf5SAnanda Raju } 2847b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append_datato_frags); 2848e89e9cf5SAnanda Raju 2849cbb042f9SHerbert Xu /** 2850cbb042f9SHerbert Xu * skb_pull_rcsum - pull skb and update receive checksum 2851cbb042f9SHerbert Xu * @skb: buffer to update 2852cbb042f9SHerbert Xu * @len: length of data pulled 2853cbb042f9SHerbert Xu * 2854cbb042f9SHerbert Xu * This function performs an skb_pull on the packet and updates 2855fee54fa5SUrs Thuermann * the CHECKSUM_COMPLETE checksum. It should be used on 285684fa7933SPatrick McHardy * receive path processing instead of skb_pull unless you know 285784fa7933SPatrick McHardy * that the checksum difference is zero (e.g., a valid IP header) 285884fa7933SPatrick McHardy * or you are setting ip_summed to CHECKSUM_NONE. 2859cbb042f9SHerbert Xu */ 2860cbb042f9SHerbert Xu unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 2861cbb042f9SHerbert Xu { 2862cbb042f9SHerbert Xu BUG_ON(len > skb->len); 2863cbb042f9SHerbert Xu skb->len -= len; 2864cbb042f9SHerbert Xu BUG_ON(skb->len < skb->data_len); 2865cbb042f9SHerbert Xu skb_postpull_rcsum(skb, skb->data, len); 2866cbb042f9SHerbert Xu return skb->data += len; 2867cbb042f9SHerbert Xu } 2868f94691acSArnaldo Carvalho de Melo EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2869f94691acSArnaldo Carvalho de Melo 2870f4c50d99SHerbert Xu /** 2871f4c50d99SHerbert Xu * skb_segment - Perform protocol segmentation on skb. 2872f4c50d99SHerbert Xu * @skb: buffer to segment 2873576a30ebSHerbert Xu * @features: features for the output path (see dev->features) 2874f4c50d99SHerbert Xu * 2875f4c50d99SHerbert Xu * This function performs segmentation on the given skb. It returns 28764c821d75SBen Hutchings * a pointer to the first in a list of new skbs for the segments. 28774c821d75SBen Hutchings * In case of error it returns ERR_PTR(err). 
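 *
 * A sketch of the expected caller pattern (illustrative; this is how the
 * protocol gso_segment handlers reached via skb_gso_segment() typically
 * consume the result):
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return segs;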
2878f4c50d99SHerbert Xu */ 2879c8f44affSMichał Mirosław struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) 2880f4c50d99SHerbert Xu { 2881f4c50d99SHerbert Xu struct sk_buff *segs = NULL; 2882f4c50d99SHerbert Xu struct sk_buff *tail = NULL; 288389319d38SHerbert Xu struct sk_buff *fskb = skb_shinfo(skb)->frag_list; 28849d8506ccSHerbert Xu skb_frag_t *skb_frag = skb_shinfo(skb)->frags; 2885f4c50d99SHerbert Xu unsigned int mss = skb_shinfo(skb)->gso_size; 288698e399f8SArnaldo Carvalho de Melo unsigned int doffset = skb->data - skb_mac_header(skb); 2887f4c50d99SHerbert Xu unsigned int offset = doffset; 288868c33163SPravin B Shelar unsigned int tnl_hlen = skb_tnl_header_len(skb); 2889f4c50d99SHerbert Xu unsigned int headroom; 2890f4c50d99SHerbert Xu unsigned int len; 2891ec5f0615SPravin B Shelar __be16 proto; 2892ec5f0615SPravin B Shelar bool csum; 289304ed3e74SMichał Mirosław int sg = !!(features & NETIF_F_SG); 2894f4c50d99SHerbert Xu int nfrags = skb_shinfo(skb)->nr_frags; 2895f4c50d99SHerbert Xu int err = -ENOMEM; 2896f4c50d99SHerbert Xu int i = 0; 2897f4c50d99SHerbert Xu int pos; 2898f4c50d99SHerbert Xu 2899ec5f0615SPravin B Shelar proto = skb_network_protocol(skb); 2900ec5f0615SPravin B Shelar if (unlikely(!proto)) 2901ec5f0615SPravin B Shelar return ERR_PTR(-EINVAL); 2902ec5f0615SPravin B Shelar 2903ec5f0615SPravin B Shelar csum = !!can_checksum_protocol(features, proto); 2904f4c50d99SHerbert Xu __skb_push(skb, doffset); 2905f4c50d99SHerbert Xu headroom = skb_headroom(skb); 2906f4c50d99SHerbert Xu pos = skb_headlen(skb); 2907f4c50d99SHerbert Xu 2908f4c50d99SHerbert Xu do { 2909f4c50d99SHerbert Xu struct sk_buff *nskb; 2910f4c50d99SHerbert Xu skb_frag_t *frag; 2911c8884eddSHerbert Xu int hsize; 2912f4c50d99SHerbert Xu int size; 2913f4c50d99SHerbert Xu 2914f4c50d99SHerbert Xu len = skb->len - offset; 2915f4c50d99SHerbert Xu if (len > mss) 2916f4c50d99SHerbert Xu len = mss; 2917f4c50d99SHerbert Xu 2918f4c50d99SHerbert Xu hsize = skb_headlen(skb) - offset; 2919f4c50d99SHerbert Xu if (hsize < 0) 2920f4c50d99SHerbert Xu hsize = 0; 2921c8884eddSHerbert Xu if (hsize > len || !sg) 2922c8884eddSHerbert Xu hsize = len; 2923f4c50d99SHerbert Xu 29249d8506ccSHerbert Xu if (!hsize && i >= nfrags && skb_headlen(fskb) && 29259d8506ccSHerbert Xu (skb_headlen(fskb) == len || sg)) { 29269d8506ccSHerbert Xu BUG_ON(skb_headlen(fskb) > len); 292789319d38SHerbert Xu 29289d8506ccSHerbert Xu i = 0; 29299d8506ccSHerbert Xu nfrags = skb_shinfo(fskb)->nr_frags; 29309d8506ccSHerbert Xu skb_frag = skb_shinfo(fskb)->frags; 29319d8506ccSHerbert Xu pos += skb_headlen(fskb); 29329d8506ccSHerbert Xu 29339d8506ccSHerbert Xu while (pos < offset + len) { 29349d8506ccSHerbert Xu BUG_ON(i >= nfrags); 29359d8506ccSHerbert Xu 29369d8506ccSHerbert Xu size = skb_frag_size(skb_frag); 29379d8506ccSHerbert Xu if (pos + size > offset + len) 29389d8506ccSHerbert Xu break; 29399d8506ccSHerbert Xu 29409d8506ccSHerbert Xu i++; 29419d8506ccSHerbert Xu pos += size; 29429d8506ccSHerbert Xu skb_frag++; 29439d8506ccSHerbert Xu } 29449d8506ccSHerbert Xu 294589319d38SHerbert Xu nskb = skb_clone(fskb, GFP_ATOMIC); 294689319d38SHerbert Xu fskb = fskb->next; 294789319d38SHerbert Xu 2948f4c50d99SHerbert Xu if (unlikely(!nskb)) 2949f4c50d99SHerbert Xu goto err; 2950f4c50d99SHerbert Xu 29519d8506ccSHerbert Xu if (unlikely(pskb_trim(nskb, len))) { 29529d8506ccSHerbert Xu kfree_skb(nskb); 29539d8506ccSHerbert Xu goto err; 29549d8506ccSHerbert Xu } 29559d8506ccSHerbert Xu 2956ec47ea82SAlexander Duyck hsize = skb_end_offset(nskb); 
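			/* hsize temporarily holds the pre-COW end offset:
			 * skb_cow_head() below may grow the head, and the
			 * truesize update that follows accounts for the
			 * difference.
			 */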
295789319d38SHerbert Xu if (skb_cow_head(nskb, doffset + headroom)) { 295889319d38SHerbert Xu kfree_skb(nskb); 295989319d38SHerbert Xu goto err; 296089319d38SHerbert Xu } 296189319d38SHerbert Xu 2962ec47ea82SAlexander Duyck nskb->truesize += skb_end_offset(nskb) - hsize; 296389319d38SHerbert Xu skb_release_head_state(nskb); 296489319d38SHerbert Xu __skb_push(nskb, doffset); 296589319d38SHerbert Xu } else { 2966c93bdd0eSMel Gorman nskb = __alloc_skb(hsize + doffset + headroom, 2967c93bdd0eSMel Gorman GFP_ATOMIC, skb_alloc_rx_flag(skb), 2968c93bdd0eSMel Gorman NUMA_NO_NODE); 296989319d38SHerbert Xu 297089319d38SHerbert Xu if (unlikely(!nskb)) 297189319d38SHerbert Xu goto err; 297289319d38SHerbert Xu 297389319d38SHerbert Xu skb_reserve(nskb, headroom); 297489319d38SHerbert Xu __skb_put(nskb, doffset); 297589319d38SHerbert Xu } 297689319d38SHerbert Xu 2977f4c50d99SHerbert Xu if (segs) 2978f4c50d99SHerbert Xu tail->next = nskb; 2979f4c50d99SHerbert Xu else 2980f4c50d99SHerbert Xu segs = nskb; 2981f4c50d99SHerbert Xu tail = nskb; 2982f4c50d99SHerbert Xu 29836f85a124SHerbert Xu __copy_skb_header(nskb, skb); 2984f4c50d99SHerbert Xu nskb->mac_len = skb->mac_len; 2985f4c50d99SHerbert Xu 2986030737bcSEric Dumazet skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 298768c33163SPravin B Shelar 298868c33163SPravin B Shelar skb_copy_from_linear_data_offset(skb, -tnl_hlen, 298968c33163SPravin B Shelar nskb->data - tnl_hlen, 299068c33163SPravin B Shelar doffset + tnl_hlen); 299189319d38SHerbert Xu 29929d8506ccSHerbert Xu if (nskb->len == len + doffset) 29931cdbcb79SSimon Horman goto perform_csum_check; 299489319d38SHerbert Xu 2995f4c50d99SHerbert Xu if (!sg) { 29966f85a124SHerbert Xu nskb->ip_summed = CHECKSUM_NONE; 2997f4c50d99SHerbert Xu nskb->csum = skb_copy_and_csum_bits(skb, offset, 2998f4c50d99SHerbert Xu skb_put(nskb, len), 2999f4c50d99SHerbert Xu len, 0); 3000f4c50d99SHerbert Xu continue; 3001f4c50d99SHerbert Xu } 3002f4c50d99SHerbert Xu 3003f4c50d99SHerbert Xu frag = skb_shinfo(nskb)->frags; 3004f4c50d99SHerbert Xu 3005d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, offset, 3006d626f62bSArnaldo Carvalho de Melo skb_put(nskb, hsize), hsize); 3007f4c50d99SHerbert Xu 3008c9af6db4SPravin B Shelar skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; 3009cef401deSEric Dumazet 30109d8506ccSHerbert Xu while (pos < offset + len) { 30119d8506ccSHerbert Xu if (i >= nfrags) { 30129d8506ccSHerbert Xu BUG_ON(skb_headlen(fskb)); 30139d8506ccSHerbert Xu 30149d8506ccSHerbert Xu i = 0; 30159d8506ccSHerbert Xu nfrags = skb_shinfo(fskb)->nr_frags; 30169d8506ccSHerbert Xu skb_frag = skb_shinfo(fskb)->frags; 30179d8506ccSHerbert Xu 30189d8506ccSHerbert Xu BUG_ON(!nfrags); 30199d8506ccSHerbert Xu 30209d8506ccSHerbert Xu fskb = fskb->next; 30219d8506ccSHerbert Xu } 30229d8506ccSHerbert Xu 30239d8506ccSHerbert Xu if (unlikely(skb_shinfo(nskb)->nr_frags >= 30249d8506ccSHerbert Xu MAX_SKB_FRAGS)) { 30259d8506ccSHerbert Xu net_warn_ratelimited( 30269d8506ccSHerbert Xu "skb_segment: too many frags: %u %u\n", 30279d8506ccSHerbert Xu pos, mss); 30289d8506ccSHerbert Xu goto err; 30299d8506ccSHerbert Xu } 30309d8506ccSHerbert Xu 30319d8506ccSHerbert Xu *frag = *skb_frag; 3032ea2ab693SIan Campbell __skb_frag_ref(frag); 30339e903e08SEric Dumazet size = skb_frag_size(frag); 3034f4c50d99SHerbert Xu 3035f4c50d99SHerbert Xu if (pos < offset) { 3036f4c50d99SHerbert Xu frag->page_offset += offset - pos; 30379e903e08SEric Dumazet skb_frag_size_sub(frag, offset - pos); 
3038f4c50d99SHerbert Xu } 3039f4c50d99SHerbert Xu 304089319d38SHerbert Xu skb_shinfo(nskb)->nr_frags++; 3041f4c50d99SHerbert Xu 3042f4c50d99SHerbert Xu if (pos + size <= offset + len) { 3043f4c50d99SHerbert Xu i++; 30449d8506ccSHerbert Xu skb_frag++; 3045f4c50d99SHerbert Xu pos += size; 3046f4c50d99SHerbert Xu } else { 30479e903e08SEric Dumazet skb_frag_size_sub(frag, pos + size - (offset + len)); 304889319d38SHerbert Xu goto skip_fraglist; 3049f4c50d99SHerbert Xu } 3050f4c50d99SHerbert Xu 3051f4c50d99SHerbert Xu frag++; 3052f4c50d99SHerbert Xu } 3053f4c50d99SHerbert Xu 305489319d38SHerbert Xu skip_fraglist: 3055f4c50d99SHerbert Xu nskb->data_len = len - hsize; 3056f4c50d99SHerbert Xu nskb->len += nskb->data_len; 3057f4c50d99SHerbert Xu nskb->truesize += nskb->data_len; 3058ec5f0615SPravin B Shelar 30591cdbcb79SSimon Horman perform_csum_check: 3060ec5f0615SPravin B Shelar if (!csum) { 3061ec5f0615SPravin B Shelar nskb->csum = skb_checksum(nskb, doffset, 3062ec5f0615SPravin B Shelar nskb->len - doffset, 0); 3063ec5f0615SPravin B Shelar nskb->ip_summed = CHECKSUM_NONE; 3064ec5f0615SPravin B Shelar } 3065f4c50d99SHerbert Xu } while ((offset += len) < skb->len); 3066f4c50d99SHerbert Xu 3067f4c50d99SHerbert Xu return segs; 3068f4c50d99SHerbert Xu 3069f4c50d99SHerbert Xu err: 3070f4c50d99SHerbert Xu while ((skb = segs)) { 3071f4c50d99SHerbert Xu segs = skb->next; 3072b08d5840SPatrick McHardy kfree_skb(skb); 3073f4c50d99SHerbert Xu } 3074f4c50d99SHerbert Xu return ERR_PTR(err); 3075f4c50d99SHerbert Xu } 3076f4c50d99SHerbert Xu EXPORT_SYMBOL_GPL(skb_segment); 3077f4c50d99SHerbert Xu 307871d93b39SHerbert Xu int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 307971d93b39SHerbert Xu { 30808a29111cSEric Dumazet struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); 308167147ba9SHerbert Xu unsigned int offset = skb_gro_offset(skb); 308267147ba9SHerbert Xu unsigned int headlen = skb_headlen(skb); 30838a29111cSEric Dumazet struct sk_buff *nskb, *lp, *p = *head; 30848a29111cSEric Dumazet unsigned int len = skb_gro_len(skb); 3085715dc1f3SEric Dumazet unsigned int delta_truesize; 30868a29111cSEric Dumazet unsigned int headroom; 308771d93b39SHerbert Xu 30888a29111cSEric Dumazet if (unlikely(p->len + len >= 65536)) 308971d93b39SHerbert Xu return -E2BIG; 309071d93b39SHerbert Xu 30918a29111cSEric Dumazet lp = NAPI_GRO_CB(p)->last ?: p; 30928a29111cSEric Dumazet pinfo = skb_shinfo(lp); 30938a29111cSEric Dumazet 30948a29111cSEric Dumazet if (headlen <= offset) { 309542da6994SHerbert Xu skb_frag_t *frag; 309666e92fcfSHerbert Xu skb_frag_t *frag2; 30979aaa156cSHerbert Xu int i = skbinfo->nr_frags; 30989aaa156cSHerbert Xu int nr_frags = pinfo->nr_frags + i; 309942da6994SHerbert Xu 310066e92fcfSHerbert Xu if (nr_frags > MAX_SKB_FRAGS) 31018a29111cSEric Dumazet goto merge; 310281705ad1SHerbert Xu 31038a29111cSEric Dumazet offset -= headlen; 31049aaa156cSHerbert Xu pinfo->nr_frags = nr_frags; 31059aaa156cSHerbert Xu skbinfo->nr_frags = 0; 3106f5572068SHerbert Xu 31079aaa156cSHerbert Xu frag = pinfo->frags + nr_frags; 31089aaa156cSHerbert Xu frag2 = skbinfo->frags + i; 310966e92fcfSHerbert Xu do { 311066e92fcfSHerbert Xu *--frag = *--frag2; 311166e92fcfSHerbert Xu } while (--i); 311266e92fcfSHerbert Xu 311366e92fcfSHerbert Xu frag->page_offset += offset; 31149e903e08SEric Dumazet skb_frag_size_sub(frag, offset); 311566e92fcfSHerbert Xu 3116715dc1f3SEric Dumazet /* all fragments truesize : remove (head size + sk_buff) */ 3117ec47ea82SAlexander Duyck delta_truesize = skb->truesize - 3118ec47ea82SAlexander 
Duyck SKB_TRUESIZE(skb_end_offset(skb)); 3119715dc1f3SEric Dumazet 3120f5572068SHerbert Xu skb->truesize -= skb->data_len; 3121f5572068SHerbert Xu skb->len -= skb->data_len; 3122f5572068SHerbert Xu skb->data_len = 0; 3123f5572068SHerbert Xu 3124715dc1f3SEric Dumazet NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 31255d38a079SHerbert Xu goto done; 3126d7e8883cSEric Dumazet } else if (skb->head_frag) { 3127d7e8883cSEric Dumazet int nr_frags = pinfo->nr_frags; 3128d7e8883cSEric Dumazet skb_frag_t *frag = pinfo->frags + nr_frags; 3129d7e8883cSEric Dumazet struct page *page = virt_to_head_page(skb->head); 3130d7e8883cSEric Dumazet unsigned int first_size = headlen - offset; 3131d7e8883cSEric Dumazet unsigned int first_offset; 3132d7e8883cSEric Dumazet 3133d7e8883cSEric Dumazet if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 31348a29111cSEric Dumazet goto merge; 3135d7e8883cSEric Dumazet 3136d7e8883cSEric Dumazet first_offset = skb->data - 3137d7e8883cSEric Dumazet (unsigned char *)page_address(page) + 3138d7e8883cSEric Dumazet offset; 3139d7e8883cSEric Dumazet 3140d7e8883cSEric Dumazet pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 3141d7e8883cSEric Dumazet 3142d7e8883cSEric Dumazet frag->page.p = page; 3143d7e8883cSEric Dumazet frag->page_offset = first_offset; 3144d7e8883cSEric Dumazet skb_frag_size_set(frag, first_size); 3145d7e8883cSEric Dumazet 3146d7e8883cSEric Dumazet memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 3147d7e8883cSEric Dumazet /* We dont need to clear skbinfo->nr_frags here */ 3148d7e8883cSEric Dumazet 3149715dc1f3SEric Dumazet delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 3150d7e8883cSEric Dumazet NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 3151d7e8883cSEric Dumazet goto done; 31528a29111cSEric Dumazet } 31538a29111cSEric Dumazet if (pinfo->frag_list) 31548a29111cSEric Dumazet goto merge; 31558a29111cSEric Dumazet if (skb_gro_len(p) != pinfo->gso_size) 315669c0cab1SHerbert Xu return -E2BIG; 315771d93b39SHerbert Xu 315871d93b39SHerbert Xu headroom = skb_headroom(p); 31593d3be433SEric Dumazet nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); 316071d93b39SHerbert Xu if (unlikely(!nskb)) 316171d93b39SHerbert Xu return -ENOMEM; 316271d93b39SHerbert Xu 316371d93b39SHerbert Xu __copy_skb_header(nskb, p); 316471d93b39SHerbert Xu nskb->mac_len = p->mac_len; 316571d93b39SHerbert Xu 316671d93b39SHerbert Xu skb_reserve(nskb, headroom); 316786911732SHerbert Xu __skb_put(nskb, skb_gro_offset(p)); 316871d93b39SHerbert Xu 316986911732SHerbert Xu skb_set_mac_header(nskb, skb_mac_header(p) - p->data); 317071d93b39SHerbert Xu skb_set_network_header(nskb, skb_network_offset(p)); 317171d93b39SHerbert Xu skb_set_transport_header(nskb, skb_transport_offset(p)); 317271d93b39SHerbert Xu 317386911732SHerbert Xu __skb_pull(p, skb_gro_offset(p)); 317486911732SHerbert Xu memcpy(skb_mac_header(nskb), skb_mac_header(p), 317586911732SHerbert Xu p->data - skb_mac_header(p)); 317671d93b39SHerbert Xu 317771d93b39SHerbert Xu skb_shinfo(nskb)->frag_list = p; 31789aaa156cSHerbert Xu skb_shinfo(nskb)->gso_size = pinfo->gso_size; 3179622e0ca1SHerbert Xu pinfo->gso_size = 0; 318071d93b39SHerbert Xu skb_header_release(p); 3181c3c7c254SEric Dumazet NAPI_GRO_CB(nskb)->last = p; 318271d93b39SHerbert Xu 318371d93b39SHerbert Xu nskb->data_len += p->len; 3184de8261c2SEric Dumazet nskb->truesize += p->truesize; 318571d93b39SHerbert Xu nskb->len += p->len; 318671d93b39SHerbert Xu 318771d93b39SHerbert Xu *head = nskb; 318871d93b39SHerbert Xu nskb->next = 
p->next; 318971d93b39SHerbert Xu p->next = NULL; 319071d93b39SHerbert Xu 319171d93b39SHerbert Xu p = nskb; 319271d93b39SHerbert Xu 319371d93b39SHerbert Xu merge: 3194715dc1f3SEric Dumazet delta_truesize = skb->truesize; 319567147ba9SHerbert Xu if (offset > headlen) { 3196d1dc7abfSMichal Schmidt unsigned int eat = offset - headlen; 3197d1dc7abfSMichal Schmidt 3198d1dc7abfSMichal Schmidt skbinfo->frags[0].page_offset += eat; 31999e903e08SEric Dumazet skb_frag_size_sub(&skbinfo->frags[0], eat); 3200d1dc7abfSMichal Schmidt skb->data_len -= eat; 3201d1dc7abfSMichal Schmidt skb->len -= eat; 320267147ba9SHerbert Xu offset = headlen; 320356035022SHerbert Xu } 320456035022SHerbert Xu 320567147ba9SHerbert Xu __skb_pull(skb, offset); 320656035022SHerbert Xu 32078a29111cSEric Dumazet if (!NAPI_GRO_CB(p)->last) 32088a29111cSEric Dumazet skb_shinfo(p)->frag_list = skb; 32098a29111cSEric Dumazet else 3210c3c7c254SEric Dumazet NAPI_GRO_CB(p)->last->next = skb; 3211c3c7c254SEric Dumazet NAPI_GRO_CB(p)->last = skb; 321271d93b39SHerbert Xu skb_header_release(skb); 32138a29111cSEric Dumazet lp = p; 321471d93b39SHerbert Xu 32155d38a079SHerbert Xu done: 32165d38a079SHerbert Xu NAPI_GRO_CB(p)->count++; 321737fe4732SHerbert Xu p->data_len += len; 3218715dc1f3SEric Dumazet p->truesize += delta_truesize; 321937fe4732SHerbert Xu p->len += len; 32208a29111cSEric Dumazet if (lp != p) { 32218a29111cSEric Dumazet lp->data_len += len; 32228a29111cSEric Dumazet lp->truesize += delta_truesize; 32238a29111cSEric Dumazet lp->len += len; 32248a29111cSEric Dumazet } 322571d93b39SHerbert Xu NAPI_GRO_CB(skb)->same_flow = 1; 322671d93b39SHerbert Xu return 0; 322771d93b39SHerbert Xu } 322871d93b39SHerbert Xu EXPORT_SYMBOL_GPL(skb_gro_receive); 322971d93b39SHerbert Xu 32301da177e4SLinus Torvalds void __init skb_init(void) 32311da177e4SLinus Torvalds { 32321da177e4SLinus Torvalds skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 32331da177e4SLinus Torvalds sizeof(struct sk_buff), 32341da177e4SLinus Torvalds 0, 3235e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 323620c2df83SPaul Mundt NULL); 3237d179cd12SDavid S. Miller skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 3238d179cd12SDavid S. Miller (2*sizeof(struct sk_buff)) + 3239d179cd12SDavid S. Miller sizeof(atomic_t), 3240d179cd12SDavid S. Miller 0, 3241e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 324220c2df83SPaul Mundt NULL); 32431da177e4SLinus Torvalds } 32441da177e4SLinus Torvalds 3245716ea3a7SDavid Howells /** 3246716ea3a7SDavid Howells * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 3247716ea3a7SDavid Howells * @skb: Socket buffer containing the buffers to be mapped 3248716ea3a7SDavid Howells * @sg: The scatter-gather list to map into 3249716ea3a7SDavid Howells * @offset: The offset into the buffer's contents to start mapping 3250716ea3a7SDavid Howells * @len: Length of buffer space to be mapped 3251716ea3a7SDavid Howells * 3252716ea3a7SDavid Howells * Fill the specified scatter-gather list with mappings/pointers into a 3253716ea3a7SDavid Howells * region of the buffer space attached to a socket buffer. 3254716ea3a7SDavid Howells */ 325551c739d1SDavid S. Miller static int 325651c739d1SDavid S. Miller __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3257716ea3a7SDavid Howells { 32581a028e50SDavid S. Miller int start = skb_headlen(skb); 32591a028e50SDavid S. Miller int i, copy = start - offset; 3260fbb398a8SDavid S. 
Miller struct sk_buff *frag_iter; 3261716ea3a7SDavid Howells int elt = 0; 3262716ea3a7SDavid Howells 3263716ea3a7SDavid Howells if (copy > 0) { 3264716ea3a7SDavid Howells if (copy > len) 3265716ea3a7SDavid Howells copy = len; 3266642f1490SJens Axboe sg_set_buf(sg, skb->data + offset, copy); 3267716ea3a7SDavid Howells elt++; 3268716ea3a7SDavid Howells if ((len -= copy) == 0) 3269716ea3a7SDavid Howells return elt; 3270716ea3a7SDavid Howells offset += copy; 3271716ea3a7SDavid Howells } 3272716ea3a7SDavid Howells 3273716ea3a7SDavid Howells for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 32741a028e50SDavid S. Miller int end; 3275716ea3a7SDavid Howells 3276547b792cSIlpo Järvinen WARN_ON(start > offset + len); 32771a028e50SDavid S. Miller 32789e903e08SEric Dumazet end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3279716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 3280716ea3a7SDavid Howells skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3281716ea3a7SDavid Howells 3282716ea3a7SDavid Howells if (copy > len) 3283716ea3a7SDavid Howells copy = len; 3284ea2ab693SIan Campbell sg_set_page(&sg[elt], skb_frag_page(frag), copy, 3285642f1490SJens Axboe frag->page_offset+offset-start); 3286716ea3a7SDavid Howells elt++; 3287716ea3a7SDavid Howells if (!(len -= copy)) 3288716ea3a7SDavid Howells return elt; 3289716ea3a7SDavid Howells offset += copy; 3290716ea3a7SDavid Howells } 32911a028e50SDavid S. Miller start = end; 3292716ea3a7SDavid Howells } 3293716ea3a7SDavid Howells 3294fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 32951a028e50SDavid S. Miller int end; 3296716ea3a7SDavid Howells 3297547b792cSIlpo Järvinen WARN_ON(start > offset + len); 32981a028e50SDavid S. Miller 3299fbb398a8SDavid S. Miller end = start + frag_iter->len; 3300716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 3301716ea3a7SDavid Howells if (copy > len) 3302716ea3a7SDavid Howells copy = len; 3303fbb398a8SDavid S. Miller elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, 330451c739d1SDavid S. Miller copy); 3305716ea3a7SDavid Howells if ((len -= copy) == 0) 3306716ea3a7SDavid Howells return elt; 3307716ea3a7SDavid Howells offset += copy; 3308716ea3a7SDavid Howells } 33091a028e50SDavid S. Miller start = end; 3310716ea3a7SDavid Howells } 3311716ea3a7SDavid Howells BUG_ON(len); 3312716ea3a7SDavid Howells return elt; 3313716ea3a7SDavid Howells } 3314716ea3a7SDavid Howells 331551c739d1SDavid S. Miller int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 331651c739d1SDavid S. Miller { 331751c739d1SDavid S. Miller int nsg = __skb_to_sgvec(skb, sg, offset, len); 331851c739d1SDavid S. Miller 3319c46f2334SJens Axboe sg_mark_end(&sg[nsg - 1]); 332051c739d1SDavid S. Miller 332151c739d1SDavid S. Miller return nsg; 332251c739d1SDavid S. Miller } 3323b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_to_sgvec); 332451c739d1SDavid S. Miller 3325716ea3a7SDavid Howells /** 3326716ea3a7SDavid Howells * skb_cow_data - Check that a socket buffer's data buffers are writable 3327716ea3a7SDavid Howells * @skb: The socket buffer to check. 3328716ea3a7SDavid Howells * @tailbits: Amount of trailing space to be added 3329716ea3a7SDavid Howells * @trailer: Returned pointer to the skb where the @tailbits space begins 3330716ea3a7SDavid Howells * 3331716ea3a7SDavid Howells * Make sure that the data buffers attached to a socket buffer are 3332716ea3a7SDavid Howells * writable. 
If they are not, private copies are made of the data buffers 3333716ea3a7SDavid Howells * and the socket buffer is set to use these instead. 3334716ea3a7SDavid Howells * 3335716ea3a7SDavid Howells * If @tailbits is given, make sure that there is space to write @tailbits 3336716ea3a7SDavid Howells * bytes of data beyond current end of socket buffer. @trailer will be 3337716ea3a7SDavid Howells * set to point to the skb in which this space begins. 3338716ea3a7SDavid Howells * 3339716ea3a7SDavid Howells * The number of scatterlist elements required to completely map the 3340716ea3a7SDavid Howells * COW'd and extended socket buffer will be returned. 3341716ea3a7SDavid Howells */ 3342716ea3a7SDavid Howells int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 3343716ea3a7SDavid Howells { 3344716ea3a7SDavid Howells int copyflag; 3345716ea3a7SDavid Howells int elt; 3346716ea3a7SDavid Howells struct sk_buff *skb1, **skb_p; 3347716ea3a7SDavid Howells 3348716ea3a7SDavid Howells /* If skb is cloned or its head is paged, reallocate 3349716ea3a7SDavid Howells * head pulling out all the pages (pages are considered not writable 3350716ea3a7SDavid Howells * at the moment even if they are anonymous). 3351716ea3a7SDavid Howells */ 3352716ea3a7SDavid Howells if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 3353716ea3a7SDavid Howells __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) 3354716ea3a7SDavid Howells return -ENOMEM; 3355716ea3a7SDavid Howells 3356716ea3a7SDavid Howells /* Easy case. Most of packets will go this way. */ 335721dc3301SDavid S. Miller if (!skb_has_frag_list(skb)) { 3358716ea3a7SDavid Howells /* A little of trouble, not enough of space for trailer. 3359716ea3a7SDavid Howells * This should not happen, when stack is tuned to generate 3360716ea3a7SDavid Howells * good frames. OK, on miss we reallocate and reserve even more 3361716ea3a7SDavid Howells * space, 128 bytes is fair. */ 3362716ea3a7SDavid Howells 3363716ea3a7SDavid Howells if (skb_tailroom(skb) < tailbits && 3364716ea3a7SDavid Howells pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 3365716ea3a7SDavid Howells return -ENOMEM; 3366716ea3a7SDavid Howells 3367716ea3a7SDavid Howells /* Voila! */ 3368716ea3a7SDavid Howells *trailer = skb; 3369716ea3a7SDavid Howells return 1; 3370716ea3a7SDavid Howells } 3371716ea3a7SDavid Howells 3372716ea3a7SDavid Howells /* Misery. We are in troubles, going to mincer fragments... */ 3373716ea3a7SDavid Howells 3374716ea3a7SDavid Howells elt = 1; 3375716ea3a7SDavid Howells skb_p = &skb_shinfo(skb)->frag_list; 3376716ea3a7SDavid Howells copyflag = 0; 3377716ea3a7SDavid Howells 3378716ea3a7SDavid Howells while ((skb1 = *skb_p) != NULL) { 3379716ea3a7SDavid Howells int ntail = 0; 3380716ea3a7SDavid Howells 3381716ea3a7SDavid Howells /* The fragment is partially pulled by someone, 3382716ea3a7SDavid Howells * this can happen on input. Copy it and everything 3383716ea3a7SDavid Howells * after it. */ 3384716ea3a7SDavid Howells 3385716ea3a7SDavid Howells if (skb_shared(skb1)) 3386716ea3a7SDavid Howells copyflag = 1; 3387716ea3a7SDavid Howells 3388716ea3a7SDavid Howells /* If the skb is the last, worry about trailer. */ 3389716ea3a7SDavid Howells 3390716ea3a7SDavid Howells if (skb1->next == NULL && tailbits) { 3391716ea3a7SDavid Howells if (skb_shinfo(skb1)->nr_frags || 339221dc3301SDavid S. 
Miller skb_has_frag_list(skb1) || 3393716ea3a7SDavid Howells skb_tailroom(skb1) < tailbits) 3394716ea3a7SDavid Howells ntail = tailbits + 128; 3395716ea3a7SDavid Howells } 3396716ea3a7SDavid Howells 3397716ea3a7SDavid Howells if (copyflag || 3398716ea3a7SDavid Howells skb_cloned(skb1) || 3399716ea3a7SDavid Howells ntail || 3400716ea3a7SDavid Howells skb_shinfo(skb1)->nr_frags || 340121dc3301SDavid S. Miller skb_has_frag_list(skb1)) { 3402716ea3a7SDavid Howells struct sk_buff *skb2; 3403716ea3a7SDavid Howells 3404716ea3a7SDavid Howells /* Fuck, we are miserable poor guys... */ 3405716ea3a7SDavid Howells if (ntail == 0) 3406716ea3a7SDavid Howells skb2 = skb_copy(skb1, GFP_ATOMIC); 3407716ea3a7SDavid Howells else 3408716ea3a7SDavid Howells skb2 = skb_copy_expand(skb1, 3409716ea3a7SDavid Howells skb_headroom(skb1), 3410716ea3a7SDavid Howells ntail, 3411716ea3a7SDavid Howells GFP_ATOMIC); 3412716ea3a7SDavid Howells if (unlikely(skb2 == NULL)) 3413716ea3a7SDavid Howells return -ENOMEM; 3414716ea3a7SDavid Howells 3415716ea3a7SDavid Howells if (skb1->sk) 3416716ea3a7SDavid Howells skb_set_owner_w(skb2, skb1->sk); 3417716ea3a7SDavid Howells 3418716ea3a7SDavid Howells /* Looking around. Are we still alive? 3419716ea3a7SDavid Howells * OK, link new skb, drop old one */ 3420716ea3a7SDavid Howells 3421716ea3a7SDavid Howells skb2->next = skb1->next; 3422716ea3a7SDavid Howells *skb_p = skb2; 3423716ea3a7SDavid Howells kfree_skb(skb1); 3424716ea3a7SDavid Howells skb1 = skb2; 3425716ea3a7SDavid Howells } 3426716ea3a7SDavid Howells elt++; 3427716ea3a7SDavid Howells *trailer = skb1; 3428716ea3a7SDavid Howells skb_p = &skb1->next; 3429716ea3a7SDavid Howells } 3430716ea3a7SDavid Howells 3431716ea3a7SDavid Howells return elt; 3432716ea3a7SDavid Howells } 3433b4ac530fSDavid S. 
Miller EXPORT_SYMBOL_GPL(skb_cow_data); 3434716ea3a7SDavid Howells 3435b1faf566SEric Dumazet static void sock_rmem_free(struct sk_buff *skb) 3436b1faf566SEric Dumazet { 3437b1faf566SEric Dumazet struct sock *sk = skb->sk; 3438b1faf566SEric Dumazet 3439b1faf566SEric Dumazet atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3440b1faf566SEric Dumazet } 3441b1faf566SEric Dumazet 3442b1faf566SEric Dumazet /* 3443b1faf566SEric Dumazet * Note: We dont mem charge error packets (no sk_forward_alloc changes) 3444b1faf566SEric Dumazet */ 3445b1faf566SEric Dumazet int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3446b1faf566SEric Dumazet { 3447110c4330SEric Dumazet int len = skb->len; 3448110c4330SEric Dumazet 3449b1faf566SEric Dumazet if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 345095c96174SEric Dumazet (unsigned int)sk->sk_rcvbuf) 3451b1faf566SEric Dumazet return -ENOMEM; 3452b1faf566SEric Dumazet 3453b1faf566SEric Dumazet skb_orphan(skb); 3454b1faf566SEric Dumazet skb->sk = sk; 3455b1faf566SEric Dumazet skb->destructor = sock_rmem_free; 3456b1faf566SEric Dumazet atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3457b1faf566SEric Dumazet 3458abb57ea4SEric Dumazet /* before exiting rcu section, make sure dst is refcounted */ 3459abb57ea4SEric Dumazet skb_dst_force(skb); 3460abb57ea4SEric Dumazet 3461b1faf566SEric Dumazet skb_queue_tail(&sk->sk_error_queue, skb); 3462b1faf566SEric Dumazet if (!sock_flag(sk, SOCK_DEAD)) 3463110c4330SEric Dumazet sk->sk_data_ready(sk, len); 3464b1faf566SEric Dumazet return 0; 3465b1faf566SEric Dumazet } 3466b1faf566SEric Dumazet EXPORT_SYMBOL(sock_queue_err_skb); 3467b1faf566SEric Dumazet 3468ac45f602SPatrick Ohly void skb_tstamp_tx(struct sk_buff *orig_skb, 3469ac45f602SPatrick Ohly struct skb_shared_hwtstamps *hwtstamps) 3470ac45f602SPatrick Ohly { 3471ac45f602SPatrick Ohly struct sock *sk = orig_skb->sk; 3472ac45f602SPatrick Ohly struct sock_exterr_skb *serr; 3473ac45f602SPatrick Ohly struct sk_buff *skb; 3474ac45f602SPatrick Ohly int err; 3475ac45f602SPatrick Ohly 3476ac45f602SPatrick Ohly if (!sk) 3477ac45f602SPatrick Ohly return; 3478ac45f602SPatrick Ohly 3479ac45f602SPatrick Ohly if (hwtstamps) { 34802e31396fSWillem de Bruijn *skb_hwtstamps(orig_skb) = 3481ac45f602SPatrick Ohly *hwtstamps; 3482ac45f602SPatrick Ohly } else { 3483ac45f602SPatrick Ohly /* 3484ac45f602SPatrick Ohly * no hardware time stamps available, 34852244d07bSOliver Hartkopp * so keep the shared tx_flags and only 3486ac45f602SPatrick Ohly * store software time stamp 3487ac45f602SPatrick Ohly */ 34882e31396fSWillem de Bruijn orig_skb->tstamp = ktime_get_real(); 3489ac45f602SPatrick Ohly } 3490ac45f602SPatrick Ohly 34912e31396fSWillem de Bruijn skb = skb_clone(orig_skb, GFP_ATOMIC); 34922e31396fSWillem de Bruijn if (!skb) 34932e31396fSWillem de Bruijn return; 34942e31396fSWillem de Bruijn 3495ac45f602SPatrick Ohly serr = SKB_EXT_ERR(skb); 3496ac45f602SPatrick Ohly memset(serr, 0, sizeof(*serr)); 3497ac45f602SPatrick Ohly serr->ee.ee_errno = ENOMSG; 3498ac45f602SPatrick Ohly serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 349929030374SEric Dumazet 3500ac45f602SPatrick Ohly err = sock_queue_err_skb(sk, skb); 350129030374SEric Dumazet 3502ac45f602SPatrick Ohly if (err) 3503ac45f602SPatrick Ohly kfree_skb(skb); 3504ac45f602SPatrick Ohly } 3505ac45f602SPatrick Ohly EXPORT_SYMBOL_GPL(skb_tstamp_tx); 3506ac45f602SPatrick Ohly 35076e3e939fSJohannes Berg void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 35086e3e939fSJohannes Berg { 35096e3e939fSJohannes Berg struct sock *sk = 
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
{
	struct sock *sk = skb->sk;
	struct sock_exterr_skb *serr;
	int err;

	skb->wifi_acked_valid = 1;
	skb->wifi_acked = acked;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;

	err = sock_queue_err_skb(sk, skb);
	if (err)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);


/**
 * skb_partial_csum_set - set up and verify partial csum values for packet
 * @skb: the skb to set
 * @start: the number of bytes after skb->data to start checksumming.
 * @off: the offset from start to place the checksum.
 *
 * For untrusted partially-checksummed packets, we need to make sure the values
 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
 *
 * This function checks and sets those values and skb->ip_summed: if this
 * returns false you should drop the packet.
 */
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
	if (unlikely(start > skb_headlen(skb)) ||
	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
		net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
				     start, off, skb_headlen(skb));
		return false;
	}
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_headroom(skb) + start;
	skb->csum_offset = off;
	skb_set_transport_header(skb, start);
	return true;
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);
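/*
 * Usage sketch (illustrative only, not part of the original file): a
 * virtio-style receive path that reads csum_start/csum_offset from an
 * untrusted header must validate them before trusting CHECKSUM_PARTIAL.
 * The 'hdr' structure and NEEDS_CSUM_FLAG bit below are hypothetical.
 *
 *	if ((hdr->flags & NEEDS_CSUM_FLAG) &&
 *	    !skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset)) {
 *		kfree_skb(skb);		// malformed offsets, drop the packet
 *		return -EINVAL;
 *	}
 */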
void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
			     skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
	if (head_stolen) {
		skb_release_head_state(skb);
		kmem_cache_free(skbuff_head_cache, skb);
	} else {
		__kfree_skb(skb);
	}
}
EXPORT_SYMBOL(kfree_skb_partial);

/**
 * skb_try_coalesce - try to merge skb to prior one
 * @to: prior buffer
 * @from: buffer to add
 * @fragstolen: set to true when the head of @from is stolen as a page
 *	fragment; the caller must then free @from with kfree_skb_partial()
 * @delta_truesize: how much more was allocated than was requested
 */
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize)
{
	int i, delta, len = from->len;

	*fragstolen = false;

	if (skb_cloned(to))
		return false;

	if (len <= skb_tailroom(to)) {
		BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
		*delta_truesize = 0;
		return true;
	}

	if (skb_has_frag_list(to) || skb_has_frag_list(from))
		return false;

	if (skb_headlen(from) != 0) {
		struct page *page;
		unsigned int offset;

		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
			return false;

		if (skb_head_is_locked(from))
			return false;

		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));

		page = virt_to_head_page(from->head);
		offset = from->data - (unsigned char *)page_address(page);

		skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
				   page, offset, skb_headlen(from));
		*fragstolen = true;
	} else {
		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
			return false;

		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
	}

	WARN_ON_ONCE(delta < len);

	memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
	       skb_shinfo(from)->frags,
	       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
	skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;

	if (!skb_cloned(from))
		skb_shinfo(from)->nr_frags = 0;

	/* If the skb is not cloned this does nothing,
	 * since we set nr_frags to 0 above.
	 */
	for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
		skb_frag_ref(from, i);

	to->truesize += delta;
	to->len += len;
	to->data_len += len;

	*delta_truesize = delta;
	return true;
}
EXPORT_SYMBOL(skb_try_coalesce);
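/*
 * Usage sketch (illustrative only, not part of the original file):
 * receive-queue coalescing pairs skb_try_coalesce() with kfree_skb_partial()
 * so that a stolen head is released without freeing the page data now owned
 * by 'tail'.  The 'tail' variable (last queued skb) is hypothetical.
 *
 *	bool fragstolen;
 *	int delta;
 *
 *	if (tail && skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
 *		// account the extra 'delta' truesize against the receiver here
 *		kfree_skb_partial(skb, fragstolen);
 *	} else {
 *		__skb_queue_tail(&sk->sk_receive_queue, skb);
 *	}
 */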
/**
 * skb_scrub_packet - scrub an skb
 *
 * @skb: buffer to clean
 * @xnet: packet is crossing netns
 *
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information has to be cleared during these
 * operations.
 * skb_scrub_packet can also be used to clean an skb before injecting it into
 * another namespace (@xnet == true). We have to clear all information in the
 * skb that could impact namespace isolation.
 */
void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
	if (xnet)
		skb_orphan(skb);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb_dst_drop(skb);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	nf_reset_trace(skb);
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);
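/*
 * Usage sketch (illustrative only, not part of the original file): a tunnel
 * receive path scrubs the skb before handing it to a device that may live in
 * a different network namespace; 'tunnel_dev' is hypothetical.
 *
 *	skb_scrub_packet(skb, !net_eq(dev_net(skb->dev), dev_net(tunnel_dev)));
 *	skb->dev = tunnel_dev;
 *	netif_rx(skb);
 */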