/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
{
	struct sk_buff *skb;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(skbuff_head_cache,
				    gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->head = NULL;
	skb->truesize = sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);

	skb->mac_header = (typeof(skb->mac_header))~0U;
out:
	return skb;
}

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
		child->pfmemalloc = pfmemalloc;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
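
/*
 * Illustrative usage sketch for the allocator above: the common caller
 * pattern goes through alloc_skb(), the <linux/skbuff.h> wrapper around
 * __alloc_skb().  The helper name and the 64-byte header budget are
 * illustrative only, not kernel API.
 */
static struct sk_buff *skbuff_example_build_packet(const void *payload,
						   unsigned int len)
{
	struct sk_buff *skb;

	/* Ask for tailroom covering the headers we plan to push plus data. */
	skb = alloc_skb(64 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, 64);				/* headroom for later skb_push() */
	memcpy(skb_put(skb, len), payload, len);	/* append the payload */
	return skb;
}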

/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of fragment, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, the driver allocates only the data buffer where the NIC puts
 *  the incoming frame.  The driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 *  After IO, the driver calls build_skb() to allocate the sk_buff and
 *  populate it before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	skb->head_frag = frag_size != 0;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}
EXPORT_SYMBOL(build_skb);
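
/*
 * Illustrative sketch of the driver pattern described in the kernel-doc
 * above: "rx_buf" is assumed to have been kmalloc()ed with NET_SKB_PAD of
 * headroom and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) of tailroom
 * before DMA.  The helper name and buffer are hypothetical.
 */
static struct sk_buff *skbuff_example_rx_build(void *rx_buf, unsigned int pkt_len)
{
	struct sk_buff *skb = build_skb(rx_buf, 0);	/* 0: head was kmalloced */

	if (!skb)
		return NULL;

	skb_reserve(skb, NET_SKB_PAD);	/* step over the reserved headroom */
	skb_put(skb, pkt_len);		/* bytes the NIC actually wrote */
	return skb;
}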

struct netdev_alloc_cache {
	struct page_frag frag;
	/* we maintain a pagecount bias, so that we dont dirty cache line
	 * containing page->_count every time we allocate a fragment.
	 */
	unsigned int pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct netdev_alloc_cache *nc;
	void *data = NULL;
	int order;
	unsigned long flags;

	local_irq_save(flags);
	nc = &__get_cpu_var(netdev_alloc_cache);
	if (unlikely(!nc->frag.page)) {
refill:
		for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
			gfp_t gfp = gfp_mask;

			if (order)
				gfp |= __GFP_COMP | __GFP_NOWARN;
			nc->frag.page = alloc_pages(gfp, order);
			if (likely(nc->frag.page))
				break;
			if (--order < 0)
				goto end;
		}
		nc->frag.size = PAGE_SIZE << order;
recycle:
		atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
		nc->frag.offset = 0;
	}

	if (nc->frag.offset + fragsz > nc->frag.size) {
		/* avoid unnecessary locked operations if possible */
		if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
		    atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
			goto recycle;
		goto refill;
	}

	data = page_address(nc->frag.page) + nc->frag.offset;
	nc->frag.offset += fragsz;
	nc->pagecnt_bias--;
end:
	local_irq_restore(flags);
	return data;
}

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(netdev_alloc_frag);
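
/*
 * Illustrative sketch: a fragment from netdev_alloc_frag() is plain page
 * memory.  If it never ends up attached to an skb it must be released with
 * put_page() on its head page, mirroring the failure path of
 * __netdev_alloc_skb() below.  The helper name is hypothetical.
 */
static void skbuff_example_frag_discard(unsigned int fragsz)
{
	void *data = netdev_alloc_frag(fragsz);

	if (data)
		put_page(virt_to_head_page(data));
}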

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
		void *data;

		if (sk_memalloc_socks())
			gfp_mask |= __GFP_MEMALLOC;

		data = __netdev_alloc_frag(fragsz, gfp_mask);

		if (likely(data)) {
			skb = build_skb(data, fragsz);
			if (unlikely(!skb))
				put_page(virt_to_head_page(data));
		}
	} else {
		skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
				  SKB_ALLOC_RX, NUMA_NO_NODE);
	}
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
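
/*
 * Illustrative sketch of a driver receive path built on the helper above;
 * netdev_alloc_skb() is the GFP_ATOMIC wrapper from <linux/skbuff.h>, and
 * eth_type_trans() is the usual <linux/etherdevice.h> classifier.  The
 * helper name and the copy of "frame" are hypothetical.
 */
static struct sk_buff *skbuff_example_rx_copy(struct net_device *dev,
					      const void *frame,
					      unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (!skb)
		return NULL;	/* caller drops the frame */

	/* NET_SKB_PAD headroom is already reserved and skb->dev is set. */
	memcpy(skb_put(skb, len), frame, len);
	skb->protocol = eth_type_trans(skb, dev);
	return skb;
}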

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	if (skb->head_frag)
		put_page(virt_to_head_page(skb->head));
	else
		kfree(skb->head);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				skb_frag_unref(skb, i);
		}

		/*
		 * If skb buf is from userspace, we need to notify the caller
		 * the lower device DMA has done;
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			struct ubuf_info *uarg;

			uarg = skb_shinfo(skb)->destructor_arg;
			if (uarg->callback)
				uarg->callback(uarg, true);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		skb_free_head(skb);
	}
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = skb_shinfo(skb)->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, false);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	}
}
EXPORT_SYMBOL(skb_tx_error);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb, but kfree_skb assumes that the frame
 *	is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
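
/*
 * Illustrative sketch: as the comments above describe, kfree_skb() and
 * consume_skb() release the buffer the same way and differ only in how the
 * event is traced and accounted, so callers pick the one matching why the
 * skb is going away.  The helper name is hypothetical.
 */
static void skbuff_example_tx_complete(struct sk_buff *skb, bool sent_ok)
{
	if (sent_ok)
		consume_skb(skb);	/* normal end of life, not a drop */
	else
		kfree_skb(skb);		/* traced as a dropped frame */
}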

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	new->inner_transport_header = old->inner_transport_header;
	new->inner_network_header = old->inner_network_header;
	new->inner_mac_header = old->inner_mac_header;
	skb_dst_copy(new, old);
	new->rxhash = old->rxhash;
	new->ooo_okay = old->ooo_okay;
	new->l4_rxhash = old->l4_rxhash;
	new->no_fcs = old->no_fcs;
	new->encapsulation = old->encapsulation;
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum = old->csum;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
#if IS_ENABLED(CONFIG_IP_VS)
	new->ipvs_property = old->ipvs_property;
#endif
	new->pfmemalloc = old->pfmemalloc;
	new->protocol = old->protocol;
	new->mark = old->mark;
	new->skb_iif = old->skb_iif;
	__nf_copy(new, old);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	new->vlan_proto = old->vlan_proto;
	new->vlan_tci = old->vlan_tci;

	skb_copy_secmark(new, old);

#ifdef CONFIG_NET_LL_RX_POLL
	new->napi_id = old->napi_id;
#endif
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph - morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)head->private;
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_atomic(skb_frag_page(f));
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_atomic(vaddr);
		page->private = (unsigned long)head;
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg, false);

	/* skb frags point to kernel buffers */
	for (i = num_frags - 1; i >= 0; i--) {
		__skb_fill_page_desc(skb, i, head, 0,
				     skb_shinfo(skb)->frags[i].size);
		head = (struct page *)head->private;
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
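
/*
 * Illustrative sketch: a clone shares the packet data with the original, so
 * it suits handing the buffer to an extra consumer that will only read the
 * data; callers that need to rewrite headers take a private copy instead
 * (see skb_copy()/pskb_copy() below).  The helper name is hypothetical.
 */
static int skbuff_example_mirror(struct sk_buff *skb,
				 int (*deliver)(struct sk_buff *))
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return -ENOMEM;
	return deliver(clone);		/* the consumer frees the clone */
}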

static void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As by-product this function converts non-linear &sk_buff to linear
 *	one, so that &sk_buff becomes completely private and caller is allowed
 *	to modify all the data of returned buffer. This means that this
 *	function is not recommended for use in circumstances when only
 *	header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
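
/*
 * Illustrative sketch of the choice spelled out in the comment above: a full
 * skb_copy() (which linearizes) is only needed when payload bytes will be
 * rewritten; for header-only edits the pskb_copy() wrapper from
 * <linux/skbuff.h> keeps the paged data shared.  The helper name is
 * hypothetical.
 */
static struct sk_buff *skbuff_example_writable_copy(struct sk_buff *skb,
						    bool touch_payload)
{
	if (touch_payload)
		return skb_copy(skb, GFP_ATOMIC);
	return pskb_copy(skb, GFP_ATOMIC);
}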

/**
 *	__pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
{
	unsigned int size = skb_headlen(skb) + headroom;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy);

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if &nhead and &ntail are zero)
 *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success or error,
 *	if expansion failed. In the last case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + skb_end_offset(skb) + ntail;
	long off;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		/* copy this zero copy skb frags */
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->head_frag = 0;
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	skb->tail += off;
	skb_headers_offset_update(skb, nhead);
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += nhead;
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
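
/*
 * Illustrative sketch: the usual reason to call pskb_expand_head() directly
 * is to guarantee headroom (and a private head) before pushing a new header.
 * It assumes the caller holds the only reference, as required above; the
 * helper name and the 16-byte header are hypothetical.
 */
static int skbuff_example_push_hdr(struct sk_buff *skb)
{
	const unsigned int hdr_len = 16;

	if (skb_headroom(skb) < hdr_len || skb_cloned(skb)) {
		int err = pskb_expand_head(skb, SKB_DATA_ALIGN(hdr_len), 0,
					   GFP_ATOMIC);
		if (err)
			return err;	/* skb is left unchanged on failure */
	}
	skb_push(skb, hdr_len);
	/* ... fill in the new header at skb->data ... */
	return 0;
}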
Miller EXPORT_SYMBOL(pskb_expand_head); 11211da177e4SLinus Torvalds 11221da177e4SLinus Torvalds /* Make private copy of skb with writable head and some headroom */ 11231da177e4SLinus Torvalds 11241da177e4SLinus Torvalds struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) 11251da177e4SLinus Torvalds { 11261da177e4SLinus Torvalds struct sk_buff *skb2; 11271da177e4SLinus Torvalds int delta = headroom - skb_headroom(skb); 11281da177e4SLinus Torvalds 11291da177e4SLinus Torvalds if (delta <= 0) 11301da177e4SLinus Torvalds skb2 = pskb_copy(skb, GFP_ATOMIC); 11311da177e4SLinus Torvalds else { 11321da177e4SLinus Torvalds skb2 = skb_clone(skb, GFP_ATOMIC); 11331da177e4SLinus Torvalds if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, 11341da177e4SLinus Torvalds GFP_ATOMIC)) { 11351da177e4SLinus Torvalds kfree_skb(skb2); 11361da177e4SLinus Torvalds skb2 = NULL; 11371da177e4SLinus Torvalds } 11381da177e4SLinus Torvalds } 11391da177e4SLinus Torvalds return skb2; 11401da177e4SLinus Torvalds } 1141b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_realloc_headroom); 11421da177e4SLinus Torvalds 11431da177e4SLinus Torvalds /** 11441da177e4SLinus Torvalds * skb_copy_expand - copy and expand sk_buff 11451da177e4SLinus Torvalds * @skb: buffer to copy 11461da177e4SLinus Torvalds * @newheadroom: new free bytes at head 11471da177e4SLinus Torvalds * @newtailroom: new free bytes at tail 11481da177e4SLinus Torvalds * @gfp_mask: allocation priority 11491da177e4SLinus Torvalds * 11501da177e4SLinus Torvalds * Make a copy of both an &sk_buff and its data and while doing so 11511da177e4SLinus Torvalds * allocate additional space. 11521da177e4SLinus Torvalds * 11531da177e4SLinus Torvalds * This is used when the caller wishes to modify the data and needs a 11541da177e4SLinus Torvalds * private copy of the data to alter as well as more space for new fields. 11551da177e4SLinus Torvalds * Returns %NULL on failure or the pointer to the buffer 11561da177e4SLinus Torvalds * on success. The returned buffer has a reference count of 1. 11571da177e4SLinus Torvalds * 11581da177e4SLinus Torvalds * You must pass %GFP_ATOMIC as the allocation priority if this function 11591da177e4SLinus Torvalds * is called from an interrupt. 
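 *
 * Illustrative usage sketch (not part of the original documentation; the
 * 128-byte headroom and the error handling are arbitrary examples):
 *
 *	struct sk_buff *nskb;
 *
 *	nskb = skb_copy_expand(skb, 128, 0, GFP_ATOMIC);
 *	if (!nskb)
 *		return -ENOMEM;
 *	// nskb is a private copy with at least 128 bytes of headroom;
 *	// the original skb is untouched and still owned by the caller.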
11601da177e4SLinus Torvalds */ 11611da177e4SLinus Torvalds struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 116286a76cafSVictor Fusco int newheadroom, int newtailroom, 1163dd0fc66fSAl Viro gfp_t gfp_mask) 11641da177e4SLinus Torvalds { 11651da177e4SLinus Torvalds /* 11661da177e4SLinus Torvalds * Allocate the copy buffer 11671da177e4SLinus Torvalds */ 1168c93bdd0eSMel Gorman struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, 1169c93bdd0eSMel Gorman gfp_mask, skb_alloc_rx_flag(skb), 1170c93bdd0eSMel Gorman NUMA_NO_NODE); 1171efd1e8d5SPatrick McHardy int oldheadroom = skb_headroom(skb); 11721da177e4SLinus Torvalds int head_copy_len, head_copy_off; 117352886051SHerbert Xu int off; 11741da177e4SLinus Torvalds 11751da177e4SLinus Torvalds if (!n) 11761da177e4SLinus Torvalds return NULL; 11771da177e4SLinus Torvalds 11781da177e4SLinus Torvalds skb_reserve(n, newheadroom); 11791da177e4SLinus Torvalds 11801da177e4SLinus Torvalds /* Set the tail pointer and length */ 11811da177e4SLinus Torvalds skb_put(n, skb->len); 11821da177e4SLinus Torvalds 1183efd1e8d5SPatrick McHardy head_copy_len = oldheadroom; 11841da177e4SLinus Torvalds head_copy_off = 0; 11851da177e4SLinus Torvalds if (newheadroom <= head_copy_len) 11861da177e4SLinus Torvalds head_copy_len = newheadroom; 11871da177e4SLinus Torvalds else 11881da177e4SLinus Torvalds head_copy_off = newheadroom - head_copy_len; 11891da177e4SLinus Torvalds 11901da177e4SLinus Torvalds /* Copy the linear header and data. */ 11911da177e4SLinus Torvalds if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 11921da177e4SLinus Torvalds skb->len + head_copy_len)) 11931da177e4SLinus Torvalds BUG(); 11941da177e4SLinus Torvalds 11951da177e4SLinus Torvalds copy_skb_header(n, skb); 11961da177e4SLinus Torvalds 1197efd1e8d5SPatrick McHardy off = newheadroom - oldheadroom; 1198be2b6e62SDavid S. Miller if (n->ip_summed == CHECKSUM_PARTIAL) 119952886051SHerbert Xu n->csum_start += off; 1200b41abb42SPeter Pan(潘卫平) 1201f5b17294SPravin B Shelar skb_headers_offset_update(n, off); 1202efd1e8d5SPatrick McHardy 12031da177e4SLinus Torvalds return n; 12041da177e4SLinus Torvalds } 1205b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_expand); 12061da177e4SLinus Torvalds 12071da177e4SLinus Torvalds /** 12081da177e4SLinus Torvalds * skb_pad - zero pad the tail of an skb 12091da177e4SLinus Torvalds * @skb: buffer to pad 12101da177e4SLinus Torvalds * @pad: space to pad 12111da177e4SLinus Torvalds * 12121da177e4SLinus Torvalds * Ensure that a buffer is followed by a padding area that is zero 12131da177e4SLinus Torvalds * filled. Used by network drivers which may DMA or transfer data 12141da177e4SLinus Torvalds * beyond the buffer end onto the wire. 12151da177e4SLinus Torvalds * 12165b057c6bSHerbert Xu * May return error in out of memory cases. The skb is freed on error. 12171da177e4SLinus Torvalds */ 12181da177e4SLinus Torvalds 12195b057c6bSHerbert Xu int skb_pad(struct sk_buff *skb, int pad) 12201da177e4SLinus Torvalds { 12215b057c6bSHerbert Xu int err; 12225b057c6bSHerbert Xu int ntail; 12231da177e4SLinus Torvalds 12241da177e4SLinus Torvalds /* If the skbuff is non linear tailroom is always zero.. 
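 * (and a cloned skb must not be written in place either, so both cases
 * fall through to the reallocation / linearize path below)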
*/ 12255b057c6bSHerbert Xu if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 12261da177e4SLinus Torvalds memset(skb->data+skb->len, 0, pad); 12275b057c6bSHerbert Xu return 0; 12281da177e4SLinus Torvalds } 12291da177e4SLinus Torvalds 12304305b541SArnaldo Carvalho de Melo ntail = skb->data_len + pad - (skb->end - skb->tail); 12315b057c6bSHerbert Xu if (likely(skb_cloned(skb) || ntail > 0)) { 12325b057c6bSHerbert Xu err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 12335b057c6bSHerbert Xu if (unlikely(err)) 12345b057c6bSHerbert Xu goto free_skb; 12355b057c6bSHerbert Xu } 12365b057c6bSHerbert Xu 12375b057c6bSHerbert Xu /* FIXME: The use of this function with non-linear skb's really needs 12385b057c6bSHerbert Xu * to be audited. 12395b057c6bSHerbert Xu */ 12405b057c6bSHerbert Xu err = skb_linearize(skb); 12415b057c6bSHerbert Xu if (unlikely(err)) 12425b057c6bSHerbert Xu goto free_skb; 12435b057c6bSHerbert Xu 12445b057c6bSHerbert Xu memset(skb->data + skb->len, 0, pad); 12455b057c6bSHerbert Xu return 0; 12465b057c6bSHerbert Xu 12475b057c6bSHerbert Xu free_skb: 12481da177e4SLinus Torvalds kfree_skb(skb); 12495b057c6bSHerbert Xu return err; 12501da177e4SLinus Torvalds } 1251b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_pad); 12521da177e4SLinus Torvalds 12530dde3e16SIlpo Järvinen /** 12540dde3e16SIlpo Järvinen * skb_put - add data to a buffer 12550dde3e16SIlpo Järvinen * @skb: buffer to use 12560dde3e16SIlpo Järvinen * @len: amount of data to add 12570dde3e16SIlpo Järvinen * 12580dde3e16SIlpo Järvinen * This function extends the used data area of the buffer. If this would 12590dde3e16SIlpo Järvinen * exceed the total buffer size the kernel will panic. A pointer to the 12600dde3e16SIlpo Järvinen * first byte of the extra data is returned. 12610dde3e16SIlpo Järvinen */ 12620dde3e16SIlpo Järvinen unsigned char *skb_put(struct sk_buff *skb, unsigned int len) 12630dde3e16SIlpo Järvinen { 12640dde3e16SIlpo Järvinen unsigned char *tmp = skb_tail_pointer(skb); 12650dde3e16SIlpo Järvinen SKB_LINEAR_ASSERT(skb); 12660dde3e16SIlpo Järvinen skb->tail += len; 12670dde3e16SIlpo Järvinen skb->len += len; 12680dde3e16SIlpo Järvinen if (unlikely(skb->tail > skb->end)) 12690dde3e16SIlpo Järvinen skb_over_panic(skb, len, __builtin_return_address(0)); 12700dde3e16SIlpo Järvinen return tmp; 12710dde3e16SIlpo Järvinen } 12720dde3e16SIlpo Järvinen EXPORT_SYMBOL(skb_put); 12730dde3e16SIlpo Järvinen 12746be8ac2fSIlpo Järvinen /** 1275c2aa270aSIlpo Järvinen * skb_push - add data to the start of a buffer 1276c2aa270aSIlpo Järvinen * @skb: buffer to use 1277c2aa270aSIlpo Järvinen * @len: amount of data to add 1278c2aa270aSIlpo Järvinen * 1279c2aa270aSIlpo Järvinen * This function extends the used data area of the buffer at the buffer 1280c2aa270aSIlpo Järvinen * start. If this would exceed the total buffer headroom the kernel will 1281c2aa270aSIlpo Järvinen * panic. A pointer to the first byte of the extra data is returned. 
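 *
 * Illustrative sketch (not from the original documentation; ETH_HLEN is
 * only an example length and that much headroom must already be available,
 * e.g. set aside earlier with skb_reserve()):
 *
 *	struct ethhdr *eth;
 *
 *	eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
 *	// skb->data now points at the new header; skb->len grew by ETH_HLEN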
1282c2aa270aSIlpo Järvinen */ 1283c2aa270aSIlpo Järvinen unsigned char *skb_push(struct sk_buff *skb, unsigned int len) 1284c2aa270aSIlpo Järvinen { 1285c2aa270aSIlpo Järvinen skb->data -= len; 1286c2aa270aSIlpo Järvinen skb->len += len; 1287c2aa270aSIlpo Järvinen if (unlikely(skb->data<skb->head)) 1288c2aa270aSIlpo Järvinen skb_under_panic(skb, len, __builtin_return_address(0)); 1289c2aa270aSIlpo Järvinen return skb->data; 1290c2aa270aSIlpo Järvinen } 1291c2aa270aSIlpo Järvinen EXPORT_SYMBOL(skb_push); 1292c2aa270aSIlpo Järvinen 1293c2aa270aSIlpo Järvinen /** 12946be8ac2fSIlpo Järvinen * skb_pull - remove data from the start of a buffer 12956be8ac2fSIlpo Järvinen * @skb: buffer to use 12966be8ac2fSIlpo Järvinen * @len: amount of data to remove 12976be8ac2fSIlpo Järvinen * 12986be8ac2fSIlpo Järvinen * This function removes data from the start of a buffer, returning 12996be8ac2fSIlpo Järvinen * the memory to the headroom. A pointer to the next data in the buffer 13006be8ac2fSIlpo Järvinen * is returned. Once the data has been pulled future pushes will overwrite 13016be8ac2fSIlpo Järvinen * the old data. 13026be8ac2fSIlpo Järvinen */ 13036be8ac2fSIlpo Järvinen unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) 13046be8ac2fSIlpo Järvinen { 130547d29646SDavid S. Miller return skb_pull_inline(skb, len); 13066be8ac2fSIlpo Järvinen } 13076be8ac2fSIlpo Järvinen EXPORT_SYMBOL(skb_pull); 13086be8ac2fSIlpo Järvinen 1309419ae74eSIlpo Järvinen /** 1310419ae74eSIlpo Järvinen * skb_trim - remove end from a buffer 1311419ae74eSIlpo Järvinen * @skb: buffer to alter 1312419ae74eSIlpo Järvinen * @len: new length 1313419ae74eSIlpo Järvinen * 1314419ae74eSIlpo Järvinen * Cut the length of a buffer down by removing data from the tail. If 1315419ae74eSIlpo Järvinen * the buffer is already under the length specified it is not modified. 1316419ae74eSIlpo Järvinen * The skb must be linear. 1317419ae74eSIlpo Järvinen */ 1318419ae74eSIlpo Järvinen void skb_trim(struct sk_buff *skb, unsigned int len) 1319419ae74eSIlpo Järvinen { 1320419ae74eSIlpo Järvinen if (skb->len > len) 1321419ae74eSIlpo Järvinen __skb_trim(skb, len); 1322419ae74eSIlpo Järvinen } 1323419ae74eSIlpo Järvinen EXPORT_SYMBOL(skb_trim); 1324419ae74eSIlpo Järvinen 13253cc0e873SHerbert Xu /* Trims skb to length len. It can change skb pointers. 
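 * Unlike plain skb_trim(), it copes with paged data and frag lists, and may
 * reallocate the header via pskb_expand_head() first when the skb is cloned.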
13261da177e4SLinus Torvalds */ 13271da177e4SLinus Torvalds 13283cc0e873SHerbert Xu int ___pskb_trim(struct sk_buff *skb, unsigned int len) 13291da177e4SLinus Torvalds { 133027b437c8SHerbert Xu struct sk_buff **fragp; 133127b437c8SHerbert Xu struct sk_buff *frag; 13321da177e4SLinus Torvalds int offset = skb_headlen(skb); 13331da177e4SLinus Torvalds int nfrags = skb_shinfo(skb)->nr_frags; 13341da177e4SLinus Torvalds int i; 133527b437c8SHerbert Xu int err; 133627b437c8SHerbert Xu 133727b437c8SHerbert Xu if (skb_cloned(skb) && 133827b437c8SHerbert Xu unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 133927b437c8SHerbert Xu return err; 13401da177e4SLinus Torvalds 1341f4d26fb3SHerbert Xu i = 0; 1342f4d26fb3SHerbert Xu if (offset >= len) 1343f4d26fb3SHerbert Xu goto drop_pages; 1344f4d26fb3SHerbert Xu 1345f4d26fb3SHerbert Xu for (; i < nfrags; i++) { 13469e903e08SEric Dumazet int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 134727b437c8SHerbert Xu 134827b437c8SHerbert Xu if (end < len) { 13491da177e4SLinus Torvalds offset = end; 135027b437c8SHerbert Xu continue; 13511da177e4SLinus Torvalds } 13521da177e4SLinus Torvalds 13539e903e08SEric Dumazet skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 135427b437c8SHerbert Xu 1355f4d26fb3SHerbert Xu drop_pages: 135627b437c8SHerbert Xu skb_shinfo(skb)->nr_frags = i; 135727b437c8SHerbert Xu 135827b437c8SHerbert Xu for (; i < nfrags; i++) 1359ea2ab693SIan Campbell skb_frag_unref(skb, i); 136027b437c8SHerbert Xu 136121dc3301SDavid S. Miller if (skb_has_frag_list(skb)) 136227b437c8SHerbert Xu skb_drop_fraglist(skb); 1363f4d26fb3SHerbert Xu goto done; 136427b437c8SHerbert Xu } 136527b437c8SHerbert Xu 136627b437c8SHerbert Xu for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 136727b437c8SHerbert Xu fragp = &frag->next) { 136827b437c8SHerbert Xu int end = offset + frag->len; 136927b437c8SHerbert Xu 137027b437c8SHerbert Xu if (skb_shared(frag)) { 137127b437c8SHerbert Xu struct sk_buff *nfrag; 137227b437c8SHerbert Xu 137327b437c8SHerbert Xu nfrag = skb_clone(frag, GFP_ATOMIC); 137427b437c8SHerbert Xu if (unlikely(!nfrag)) 137527b437c8SHerbert Xu return -ENOMEM; 137627b437c8SHerbert Xu 137727b437c8SHerbert Xu nfrag->next = frag->next; 137885bb2a60SEric Dumazet consume_skb(frag); 137927b437c8SHerbert Xu frag = nfrag; 138027b437c8SHerbert Xu *fragp = frag; 138127b437c8SHerbert Xu } 138227b437c8SHerbert Xu 138327b437c8SHerbert Xu if (end < len) { 138427b437c8SHerbert Xu offset = end; 138527b437c8SHerbert Xu continue; 138627b437c8SHerbert Xu } 138727b437c8SHerbert Xu 138827b437c8SHerbert Xu if (end > len && 138927b437c8SHerbert Xu unlikely((err = pskb_trim(frag, len - offset)))) 139027b437c8SHerbert Xu return err; 139127b437c8SHerbert Xu 139227b437c8SHerbert Xu if (frag->next) 139327b437c8SHerbert Xu skb_drop_list(&frag->next); 139427b437c8SHerbert Xu break; 139527b437c8SHerbert Xu } 139627b437c8SHerbert Xu 1397f4d26fb3SHerbert Xu done: 139827b437c8SHerbert Xu if (len > skb_headlen(skb)) { 13991da177e4SLinus Torvalds skb->data_len -= skb->len - len; 14001da177e4SLinus Torvalds skb->len = len; 14011da177e4SLinus Torvalds } else { 14021da177e4SLinus Torvalds skb->len = len; 14031da177e4SLinus Torvalds skb->data_len = 0; 140427a884dcSArnaldo Carvalho de Melo skb_set_tail_pointer(skb, len); 14051da177e4SLinus Torvalds } 14061da177e4SLinus Torvalds 14071da177e4SLinus Torvalds return 0; 14081da177e4SLinus Torvalds } 1409b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(___pskb_trim); 14101da177e4SLinus Torvalds 14111da177e4SLinus Torvalds /** 14121da177e4SLinus Torvalds * __pskb_pull_tail - advance tail of skb header 14131da177e4SLinus Torvalds * @skb: buffer to reallocate 14141da177e4SLinus Torvalds * @delta: number of bytes to advance tail 14151da177e4SLinus Torvalds * 14161da177e4SLinus Torvalds * The function makes sense only on a fragmented &sk_buff; 14171da177e4SLinus Torvalds * it expands the header, moving its tail forward and copying the necessary 14181da177e4SLinus Torvalds * data from the fragmented part. 14191da177e4SLinus Torvalds * 14201da177e4SLinus Torvalds * &sk_buff MUST have reference count of 1. 14211da177e4SLinus Torvalds * 14221da177e4SLinus Torvalds * Returns %NULL (and &sk_buff does not change) if the pull failed, 14231da177e4SLinus Torvalds * or the value of the new tail of the skb on success. 14241da177e4SLinus Torvalds * 14251da177e4SLinus Torvalds * All the pointers pointing into skb header may change and must be 14261da177e4SLinus Torvalds * reloaded after a call to this function. 14271da177e4SLinus Torvalds */ 14281da177e4SLinus Torvalds 14291da177e4SLinus Torvalds /* Moves tail of skb head forward, copying data from fragmented part, 14301da177e4SLinus Torvalds * when it is necessary. 14311da177e4SLinus Torvalds * 1. It may fail due to malloc failure. 14321da177e4SLinus Torvalds * 2. It may change skb pointers. 14331da177e4SLinus Torvalds * 14341da177e4SLinus Torvalds * It is pretty complicated. Luckily, it is called only in exceptional cases. 14351da177e4SLinus Torvalds */ 14361da177e4SLinus Torvalds unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) 14371da177e4SLinus Torvalds { 14381da177e4SLinus Torvalds /* If skb does not have enough free space at the tail, get a new one 14391da177e4SLinus Torvalds * plus 128 bytes for future expansions. If we have enough 14401da177e4SLinus Torvalds * room at tail, reallocate without expansion only if skb is cloned. 14411da177e4SLinus Torvalds */ 14424305b541SArnaldo Carvalho de Melo int i, k, eat = (skb->tail + delta) - skb->end; 14431da177e4SLinus Torvalds 14441da177e4SLinus Torvalds if (eat > 0 || skb_cloned(skb)) { 14451da177e4SLinus Torvalds if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 14461da177e4SLinus Torvalds GFP_ATOMIC)) 14471da177e4SLinus Torvalds return NULL; 14481da177e4SLinus Torvalds } 14491da177e4SLinus Torvalds 145027a884dcSArnaldo Carvalho de Melo if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)) 14511da177e4SLinus Torvalds BUG(); 14521da177e4SLinus Torvalds 14531da177e4SLinus Torvalds /* Optimization: no fragments, no reason to pre-estimate 14541da177e4SLinus Torvalds * size of pulled pages. Superb. 14551da177e4SLinus Torvalds */ 145621dc3301SDavid S. Miller if (!skb_has_frag_list(skb)) 14571da177e4SLinus Torvalds goto pull_pages; 14581da177e4SLinus Torvalds 14591da177e4SLinus Torvalds /* Estimate size of pulled pages. */ 14601da177e4SLinus Torvalds eat = delta; 14611da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 14629e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 14639e903e08SEric Dumazet 14649e903e08SEric Dumazet if (size >= eat) 14651da177e4SLinus Torvalds goto pull_pages; 14669e903e08SEric Dumazet eat -= size; 14671da177e4SLinus Torvalds } 14681da177e4SLinus Torvalds 14691da177e4SLinus Torvalds /* If we need to update the frag list, we are in trouble.
14701da177e4SLinus Torvalds * Certainly, it is possible to add an offset to the skb data, 14711da177e4SLinus Torvalds * but taking into account that pulling is expected to 14721da177e4SLinus Torvalds * be a very rare operation, it is worth fighting against 14731da177e4SLinus Torvalds * further bloating skb head and crucify ourselves here instead. 14741da177e4SLinus Torvalds * Pure masochism, indeed. 8)8) 14751da177e4SLinus Torvalds */ 14761da177e4SLinus Torvalds if (eat) { 14771da177e4SLinus Torvalds struct sk_buff *list = skb_shinfo(skb)->frag_list; 14781da177e4SLinus Torvalds struct sk_buff *clone = NULL; 14791da177e4SLinus Torvalds struct sk_buff *insp = NULL; 14801da177e4SLinus Torvalds 14811da177e4SLinus Torvalds do { 148209a62660SKris Katterjohn BUG_ON(!list); 14831da177e4SLinus Torvalds 14841da177e4SLinus Torvalds if (list->len <= eat) { 14851da177e4SLinus Torvalds /* Eaten as whole. */ 14861da177e4SLinus Torvalds eat -= list->len; 14871da177e4SLinus Torvalds list = list->next; 14881da177e4SLinus Torvalds insp = list; 14891da177e4SLinus Torvalds } else { 14901da177e4SLinus Torvalds /* Eaten partially. */ 14911da177e4SLinus Torvalds 14921da177e4SLinus Torvalds if (skb_shared(list)) { 14931da177e4SLinus Torvalds /* Sucks! We need to fork list. :-( */ 14941da177e4SLinus Torvalds clone = skb_clone(list, GFP_ATOMIC); 14951da177e4SLinus Torvalds if (!clone) 14961da177e4SLinus Torvalds return NULL; 14971da177e4SLinus Torvalds insp = list->next; 14981da177e4SLinus Torvalds list = clone; 14991da177e4SLinus Torvalds } else { 15001da177e4SLinus Torvalds /* This may be pulled without 15011da177e4SLinus Torvalds * problems. */ 15021da177e4SLinus Torvalds insp = list; 15031da177e4SLinus Torvalds } 15041da177e4SLinus Torvalds if (!pskb_pull(list, eat)) { 15051da177e4SLinus Torvalds kfree_skb(clone); 15061da177e4SLinus Torvalds return NULL; 15071da177e4SLinus Torvalds } 15081da177e4SLinus Torvalds break; 15091da177e4SLinus Torvalds } 15101da177e4SLinus Torvalds } while (eat); 15111da177e4SLinus Torvalds 15121da177e4SLinus Torvalds /* Free pulled out fragments. */ 15131da177e4SLinus Torvalds while ((list = skb_shinfo(skb)->frag_list) != insp) { 15141da177e4SLinus Torvalds skb_shinfo(skb)->frag_list = list->next; 15151da177e4SLinus Torvalds kfree_skb(list); 15161da177e4SLinus Torvalds } 15171da177e4SLinus Torvalds /* And insert new clone at head. */ 15181da177e4SLinus Torvalds if (clone) { 15191da177e4SLinus Torvalds clone->next = list; 15201da177e4SLinus Torvalds skb_shinfo(skb)->frag_list = clone; 15211da177e4SLinus Torvalds } 15221da177e4SLinus Torvalds } 15231da177e4SLinus Torvalds /* Success! Now we may commit changes to skb data.
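 * (The bytes themselves were already copied into the linear area by the
 * skb_copy_bits() call above; the pull_pages loop below only releases or
 * adjusts the frags that have been consumed.)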
*/ 15241da177e4SLinus Torvalds 15251da177e4SLinus Torvalds pull_pages: 15261da177e4SLinus Torvalds eat = delta; 15271da177e4SLinus Torvalds k = 0; 15281da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 15299e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 15309e903e08SEric Dumazet 15319e903e08SEric Dumazet if (size <= eat) { 1532ea2ab693SIan Campbell skb_frag_unref(skb, i); 15339e903e08SEric Dumazet eat -= size; 15341da177e4SLinus Torvalds } else { 15351da177e4SLinus Torvalds skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 15361da177e4SLinus Torvalds if (eat) { 15371da177e4SLinus Torvalds skb_shinfo(skb)->frags[k].page_offset += eat; 15389e903e08SEric Dumazet skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); 15391da177e4SLinus Torvalds eat = 0; 15401da177e4SLinus Torvalds } 15411da177e4SLinus Torvalds k++; 15421da177e4SLinus Torvalds } 15431da177e4SLinus Torvalds } 15441da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = k; 15451da177e4SLinus Torvalds 15461da177e4SLinus Torvalds skb->tail += delta; 15471da177e4SLinus Torvalds skb->data_len -= delta; 15481da177e4SLinus Torvalds 154927a884dcSArnaldo Carvalho de Melo return skb_tail_pointer(skb); 15501da177e4SLinus Torvalds } 1551b4ac530fSDavid S. Miller EXPORT_SYMBOL(__pskb_pull_tail); 15521da177e4SLinus Torvalds 155322019b17SEric Dumazet /** 155422019b17SEric Dumazet * skb_copy_bits - copy bits from skb to kernel buffer 155522019b17SEric Dumazet * @skb: source skb 155622019b17SEric Dumazet * @offset: offset in source 155722019b17SEric Dumazet * @to: destination buffer 155822019b17SEric Dumazet * @len: number of bytes to copy 155922019b17SEric Dumazet * 156022019b17SEric Dumazet * Copy the specified number of bytes from the source skb to the 156122019b17SEric Dumazet * destination buffer. 156222019b17SEric Dumazet * 156322019b17SEric Dumazet * CAUTION ! : 156422019b17SEric Dumazet * If its prototype is ever changed, 156522019b17SEric Dumazet * check arch/{*}/net/{*}.S files, 156622019b17SEric Dumazet * since it is called from BPF assembly code. 156722019b17SEric Dumazet */ 15681da177e4SLinus Torvalds int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 15691da177e4SLinus Torvalds { 15701a028e50SDavid S. Miller int start = skb_headlen(skb); 1571fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 1572fbb398a8SDavid S. Miller int i, copy; 15731da177e4SLinus Torvalds 15741da177e4SLinus Torvalds if (offset > (int)skb->len - len) 15751da177e4SLinus Torvalds goto fault; 15761da177e4SLinus Torvalds 15771da177e4SLinus Torvalds /* Copy header. */ 15781a028e50SDavid S. Miller if ((copy = start - offset) > 0) { 15791da177e4SLinus Torvalds if (copy > len) 15801da177e4SLinus Torvalds copy = len; 1581d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, offset, to, copy); 15821da177e4SLinus Torvalds if ((len -= copy) == 0) 15831da177e4SLinus Torvalds return 0; 15841da177e4SLinus Torvalds offset += copy; 15851da177e4SLinus Torvalds to += copy; 15861da177e4SLinus Torvalds } 15871da177e4SLinus Torvalds 15881da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 15891a028e50SDavid S. Miller int end; 159051c56b00SEric Dumazet skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 15911da177e4SLinus Torvalds 1592547b792cSIlpo Järvinen WARN_ON(start > offset + len); 15931a028e50SDavid S. 
Miller 159451c56b00SEric Dumazet end = start + skb_frag_size(f); 15951da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 15961da177e4SLinus Torvalds u8 *vaddr; 15971da177e4SLinus Torvalds 15981da177e4SLinus Torvalds if (copy > len) 15991da177e4SLinus Torvalds copy = len; 16001da177e4SLinus Torvalds 160151c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(f)); 16021da177e4SLinus Torvalds memcpy(to, 160351c56b00SEric Dumazet vaddr + f->page_offset + offset - start, 160451c56b00SEric Dumazet copy); 160551c56b00SEric Dumazet kunmap_atomic(vaddr); 16061da177e4SLinus Torvalds 16071da177e4SLinus Torvalds if ((len -= copy) == 0) 16081da177e4SLinus Torvalds return 0; 16091da177e4SLinus Torvalds offset += copy; 16101da177e4SLinus Torvalds to += copy; 16111da177e4SLinus Torvalds } 16121a028e50SDavid S. Miller start = end; 16131da177e4SLinus Torvalds } 16141da177e4SLinus Torvalds 1615fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 16161a028e50SDavid S. Miller int end; 16171da177e4SLinus Torvalds 1618547b792cSIlpo Järvinen WARN_ON(start > offset + len); 16191a028e50SDavid S. Miller 1620fbb398a8SDavid S. Miller end = start + frag_iter->len; 16211da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 16221da177e4SLinus Torvalds if (copy > len) 16231da177e4SLinus Torvalds copy = len; 1624fbb398a8SDavid S. Miller if (skb_copy_bits(frag_iter, offset - start, to, copy)) 16251da177e4SLinus Torvalds goto fault; 16261da177e4SLinus Torvalds if ((len -= copy) == 0) 16271da177e4SLinus Torvalds return 0; 16281da177e4SLinus Torvalds offset += copy; 16291da177e4SLinus Torvalds to += copy; 16301da177e4SLinus Torvalds } 16311a028e50SDavid S. Miller start = end; 16321da177e4SLinus Torvalds } 1633a6686f2fSShirley Ma 16341da177e4SLinus Torvalds if (!len) 16351da177e4SLinus Torvalds return 0; 16361da177e4SLinus Torvalds 16371da177e4SLinus Torvalds fault: 16381da177e4SLinus Torvalds return -EFAULT; 16391da177e4SLinus Torvalds } 1640b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_bits); 16411da177e4SLinus Torvalds 16429c55e01cSJens Axboe /* 16439c55e01cSJens Axboe * Callback from splice_to_pipe(), if we need to release some pages 16449c55e01cSJens Axboe * at the end of the spd in case we error'ed out in filling the pipe. 16459c55e01cSJens Axboe */ 16469c55e01cSJens Axboe static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 16479c55e01cSJens Axboe { 16488b9d3728SJarek Poplawski put_page(spd->pages[i]); 16498b9d3728SJarek Poplawski } 16509c55e01cSJens Axboe 1651a108d5f3SDavid S. 
Miller static struct page *linear_to_page(struct page *page, unsigned int *len, 16524fb66994SJarek Poplawski unsigned int *offset, 165318aafc62SEric Dumazet struct sock *sk) 16548b9d3728SJarek Poplawski { 16555640f768SEric Dumazet struct page_frag *pfrag = sk_page_frag(sk); 16568b9d3728SJarek Poplawski 16575640f768SEric Dumazet if (!sk_page_frag_refill(sk, pfrag)) 16588b9d3728SJarek Poplawski return NULL; 16594fb66994SJarek Poplawski 16605640f768SEric Dumazet *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 16614fb66994SJarek Poplawski 16625640f768SEric Dumazet memcpy(page_address(pfrag->page) + pfrag->offset, 16635640f768SEric Dumazet page_address(page) + *offset, *len); 16645640f768SEric Dumazet *offset = pfrag->offset; 16655640f768SEric Dumazet pfrag->offset += *len; 16664fb66994SJarek Poplawski 16675640f768SEric Dumazet return pfrag->page; 16689c55e01cSJens Axboe } 16699c55e01cSJens Axboe 167041c73a0dSEric Dumazet static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 167141c73a0dSEric Dumazet struct page *page, 167241c73a0dSEric Dumazet unsigned int offset) 167341c73a0dSEric Dumazet { 167441c73a0dSEric Dumazet return spd->nr_pages && 167541c73a0dSEric Dumazet spd->pages[spd->nr_pages - 1] == page && 167641c73a0dSEric Dumazet (spd->partial[spd->nr_pages - 1].offset + 167741c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len == offset); 167841c73a0dSEric Dumazet } 167941c73a0dSEric Dumazet 16809c55e01cSJens Axboe /* 16819c55e01cSJens Axboe * Fill page/offset/length into spd, if it can hold more pages. 16829c55e01cSJens Axboe */ 1683a108d5f3SDavid S. Miller static bool spd_fill_page(struct splice_pipe_desc *spd, 168435f3d14dSJens Axboe struct pipe_inode_info *pipe, struct page *page, 16854fb66994SJarek Poplawski unsigned int *len, unsigned int offset, 168618aafc62SEric Dumazet bool linear, 16877a67e56fSJarek Poplawski struct sock *sk) 16889c55e01cSJens Axboe { 168941c73a0dSEric Dumazet if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 1690a108d5f3SDavid S. Miller return true; 16919c55e01cSJens Axboe 16928b9d3728SJarek Poplawski if (linear) { 169318aafc62SEric Dumazet page = linear_to_page(page, len, &offset, sk); 16948b9d3728SJarek Poplawski if (!page) 1695a108d5f3SDavid S. Miller return true; 169641c73a0dSEric Dumazet } 169741c73a0dSEric Dumazet if (spd_can_coalesce(spd, page, offset)) { 169841c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len += *len; 1699a108d5f3SDavid S. Miller return false; 170041c73a0dSEric Dumazet } 17018b9d3728SJarek Poplawski get_page(page); 17029c55e01cSJens Axboe spd->pages[spd->nr_pages] = page; 17034fb66994SJarek Poplawski spd->partial[spd->nr_pages].len = *len; 17049c55e01cSJens Axboe spd->partial[spd->nr_pages].offset = offset; 17059c55e01cSJens Axboe spd->nr_pages++; 17068b9d3728SJarek Poplawski 1707a108d5f3SDavid S. Miller return false; 17089c55e01cSJens Axboe } 17099c55e01cSJens Axboe 1710a108d5f3SDavid S. Miller static bool __splice_segment(struct page *page, unsigned int poff, 17112870c43dSOctavian Purdila unsigned int plen, unsigned int *off, 171218aafc62SEric Dumazet unsigned int *len, 1713d7ccf7c0SEric Dumazet struct splice_pipe_desc *spd, bool linear, 171435f3d14dSJens Axboe struct sock *sk, 171535f3d14dSJens Axboe struct pipe_inode_info *pipe) 17169c55e01cSJens Axboe { 17172870c43dSOctavian Purdila if (!*len) 1718a108d5f3SDavid S. 
Miller return true; 17199c55e01cSJens Axboe 17202870c43dSOctavian Purdila /* skip this segment if already processed */ 17212870c43dSOctavian Purdila if (*off >= plen) { 17222870c43dSOctavian Purdila *off -= plen; 1723a108d5f3SDavid S. Miller return false; 17242870c43dSOctavian Purdila } 17252870c43dSOctavian Purdila 17262870c43dSOctavian Purdila /* ignore any bits we already processed */ 17279ca1b22dSEric Dumazet poff += *off; 17289ca1b22dSEric Dumazet plen -= *off; 17292870c43dSOctavian Purdila *off = 0; 17302870c43dSOctavian Purdila 173118aafc62SEric Dumazet do { 173218aafc62SEric Dumazet unsigned int flen = min(*len, plen); 17332870c43dSOctavian Purdila 173418aafc62SEric Dumazet if (spd_fill_page(spd, pipe, page, &flen, poff, 173518aafc62SEric Dumazet linear, sk)) 1736a108d5f3SDavid S. Miller return true; 173718aafc62SEric Dumazet poff += flen; 173818aafc62SEric Dumazet plen -= flen; 17392870c43dSOctavian Purdila *len -= flen; 174018aafc62SEric Dumazet } while (*len && plen); 17412870c43dSOctavian Purdila 1742a108d5f3SDavid S. Miller return false; 1743db43a282SOctavian Purdila } 17449c55e01cSJens Axboe 17459c55e01cSJens Axboe /* 1746a108d5f3SDavid S. Miller * Map linear and fragment data from the skb to spd. It reports true if the 17472870c43dSOctavian Purdila * pipe is full or if we already spliced the requested length. 17489c55e01cSJens Axboe */ 1749a108d5f3SDavid S. Miller static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 175035f3d14dSJens Axboe unsigned int *offset, unsigned int *len, 175135f3d14dSJens Axboe struct splice_pipe_desc *spd, struct sock *sk) 17522870c43dSOctavian Purdila { 17532870c43dSOctavian Purdila int seg; 17549c55e01cSJens Axboe 17551d0c0b32SEric Dumazet /* map the linear part : 17562996d31fSAlexander Duyck * If skb->head_frag is set, this 'linear' part is backed by a 17572996d31fSAlexander Duyck * fragment, and if the head is not shared with any clones then 17582996d31fSAlexander Duyck * we can avoid a copy since we own the head portion of this page. 17599c55e01cSJens Axboe */ 17602870c43dSOctavian Purdila if (__splice_segment(virt_to_page(skb->data), 17612870c43dSOctavian Purdila (unsigned long) skb->data & (PAGE_SIZE - 1), 17622870c43dSOctavian Purdila skb_headlen(skb), 176318aafc62SEric Dumazet offset, len, spd, 17643a7c1ee4SAlexander Duyck skb_head_is_locked(skb), 17651d0c0b32SEric Dumazet sk, pipe)) 1766a108d5f3SDavid S. Miller return true; 17679c55e01cSJens Axboe 17689c55e01cSJens Axboe /* 17699c55e01cSJens Axboe * then map the fragments 17709c55e01cSJens Axboe */ 17719c55e01cSJens Axboe for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 17729c55e01cSJens Axboe const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 17739c55e01cSJens Axboe 1774ea2ab693SIan Campbell if (__splice_segment(skb_frag_page(f), 17759e903e08SEric Dumazet f->page_offset, skb_frag_size(f), 177618aafc62SEric Dumazet offset, len, spd, false, sk, pipe)) 1777a108d5f3SDavid S. Miller return true; 17789c55e01cSJens Axboe } 17799c55e01cSJens Axboe 1780a108d5f3SDavid S. Miller return false; 17819c55e01cSJens Axboe } 17829c55e01cSJens Axboe 17839c55e01cSJens Axboe /* 17849c55e01cSJens Axboe * Map data from the skb to a pipe. Should handle both the linear part, 17859c55e01cSJens Axboe * the fragments, and the frag list. It does NOT handle frag lists within 17869c55e01cSJens Axboe * the frag list, if such a thing exists. We'd probably need to recurse to 17879c55e01cSJens Axboe * handle that cleanly. 
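 *
 * Hedged calling sketch (illustrative only; "off", "want" and "spliced" are
 * hypothetical names):
 *
 *	// splice up to "want" bytes of this skb, starting at "off", into an
 *	// already set-up pipe; the caller holds the socket lock, which
 *	// skb_splice_bits() drops temporarily around splice_to_pipe()
 *	spliced = skb_splice_bits(skb, off, pipe, want, flags);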
17889c55e01cSJens Axboe */ 17898b9d3728SJarek Poplawski int skb_splice_bits(struct sk_buff *skb, unsigned int offset, 17909c55e01cSJens Axboe struct pipe_inode_info *pipe, unsigned int tlen, 17919c55e01cSJens Axboe unsigned int flags) 17929c55e01cSJens Axboe { 179341c73a0dSEric Dumazet struct partial_page partial[MAX_SKB_FRAGS]; 179441c73a0dSEric Dumazet struct page *pages[MAX_SKB_FRAGS]; 17959c55e01cSJens Axboe struct splice_pipe_desc spd = { 17969c55e01cSJens Axboe .pages = pages, 17979c55e01cSJens Axboe .partial = partial, 1798047fe360SEric Dumazet .nr_pages_max = MAX_SKB_FRAGS, 17999c55e01cSJens Axboe .flags = flags, 18009c55e01cSJens Axboe .ops = &sock_pipe_buf_ops, 18019c55e01cSJens Axboe .spd_release = sock_spd_release, 18029c55e01cSJens Axboe }; 1803fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 18047a67e56fSJarek Poplawski struct sock *sk = skb->sk; 180535f3d14dSJens Axboe int ret = 0; 180635f3d14dSJens Axboe 18079c55e01cSJens Axboe /* 18089c55e01cSJens Axboe * __skb_splice_bits() only fails if the output has no room left, 18099c55e01cSJens Axboe * so no point in going over the frag_list for the error case. 18109c55e01cSJens Axboe */ 181135f3d14dSJens Axboe if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) 18129c55e01cSJens Axboe goto done; 18139c55e01cSJens Axboe else if (!tlen) 18149c55e01cSJens Axboe goto done; 18159c55e01cSJens Axboe 18169c55e01cSJens Axboe /* 18179c55e01cSJens Axboe * now see if we have a frag_list to map 18189c55e01cSJens Axboe */ 1819fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 1820fbb398a8SDavid S. Miller if (!tlen) 18219c55e01cSJens Axboe break; 182235f3d14dSJens Axboe if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk)) 1823fbb398a8SDavid S. Miller break; 18249c55e01cSJens Axboe } 18259c55e01cSJens Axboe 18269c55e01cSJens Axboe done: 18279c55e01cSJens Axboe if (spd.nr_pages) { 18289c55e01cSJens Axboe /* 18299c55e01cSJens Axboe * Drop the socket lock, otherwise we have reverse 18309c55e01cSJens Axboe * locking dependencies between sk_lock and i_mutex 18319c55e01cSJens Axboe * here as compared to sendfile(). We enter here 18329c55e01cSJens Axboe * with the socket lock held, and splice_to_pipe() will 18339c55e01cSJens Axboe * grab the pipe inode lock. For sendfile() emulation, 18349c55e01cSJens Axboe * we call into ->sendpage() with the i_mutex lock held 18359c55e01cSJens Axboe * and networking will grab the socket lock. 18369c55e01cSJens Axboe */ 1837293ad604SOctavian Purdila release_sock(sk); 18389c55e01cSJens Axboe ret = splice_to_pipe(pipe, &spd); 1839293ad604SOctavian Purdila lock_sock(sk); 18409c55e01cSJens Axboe } 18419c55e01cSJens Axboe 184235f3d14dSJens Axboe return ret; 18439c55e01cSJens Axboe } 18449c55e01cSJens Axboe 1845357b40a1SHerbert Xu /** 1846357b40a1SHerbert Xu * skb_store_bits - store bits from kernel buffer to skb 1847357b40a1SHerbert Xu * @skb: destination buffer 1848357b40a1SHerbert Xu * @offset: offset in destination 1849357b40a1SHerbert Xu * @from: source buffer 1850357b40a1SHerbert Xu * @len: number of bytes to copy 1851357b40a1SHerbert Xu * 1852357b40a1SHerbert Xu * Copy the specified number of bytes from the source buffer to the 1853357b40a1SHerbert Xu * destination skb. This function handles all the messy bits of 1854357b40a1SHerbert Xu * traversing fragment lists and such. 1855357b40a1SHerbert Xu */ 1856357b40a1SHerbert Xu 18570c6fcc8aSStephen Hemminger int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 1858357b40a1SHerbert Xu { 18591a028e50SDavid S. 
Miller int start = skb_headlen(skb); 1860fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 1861fbb398a8SDavid S. Miller int i, copy; 1862357b40a1SHerbert Xu 1863357b40a1SHerbert Xu if (offset > (int)skb->len - len) 1864357b40a1SHerbert Xu goto fault; 1865357b40a1SHerbert Xu 18661a028e50SDavid S. Miller if ((copy = start - offset) > 0) { 1867357b40a1SHerbert Xu if (copy > len) 1868357b40a1SHerbert Xu copy = len; 186927d7ff46SArnaldo Carvalho de Melo skb_copy_to_linear_data_offset(skb, offset, from, copy); 1870357b40a1SHerbert Xu if ((len -= copy) == 0) 1871357b40a1SHerbert Xu return 0; 1872357b40a1SHerbert Xu offset += copy; 1873357b40a1SHerbert Xu from += copy; 1874357b40a1SHerbert Xu } 1875357b40a1SHerbert Xu 1876357b40a1SHerbert Xu for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1877357b40a1SHerbert Xu skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 18781a028e50SDavid S. Miller int end; 1879357b40a1SHerbert Xu 1880547b792cSIlpo Järvinen WARN_ON(start > offset + len); 18811a028e50SDavid S. Miller 18829e903e08SEric Dumazet end = start + skb_frag_size(frag); 1883357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 1884357b40a1SHerbert Xu u8 *vaddr; 1885357b40a1SHerbert Xu 1886357b40a1SHerbert Xu if (copy > len) 1887357b40a1SHerbert Xu copy = len; 1888357b40a1SHerbert Xu 188951c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(frag)); 18901a028e50SDavid S. Miller memcpy(vaddr + frag->page_offset + offset - start, 18911a028e50SDavid S. Miller from, copy); 189251c56b00SEric Dumazet kunmap_atomic(vaddr); 1893357b40a1SHerbert Xu 1894357b40a1SHerbert Xu if ((len -= copy) == 0) 1895357b40a1SHerbert Xu return 0; 1896357b40a1SHerbert Xu offset += copy; 1897357b40a1SHerbert Xu from += copy; 1898357b40a1SHerbert Xu } 18991a028e50SDavid S. Miller start = end; 1900357b40a1SHerbert Xu } 1901357b40a1SHerbert Xu 1902fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 19031a028e50SDavid S. Miller int end; 1904357b40a1SHerbert Xu 1905547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19061a028e50SDavid S. Miller 1907fbb398a8SDavid S. Miller end = start + frag_iter->len; 1908357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 1909357b40a1SHerbert Xu if (copy > len) 1910357b40a1SHerbert Xu copy = len; 1911fbb398a8SDavid S. Miller if (skb_store_bits(frag_iter, offset - start, 19121a028e50SDavid S. Miller from, copy)) 1913357b40a1SHerbert Xu goto fault; 1914357b40a1SHerbert Xu if ((len -= copy) == 0) 1915357b40a1SHerbert Xu return 0; 1916357b40a1SHerbert Xu offset += copy; 1917357b40a1SHerbert Xu from += copy; 1918357b40a1SHerbert Xu } 19191a028e50SDavid S. Miller start = end; 1920357b40a1SHerbert Xu } 1921357b40a1SHerbert Xu if (!len) 1922357b40a1SHerbert Xu return 0; 1923357b40a1SHerbert Xu 1924357b40a1SHerbert Xu fault: 1925357b40a1SHerbert Xu return -EFAULT; 1926357b40a1SHerbert Xu } 1927357b40a1SHerbert Xu EXPORT_SYMBOL(skb_store_bits); 1928357b40a1SHerbert Xu 19291da177e4SLinus Torvalds /* Checksum skb data. */ 19301da177e4SLinus Torvalds 19312bbbc868SAl Viro __wsum skb_checksum(const struct sk_buff *skb, int offset, 19322bbbc868SAl Viro int len, __wsum csum) 19331da177e4SLinus Torvalds { 19341a028e50SDavid S. Miller int start = skb_headlen(skb); 19351a028e50SDavid S. Miller int i, copy = start - offset; 1936fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 19371da177e4SLinus Torvalds int pos = 0; 19381da177e4SLinus Torvalds 19391da177e4SLinus Torvalds /* Checksum header. 
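 * (i.e. the portion of the data that lives in the linear skb head)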
*/ 19401da177e4SLinus Torvalds if (copy > 0) { 19411da177e4SLinus Torvalds if (copy > len) 19421da177e4SLinus Torvalds copy = len; 19431da177e4SLinus Torvalds csum = csum_partial(skb->data + offset, copy, csum); 19441da177e4SLinus Torvalds if ((len -= copy) == 0) 19451da177e4SLinus Torvalds return csum; 19461da177e4SLinus Torvalds offset += copy; 19471da177e4SLinus Torvalds pos = copy; 19481da177e4SLinus Torvalds } 19491da177e4SLinus Torvalds 19501da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 19511a028e50SDavid S. Miller int end; 195251c56b00SEric Dumazet skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 19531da177e4SLinus Torvalds 1954547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19551a028e50SDavid S. Miller 195651c56b00SEric Dumazet end = start + skb_frag_size(frag); 19571da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 195844bb9363SAl Viro __wsum csum2; 19591da177e4SLinus Torvalds u8 *vaddr; 19601da177e4SLinus Torvalds 19611da177e4SLinus Torvalds if (copy > len) 19621da177e4SLinus Torvalds copy = len; 196351c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(frag)); 19641a028e50SDavid S. Miller csum2 = csum_partial(vaddr + frag->page_offset + 19651a028e50SDavid S. Miller offset - start, copy, 0); 196651c56b00SEric Dumazet kunmap_atomic(vaddr); 19671da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 19681da177e4SLinus Torvalds if (!(len -= copy)) 19691da177e4SLinus Torvalds return csum; 19701da177e4SLinus Torvalds offset += copy; 19711da177e4SLinus Torvalds pos += copy; 19721da177e4SLinus Torvalds } 19731a028e50SDavid S. Miller start = end; 19741da177e4SLinus Torvalds } 19751da177e4SLinus Torvalds 1976fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 19771a028e50SDavid S. Miller int end; 19781da177e4SLinus Torvalds 1979547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19801a028e50SDavid S. Miller 1981fbb398a8SDavid S. Miller end = start + frag_iter->len; 19821da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 19835f92a738SAl Viro __wsum csum2; 19841da177e4SLinus Torvalds if (copy > len) 19851da177e4SLinus Torvalds copy = len; 1986fbb398a8SDavid S. Miller csum2 = skb_checksum(frag_iter, offset - start, 19871a028e50SDavid S. Miller copy, 0); 19881da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 19891da177e4SLinus Torvalds if ((len -= copy) == 0) 19901da177e4SLinus Torvalds return csum; 19911da177e4SLinus Torvalds offset += copy; 19921da177e4SLinus Torvalds pos += copy; 19931da177e4SLinus Torvalds } 19941a028e50SDavid S. Miller start = end; 19951da177e4SLinus Torvalds } 199609a62660SKris Katterjohn BUG_ON(len); 19971da177e4SLinus Torvalds 19981da177e4SLinus Torvalds return csum; 19991da177e4SLinus Torvalds } 2000b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_checksum); 20011da177e4SLinus Torvalds 20021da177e4SLinus Torvalds /* Both of above in one bottle. */ 20031da177e4SLinus Torvalds 200481d77662SAl Viro __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 200581d77662SAl Viro u8 *to, int len, __wsum csum) 20061da177e4SLinus Torvalds { 20071a028e50SDavid S. Miller int start = skb_headlen(skb); 20081a028e50SDavid S. Miller int i, copy = start - offset; 2009fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 20101da177e4SLinus Torvalds int pos = 0; 20111da177e4SLinus Torvalds 20121da177e4SLinus Torvalds /* Copy header. 
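 * (the linear part is copied and checksummed in one pass by
 * csum_partial_copy_nocheck() below)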
*/ 20131da177e4SLinus Torvalds if (copy > 0) { 20141da177e4SLinus Torvalds if (copy > len) 20151da177e4SLinus Torvalds copy = len; 20161da177e4SLinus Torvalds csum = csum_partial_copy_nocheck(skb->data + offset, to, 20171da177e4SLinus Torvalds copy, csum); 20181da177e4SLinus Torvalds if ((len -= copy) == 0) 20191da177e4SLinus Torvalds return csum; 20201da177e4SLinus Torvalds offset += copy; 20211da177e4SLinus Torvalds to += copy; 20221da177e4SLinus Torvalds pos = copy; 20231da177e4SLinus Torvalds } 20241da177e4SLinus Torvalds 20251da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 20261a028e50SDavid S. Miller int end; 20271da177e4SLinus Torvalds 2028547b792cSIlpo Järvinen WARN_ON(start > offset + len); 20291a028e50SDavid S. Miller 20309e903e08SEric Dumazet end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 20311da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 20325084205fSAl Viro __wsum csum2; 20331da177e4SLinus Torvalds u8 *vaddr; 20341da177e4SLinus Torvalds skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 20351da177e4SLinus Torvalds 20361da177e4SLinus Torvalds if (copy > len) 20371da177e4SLinus Torvalds copy = len; 203851c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(frag)); 20391da177e4SLinus Torvalds csum2 = csum_partial_copy_nocheck(vaddr + 20401a028e50SDavid S. Miller frag->page_offset + 20411a028e50SDavid S. Miller offset - start, to, 20421a028e50SDavid S. Miller copy, 0); 204351c56b00SEric Dumazet kunmap_atomic(vaddr); 20441da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 20451da177e4SLinus Torvalds if (!(len -= copy)) 20461da177e4SLinus Torvalds return csum; 20471da177e4SLinus Torvalds offset += copy; 20481da177e4SLinus Torvalds to += copy; 20491da177e4SLinus Torvalds pos += copy; 20501da177e4SLinus Torvalds } 20511a028e50SDavid S. Miller start = end; 20521da177e4SLinus Torvalds } 20531da177e4SLinus Torvalds 2054fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 205581d77662SAl Viro __wsum csum2; 20561a028e50SDavid S. Miller int end; 20571da177e4SLinus Torvalds 2058547b792cSIlpo Järvinen WARN_ON(start > offset + len); 20591a028e50SDavid S. Miller 2060fbb398a8SDavid S. Miller end = start + frag_iter->len; 20611da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 20621da177e4SLinus Torvalds if (copy > len) 20631da177e4SLinus Torvalds copy = len; 2064fbb398a8SDavid S. Miller csum2 = skb_copy_and_csum_bits(frag_iter, 20651a028e50SDavid S. Miller offset - start, 20661da177e4SLinus Torvalds to, copy, 0); 20671da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 20681da177e4SLinus Torvalds if ((len -= copy) == 0) 20691da177e4SLinus Torvalds return csum; 20701da177e4SLinus Torvalds offset += copy; 20711da177e4SLinus Torvalds to += copy; 20721da177e4SLinus Torvalds pos += copy; 20731da177e4SLinus Torvalds } 20741a028e50SDavid S. Miller start = end; 20751da177e4SLinus Torvalds } 207609a62660SKris Katterjohn BUG_ON(len); 20771da177e4SLinus Torvalds return csum; 20781da177e4SLinus Torvalds } 2079b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_copy_and_csum_bits); 20801da177e4SLinus Torvalds 20811da177e4SLinus Torvalds void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 20821da177e4SLinus Torvalds { 2083d3bc23e7SAl Viro __wsum csum; 20841da177e4SLinus Torvalds long csstart; 20851da177e4SLinus Torvalds 208684fa7933SPatrick McHardy if (skb->ip_summed == CHECKSUM_PARTIAL) 208755508d60SMichał Mirosław csstart = skb_checksum_start_offset(skb); 20881da177e4SLinus Torvalds else 20891da177e4SLinus Torvalds csstart = skb_headlen(skb); 20901da177e4SLinus Torvalds 209109a62660SKris Katterjohn BUG_ON(csstart > skb_headlen(skb)); 20921da177e4SLinus Torvalds 2093d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data(skb, to, csstart); 20941da177e4SLinus Torvalds 20951da177e4SLinus Torvalds csum = 0; 20961da177e4SLinus Torvalds if (csstart != skb->len) 20971da177e4SLinus Torvalds csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 20981da177e4SLinus Torvalds skb->len - csstart, 0); 20991da177e4SLinus Torvalds 210084fa7933SPatrick McHardy if (skb->ip_summed == CHECKSUM_PARTIAL) { 2101ff1dcadbSAl Viro long csstuff = csstart + skb->csum_offset; 21021da177e4SLinus Torvalds 2103d3bc23e7SAl Viro *((__sum16 *)(to + csstuff)) = csum_fold(csum); 21041da177e4SLinus Torvalds } 21051da177e4SLinus Torvalds } 2106b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_dev); 21071da177e4SLinus Torvalds 21081da177e4SLinus Torvalds /** 21091da177e4SLinus Torvalds * skb_dequeue - remove from the head of the queue 21101da177e4SLinus Torvalds * @list: list to dequeue from 21111da177e4SLinus Torvalds * 21121da177e4SLinus Torvalds * Remove the head of the list. The list lock is taken so the function 21131da177e4SLinus Torvalds * may be used safely with other locking list functions. The head item is 21141da177e4SLinus Torvalds * returned or %NULL if the list is empty. 21151da177e4SLinus Torvalds */ 21161da177e4SLinus Torvalds 21171da177e4SLinus Torvalds struct sk_buff *skb_dequeue(struct sk_buff_head *list) 21181da177e4SLinus Torvalds { 21191da177e4SLinus Torvalds unsigned long flags; 21201da177e4SLinus Torvalds struct sk_buff *result; 21211da177e4SLinus Torvalds 21221da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 21231da177e4SLinus Torvalds result = __skb_dequeue(list); 21241da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 21251da177e4SLinus Torvalds return result; 21261da177e4SLinus Torvalds } 2127b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue); 21281da177e4SLinus Torvalds 21291da177e4SLinus Torvalds /** 21301da177e4SLinus Torvalds * skb_dequeue_tail - remove from the tail of the queue 21311da177e4SLinus Torvalds * @list: list to dequeue from 21321da177e4SLinus Torvalds * 21331da177e4SLinus Torvalds * Remove the tail of the list. The list lock is taken so the function 21341da177e4SLinus Torvalds * may be used safely with other locking list functions. The tail item is 21351da177e4SLinus Torvalds * returned or %NULL if the list is empty. 
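 *
 * Minimal illustrative sketch (the queue name is hypothetical):
 *
 *	struct sk_buff *skb;
 *
 *	while ((skb = skb_dequeue_tail(&txq)) != NULL)
 *		kfree_skb(skb);		// drain the queue from the back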
21361da177e4SLinus Torvalds */ 21371da177e4SLinus Torvalds struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 21381da177e4SLinus Torvalds { 21391da177e4SLinus Torvalds unsigned long flags; 21401da177e4SLinus Torvalds struct sk_buff *result; 21411da177e4SLinus Torvalds 21421da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 21431da177e4SLinus Torvalds result = __skb_dequeue_tail(list); 21441da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 21451da177e4SLinus Torvalds return result; 21461da177e4SLinus Torvalds } 2147b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue_tail); 21481da177e4SLinus Torvalds 21491da177e4SLinus Torvalds /** 21501da177e4SLinus Torvalds * skb_queue_purge - empty a list 21511da177e4SLinus Torvalds * @list: list to empty 21521da177e4SLinus Torvalds * 21531da177e4SLinus Torvalds * Delete all buffers on an &sk_buff list. Each buffer is removed from 21541da177e4SLinus Torvalds * the list and one reference dropped. This function takes the list 21551da177e4SLinus Torvalds * lock and is atomic with respect to other list locking functions. 21561da177e4SLinus Torvalds */ 21571da177e4SLinus Torvalds void skb_queue_purge(struct sk_buff_head *list) 21581da177e4SLinus Torvalds { 21591da177e4SLinus Torvalds struct sk_buff *skb; 21601da177e4SLinus Torvalds while ((skb = skb_dequeue(list)) != NULL) 21611da177e4SLinus Torvalds kfree_skb(skb); 21621da177e4SLinus Torvalds } 2163b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_purge); 21641da177e4SLinus Torvalds 21651da177e4SLinus Torvalds /** 21661da177e4SLinus Torvalds * skb_queue_head - queue a buffer at the list head 21671da177e4SLinus Torvalds * @list: list to use 21681da177e4SLinus Torvalds * @newsk: buffer to queue 21691da177e4SLinus Torvalds * 21701da177e4SLinus Torvalds * Queue a buffer at the start of the list. This function takes the 21711da177e4SLinus Torvalds * list lock and can be used with other locking &sk_buff functions 21721da177e4SLinus Torvalds * safely. 21731da177e4SLinus Torvalds * 21741da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 21751da177e4SLinus Torvalds */ 21761da177e4SLinus Torvalds void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 21771da177e4SLinus Torvalds { 21781da177e4SLinus Torvalds unsigned long flags; 21791da177e4SLinus Torvalds 21801da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 21811da177e4SLinus Torvalds __skb_queue_head(list, newsk); 21821da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 21831da177e4SLinus Torvalds } 2184b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_head); 21851da177e4SLinus Torvalds 21861da177e4SLinus Torvalds /** 21871da177e4SLinus Torvalds * skb_queue_tail - queue a buffer at the list tail 21881da177e4SLinus Torvalds * @list: list to use 21891da177e4SLinus Torvalds * @newsk: buffer to queue 21901da177e4SLinus Torvalds * 21911da177e4SLinus Torvalds * Queue a buffer at the tail of the list. This function takes the 21921da177e4SLinus Torvalds * list lock and can be used with other locking &sk_buff functions 21931da177e4SLinus Torvalds * safely. 21941da177e4SLinus Torvalds * 21951da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time.
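 *
 * Typical producer/consumer pairing (illustrative; "rxq" is a hypothetical
 * &sk_buff_head initialised with skb_queue_head_init()):
 *
 *	skb_queue_tail(&rxq, skb);	// producer side, e.g. interrupt path
 *	...
 *	skb = skb_dequeue(&rxq);	// consumer side, e.g. process context
 *
 * Both helpers take the queue lock internally, so no extra locking is
 * needed around the individual calls.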
21961da177e4SLinus Torvalds */ 21971da177e4SLinus Torvalds void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 21981da177e4SLinus Torvalds { 21991da177e4SLinus Torvalds unsigned long flags; 22001da177e4SLinus Torvalds 22011da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 22021da177e4SLinus Torvalds __skb_queue_tail(list, newsk); 22031da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 22041da177e4SLinus Torvalds } 2205b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_tail); 22068728b834SDavid S. Miller 22071da177e4SLinus Torvalds /** 22081da177e4SLinus Torvalds * skb_unlink - remove a buffer from a list 22091da177e4SLinus Torvalds * @skb: buffer to remove 22108728b834SDavid S. Miller * @list: list to use 22111da177e4SLinus Torvalds * 22128728b834SDavid S. Miller * Remove a packet from a list. The list locks are taken and this 22138728b834SDavid S. Miller * function is atomic with respect to other list locked calls 22141da177e4SLinus Torvalds * 22158728b834SDavid S. Miller * You must know what list the SKB is on. 22161da177e4SLinus Torvalds */ 22178728b834SDavid S. Miller void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 22181da177e4SLinus Torvalds { 22191da177e4SLinus Torvalds unsigned long flags; 22201da177e4SLinus Torvalds 22211da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 22228728b834SDavid S. Miller __skb_unlink(skb, list); 22231da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 22241da177e4SLinus Torvalds } 2225b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_unlink); 22261da177e4SLinus Torvalds 22271da177e4SLinus Torvalds /** 22281da177e4SLinus Torvalds * skb_append - append a buffer 22291da177e4SLinus Torvalds * @old: buffer to insert after 22301da177e4SLinus Torvalds * @newsk: buffer to insert 22318728b834SDavid S. Miller * @list: list to use 22321da177e4SLinus Torvalds * 22331da177e4SLinus Torvalds * Place a packet after a given packet in a list. The list locks are taken 22341da177e4SLinus Torvalds * and this function is atomic with respect to other list locked calls. 22351da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 22361da177e4SLinus Torvalds */ 22378728b834SDavid S. Miller void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 22381da177e4SLinus Torvalds { 22391da177e4SLinus Torvalds unsigned long flags; 22401da177e4SLinus Torvalds 22418728b834SDavid S. Miller spin_lock_irqsave(&list->lock, flags); 22427de6c033SGerrit Renker __skb_queue_after(list, old, newsk); 22438728b834SDavid S. Miller spin_unlock_irqrestore(&list->lock, flags); 22441da177e4SLinus Torvalds } 2245b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append); 22461da177e4SLinus Torvalds 22471da177e4SLinus Torvalds /** 22481da177e4SLinus Torvalds * skb_insert - insert a buffer 22491da177e4SLinus Torvalds * @old: buffer to insert before 22501da177e4SLinus Torvalds * @newsk: buffer to insert 22518728b834SDavid S. Miller * @list: list to use 22521da177e4SLinus Torvalds * 22538728b834SDavid S. Miller * Place a packet before a given packet in a list. The list locks are 22548728b834SDavid S. Miller * taken and this function is atomic with respect to other list locked 22558728b834SDavid S. Miller * calls. 22568728b834SDavid S. Miller * 22571da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 22581da177e4SLinus Torvalds */ 22598728b834SDavid S. 
Miller void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 22601da177e4SLinus Torvalds { 22611da177e4SLinus Torvalds unsigned long flags; 22621da177e4SLinus Torvalds 22638728b834SDavid S. Miller spin_lock_irqsave(&list->lock, flags); 22648728b834SDavid S. Miller __skb_insert(newsk, old->prev, old, list); 22658728b834SDavid S. Miller spin_unlock_irqrestore(&list->lock, flags); 22661da177e4SLinus Torvalds } 2267b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_insert); 22681da177e4SLinus Torvalds 22691da177e4SLinus Torvalds static inline void skb_split_inside_header(struct sk_buff *skb, 22701da177e4SLinus Torvalds struct sk_buff* skb1, 22711da177e4SLinus Torvalds const u32 len, const int pos) 22721da177e4SLinus Torvalds { 22731da177e4SLinus Torvalds int i; 22741da177e4SLinus Torvalds 2275d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 2276d626f62bSArnaldo Carvalho de Melo pos - len); 22771da177e4SLinus Torvalds /* And move data appendix as is. */ 22781da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 22791da177e4SLinus Torvalds skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 22801da177e4SLinus Torvalds 22811da177e4SLinus Torvalds skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 22821da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = 0; 22831da177e4SLinus Torvalds skb1->data_len = skb->data_len; 22841da177e4SLinus Torvalds skb1->len += skb1->data_len; 22851da177e4SLinus Torvalds skb->data_len = 0; 22861da177e4SLinus Torvalds skb->len = len; 228727a884dcSArnaldo Carvalho de Melo skb_set_tail_pointer(skb, len); 22881da177e4SLinus Torvalds } 22891da177e4SLinus Torvalds 22901da177e4SLinus Torvalds static inline void skb_split_no_header(struct sk_buff *skb, 22911da177e4SLinus Torvalds struct sk_buff* skb1, 22921da177e4SLinus Torvalds const u32 len, int pos) 22931da177e4SLinus Torvalds { 22941da177e4SLinus Torvalds int i, k = 0; 22951da177e4SLinus Torvalds const int nfrags = skb_shinfo(skb)->nr_frags; 22961da177e4SLinus Torvalds 22971da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = 0; 22981da177e4SLinus Torvalds skb1->len = skb1->data_len = skb->len - len; 22991da177e4SLinus Torvalds skb->len = len; 23001da177e4SLinus Torvalds skb->data_len = len - pos; 23011da177e4SLinus Torvalds 23021da177e4SLinus Torvalds for (i = 0; i < nfrags; i++) { 23039e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 23041da177e4SLinus Torvalds 23051da177e4SLinus Torvalds if (pos + size > len) { 23061da177e4SLinus Torvalds skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 23071da177e4SLinus Torvalds 23081da177e4SLinus Torvalds if (pos < len) { 23091da177e4SLinus Torvalds /* Split frag. 23101da177e4SLinus Torvalds * We have two variants in this case: 23111da177e4SLinus Torvalds * 1. Move all the frag to the second 23121da177e4SLinus Torvalds * part, if it is possible. F.e. 23131da177e4SLinus Torvalds * this approach is mandatory for TUX, 23141da177e4SLinus Torvalds * where splitting is expensive. 23151da177e4SLinus Torvalds * 2. Split accurately. This is what we do here.
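 * (Variant 2 costs one extra page reference: after the split the same
 * page is referenced from both skb and skb1, each covering a disjoint
 * range of it.)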
23161da177e4SLinus Torvalds */ 2317ea2ab693SIan Campbell skb_frag_ref(skb, i); 23181da177e4SLinus Torvalds skb_shinfo(skb1)->frags[0].page_offset += len - pos; 23199e903e08SEric Dumazet skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 23209e903e08SEric Dumazet skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 23211da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags++; 23221da177e4SLinus Torvalds } 23231da177e4SLinus Torvalds k++; 23241da177e4SLinus Torvalds } else 23251da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags++; 23261da177e4SLinus Torvalds pos += size; 23271da177e4SLinus Torvalds } 23281da177e4SLinus Torvalds skb_shinfo(skb1)->nr_frags = k; 23291da177e4SLinus Torvalds } 23301da177e4SLinus Torvalds 23311da177e4SLinus Torvalds /** 23321da177e4SLinus Torvalds * skb_split - Split fragmented skb to two parts at length len. 23331da177e4SLinus Torvalds * @skb: the buffer to split 23341da177e4SLinus Torvalds * @skb1: the buffer to receive the second part 23351da177e4SLinus Torvalds * @len: new length for skb 23361da177e4SLinus Torvalds */ 23371da177e4SLinus Torvalds void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 23381da177e4SLinus Torvalds { 23391da177e4SLinus Torvalds int pos = skb_headlen(skb); 23401da177e4SLinus Torvalds 234168534c68SAmerigo Wang skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; 23421da177e4SLinus Torvalds if (len < pos) /* Split line is inside header. */ 23431da177e4SLinus Torvalds skb_split_inside_header(skb, skb1, len, pos); 23441da177e4SLinus Torvalds else /* Second chunk has no header, nothing to copy. */ 23451da177e4SLinus Torvalds skb_split_no_header(skb, skb1, len, pos); 23461da177e4SLinus Torvalds } 2347b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_split); 23481da177e4SLinus Torvalds 23499f782db3SIlpo Järvinen /* Shifting from/to a cloned skb is a no-go. 23509f782db3SIlpo Järvinen * 23519f782db3SIlpo Järvinen * Caller cannot keep skb_shinfo related pointers past calling here! 23529f782db3SIlpo Järvinen */ 2353832d11c5SIlpo Järvinen static int skb_prepare_for_shift(struct sk_buff *skb) 2354832d11c5SIlpo Järvinen { 23550ace2856SIlpo Järvinen return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2356832d11c5SIlpo Järvinen } 2357832d11c5SIlpo Järvinen 2358832d11c5SIlpo Järvinen /** 2359832d11c5SIlpo Järvinen * skb_shift - Shifts paged data partially from skb to another 2360832d11c5SIlpo Järvinen * @tgt: buffer into which tail data gets added 2361832d11c5SIlpo Järvinen * @skb: buffer from which the paged data comes 2362832d11c5SIlpo Järvinen * @shiftlen: shift up to this many bytes 2363832d11c5SIlpo Järvinen * 2364832d11c5SIlpo Järvinen * Attempts to shift up to shiftlen worth of bytes, which may be less than 236520e994a0SFeng King * the length of the skb, from skb to tgt. Returns the number of bytes shifted. 2366832d11c5SIlpo Järvinen * It's up to the caller to free skb if everything was shifted. 2367832d11c5SIlpo Järvinen * 2368832d11c5SIlpo Järvinen * If @tgt runs out of frags, the whole operation is aborted. 2369832d11c5SIlpo Järvinen * 2370832d11c5SIlpo Järvinen * Skb cannot include anything but paged data while tgt is allowed 2371832d11c5SIlpo Järvinen * to have non-paged data as well. 2372832d11c5SIlpo Järvinen * 2373832d11c5SIlpo Järvinen * TODO: full sized shift could be optimized but that would need 2374832d11c5SIlpo Järvinen * specialized skb free'er to handle frags without up-to-date nr_frags.
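 *
 * A minimal usage sketch (editorial addition, not part of the original
 * documentation); variable names are illustrative and skb is assumed to
 * carry paged data only, as required above:
 *
 *	int want = skb->len;
 *	int shifted = skb_shift(tgt, skb, want);
 *
 *	if (shifted == want) {
 *		... everything moved; unlink and free skb ...
 *	} else if (shifted > 0) {
 *		... partial shift; skb still owns the remaining bytes ...
 *	}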
2375832d11c5SIlpo Järvinen */ 2376832d11c5SIlpo Järvinen int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 2377832d11c5SIlpo Järvinen { 2378832d11c5SIlpo Järvinen int from, to, merge, todo; 2379832d11c5SIlpo Järvinen struct skb_frag_struct *fragfrom, *fragto; 2380832d11c5SIlpo Järvinen 2381832d11c5SIlpo Järvinen BUG_ON(shiftlen > skb->len); 2382832d11c5SIlpo Järvinen BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ 2383832d11c5SIlpo Järvinen 2384832d11c5SIlpo Järvinen todo = shiftlen; 2385832d11c5SIlpo Järvinen from = 0; 2386832d11c5SIlpo Järvinen to = skb_shinfo(tgt)->nr_frags; 2387832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2388832d11c5SIlpo Järvinen 2389832d11c5SIlpo Järvinen /* Actual merge is delayed until the point when we know we can 2390832d11c5SIlpo Järvinen * commit all, so that we don't have to undo partial changes 2391832d11c5SIlpo Järvinen */ 2392832d11c5SIlpo Järvinen if (!to || 2393ea2ab693SIan Campbell !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 2394ea2ab693SIan Campbell fragfrom->page_offset)) { 2395832d11c5SIlpo Järvinen merge = -1; 2396832d11c5SIlpo Järvinen } else { 2397832d11c5SIlpo Järvinen merge = to - 1; 2398832d11c5SIlpo Järvinen 23999e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 2400832d11c5SIlpo Järvinen if (todo < 0) { 2401832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || 2402832d11c5SIlpo Järvinen skb_prepare_for_shift(tgt)) 2403832d11c5SIlpo Järvinen return 0; 2404832d11c5SIlpo Järvinen 24059f782db3SIlpo Järvinen /* All previous frag pointers might be stale! */ 24069f782db3SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2407832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 2408832d11c5SIlpo Järvinen 24099e903e08SEric Dumazet skb_frag_size_add(fragto, shiftlen); 24109e903e08SEric Dumazet skb_frag_size_sub(fragfrom, shiftlen); 2411832d11c5SIlpo Järvinen fragfrom->page_offset += shiftlen; 2412832d11c5SIlpo Järvinen 2413832d11c5SIlpo Järvinen goto onlymerged; 2414832d11c5SIlpo Järvinen } 2415832d11c5SIlpo Järvinen 2416832d11c5SIlpo Järvinen from++; 2417832d11c5SIlpo Järvinen } 2418832d11c5SIlpo Järvinen 2419832d11c5SIlpo Järvinen /* Skip full, not-fitting skb to avoid expensive operations */ 2420832d11c5SIlpo Järvinen if ((shiftlen == skb->len) && 2421832d11c5SIlpo Järvinen (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2422832d11c5SIlpo Järvinen return 0; 2423832d11c5SIlpo Järvinen 2424832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2425832d11c5SIlpo Järvinen return 0; 2426832d11c5SIlpo Järvinen 2427832d11c5SIlpo Järvinen while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2428832d11c5SIlpo Järvinen if (to == MAX_SKB_FRAGS) 2429832d11c5SIlpo Järvinen return 0; 2430832d11c5SIlpo Järvinen 2431832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2432832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[to]; 2433832d11c5SIlpo Järvinen 24349e903e08SEric Dumazet if (todo >= skb_frag_size(fragfrom)) { 2435832d11c5SIlpo Järvinen *fragto = *fragfrom; 24369e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 2437832d11c5SIlpo Järvinen from++; 2438832d11c5SIlpo Järvinen to++; 2439832d11c5SIlpo Järvinen 2440832d11c5SIlpo Järvinen } else { 2441ea2ab693SIan Campbell __skb_frag_ref(fragfrom); 2442832d11c5SIlpo Järvinen fragto->page = fragfrom->page; 2443832d11c5SIlpo Järvinen fragto->page_offset = fragfrom->page_offset; 24449e903e08SEric Dumazet skb_frag_size_set(fragto, todo); 2445832d11c5SIlpo Järvinen 
2446832d11c5SIlpo Järvinen fragfrom->page_offset += todo; 24479e903e08SEric Dumazet skb_frag_size_sub(fragfrom, todo); 2448832d11c5SIlpo Järvinen todo = 0; 2449832d11c5SIlpo Järvinen 2450832d11c5SIlpo Järvinen to++; 2451832d11c5SIlpo Järvinen break; 2452832d11c5SIlpo Järvinen } 2453832d11c5SIlpo Järvinen } 2454832d11c5SIlpo Järvinen 2455832d11c5SIlpo Järvinen /* Ready to "commit" this state change to tgt */ 2456832d11c5SIlpo Järvinen skb_shinfo(tgt)->nr_frags = to; 2457832d11c5SIlpo Järvinen 2458832d11c5SIlpo Järvinen if (merge >= 0) { 2459832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[0]; 2460832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 2461832d11c5SIlpo Järvinen 24629e903e08SEric Dumazet skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 2463ea2ab693SIan Campbell __skb_frag_unref(fragfrom); 2464832d11c5SIlpo Järvinen } 2465832d11c5SIlpo Järvinen 2466832d11c5SIlpo Järvinen /* Reposition in the original skb */ 2467832d11c5SIlpo Järvinen to = 0; 2468832d11c5SIlpo Järvinen while (from < skb_shinfo(skb)->nr_frags) 2469832d11c5SIlpo Järvinen skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2470832d11c5SIlpo Järvinen skb_shinfo(skb)->nr_frags = to; 2471832d11c5SIlpo Järvinen 2472832d11c5SIlpo Järvinen BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2473832d11c5SIlpo Järvinen 2474832d11c5SIlpo Järvinen onlymerged: 2475832d11c5SIlpo Järvinen /* Most likely the tgt won't ever need its checksum anymore, skb on 2476832d11c5SIlpo Järvinen * the other hand might need it if it needs to be resent 2477832d11c5SIlpo Järvinen */ 2478832d11c5SIlpo Järvinen tgt->ip_summed = CHECKSUM_PARTIAL; 2479832d11c5SIlpo Järvinen skb->ip_summed = CHECKSUM_PARTIAL; 2480832d11c5SIlpo Järvinen 2481832d11c5SIlpo Järvinen /* Yak, is it really working this way? Some helper please? */ 2482832d11c5SIlpo Järvinen skb->len -= shiftlen; 2483832d11c5SIlpo Järvinen skb->data_len -= shiftlen; 2484832d11c5SIlpo Järvinen skb->truesize -= shiftlen; 2485832d11c5SIlpo Järvinen tgt->len += shiftlen; 2486832d11c5SIlpo Järvinen tgt->data_len += shiftlen; 2487832d11c5SIlpo Järvinen tgt->truesize += shiftlen; 2488832d11c5SIlpo Järvinen 2489832d11c5SIlpo Järvinen return shiftlen; 2490832d11c5SIlpo Järvinen } 2491832d11c5SIlpo Järvinen 2492677e90edSThomas Graf /** 2493677e90edSThomas Graf * skb_prepare_seq_read - Prepare a sequential read of skb data 2494677e90edSThomas Graf * @skb: the buffer to read 2495677e90edSThomas Graf * @from: lower offset of data to be read 2496677e90edSThomas Graf * @to: upper offset of data to be read 2497677e90edSThomas Graf * @st: state variable 2498677e90edSThomas Graf * 2499677e90edSThomas Graf * Initializes the specified state variable. Must be called before 2500677e90edSThomas Graf * invoking skb_seq_read() for the first time. 2501677e90edSThomas Graf */ 2502677e90edSThomas Graf void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 2503677e90edSThomas Graf unsigned int to, struct skb_seq_state *st) 2504677e90edSThomas Graf { 2505677e90edSThomas Graf st->lower_offset = from; 2506677e90edSThomas Graf st->upper_offset = to; 2507677e90edSThomas Graf st->root_skb = st->cur_skb = skb; 2508677e90edSThomas Graf st->frag_idx = st->stepped_offset = 0; 2509677e90edSThomas Graf st->frag_data = NULL; 2510677e90edSThomas Graf } 2511b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_prepare_seq_read); 2512677e90edSThomas Graf 2513677e90edSThomas Graf /** 2514677e90edSThomas Graf * skb_seq_read - Sequentially read skb data 2515677e90edSThomas Graf * @consumed: number of bytes consumed by the caller so far 2516677e90edSThomas Graf * @data: destination pointer for data to be returned 2517677e90edSThomas Graf * @st: state variable 2518677e90edSThomas Graf * 2519677e90edSThomas Graf * Reads a block of skb data at &consumed relative to the 2520677e90edSThomas Graf * lower offset specified to skb_prepare_seq_read(). Assigns 2521677e90edSThomas Graf * the head of the data block to &data and returns the length 2522677e90edSThomas Graf * of the block or 0 if the end of the skb data or the upper 2523677e90edSThomas Graf * offset has been reached. 2524677e90edSThomas Graf * 2525677e90edSThomas Graf * The caller is not required to consume all of the data 2526677e90edSThomas Graf * returned, i.e. &consumed is typically set to the number 2527677e90edSThomas Graf * of bytes already consumed and the next call to 2528677e90edSThomas Graf * skb_seq_read() will return the remaining part of the block. 2529677e90edSThomas Graf * 253025985edcSLucas De Marchi * Note 1: The size of each block of data returned can be arbitrary; 2531677e90edSThomas Graf * this limitation is the cost for zerocopy sequential 2532677e90edSThomas Graf * reads of potentially non-linear data. 2533677e90edSThomas Graf * 2534bc2cda1eSRandy Dunlap * Note 2: Fragment lists within fragments are not implemented 2535677e90edSThomas Graf * at the moment; state->root_skb could be replaced with 2536677e90edSThomas Graf * a stack for this purpose. 2537677e90edSThomas Graf */ 2538677e90edSThomas Graf unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 2539677e90edSThomas Graf struct skb_seq_state *st) 2540677e90edSThomas Graf { 2541677e90edSThomas Graf unsigned int block_limit, abs_offset = consumed + st->lower_offset; 2542677e90edSThomas Graf skb_frag_t *frag; 2543677e90edSThomas Graf 2544aeb193eaSWedson Almeida Filho if (unlikely(abs_offset >= st->upper_offset)) { 2545aeb193eaSWedson Almeida Filho if (st->frag_data) { 2546aeb193eaSWedson Almeida Filho kunmap_atomic(st->frag_data); 2547aeb193eaSWedson Almeida Filho st->frag_data = NULL; 2548aeb193eaSWedson Almeida Filho } 2549677e90edSThomas Graf return 0; 2550aeb193eaSWedson Almeida Filho } 2551677e90edSThomas Graf 2552677e90edSThomas Graf next_skb: 255395e3b24cSHerbert Xu block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 2554677e90edSThomas Graf 2555995b3379SThomas Chenault if (abs_offset < block_limit && !st->frag_data) { 255695e3b24cSHerbert Xu *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 2557677e90edSThomas Graf return block_limit - abs_offset; 2558677e90edSThomas Graf } 2559677e90edSThomas Graf 2560677e90edSThomas Graf if (st->frag_idx == 0 && !st->frag_data) 2561677e90edSThomas Graf st->stepped_offset += skb_headlen(st->cur_skb); 2562677e90edSThomas Graf 2563677e90edSThomas Graf while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2564677e90edSThomas Graf frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 25659e903e08SEric Dumazet block_limit = skb_frag_size(frag) + st->stepped_offset; 2566677e90edSThomas Graf 2567677e90edSThomas Graf if (abs_offset < block_limit) { 2568677e90edSThomas Graf if (!st->frag_data) 256951c56b00SEric Dumazet st->frag_data = kmap_atomic(skb_frag_page(frag)); 2570677e90edSThomas Graf 2571677e90edSThomas Graf *data = (u8 *) st->frag_data + frag->page_offset +
2572677e90edSThomas Graf (abs_offset - st->stepped_offset); 2573677e90edSThomas Graf 2574677e90edSThomas Graf return block_limit - abs_offset; 2575677e90edSThomas Graf } 2576677e90edSThomas Graf 2577677e90edSThomas Graf if (st->frag_data) { 257851c56b00SEric Dumazet kunmap_atomic(st->frag_data); 2579677e90edSThomas Graf st->frag_data = NULL; 2580677e90edSThomas Graf } 2581677e90edSThomas Graf 2582677e90edSThomas Graf st->frag_idx++; 25839e903e08SEric Dumazet st->stepped_offset += skb_frag_size(frag); 2584677e90edSThomas Graf } 2585677e90edSThomas Graf 25865b5a60daSOlaf Kirch if (st->frag_data) { 258751c56b00SEric Dumazet kunmap_atomic(st->frag_data); 25885b5a60daSOlaf Kirch st->frag_data = NULL; 25895b5a60daSOlaf Kirch } 25905b5a60daSOlaf Kirch 259121dc3301SDavid S. Miller if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 2592677e90edSThomas Graf st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 259395e3b24cSHerbert Xu st->frag_idx = 0; 2594677e90edSThomas Graf goto next_skb; 259571b3346dSShyam Iyer } else if (st->cur_skb->next) { 259671b3346dSShyam Iyer st->cur_skb = st->cur_skb->next; 259771b3346dSShyam Iyer st->frag_idx = 0; 2598677e90edSThomas Graf goto next_skb; 2599677e90edSThomas Graf } 2600677e90edSThomas Graf 2601677e90edSThomas Graf return 0; 2602677e90edSThomas Graf } 2603b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_seq_read); 2604677e90edSThomas Graf 2605677e90edSThomas Graf /** 2606677e90edSThomas Graf * skb_abort_seq_read - Abort a sequential read of skb data 2607677e90edSThomas Graf * @st: state variable 2608677e90edSThomas Graf * 2609677e90edSThomas Graf * Must be called if skb_seq_read() was not called until it 2610677e90edSThomas Graf * returned 0. 2611677e90edSThomas Graf */ 2612677e90edSThomas Graf void skb_abort_seq_read(struct skb_seq_state *st) 2613677e90edSThomas Graf { 2614677e90edSThomas Graf if (st->frag_data) 261551c56b00SEric Dumazet kunmap_atomic(st->frag_data); 2616677e90edSThomas Graf } 2617b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_abort_seq_read); 2618677e90edSThomas Graf 26193fc7e8a6SThomas Graf #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 26203fc7e8a6SThomas Graf 26213fc7e8a6SThomas Graf static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 26223fc7e8a6SThomas Graf struct ts_config *conf, 26233fc7e8a6SThomas Graf struct ts_state *state) 26243fc7e8a6SThomas Graf { 26253fc7e8a6SThomas Graf return skb_seq_read(offset, text, TS_SKB_CB(state)); 26263fc7e8a6SThomas Graf } 26273fc7e8a6SThomas Graf 26283fc7e8a6SThomas Graf static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 26293fc7e8a6SThomas Graf { 26303fc7e8a6SThomas Graf skb_abort_seq_read(TS_SKB_CB(state)); 26313fc7e8a6SThomas Graf } 26323fc7e8a6SThomas Graf 26333fc7e8a6SThomas Graf /** 26343fc7e8a6SThomas Graf * skb_find_text - Find a text pattern in skb data 26353fc7e8a6SThomas Graf * @skb: the buffer to look in 26363fc7e8a6SThomas Graf * @from: search offset 26373fc7e8a6SThomas Graf * @to: search limit 26383fc7e8a6SThomas Graf * @config: textsearch configuration 26393fc7e8a6SThomas Graf * @state: uninitialized textsearch state variable 26403fc7e8a6SThomas Graf * 26413fc7e8a6SThomas Graf * Finds a pattern in the skb data according to the specified 26423fc7e8a6SThomas Graf * textsearch configuration. Use textsearch_next() to retrieve 26433fc7e8a6SThomas Graf * subsequent occurrences of the pattern. Returns the offset 26443fc7e8a6SThomas Graf * to the first occurrence or UINT_MAX if no match was found. 
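 *
 * A minimal usage sketch (editorial addition, not part of the original
 * documentation); the "bm" algorithm and the pattern are only examples:
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("bm", "GET ", 4, GFP_KERNEL, TS_AUTOLOAD);
 *	if (IS_ERR(conf))
 *		... handle the error ...
 *	pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *	textsearch_destroy(conf);
 *	if (pos != UINT_MAX)
 *		... pattern found at offset pos from the start of skb data ...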
26453fc7e8a6SThomas Graf */ 26463fc7e8a6SThomas Graf unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 26473fc7e8a6SThomas Graf unsigned int to, struct ts_config *config, 26483fc7e8a6SThomas Graf struct ts_state *state) 26493fc7e8a6SThomas Graf { 2650f72b948dSPhil Oester unsigned int ret; 2651f72b948dSPhil Oester 26523fc7e8a6SThomas Graf config->get_next_block = skb_ts_get_next_block; 26533fc7e8a6SThomas Graf config->finish = skb_ts_finish; 26543fc7e8a6SThomas Graf 26553fc7e8a6SThomas Graf skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); 26563fc7e8a6SThomas Graf 2657f72b948dSPhil Oester ret = textsearch_find(config, state); 2658f72b948dSPhil Oester return (ret <= to - from ? ret : UINT_MAX); 26593fc7e8a6SThomas Graf } 2660b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_find_text); 26613fc7e8a6SThomas Graf 2662e89e9cf5SAnanda Raju /** 26632c53040fSBen Hutchings * skb_append_datato_frags - append the user data to a skb 2664e89e9cf5SAnanda Raju * @sk: sock structure 2665e89e9cf5SAnanda Raju * @skb: skb structure to be appended with user data. 2666e89e9cf5SAnanda Raju * @getfrag: callback function to be used for getting the user data 2667e89e9cf5SAnanda Raju * @from: pointer to user message iov 2668e89e9cf5SAnanda Raju * @length: length of the iov message 2669e89e9cf5SAnanda Raju * 2670e89e9cf5SAnanda Raju * Description: This procedure appends the user data in the fragment part 2671e89e9cf5SAnanda Raju * of the skb. If any page alloc fails, this procedure returns -ENOMEM. 2672e89e9cf5SAnanda Raju */ 2673e89e9cf5SAnanda Raju int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 2674dab9630fSMartin Waitz int (*getfrag)(void *from, char *to, int offset, 2675e89e9cf5SAnanda Raju int len, int odd, struct sk_buff *skb), 2676e89e9cf5SAnanda Raju void *from, int length) 2677e89e9cf5SAnanda Raju { 2678b2111724SEric Dumazet int frg_cnt = skb_shinfo(skb)->nr_frags; 2679b2111724SEric Dumazet int copy; 2680e89e9cf5SAnanda Raju int offset = 0; 2681e89e9cf5SAnanda Raju int ret; 2682b2111724SEric Dumazet struct page_frag *pfrag = &current->task_frag; 2683e89e9cf5SAnanda Raju 2684e89e9cf5SAnanda Raju do { 2685e89e9cf5SAnanda Raju /* Return error if we don't have space for new frag */ 2686e89e9cf5SAnanda Raju if (frg_cnt >= MAX_SKB_FRAGS) 2687b2111724SEric Dumazet return -EMSGSIZE; 2688e89e9cf5SAnanda Raju 2689b2111724SEric Dumazet if (!sk_page_frag_refill(sk, pfrag)) 2690e89e9cf5SAnanda Raju return -ENOMEM; 2691e89e9cf5SAnanda Raju 2692e89e9cf5SAnanda Raju /* copy the user data to page */ 2693b2111724SEric Dumazet copy = min_t(int, length, pfrag->size - pfrag->offset); 2694e89e9cf5SAnanda Raju 2695b2111724SEric Dumazet ret = getfrag(from, page_address(pfrag->page) + pfrag->offset, 2696e89e9cf5SAnanda Raju offset, copy, 0, skb); 2697e89e9cf5SAnanda Raju if (ret < 0) 2698e89e9cf5SAnanda Raju return -EFAULT; 2699e89e9cf5SAnanda Raju 2700e89e9cf5SAnanda Raju /* copy was successful so update the size parameters */ 2701b2111724SEric Dumazet skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset, 2702b2111724SEric Dumazet copy); 2703b2111724SEric Dumazet frg_cnt++; 2704b2111724SEric Dumazet pfrag->offset += copy; 2705b2111724SEric Dumazet get_page(pfrag->page); 2706b2111724SEric Dumazet 2707b2111724SEric Dumazet skb->truesize += copy; 2708b2111724SEric Dumazet atomic_add(copy, &sk->sk_wmem_alloc); 2709e89e9cf5SAnanda Raju skb->len += copy; 2710e89e9cf5SAnanda Raju skb->data_len += copy; 2711e89e9cf5SAnanda Raju offset += copy; 2712e89e9cf5SAnanda Raju length -= copy; 2713e89e9cf5SAnanda
Raju 2714e89e9cf5SAnanda Raju } while (length > 0); 2715e89e9cf5SAnanda Raju 2716e89e9cf5SAnanda Raju return 0; 2717e89e9cf5SAnanda Raju } 2718b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append_datato_frags); 2719e89e9cf5SAnanda Raju 2720cbb042f9SHerbert Xu /** 2721cbb042f9SHerbert Xu * skb_pull_rcsum - pull skb and update receive checksum 2722cbb042f9SHerbert Xu * @skb: buffer to update 2723cbb042f9SHerbert Xu * @len: length of data pulled 2724cbb042f9SHerbert Xu * 2725cbb042f9SHerbert Xu * This function performs an skb_pull on the packet and updates 2726fee54fa5SUrs Thuermann * the CHECKSUM_COMPLETE checksum. It should be used on 272784fa7933SPatrick McHardy * receive path processing instead of skb_pull unless you know 272884fa7933SPatrick McHardy * that the checksum difference is zero (e.g., a valid IP header) 272984fa7933SPatrick McHardy * or you are setting ip_summed to CHECKSUM_NONE. 2730cbb042f9SHerbert Xu */ 2731cbb042f9SHerbert Xu unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 2732cbb042f9SHerbert Xu { 2733cbb042f9SHerbert Xu BUG_ON(len > skb->len); 2734cbb042f9SHerbert Xu skb->len -= len; 2735cbb042f9SHerbert Xu BUG_ON(skb->len < skb->data_len); 2736cbb042f9SHerbert Xu skb_postpull_rcsum(skb, skb->data, len); 2737cbb042f9SHerbert Xu return skb->data += len; 2738cbb042f9SHerbert Xu } 2739f94691acSArnaldo Carvalho de Melo EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2740f94691acSArnaldo Carvalho de Melo 2741f4c50d99SHerbert Xu /** 2742f4c50d99SHerbert Xu * skb_segment - Perform protocol segmentation on skb. 2743f4c50d99SHerbert Xu * @skb: buffer to segment 2744576a30ebSHerbert Xu * @features: features for the output path (see dev->features) 2745f4c50d99SHerbert Xu * 2746f4c50d99SHerbert Xu * This function performs segmentation on the given skb. It returns 27474c821d75SBen Hutchings * a pointer to the first in a list of new skbs for the segments. 27484c821d75SBen Hutchings * In case of error it returns ERR_PTR(err). 
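 *
 * A minimal usage sketch (editorial addition, not part of the original
 * documentation), roughly how a transmit-path caller might consume the
 * returned segment list:
 *
 *	struct sk_buff *segs, *next;
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		... drop skb and bail out ...
 *	consume_skb(skb);
 *	while (segs) {
 *		next = segs->next;
 *		segs->next = NULL;
 *		... hand segs to the device ...
 *		segs = next;
 *	}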
2749f4c50d99SHerbert Xu */ 2750c8f44affSMichał Mirosław struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) 2751f4c50d99SHerbert Xu { 2752f4c50d99SHerbert Xu struct sk_buff *segs = NULL; 2753f4c50d99SHerbert Xu struct sk_buff *tail = NULL; 275489319d38SHerbert Xu struct sk_buff *fskb = skb_shinfo(skb)->frag_list; 2755f4c50d99SHerbert Xu unsigned int mss = skb_shinfo(skb)->gso_size; 275698e399f8SArnaldo Carvalho de Melo unsigned int doffset = skb->data - skb_mac_header(skb); 2757f4c50d99SHerbert Xu unsigned int offset = doffset; 275868c33163SPravin B Shelar unsigned int tnl_hlen = skb_tnl_header_len(skb); 2759f4c50d99SHerbert Xu unsigned int headroom; 2760f4c50d99SHerbert Xu unsigned int len; 2761ec5f0615SPravin B Shelar __be16 proto; 2762ec5f0615SPravin B Shelar bool csum; 276304ed3e74SMichał Mirosław int sg = !!(features & NETIF_F_SG); 2764f4c50d99SHerbert Xu int nfrags = skb_shinfo(skb)->nr_frags; 2765f4c50d99SHerbert Xu int err = -ENOMEM; 2766f4c50d99SHerbert Xu int i = 0; 2767f4c50d99SHerbert Xu int pos; 2768f4c50d99SHerbert Xu 2769ec5f0615SPravin B Shelar proto = skb_network_protocol(skb); 2770ec5f0615SPravin B Shelar if (unlikely(!proto)) 2771ec5f0615SPravin B Shelar return ERR_PTR(-EINVAL); 2772ec5f0615SPravin B Shelar 2773ec5f0615SPravin B Shelar csum = !!can_checksum_protocol(features, proto); 2774f4c50d99SHerbert Xu __skb_push(skb, doffset); 2775f4c50d99SHerbert Xu headroom = skb_headroom(skb); 2776f4c50d99SHerbert Xu pos = skb_headlen(skb); 2777f4c50d99SHerbert Xu 2778f4c50d99SHerbert Xu do { 2779f4c50d99SHerbert Xu struct sk_buff *nskb; 2780f4c50d99SHerbert Xu skb_frag_t *frag; 2781c8884eddSHerbert Xu int hsize; 2782f4c50d99SHerbert Xu int size; 2783f4c50d99SHerbert Xu 2784f4c50d99SHerbert Xu len = skb->len - offset; 2785f4c50d99SHerbert Xu if (len > mss) 2786f4c50d99SHerbert Xu len = mss; 2787f4c50d99SHerbert Xu 2788f4c50d99SHerbert Xu hsize = skb_headlen(skb) - offset; 2789f4c50d99SHerbert Xu if (hsize < 0) 2790f4c50d99SHerbert Xu hsize = 0; 2791c8884eddSHerbert Xu if (hsize > len || !sg) 2792c8884eddSHerbert Xu hsize = len; 2793f4c50d99SHerbert Xu 279489319d38SHerbert Xu if (!hsize && i >= nfrags) { 279589319d38SHerbert Xu BUG_ON(fskb->len != len); 279689319d38SHerbert Xu 279789319d38SHerbert Xu pos += len; 279889319d38SHerbert Xu nskb = skb_clone(fskb, GFP_ATOMIC); 279989319d38SHerbert Xu fskb = fskb->next; 280089319d38SHerbert Xu 2801f4c50d99SHerbert Xu if (unlikely(!nskb)) 2802f4c50d99SHerbert Xu goto err; 2803f4c50d99SHerbert Xu 2804ec47ea82SAlexander Duyck hsize = skb_end_offset(nskb); 280589319d38SHerbert Xu if (skb_cow_head(nskb, doffset + headroom)) { 280689319d38SHerbert Xu kfree_skb(nskb); 280789319d38SHerbert Xu goto err; 280889319d38SHerbert Xu } 280989319d38SHerbert Xu 2810ec47ea82SAlexander Duyck nskb->truesize += skb_end_offset(nskb) - hsize; 281189319d38SHerbert Xu skb_release_head_state(nskb); 281289319d38SHerbert Xu __skb_push(nskb, doffset); 281389319d38SHerbert Xu } else { 2814c93bdd0eSMel Gorman nskb = __alloc_skb(hsize + doffset + headroom, 2815c93bdd0eSMel Gorman GFP_ATOMIC, skb_alloc_rx_flag(skb), 2816c93bdd0eSMel Gorman NUMA_NO_NODE); 281789319d38SHerbert Xu 281889319d38SHerbert Xu if (unlikely(!nskb)) 281989319d38SHerbert Xu goto err; 282089319d38SHerbert Xu 282189319d38SHerbert Xu skb_reserve(nskb, headroom); 282289319d38SHerbert Xu __skb_put(nskb, doffset); 282389319d38SHerbert Xu } 282489319d38SHerbert Xu 2825f4c50d99SHerbert Xu if (segs) 2826f4c50d99SHerbert Xu tail->next = nskb; 2827f4c50d99SHerbert Xu else 
2828f4c50d99SHerbert Xu segs = nskb; 2829f4c50d99SHerbert Xu tail = nskb; 2830f4c50d99SHerbert Xu 28316f85a124SHerbert Xu __copy_skb_header(nskb, skb); 2832f4c50d99SHerbert Xu nskb->mac_len = skb->mac_len; 2833f4c50d99SHerbert Xu 28343d3be433SEric Dumazet /* nskb and skb might have different headroom */ 28353d3be433SEric Dumazet if (nskb->ip_summed == CHECKSUM_PARTIAL) 28363d3be433SEric Dumazet nskb->csum_start += skb_headroom(nskb) - headroom; 28373d3be433SEric Dumazet 2838459a98edSArnaldo Carvalho de Melo skb_reset_mac_header(nskb); 2839ddc7b8e3SArnaldo Carvalho de Melo skb_set_network_header(nskb, skb->mac_len); 2840b0e380b1SArnaldo Carvalho de Melo nskb->transport_header = (nskb->network_header + 2841b0e380b1SArnaldo Carvalho de Melo skb_network_header_len(skb)); 284268c33163SPravin B Shelar 284368c33163SPravin B Shelar skb_copy_from_linear_data_offset(skb, -tnl_hlen, 284468c33163SPravin B Shelar nskb->data - tnl_hlen, 284568c33163SPravin B Shelar doffset + tnl_hlen); 284689319d38SHerbert Xu 28472f181855SHerbert Xu if (fskb != skb_shinfo(skb)->frag_list) 28481cdbcb79SSimon Horman goto perform_csum_check; 284989319d38SHerbert Xu 2850f4c50d99SHerbert Xu if (!sg) { 28516f85a124SHerbert Xu nskb->ip_summed = CHECKSUM_NONE; 2852f4c50d99SHerbert Xu nskb->csum = skb_copy_and_csum_bits(skb, offset, 2853f4c50d99SHerbert Xu skb_put(nskb, len), 2854f4c50d99SHerbert Xu len, 0); 2855f4c50d99SHerbert Xu continue; 2856f4c50d99SHerbert Xu } 2857f4c50d99SHerbert Xu 2858f4c50d99SHerbert Xu frag = skb_shinfo(nskb)->frags; 2859f4c50d99SHerbert Xu 2860d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, offset, 2861d626f62bSArnaldo Carvalho de Melo skb_put(nskb, hsize), hsize); 2862f4c50d99SHerbert Xu 2863c9af6db4SPravin B Shelar skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; 2864cef401deSEric Dumazet 286589319d38SHerbert Xu while (pos < offset + len && i < nfrags) { 2866f4c50d99SHerbert Xu *frag = skb_shinfo(skb)->frags[i]; 2867ea2ab693SIan Campbell __skb_frag_ref(frag); 28689e903e08SEric Dumazet size = skb_frag_size(frag); 2869f4c50d99SHerbert Xu 2870f4c50d99SHerbert Xu if (pos < offset) { 2871f4c50d99SHerbert Xu frag->page_offset += offset - pos; 28729e903e08SEric Dumazet skb_frag_size_sub(frag, offset - pos); 2873f4c50d99SHerbert Xu } 2874f4c50d99SHerbert Xu 287589319d38SHerbert Xu skb_shinfo(nskb)->nr_frags++; 2876f4c50d99SHerbert Xu 2877f4c50d99SHerbert Xu if (pos + size <= offset + len) { 2878f4c50d99SHerbert Xu i++; 2879f4c50d99SHerbert Xu pos += size; 2880f4c50d99SHerbert Xu } else { 28819e903e08SEric Dumazet skb_frag_size_sub(frag, pos + size - (offset + len)); 288289319d38SHerbert Xu goto skip_fraglist; 2883f4c50d99SHerbert Xu } 2884f4c50d99SHerbert Xu 2885f4c50d99SHerbert Xu frag++; 2886f4c50d99SHerbert Xu } 2887f4c50d99SHerbert Xu 288889319d38SHerbert Xu if (pos < offset + len) { 288989319d38SHerbert Xu struct sk_buff *fskb2 = fskb; 289089319d38SHerbert Xu 289189319d38SHerbert Xu BUG_ON(pos + fskb->len != offset + len); 289289319d38SHerbert Xu 289389319d38SHerbert Xu pos += fskb->len; 289489319d38SHerbert Xu fskb = fskb->next; 289589319d38SHerbert Xu 289689319d38SHerbert Xu if (fskb2->next) { 289789319d38SHerbert Xu fskb2 = skb_clone(fskb2, GFP_ATOMIC); 289889319d38SHerbert Xu if (!fskb2) 289989319d38SHerbert Xu goto err; 290089319d38SHerbert Xu } else 290189319d38SHerbert Xu skb_get(fskb2); 290289319d38SHerbert Xu 2903fbb398a8SDavid S. 
Miller SKB_FRAG_ASSERT(nskb); 290489319d38SHerbert Xu skb_shinfo(nskb)->frag_list = fskb2; 290589319d38SHerbert Xu } 290689319d38SHerbert Xu 290789319d38SHerbert Xu skip_fraglist: 2908f4c50d99SHerbert Xu nskb->data_len = len - hsize; 2909f4c50d99SHerbert Xu nskb->len += nskb->data_len; 2910f4c50d99SHerbert Xu nskb->truesize += nskb->data_len; 2911ec5f0615SPravin B Shelar 29121cdbcb79SSimon Horman perform_csum_check: 2913ec5f0615SPravin B Shelar if (!csum) { 2914ec5f0615SPravin B Shelar nskb->csum = skb_checksum(nskb, doffset, 2915ec5f0615SPravin B Shelar nskb->len - doffset, 0); 2916ec5f0615SPravin B Shelar nskb->ip_summed = CHECKSUM_NONE; 2917ec5f0615SPravin B Shelar } 2918f4c50d99SHerbert Xu } while ((offset += len) < skb->len); 2919f4c50d99SHerbert Xu 2920f4c50d99SHerbert Xu return segs; 2921f4c50d99SHerbert Xu 2922f4c50d99SHerbert Xu err: 2923f4c50d99SHerbert Xu while ((skb = segs)) { 2924f4c50d99SHerbert Xu segs = skb->next; 2925b08d5840SPatrick McHardy kfree_skb(skb); 2926f4c50d99SHerbert Xu } 2927f4c50d99SHerbert Xu return ERR_PTR(err); 2928f4c50d99SHerbert Xu } 2929f4c50d99SHerbert Xu EXPORT_SYMBOL_GPL(skb_segment); 2930f4c50d99SHerbert Xu 293171d93b39SHerbert Xu int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 293271d93b39SHerbert Xu { 293371d93b39SHerbert Xu struct sk_buff *p = *head; 293471d93b39SHerbert Xu struct sk_buff *nskb; 29359aaa156cSHerbert Xu struct skb_shared_info *skbinfo = skb_shinfo(skb); 29369aaa156cSHerbert Xu struct skb_shared_info *pinfo = skb_shinfo(p); 293771d93b39SHerbert Xu unsigned int headroom; 293886911732SHerbert Xu unsigned int len = skb_gro_len(skb); 293967147ba9SHerbert Xu unsigned int offset = skb_gro_offset(skb); 294067147ba9SHerbert Xu unsigned int headlen = skb_headlen(skb); 2941715dc1f3SEric Dumazet unsigned int delta_truesize; 294271d93b39SHerbert Xu 294386911732SHerbert Xu if (p->len + len >= 65536) 294471d93b39SHerbert Xu return -E2BIG; 294571d93b39SHerbert Xu 29469aaa156cSHerbert Xu if (pinfo->frag_list) 294771d93b39SHerbert Xu goto merge; 294867147ba9SHerbert Xu else if (headlen <= offset) { 294942da6994SHerbert Xu skb_frag_t *frag; 295066e92fcfSHerbert Xu skb_frag_t *frag2; 29519aaa156cSHerbert Xu int i = skbinfo->nr_frags; 29529aaa156cSHerbert Xu int nr_frags = pinfo->nr_frags + i; 295342da6994SHerbert Xu 295466e92fcfSHerbert Xu offset -= headlen; 295566e92fcfSHerbert Xu 295666e92fcfSHerbert Xu if (nr_frags > MAX_SKB_FRAGS) 295781705ad1SHerbert Xu return -E2BIG; 295881705ad1SHerbert Xu 29599aaa156cSHerbert Xu pinfo->nr_frags = nr_frags; 29609aaa156cSHerbert Xu skbinfo->nr_frags = 0; 2961f5572068SHerbert Xu 29629aaa156cSHerbert Xu frag = pinfo->frags + nr_frags; 29639aaa156cSHerbert Xu frag2 = skbinfo->frags + i; 296466e92fcfSHerbert Xu do { 296566e92fcfSHerbert Xu *--frag = *--frag2; 296666e92fcfSHerbert Xu } while (--i); 296766e92fcfSHerbert Xu 296866e92fcfSHerbert Xu frag->page_offset += offset; 29699e903e08SEric Dumazet skb_frag_size_sub(frag, offset); 297066e92fcfSHerbert Xu 2971715dc1f3SEric Dumazet /* all fragments truesize : remove (head size + sk_buff) */ 2972ec47ea82SAlexander Duyck delta_truesize = skb->truesize - 2973ec47ea82SAlexander Duyck SKB_TRUESIZE(skb_end_offset(skb)); 2974715dc1f3SEric Dumazet 2975f5572068SHerbert Xu skb->truesize -= skb->data_len; 2976f5572068SHerbert Xu skb->len -= skb->data_len; 2977f5572068SHerbert Xu skb->data_len = 0; 2978f5572068SHerbert Xu 2979715dc1f3SEric Dumazet NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 29805d38a079SHerbert Xu goto done; 2981d7e8883cSEric Dumazet } else if 
(skb->head_frag) { 2982d7e8883cSEric Dumazet int nr_frags = pinfo->nr_frags; 2983d7e8883cSEric Dumazet skb_frag_t *frag = pinfo->frags + nr_frags; 2984d7e8883cSEric Dumazet struct page *page = virt_to_head_page(skb->head); 2985d7e8883cSEric Dumazet unsigned int first_size = headlen - offset; 2986d7e8883cSEric Dumazet unsigned int first_offset; 2987d7e8883cSEric Dumazet 2988d7e8883cSEric Dumazet if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 2989d7e8883cSEric Dumazet return -E2BIG; 2990d7e8883cSEric Dumazet 2991d7e8883cSEric Dumazet first_offset = skb->data - 2992d7e8883cSEric Dumazet (unsigned char *)page_address(page) + 2993d7e8883cSEric Dumazet offset; 2994d7e8883cSEric Dumazet 2995d7e8883cSEric Dumazet pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 2996d7e8883cSEric Dumazet 2997d7e8883cSEric Dumazet frag->page.p = page; 2998d7e8883cSEric Dumazet frag->page_offset = first_offset; 2999d7e8883cSEric Dumazet skb_frag_size_set(frag, first_size); 3000d7e8883cSEric Dumazet 3001d7e8883cSEric Dumazet memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 3002d7e8883cSEric Dumazet /* We dont need to clear skbinfo->nr_frags here */ 3003d7e8883cSEric Dumazet 3004715dc1f3SEric Dumazet delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 3005d7e8883cSEric Dumazet NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 3006d7e8883cSEric Dumazet goto done; 300769c0cab1SHerbert Xu } else if (skb_gro_len(p) != pinfo->gso_size) 300869c0cab1SHerbert Xu return -E2BIG; 300971d93b39SHerbert Xu 301071d93b39SHerbert Xu headroom = skb_headroom(p); 30113d3be433SEric Dumazet nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); 301271d93b39SHerbert Xu if (unlikely(!nskb)) 301371d93b39SHerbert Xu return -ENOMEM; 301471d93b39SHerbert Xu 301571d93b39SHerbert Xu __copy_skb_header(nskb, p); 301671d93b39SHerbert Xu nskb->mac_len = p->mac_len; 301771d93b39SHerbert Xu 301871d93b39SHerbert Xu skb_reserve(nskb, headroom); 301986911732SHerbert Xu __skb_put(nskb, skb_gro_offset(p)); 302071d93b39SHerbert Xu 302186911732SHerbert Xu skb_set_mac_header(nskb, skb_mac_header(p) - p->data); 302271d93b39SHerbert Xu skb_set_network_header(nskb, skb_network_offset(p)); 302371d93b39SHerbert Xu skb_set_transport_header(nskb, skb_transport_offset(p)); 302471d93b39SHerbert Xu 302586911732SHerbert Xu __skb_pull(p, skb_gro_offset(p)); 302686911732SHerbert Xu memcpy(skb_mac_header(nskb), skb_mac_header(p), 302786911732SHerbert Xu p->data - skb_mac_header(p)); 302871d93b39SHerbert Xu 302971d93b39SHerbert Xu skb_shinfo(nskb)->frag_list = p; 30309aaa156cSHerbert Xu skb_shinfo(nskb)->gso_size = pinfo->gso_size; 3031622e0ca1SHerbert Xu pinfo->gso_size = 0; 303271d93b39SHerbert Xu skb_header_release(p); 3033c3c7c254SEric Dumazet NAPI_GRO_CB(nskb)->last = p; 303471d93b39SHerbert Xu 303571d93b39SHerbert Xu nskb->data_len += p->len; 3036de8261c2SEric Dumazet nskb->truesize += p->truesize; 303771d93b39SHerbert Xu nskb->len += p->len; 303871d93b39SHerbert Xu 303971d93b39SHerbert Xu *head = nskb; 304071d93b39SHerbert Xu nskb->next = p->next; 304171d93b39SHerbert Xu p->next = NULL; 304271d93b39SHerbert Xu 304371d93b39SHerbert Xu p = nskb; 304471d93b39SHerbert Xu 304571d93b39SHerbert Xu merge: 3046715dc1f3SEric Dumazet delta_truesize = skb->truesize; 304767147ba9SHerbert Xu if (offset > headlen) { 3048d1dc7abfSMichal Schmidt unsigned int eat = offset - headlen; 3049d1dc7abfSMichal Schmidt 3050d1dc7abfSMichal Schmidt skbinfo->frags[0].page_offset += eat; 30519e903e08SEric Dumazet 
skb_frag_size_sub(&skbinfo->frags[0], eat); 3052d1dc7abfSMichal Schmidt skb->data_len -= eat; 3053d1dc7abfSMichal Schmidt skb->len -= eat; 305467147ba9SHerbert Xu offset = headlen; 305556035022SHerbert Xu } 305656035022SHerbert Xu 305767147ba9SHerbert Xu __skb_pull(skb, offset); 305856035022SHerbert Xu 3059c3c7c254SEric Dumazet NAPI_GRO_CB(p)->last->next = skb; 3060c3c7c254SEric Dumazet NAPI_GRO_CB(p)->last = skb; 306171d93b39SHerbert Xu skb_header_release(skb); 306271d93b39SHerbert Xu 30635d38a079SHerbert Xu done: 30645d38a079SHerbert Xu NAPI_GRO_CB(p)->count++; 306537fe4732SHerbert Xu p->data_len += len; 3066715dc1f3SEric Dumazet p->truesize += delta_truesize; 306737fe4732SHerbert Xu p->len += len; 306871d93b39SHerbert Xu 306971d93b39SHerbert Xu NAPI_GRO_CB(skb)->same_flow = 1; 307071d93b39SHerbert Xu return 0; 307171d93b39SHerbert Xu } 307271d93b39SHerbert Xu EXPORT_SYMBOL_GPL(skb_gro_receive); 307371d93b39SHerbert Xu 30741da177e4SLinus Torvalds void __init skb_init(void) 30751da177e4SLinus Torvalds { 30761da177e4SLinus Torvalds skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 30771da177e4SLinus Torvalds sizeof(struct sk_buff), 30781da177e4SLinus Torvalds 0, 3079e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 308020c2df83SPaul Mundt NULL); 3081d179cd12SDavid S. Miller skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 3082d179cd12SDavid S. Miller (2*sizeof(struct sk_buff)) + 3083d179cd12SDavid S. Miller sizeof(atomic_t), 3084d179cd12SDavid S. Miller 0, 3085e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 308620c2df83SPaul Mundt NULL); 30871da177e4SLinus Torvalds } 30881da177e4SLinus Torvalds 3089716ea3a7SDavid Howells /** 3090716ea3a7SDavid Howells * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 3091716ea3a7SDavid Howells * @skb: Socket buffer containing the buffers to be mapped 3092716ea3a7SDavid Howells * @sg: The scatter-gather list to map into 3093716ea3a7SDavid Howells * @offset: The offset into the buffer's contents to start mapping 3094716ea3a7SDavid Howells * @len: Length of buffer space to be mapped 3095716ea3a7SDavid Howells * 3096716ea3a7SDavid Howells * Fill the specified scatter-gather list with mappings/pointers into a 3097716ea3a7SDavid Howells * region of the buffer space attached to a socket buffer. 3098716ea3a7SDavid Howells */ 309951c739d1SDavid S. Miller static int 310051c739d1SDavid S. Miller __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3101716ea3a7SDavid Howells { 31021a028e50SDavid S. Miller int start = skb_headlen(skb); 31031a028e50SDavid S. Miller int i, copy = start - offset; 3104fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 3105716ea3a7SDavid Howells int elt = 0; 3106716ea3a7SDavid Howells 3107716ea3a7SDavid Howells if (copy > 0) { 3108716ea3a7SDavid Howells if (copy > len) 3109716ea3a7SDavid Howells copy = len; 3110642f1490SJens Axboe sg_set_buf(sg, skb->data + offset, copy); 3111716ea3a7SDavid Howells elt++; 3112716ea3a7SDavid Howells if ((len -= copy) == 0) 3113716ea3a7SDavid Howells return elt; 3114716ea3a7SDavid Howells offset += copy; 3115716ea3a7SDavid Howells } 3116716ea3a7SDavid Howells 3117716ea3a7SDavid Howells for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 31181a028e50SDavid S. Miller int end; 3119716ea3a7SDavid Howells 3120547b792cSIlpo Järvinen WARN_ON(start > offset + len); 31211a028e50SDavid S. 
Miller 31229e903e08SEric Dumazet end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3123716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 3124716ea3a7SDavid Howells skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3125716ea3a7SDavid Howells 3126716ea3a7SDavid Howells if (copy > len) 3127716ea3a7SDavid Howells copy = len; 3128ea2ab693SIan Campbell sg_set_page(&sg[elt], skb_frag_page(frag), copy, 3129642f1490SJens Axboe frag->page_offset+offset-start); 3130716ea3a7SDavid Howells elt++; 3131716ea3a7SDavid Howells if (!(len -= copy)) 3132716ea3a7SDavid Howells return elt; 3133716ea3a7SDavid Howells offset += copy; 3134716ea3a7SDavid Howells } 31351a028e50SDavid S. Miller start = end; 3136716ea3a7SDavid Howells } 3137716ea3a7SDavid Howells 3138fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 31391a028e50SDavid S. Miller int end; 3140716ea3a7SDavid Howells 3141547b792cSIlpo Järvinen WARN_ON(start > offset + len); 31421a028e50SDavid S. Miller 3143fbb398a8SDavid S. Miller end = start + frag_iter->len; 3144716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 3145716ea3a7SDavid Howells if (copy > len) 3146716ea3a7SDavid Howells copy = len; 3147fbb398a8SDavid S. Miller elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, 314851c739d1SDavid S. Miller copy); 3149716ea3a7SDavid Howells if ((len -= copy) == 0) 3150716ea3a7SDavid Howells return elt; 3151716ea3a7SDavid Howells offset += copy; 3152716ea3a7SDavid Howells } 31531a028e50SDavid S. Miller start = end; 3154716ea3a7SDavid Howells } 3155716ea3a7SDavid Howells BUG_ON(len); 3156716ea3a7SDavid Howells return elt; 3157716ea3a7SDavid Howells } 3158716ea3a7SDavid Howells 315951c739d1SDavid S. Miller int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 316051c739d1SDavid S. Miller { 316151c739d1SDavid S. Miller int nsg = __skb_to_sgvec(skb, sg, offset, len); 316251c739d1SDavid S. Miller 3163c46f2334SJens Axboe sg_mark_end(&sg[nsg - 1]); 316451c739d1SDavid S. Miller 316551c739d1SDavid S. Miller return nsg; 316651c739d1SDavid S. Miller } 3167b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_to_sgvec); 316851c739d1SDavid S. Miller 3169716ea3a7SDavid Howells /** 3170716ea3a7SDavid Howells * skb_cow_data - Check that a socket buffer's data buffers are writable 3171716ea3a7SDavid Howells * @skb: The socket buffer to check. 3172716ea3a7SDavid Howells * @tailbits: Amount of trailing space to be added 3173716ea3a7SDavid Howells * @trailer: Returned pointer to the skb where the @tailbits space begins 3174716ea3a7SDavid Howells * 3175716ea3a7SDavid Howells * Make sure that the data buffers attached to a socket buffer are 3176716ea3a7SDavid Howells * writable. If they are not, private copies are made of the data buffers 3177716ea3a7SDavid Howells * and the socket buffer is set to use these instead. 3178716ea3a7SDavid Howells * 3179716ea3a7SDavid Howells * If @tailbits is given, make sure that there is space to write @tailbits 3180716ea3a7SDavid Howells * bytes of data beyond current end of socket buffer. @trailer will be 3181716ea3a7SDavid Howells * set to point to the skb in which this space begins. 3182716ea3a7SDavid Howells * 3183716ea3a7SDavid Howells * The number of scatterlist elements required to completely map the 3184716ea3a7SDavid Howells * COW'd and extended socket buffer will be returned. 
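 *
 * A minimal usage sketch (editorial addition, not part of the original
 * documentation), in the style of a transform that needs a writable buffer
 * plus a scatterlist; "sg" is assumed to be an array with room for the
 * returned number of elements and "tailbits" is caller-chosen:
 *
 *	struct sk_buff *trailer;
 *	int nfrags;
 *
 *	nfrags = skb_cow_data(skb, tailbits, &trailer);
 *	if (nfrags < 0)
 *		return nfrags;
 *	sg_init_table(sg, nfrags);
 *	skb_to_sgvec(skb, sg, 0, skb->len);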
3185716ea3a7SDavid Howells */ 3186716ea3a7SDavid Howells int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 3187716ea3a7SDavid Howells { 3188716ea3a7SDavid Howells int copyflag; 3189716ea3a7SDavid Howells int elt; 3190716ea3a7SDavid Howells struct sk_buff *skb1, **skb_p; 3191716ea3a7SDavid Howells 3192716ea3a7SDavid Howells /* If skb is cloned or its head is paged, reallocate 3193716ea3a7SDavid Howells * head pulling out all the pages (pages are considered not writable 3194716ea3a7SDavid Howells * at the moment even if they are anonymous). 3195716ea3a7SDavid Howells */ 3196716ea3a7SDavid Howells if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 3197716ea3a7SDavid Howells __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) 3198716ea3a7SDavid Howells return -ENOMEM; 3199716ea3a7SDavid Howells 3200716ea3a7SDavid Howells /* Easy case. Most of packets will go this way. */ 320121dc3301SDavid S. Miller if (!skb_has_frag_list(skb)) { 3202716ea3a7SDavid Howells /* A little of trouble, not enough of space for trailer. 3203716ea3a7SDavid Howells * This should not happen, when stack is tuned to generate 3204716ea3a7SDavid Howells * good frames. OK, on miss we reallocate and reserve even more 3205716ea3a7SDavid Howells * space, 128 bytes is fair. */ 3206716ea3a7SDavid Howells 3207716ea3a7SDavid Howells if (skb_tailroom(skb) < tailbits && 3208716ea3a7SDavid Howells pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 3209716ea3a7SDavid Howells return -ENOMEM; 3210716ea3a7SDavid Howells 3211716ea3a7SDavid Howells /* Voila! */ 3212716ea3a7SDavid Howells *trailer = skb; 3213716ea3a7SDavid Howells return 1; 3214716ea3a7SDavid Howells } 3215716ea3a7SDavid Howells 3216716ea3a7SDavid Howells /* Misery. We are in troubles, going to mincer fragments... */ 3217716ea3a7SDavid Howells 3218716ea3a7SDavid Howells elt = 1; 3219716ea3a7SDavid Howells skb_p = &skb_shinfo(skb)->frag_list; 3220716ea3a7SDavid Howells copyflag = 0; 3221716ea3a7SDavid Howells 3222716ea3a7SDavid Howells while ((skb1 = *skb_p) != NULL) { 3223716ea3a7SDavid Howells int ntail = 0; 3224716ea3a7SDavid Howells 3225716ea3a7SDavid Howells /* The fragment is partially pulled by someone, 3226716ea3a7SDavid Howells * this can happen on input. Copy it and everything 3227716ea3a7SDavid Howells * after it. */ 3228716ea3a7SDavid Howells 3229716ea3a7SDavid Howells if (skb_shared(skb1)) 3230716ea3a7SDavid Howells copyflag = 1; 3231716ea3a7SDavid Howells 3232716ea3a7SDavid Howells /* If the skb is the last, worry about trailer. */ 3233716ea3a7SDavid Howells 3234716ea3a7SDavid Howells if (skb1->next == NULL && tailbits) { 3235716ea3a7SDavid Howells if (skb_shinfo(skb1)->nr_frags || 323621dc3301SDavid S. Miller skb_has_frag_list(skb1) || 3237716ea3a7SDavid Howells skb_tailroom(skb1) < tailbits) 3238716ea3a7SDavid Howells ntail = tailbits + 128; 3239716ea3a7SDavid Howells } 3240716ea3a7SDavid Howells 3241716ea3a7SDavid Howells if (copyflag || 3242716ea3a7SDavid Howells skb_cloned(skb1) || 3243716ea3a7SDavid Howells ntail || 3244716ea3a7SDavid Howells skb_shinfo(skb1)->nr_frags || 324521dc3301SDavid S. Miller skb_has_frag_list(skb1)) { 3246716ea3a7SDavid Howells struct sk_buff *skb2; 3247716ea3a7SDavid Howells 3248716ea3a7SDavid Howells /* Fuck, we are miserable poor guys... 
*/ 3249716ea3a7SDavid Howells if (ntail == 0) 3250716ea3a7SDavid Howells skb2 = skb_copy(skb1, GFP_ATOMIC); 3251716ea3a7SDavid Howells else 3252716ea3a7SDavid Howells skb2 = skb_copy_expand(skb1, 3253716ea3a7SDavid Howells skb_headroom(skb1), 3254716ea3a7SDavid Howells ntail, 3255716ea3a7SDavid Howells GFP_ATOMIC); 3256716ea3a7SDavid Howells if (unlikely(skb2 == NULL)) 3257716ea3a7SDavid Howells return -ENOMEM; 3258716ea3a7SDavid Howells 3259716ea3a7SDavid Howells if (skb1->sk) 3260716ea3a7SDavid Howells skb_set_owner_w(skb2, skb1->sk); 3261716ea3a7SDavid Howells 3262716ea3a7SDavid Howells /* Looking around. Are we still alive? 3263716ea3a7SDavid Howells * OK, link new skb, drop old one */ 3264716ea3a7SDavid Howells 3265716ea3a7SDavid Howells skb2->next = skb1->next; 3266716ea3a7SDavid Howells *skb_p = skb2; 3267716ea3a7SDavid Howells kfree_skb(skb1); 3268716ea3a7SDavid Howells skb1 = skb2; 3269716ea3a7SDavid Howells } 3270716ea3a7SDavid Howells elt++; 3271716ea3a7SDavid Howells *trailer = skb1; 3272716ea3a7SDavid Howells skb_p = &skb1->next; 3273716ea3a7SDavid Howells } 3274716ea3a7SDavid Howells 3275716ea3a7SDavid Howells return elt; 3276716ea3a7SDavid Howells } 3277b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_cow_data); 3278716ea3a7SDavid Howells 3279b1faf566SEric Dumazet static void sock_rmem_free(struct sk_buff *skb) 3280b1faf566SEric Dumazet { 3281b1faf566SEric Dumazet struct sock *sk = skb->sk; 3282b1faf566SEric Dumazet 3283b1faf566SEric Dumazet atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3284b1faf566SEric Dumazet } 3285b1faf566SEric Dumazet 3286b1faf566SEric Dumazet /* 3287b1faf566SEric Dumazet * Note: We dont mem charge error packets (no sk_forward_alloc changes) 3288b1faf566SEric Dumazet */ 3289b1faf566SEric Dumazet int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3290b1faf566SEric Dumazet { 3291110c4330SEric Dumazet int len = skb->len; 3292110c4330SEric Dumazet 3293b1faf566SEric Dumazet if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 329495c96174SEric Dumazet (unsigned int)sk->sk_rcvbuf) 3295b1faf566SEric Dumazet return -ENOMEM; 3296b1faf566SEric Dumazet 3297b1faf566SEric Dumazet skb_orphan(skb); 3298b1faf566SEric Dumazet skb->sk = sk; 3299b1faf566SEric Dumazet skb->destructor = sock_rmem_free; 3300b1faf566SEric Dumazet atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3301b1faf566SEric Dumazet 3302abb57ea4SEric Dumazet /* before exiting rcu section, make sure dst is refcounted */ 3303abb57ea4SEric Dumazet skb_dst_force(skb); 3304abb57ea4SEric Dumazet 3305b1faf566SEric Dumazet skb_queue_tail(&sk->sk_error_queue, skb); 3306b1faf566SEric Dumazet if (!sock_flag(sk, SOCK_DEAD)) 3307110c4330SEric Dumazet sk->sk_data_ready(sk, len); 3308b1faf566SEric Dumazet return 0; 3309b1faf566SEric Dumazet } 3310b1faf566SEric Dumazet EXPORT_SYMBOL(sock_queue_err_skb); 3311b1faf566SEric Dumazet 3312ac45f602SPatrick Ohly void skb_tstamp_tx(struct sk_buff *orig_skb, 3313ac45f602SPatrick Ohly struct skb_shared_hwtstamps *hwtstamps) 3314ac45f602SPatrick Ohly { 3315ac45f602SPatrick Ohly struct sock *sk = orig_skb->sk; 3316ac45f602SPatrick Ohly struct sock_exterr_skb *serr; 3317ac45f602SPatrick Ohly struct sk_buff *skb; 3318ac45f602SPatrick Ohly int err; 3319ac45f602SPatrick Ohly 3320ac45f602SPatrick Ohly if (!sk) 3321ac45f602SPatrick Ohly return; 3322ac45f602SPatrick Ohly 3323ac45f602SPatrick Ohly if (hwtstamps) { 33242e31396fSWillem de Bruijn *skb_hwtstamps(orig_skb) = 3325ac45f602SPatrick Ohly *hwtstamps; 3326ac45f602SPatrick Ohly } else { 3327ac45f602SPatrick Ohly /* 
3328ac45f602SPatrick Ohly * no hardware time stamps available, 33292244d07bSOliver Hartkopp * so keep the shared tx_flags and only 3330ac45f602SPatrick Ohly * store software time stamp 3331ac45f602SPatrick Ohly */ 33322e31396fSWillem de Bruijn orig_skb->tstamp = ktime_get_real(); 3333ac45f602SPatrick Ohly } 3334ac45f602SPatrick Ohly 33352e31396fSWillem de Bruijn skb = skb_clone(orig_skb, GFP_ATOMIC); 33362e31396fSWillem de Bruijn if (!skb) 33372e31396fSWillem de Bruijn return; 33382e31396fSWillem de Bruijn 3339ac45f602SPatrick Ohly serr = SKB_EXT_ERR(skb); 3340ac45f602SPatrick Ohly memset(serr, 0, sizeof(*serr)); 3341ac45f602SPatrick Ohly serr->ee.ee_errno = ENOMSG; 3342ac45f602SPatrick Ohly serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 334329030374SEric Dumazet 3344ac45f602SPatrick Ohly err = sock_queue_err_skb(sk, skb); 334529030374SEric Dumazet 3346ac45f602SPatrick Ohly if (err) 3347ac45f602SPatrick Ohly kfree_skb(skb); 3348ac45f602SPatrick Ohly } 3349ac45f602SPatrick Ohly EXPORT_SYMBOL_GPL(skb_tstamp_tx); 3350ac45f602SPatrick Ohly 33516e3e939fSJohannes Berg void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 33526e3e939fSJohannes Berg { 33536e3e939fSJohannes Berg struct sock *sk = skb->sk; 33546e3e939fSJohannes Berg struct sock_exterr_skb *serr; 33556e3e939fSJohannes Berg int err; 33566e3e939fSJohannes Berg 33576e3e939fSJohannes Berg skb->wifi_acked_valid = 1; 33586e3e939fSJohannes Berg skb->wifi_acked = acked; 33596e3e939fSJohannes Berg 33606e3e939fSJohannes Berg serr = SKB_EXT_ERR(skb); 33616e3e939fSJohannes Berg memset(serr, 0, sizeof(*serr)); 33626e3e939fSJohannes Berg serr->ee.ee_errno = ENOMSG; 33636e3e939fSJohannes Berg serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 33646e3e939fSJohannes Berg 33656e3e939fSJohannes Berg err = sock_queue_err_skb(sk, skb); 33666e3e939fSJohannes Berg if (err) 33676e3e939fSJohannes Berg kfree_skb(skb); 33686e3e939fSJohannes Berg } 33696e3e939fSJohannes Berg EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 33706e3e939fSJohannes Berg 3371ac45f602SPatrick Ohly 3372f35d9d8aSRusty Russell /** 3373f35d9d8aSRusty Russell * skb_partial_csum_set - set up and verify partial csum values for packet 3374f35d9d8aSRusty Russell * @skb: the skb to set 3375f35d9d8aSRusty Russell * @start: the number of bytes after skb->data to start checksumming. 3376f35d9d8aSRusty Russell * @off: the offset from start to place the checksum. 3377f35d9d8aSRusty Russell * 3378f35d9d8aSRusty Russell * For untrusted partially-checksummed packets, we need to make sure the values 3379f35d9d8aSRusty Russell * for skb->csum_start and skb->csum_offset are valid so we don't oops. 3380f35d9d8aSRusty Russell * 3381f35d9d8aSRusty Russell * This function checks and sets those values and skb->ip_summed: if this 3382f35d9d8aSRusty Russell * returns false you should drop the packet. 
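 *
 * A minimal usage sketch (editorial addition, not part of the original
 * documentation), as a driver might validate checksum metadata from an
 * untrusted source; csum_start and csum_offset are illustrative values:
 *
 *	if (!skb_partial_csum_set(skb, csum_start, csum_offset)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 *	... skb->ip_summed is now CHECKSUM_PARTIAL ...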
3383f35d9d8aSRusty Russell */ 3384f35d9d8aSRusty Russell bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 3385f35d9d8aSRusty Russell { 33865ff8dda3SHerbert Xu if (unlikely(start > skb_headlen(skb)) || 33875ff8dda3SHerbert Xu unlikely((int)start + off > skb_headlen(skb) - 2)) { 3388e87cc472SJoe Perches net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", 33895ff8dda3SHerbert Xu start, off, skb_headlen(skb)); 3390f35d9d8aSRusty Russell return false; 3391f35d9d8aSRusty Russell } 3392f35d9d8aSRusty Russell skb->ip_summed = CHECKSUM_PARTIAL; 3393f35d9d8aSRusty Russell skb->csum_start = skb_headroom(skb) + start; 3394f35d9d8aSRusty Russell skb->csum_offset = off; 3395e5d5decaSJason Wang skb_set_transport_header(skb, start); 3396f35d9d8aSRusty Russell return true; 3397f35d9d8aSRusty Russell } 3398b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_partial_csum_set); 3399f35d9d8aSRusty Russell 34004497b076SBen Hutchings void __skb_warn_lro_forwarding(const struct sk_buff *skb) 34014497b076SBen Hutchings { 3402e87cc472SJoe Perches net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 3403e87cc472SJoe Perches skb->dev->name); 34044497b076SBen Hutchings } 34054497b076SBen Hutchings EXPORT_SYMBOL(__skb_warn_lro_forwarding); 3406bad43ca8SEric Dumazet 3407bad43ca8SEric Dumazet void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 3408bad43ca8SEric Dumazet { 34093d861f66SEric Dumazet if (head_stolen) { 34103d861f66SEric Dumazet skb_release_head_state(skb); 3411bad43ca8SEric Dumazet kmem_cache_free(skbuff_head_cache, skb); 34123d861f66SEric Dumazet } else { 3413bad43ca8SEric Dumazet __kfree_skb(skb); 3414bad43ca8SEric Dumazet } 34153d861f66SEric Dumazet } 3416bad43ca8SEric Dumazet EXPORT_SYMBOL(kfree_skb_partial); 3417bad43ca8SEric Dumazet 3418bad43ca8SEric Dumazet /** 3419bad43ca8SEric Dumazet * skb_try_coalesce - try to merge skb to prior one 3420bad43ca8SEric Dumazet * @to: prior buffer 3421bad43ca8SEric Dumazet * @from: buffer to add 3422bad43ca8SEric Dumazet * @fragstolen: pointer to boolean 3423c6c4b97cSRandy Dunlap * @delta_truesize: how much more was allocated than was requested 3424bad43ca8SEric Dumazet */ 3425bad43ca8SEric Dumazet bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 3426bad43ca8SEric Dumazet bool *fragstolen, int *delta_truesize) 3427bad43ca8SEric Dumazet { 3428bad43ca8SEric Dumazet int i, delta, len = from->len; 3429bad43ca8SEric Dumazet 3430bad43ca8SEric Dumazet *fragstolen = false; 3431bad43ca8SEric Dumazet 3432bad43ca8SEric Dumazet if (skb_cloned(to)) 3433bad43ca8SEric Dumazet return false; 3434bad43ca8SEric Dumazet 3435bad43ca8SEric Dumazet if (len <= skb_tailroom(to)) { 3436bad43ca8SEric Dumazet BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 3437bad43ca8SEric Dumazet *delta_truesize = 0; 3438bad43ca8SEric Dumazet return true; 3439bad43ca8SEric Dumazet } 3440bad43ca8SEric Dumazet 3441bad43ca8SEric Dumazet if (skb_has_frag_list(to) || skb_has_frag_list(from)) 3442bad43ca8SEric Dumazet return false; 3443bad43ca8SEric Dumazet 3444bad43ca8SEric Dumazet if (skb_headlen(from) != 0) { 3445bad43ca8SEric Dumazet struct page *page; 3446bad43ca8SEric Dumazet unsigned int offset; 3447bad43ca8SEric Dumazet 3448bad43ca8SEric Dumazet if (skb_shinfo(to)->nr_frags + 3449bad43ca8SEric Dumazet skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 3450bad43ca8SEric Dumazet return false; 3451bad43ca8SEric Dumazet 3452bad43ca8SEric Dumazet if (skb_head_is_locked(from)) 3453bad43ca8SEric Dumazet return false; 
3454bad43ca8SEric Dumazet 3455bad43ca8SEric Dumazet delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 3456bad43ca8SEric Dumazet 3457bad43ca8SEric Dumazet page = virt_to_head_page(from->head); 3458bad43ca8SEric Dumazet offset = from->data - (unsigned char *)page_address(page); 3459bad43ca8SEric Dumazet 3460bad43ca8SEric Dumazet skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, 3461bad43ca8SEric Dumazet page, offset, skb_headlen(from)); 3462bad43ca8SEric Dumazet *fragstolen = true; 3463bad43ca8SEric Dumazet } else { 3464bad43ca8SEric Dumazet if (skb_shinfo(to)->nr_frags + 3465bad43ca8SEric Dumazet skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) 3466bad43ca8SEric Dumazet return false; 3467bad43ca8SEric Dumazet 3468f4b549a5SWeiping Pan delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); 3469bad43ca8SEric Dumazet } 3470bad43ca8SEric Dumazet 3471bad43ca8SEric Dumazet WARN_ON_ONCE(delta < len); 3472bad43ca8SEric Dumazet 3473bad43ca8SEric Dumazet memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, 3474bad43ca8SEric Dumazet skb_shinfo(from)->frags, 3475bad43ca8SEric Dumazet skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); 3476bad43ca8SEric Dumazet skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; 3477bad43ca8SEric Dumazet 3478bad43ca8SEric Dumazet if (!skb_cloned(from)) 3479bad43ca8SEric Dumazet skb_shinfo(from)->nr_frags = 0; 3480bad43ca8SEric Dumazet 34818ea853fdSLi RongQing /* if the skb is not cloned this does nothing 34828ea853fdSLi RongQing * since we set nr_frags to 0. 34838ea853fdSLi RongQing */ 3484bad43ca8SEric Dumazet for (i = 0; i < skb_shinfo(from)->nr_frags; i++) 3485bad43ca8SEric Dumazet skb_frag_ref(from, i); 3486bad43ca8SEric Dumazet 3487bad43ca8SEric Dumazet to->truesize += delta; 3488bad43ca8SEric Dumazet to->len += len; 3489bad43ca8SEric Dumazet to->data_len += len; 3490bad43ca8SEric Dumazet 3491bad43ca8SEric Dumazet *delta_truesize = delta; 3492bad43ca8SEric Dumazet return true; 3493bad43ca8SEric Dumazet } 3494bad43ca8SEric Dumazet EXPORT_SYMBOL(skb_try_coalesce); 3495621e84d6SNicolas Dichtel 3496621e84d6SNicolas Dichtel /** 3497621e84d6SNicolas Dichtel * skb_scrub_packet - scrub an skb before sending it to another netns 3498621e84d6SNicolas Dichtel * 3499621e84d6SNicolas Dichtel * @skb: buffer to clean 3500621e84d6SNicolas Dichtel * 3501621e84d6SNicolas Dichtel * skb_scrub_packet can be used to clean an skb before injecting it in 3502621e84d6SNicolas Dichtel * another namespace. We have to clear all information in the skb that 3503621e84d6SNicolas Dichtel * could impact namespace isolation. 3504621e84d6SNicolas Dichtel */ 3505621e84d6SNicolas Dichtel void skb_scrub_packet(struct sk_buff *skb) 3506621e84d6SNicolas Dichtel { 3507621e84d6SNicolas Dichtel skb_orphan(skb); 3508621e84d6SNicolas Dichtel skb->tstamp.tv64 = 0; 3509621e84d6SNicolas Dichtel skb->pkt_type = PACKET_HOST; 3510621e84d6SNicolas Dichtel skb->skb_iif = 0; 3511621e84d6SNicolas Dichtel skb_dst_drop(skb); 3512621e84d6SNicolas Dichtel skb->mark = 0; 3513621e84d6SNicolas Dichtel secpath_reset(skb); 3514621e84d6SNicolas Dichtel nf_reset(skb); 3515621e84d6SNicolas Dichtel nf_reset_trace(skb); 3516621e84d6SNicolas Dichtel } 3517621e84d6SNicolas Dichtel EXPORT_SYMBOL_GPL(skb_scrub_packet); 3518
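/*
 * Example (editorial addition, not part of the original file): a minimal
 * sketch of how a virtual device forwarding buffers into another network
 * namespace might use skb_scrub_packet(); "peer" is a hypothetical
 * destination net_device.
 *
 *	if (!net_eq(dev_net(skb->dev), dev_net(peer)))
 *		skb_scrub_packet(skb);
 *	skb->protocol = eth_type_trans(skb, peer);
 *	netif_rx(skb);
 */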