/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out-of-line support code for skb_put(). Not user callable.
 */
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out-of-line support code for skb_push(). Not user callable.
 */

static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tailroom of @size bytes. The object has a reference count of one.
 *	The return is the buffer; on failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_node_track_caller(size, gfp_mask, node);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
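
/*
 * Example (editor's illustrative sketch, not part of the original file): a
 * typical caller goes through the alloc_skb() wrapper, reserves headroom up
 * front, then fills the linear area. The 64-byte headroom and the payload
 * variables are hypothetical.
 *
 *	struct sk_buff *skb = alloc_skb(64 + payload_len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, 64);			// headroom for later skb_push()
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 */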

/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of fragment, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes:
 *  Before IO, the driver allocates only the data buffer where the NIC
 *  puts the incoming frame.
 *  The driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 *  After IO, the driver calls build_skb() to allocate the sk_buff and
 *  populate it before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	skb->head_frag = frag_size != 0;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}
EXPORT_SYMBOL(build_skb);
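
/*
 * Example (editor's illustrative sketch, not part of the original file): the
 * RX flow the notes above describe. The buffer sizing, the DMA step and the
 * frame_len variable are hypothetical; passing 0 tells build_skb() the head
 * was kmalloced.
 *
 *	size = SKB_DATA_ALIGN(NET_SKB_PAD + frame_len) +
 *	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	buf = kmalloc(size, GFP_ATOMIC);	// NIC DMAs at buf + NET_SKB_PAD
 *	...
 *	skb = build_skb(buf, 0);
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);	// skip the driver headroom
 *		skb_put(skb, frame_len);	// frame written by the device
 *	}
 */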

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);
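
/*
 * Example (editor's illustrative sketch, not part of the original file): a
 * driver doing paged RX copies the protocol headers into the linear area
 * and attaches the payload page as frag 0. hdr_len, page and offset are
 * hypothetical; PAGE_SIZE is the truesize of the attached buffer here.
 *
 *	skb = netdev_alloc_skb(dev, hdr_len);
 *	if (skb) {
 *		memcpy(skb_put(skb, hdr_len), ring_buf, hdr_len);
 *		skb_add_rx_frag(skb, 0, page, offset, data_len, PAGE_SIZE);
 *	}
 */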

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	/*
	 * There is more code here than it seems:
	 * __dev_alloc_skb is an inline
	 */
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	if (skb->head_frag)
		put_page(virt_to_head_page(skb->head));
	else
		kfree(skb->head);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				skb_frag_unref(skb, i);
		}

		/*
		 * If the skb buffer is from userspace, we need to notify the
		 * caller that the lower device's DMA is done;
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			struct ubuf_info *uarg;

			uarg = skb_shinfo(skb)->destructor_arg;
			if (uarg->callback)
				uarg->callback(uarg);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		skb_free_head(skb);
	}
}
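
/*
 * Note on the dataref arithmetic above (editor's sketch of the encoding
 * documented alongside SKB_DATAREF_SHIFT in skbuff.h): the counter is
 * split into two halves, roughly
 *
 *	dataref = (payload_only_refs << SKB_DATAREF_SHIFT) | full_refs;
 *
 * which is why a nohdr skb, whose header was released separately via
 * skb_header_release(), must drop one reference from each half:
 * (1 << SKB_DATAREF_SHIFT) + 1.
 */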

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb(), but kfree_skb() assumes that
 *	the frame is being dropped after a failure and notes that in its
 *	tracing, while consume_skb() records a normal, successful consumption.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
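
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the distinction matters for drop-monitoring tools that hook the
 * kfree_skb tracepoint. A hypothetical TX-completion path:
 *
 *	if (tx_ok)
 *		consume_skb(skb);	// delivered, not a drop
 *	else
 *		kfree_skb(skb);		// error path, counts as a drop
 */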

/**
 *	skb_recycle - clean up an skb for reuse
 *	@skb: buffer
 *
 *	Recycles the skb to be reused as a receive buffer. This
 *	function does any necessary reference count dropping, and
 *	cleans up the skbuff as if it just came from __alloc_skb().
 */
void skb_recycle(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;

	skb_release_head_state(skb);

	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);
}
EXPORT_SYMBOL(skb_recycle);

/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, and
 *	that it is linear and its head portion is at least as large as
 *	skb_size so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	if (!skb_is_recycleable(skb, skb_size))
		return false;

	skb_recycle(skb);

	return true;
}
EXPORT_SYMBOL(skb_recycle_check);
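
/*
 * Example (editor's illustrative sketch, not part of the original file): a
 * driver can try to keep a just-completed skb on its RX ring instead of
 * freeing it. rx_buf_size and the refill helper are hypothetical.
 *
 *	if (skb_recycle_check(skb, rx_buf_size))
 *		my_driver_refill_rx_ring(priv, skb);	// reuse as RX buffer
 *	else
 *		dev_kfree_skb_any(skb);
 */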

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp		= old->tstamp;
	new->dev		= old->dev;
	new->transport_header	= old->transport_header;
	new->network_header	= old->network_header;
	new->mac_header		= old->mac_header;
	skb_dst_copy(new, old);
	new->rxhash		= old->rxhash;
	new->ooo_okay		= old->ooo_okay;
	new->l4_rxhash		= old->l4_rxhash;
	new->no_fcs		= old->no_fcs;
#ifdef CONFIG_XFRM
	new->sp			= secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum		= old->csum;
	new->local_df		= old->local_df;
	new->pkt_type		= old->pkt_type;
	new->ip_summed		= old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority		= old->priority;
#if IS_ENABLED(CONFIG_IP_VS)
	new->ipvs_property	= old->ipvs_property;
#endif
	new->protocol		= old->protocol;
	new->mark		= old->mark;
	new->skb_iif		= old->skb_iif;
	__nf_copy(new, old);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	new->nf_trace		= old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index		= old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd		= old->tc_verd;
#endif
#endif
	new->vlan_tci		= old->vlan_tci;

	skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph - morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/*	skb_copy_ubufs	-	copy userspace skb frag buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on an SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel memory and drop the reference
 *	to the userspace pages.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)head->private;
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_atomic(skb_frag_page(f));
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_atomic(vaddr);
		page->private = (unsigned long)head;
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg);

	/* skb frags point to kernel buffers */
	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
		__skb_fill_page_desc(skb, i-1, head, 0,
				     skb_shinfo(skb)->frags[i - 1].size);
		head = (struct page *)head->private;
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}


/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, gfp_mask))
			return NULL;
	}

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header   += offset;
	if (skb_mac_header_was_set(new))
		new->mac_header += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
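
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * choosing between the two. A clone shares the packet data, so it is only
 * safe for read-only consumers; a full copy may be modified freely.
 *
 *	nskb = skb_clone(skb, GFP_ATOMIC);	// cheap, data stays shared
 *
 *	nskb = skb_copy(skb, GFP_ATOMIC);	// private data, safe to edit
 *	if (nskb)
 *		ip_hdr(nskb)->ttl--;		// hypothetical in-place edit
 */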

/**
 *	__pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in the header. Fragmented data remains shared. This is used when
 *	the caller wishes to modify only the header of the &sk_buff and
 *	needs a private copy of the header to alter. Returns %NULL on
 *	failure or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
{
	unsigned int size = skb_headlen(skb) + headroom;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len  = skb->data_len;
	n->len	     = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			if (skb_copy_ubufs(skb, gfp_mask)) {
				kfree_skb(n);
				n = NULL;
				goto out;
			}
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy);
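
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * header-only rewriting via the pskb_copy() wrapper, which keeps the
 * current headroom and leaves any paged data shared.
 *
 *	nskb = pskb_copy(skb, GFP_ATOMIC);	// private header, shared frags
 *	if (nskb)
 *		... edit protocol headers in nskb's linear area ...
 */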

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success, or a
 *	negative error code if expansion failed; in that case the &sk_buff
 *	is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
	long off;
	bool fastpath;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	/* Check if we can avoid taking references on fragments if we own
	 * the last reference on skb->head. (see skb_release_data())
	 */
	if (!skb->cloned)
		fastpath = true;
	else {
		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
	}

	if (fastpath && !skb->head_frag &&
	    size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
		memmove(skb->head + size, skb_shinfo(skb),
			offsetof(struct skb_shared_info,
				 frags[skb_shinfo(skb)->nr_frags]));
		memmove(skb->head + nhead, skb->head,
			skb_tail_pointer(skb) - skb->head);
		off = nhead;
		goto adjust_others;
	}

	data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
		       gfp_mask);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	if (fastpath) {
		skb_free_head(skb);
	} else {
		/* copy this zero copy skb frags */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			if (skb_copy_ubufs(skb, gfp_mask))
				goto nofrags;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head      = data;
	skb->head_frag = 0;
adjust_others:
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end   = size;
	off        = nhead;
#else
	skb->end   = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail	      += off;
	skb->transport_header += off;
	skb->network_header   += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += nhead;
	skb->cloned   = 0;
	skb->hdr_len  = 0;
	skb->nohdr    = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
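
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * guaranteeing headroom before prepending a header, in the spirit of what
 * skb_cow_head() does. "needed" and the drop label are hypothetical; note
 * that all cached pointers into the old header are stale afterwards.
 *
 *	if (skb_headroom(skb) < needed &&
 *	    pskb_expand_head(skb, SKB_DATA_ALIGN(needed - skb_headroom(skb)),
 *			     0, GFP_ATOMIC))
 *		goto drop;
 *	skb_push(skb, needed);
 */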

/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
	if (n->ip_summed == CHECKSUM_PARTIAL)
		n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header   += off;
	if (skb_mac_header_was_set(skb))
		n->mac_header += off;
#endif

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);
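
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * an encapsulation path that needs both a writable copy and extra room for
 * a new outer header and trailer. The sizes are hypothetical.
 *
 *	nskb = skb_copy_expand(skb, tunnel_hlen, trailer_len, GFP_ATOMIC);
 *	if (nskb)
 *		... skb_push(nskb, tunnel_hlen) now cannot underrun ...
 */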
11171da177e4SLinus Torvalds 
11181da177e4SLinus Torvalds /**
11191da177e4SLinus Torvalds  *	skb_pad			-	zero pad the tail of an skb
11201da177e4SLinus Torvalds  *	@skb: buffer to pad
11211da177e4SLinus Torvalds  *	@pad: space to pad
11221da177e4SLinus Torvalds  *
11231da177e4SLinus Torvalds  *	Ensure that a buffer is followed by a padding area that is zero
11241da177e4SLinus Torvalds  *	filled. Used by network drivers which may DMA or transfer data
11251da177e4SLinus Torvalds  *	beyond the buffer end onto the wire.
11261da177e4SLinus Torvalds  *
11275b057c6bSHerbert Xu  *	May return error in out of memory cases. The skb is freed on error.
11281da177e4SLinus Torvalds  */
11291da177e4SLinus Torvalds 
11305b057c6bSHerbert Xu int skb_pad(struct sk_buff *skb, int pad)
11311da177e4SLinus Torvalds {
11325b057c6bSHerbert Xu 	int err;
11335b057c6bSHerbert Xu 	int ntail;
11341da177e4SLinus Torvalds 
11351da177e4SLinus Torvalds 	/* If the skbuff is non linear tailroom is always zero.. */
11365b057c6bSHerbert Xu 	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
11371da177e4SLinus Torvalds 		memset(skb->data+skb->len, 0, pad);
11385b057c6bSHerbert Xu 		return 0;
11391da177e4SLinus Torvalds 	}
11401da177e4SLinus Torvalds 
11414305b541SArnaldo Carvalho de Melo 	ntail = skb->data_len + pad - (skb->end - skb->tail);
11425b057c6bSHerbert Xu 	if (likely(skb_cloned(skb) || ntail > 0)) {
11435b057c6bSHerbert Xu 		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
11445b057c6bSHerbert Xu 		if (unlikely(err))
11455b057c6bSHerbert Xu 			goto free_skb;
11465b057c6bSHerbert Xu 	}
11475b057c6bSHerbert Xu 
11485b057c6bSHerbert Xu 	/* FIXME: The use of this function with non-linear skb's really needs
11495b057c6bSHerbert Xu 	 * to be audited.
11505b057c6bSHerbert Xu 	 */
11515b057c6bSHerbert Xu 	err = skb_linearize(skb);
11525b057c6bSHerbert Xu 	if (unlikely(err))
11535b057c6bSHerbert Xu 		goto free_skb;
11545b057c6bSHerbert Xu 
11555b057c6bSHerbert Xu 	memset(skb->data + skb->len, 0, pad);
11565b057c6bSHerbert Xu 	return 0;
11575b057c6bSHerbert Xu 
11585b057c6bSHerbert Xu free_skb:
11591da177e4SLinus Torvalds 	kfree_skb(skb);
11605b057c6bSHerbert Xu 	return err;
11611da177e4SLinus Torvalds }
1162b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_pad);
11631da177e4SLinus Torvalds 
11640dde3e16SIlpo Järvinen /**
11650dde3e16SIlpo Järvinen  *	skb_put - add data to a buffer
11660dde3e16SIlpo Järvinen  *	@skb: buffer to use
11670dde3e16SIlpo Järvinen  *	@len: amount of data to add
11680dde3e16SIlpo Järvinen  *
11690dde3e16SIlpo Järvinen  *	This function extends the used data area of the buffer. If this would
11700dde3e16SIlpo Järvinen  *	exceed the total buffer size the kernel will panic. A pointer to the
11710dde3e16SIlpo Järvinen  *	first byte of the extra data is returned.
11720dde3e16SIlpo Järvinen  */
11730dde3e16SIlpo Järvinen unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
11740dde3e16SIlpo Järvinen {
11750dde3e16SIlpo Järvinen 	unsigned char *tmp = skb_tail_pointer(skb);
11760dde3e16SIlpo Järvinen 	SKB_LINEAR_ASSERT(skb);
11770dde3e16SIlpo Järvinen 	skb->tail += len;
11780dde3e16SIlpo Järvinen 	skb->len  += len;
11790dde3e16SIlpo Järvinen 	if (unlikely(skb->tail > skb->end))
11800dde3e16SIlpo Järvinen 		skb_over_panic(skb, len, __builtin_return_address(0));
11810dde3e16SIlpo Järvinen 	return tmp;
11820dde3e16SIlpo Järvinen }
11830dde3e16SIlpo Järvinen EXPORT_SYMBOL(skb_put);
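/* Illustrative sketch, not part of the original file: the canonical
 * reserve/put/push pattern using skb_put() above and skb_push() just below.
 * The payload source, sizes and function name are invented; a real driver
 * reserves headroom at allocation time, appends the payload, then prepends
 * the link-layer header.
 */
#if 0	/* example only */
static struct sk_buff *example_build_frame(const void *payload,
					   unsigned int len)
{
	struct sk_buff *skb = alloc_skb(ETH_HLEN + len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, ETH_HLEN);			/* headroom */
	memcpy(skb_put(skb, len), payload, len);	/* append payload */
	memset(skb_push(skb, ETH_HLEN), 0, ETH_HLEN);	/* prepend header */
	return skb;
}
#endif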
11840dde3e16SIlpo Järvinen 
11856be8ac2fSIlpo Järvinen /**
1186c2aa270aSIlpo Järvinen  *	skb_push - add data to the start of a buffer
1187c2aa270aSIlpo Järvinen  *	@skb: buffer to use
1188c2aa270aSIlpo Järvinen  *	@len: amount of data to add
1189c2aa270aSIlpo Järvinen  *
1190c2aa270aSIlpo Järvinen  *	This function extends the used data area of the buffer at the buffer
1191c2aa270aSIlpo Järvinen  *	start. If this would exceed the total buffer headroom the kernel will
1192c2aa270aSIlpo Järvinen  *	panic. A pointer to the first byte of the extra data is returned.
1193c2aa270aSIlpo Järvinen  */
1194c2aa270aSIlpo Järvinen unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
1195c2aa270aSIlpo Järvinen {
1196c2aa270aSIlpo Järvinen 	skb->data -= len;
1197c2aa270aSIlpo Järvinen 	skb->len  += len;
1198c2aa270aSIlpo Järvinen 	if (unlikely(skb->data<skb->head))
1199c2aa270aSIlpo Järvinen 		skb_under_panic(skb, len, __builtin_return_address(0));
1200c2aa270aSIlpo Järvinen 	return skb->data;
1201c2aa270aSIlpo Järvinen }
1202c2aa270aSIlpo Järvinen EXPORT_SYMBOL(skb_push);
1203c2aa270aSIlpo Järvinen 
1204c2aa270aSIlpo Järvinen /**
12056be8ac2fSIlpo Järvinen  *	skb_pull - remove data from the start of a buffer
12066be8ac2fSIlpo Järvinen  *	@skb: buffer to use
12076be8ac2fSIlpo Järvinen  *	@len: amount of data to remove
12086be8ac2fSIlpo Järvinen  *
12096be8ac2fSIlpo Järvinen  *	This function removes data from the start of a buffer, returning
12106be8ac2fSIlpo Järvinen  *	the memory to the headroom. A pointer to the next data in the buffer
12116be8ac2fSIlpo Järvinen  *	is returned. Once the data has been pulled future pushes will overwrite
12126be8ac2fSIlpo Järvinen  *	the old data.
12136be8ac2fSIlpo Järvinen  */
12146be8ac2fSIlpo Järvinen unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
12156be8ac2fSIlpo Järvinen {
121647d29646SDavid S. Miller 	return skb_pull_inline(skb, len);
12176be8ac2fSIlpo Järvinen }
12186be8ac2fSIlpo Järvinen EXPORT_SYMBOL(skb_pull);
12196be8ac2fSIlpo Järvinen 
1220419ae74eSIlpo Järvinen /**
1221419ae74eSIlpo Järvinen  *	skb_trim - remove end from a buffer
1222419ae74eSIlpo Järvinen  *	@skb: buffer to alter
1223419ae74eSIlpo Järvinen  *	@len: new length
1224419ae74eSIlpo Järvinen  *
1225419ae74eSIlpo Järvinen  *	Cut the length of a buffer down by removing data from the tail. If
1226419ae74eSIlpo Järvinen  *	the buffer is already under the length specified it is not modified.
1227419ae74eSIlpo Järvinen  *	The skb must be linear.
1228419ae74eSIlpo Järvinen  */
1229419ae74eSIlpo Järvinen void skb_trim(struct sk_buff *skb, unsigned int len)
1230419ae74eSIlpo Järvinen {
1231419ae74eSIlpo Järvinen 	if (skb->len > len)
1232419ae74eSIlpo Järvinen 		__skb_trim(skb, len);
1233419ae74eSIlpo Järvinen }
1234419ae74eSIlpo Järvinen EXPORT_SYMBOL(skb_trim);
1235419ae74eSIlpo Järvinen 
12363cc0e873SHerbert Xu /* Trims skb to length len. It can change skb pointers.
12371da177e4SLinus Torvalds */ 12381da177e4SLinus Torvalds 12393cc0e873SHerbert Xu int ___pskb_trim(struct sk_buff *skb, unsigned int len) 12401da177e4SLinus Torvalds { 124127b437c8SHerbert Xu struct sk_buff **fragp; 124227b437c8SHerbert Xu struct sk_buff *frag; 12431da177e4SLinus Torvalds int offset = skb_headlen(skb); 12441da177e4SLinus Torvalds int nfrags = skb_shinfo(skb)->nr_frags; 12451da177e4SLinus Torvalds int i; 124627b437c8SHerbert Xu int err; 124727b437c8SHerbert Xu 124827b437c8SHerbert Xu if (skb_cloned(skb) && 124927b437c8SHerbert Xu unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 125027b437c8SHerbert Xu return err; 12511da177e4SLinus Torvalds 1252f4d26fb3SHerbert Xu i = 0; 1253f4d26fb3SHerbert Xu if (offset >= len) 1254f4d26fb3SHerbert Xu goto drop_pages; 1255f4d26fb3SHerbert Xu 1256f4d26fb3SHerbert Xu for (; i < nfrags; i++) { 12579e903e08SEric Dumazet int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 125827b437c8SHerbert Xu 125927b437c8SHerbert Xu if (end < len) { 12601da177e4SLinus Torvalds offset = end; 126127b437c8SHerbert Xu continue; 12621da177e4SLinus Torvalds } 12631da177e4SLinus Torvalds 12649e903e08SEric Dumazet skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 126527b437c8SHerbert Xu 1266f4d26fb3SHerbert Xu drop_pages: 126727b437c8SHerbert Xu skb_shinfo(skb)->nr_frags = i; 126827b437c8SHerbert Xu 126927b437c8SHerbert Xu for (; i < nfrags; i++) 1270ea2ab693SIan Campbell skb_frag_unref(skb, i); 127127b437c8SHerbert Xu 127221dc3301SDavid S. Miller if (skb_has_frag_list(skb)) 127327b437c8SHerbert Xu skb_drop_fraglist(skb); 1274f4d26fb3SHerbert Xu goto done; 127527b437c8SHerbert Xu } 127627b437c8SHerbert Xu 127727b437c8SHerbert Xu for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 127827b437c8SHerbert Xu fragp = &frag->next) { 127927b437c8SHerbert Xu int end = offset + frag->len; 128027b437c8SHerbert Xu 128127b437c8SHerbert Xu if (skb_shared(frag)) { 128227b437c8SHerbert Xu struct sk_buff *nfrag; 128327b437c8SHerbert Xu 128427b437c8SHerbert Xu nfrag = skb_clone(frag, GFP_ATOMIC); 128527b437c8SHerbert Xu if (unlikely(!nfrag)) 128627b437c8SHerbert Xu return -ENOMEM; 128727b437c8SHerbert Xu 128827b437c8SHerbert Xu nfrag->next = frag->next; 128985bb2a60SEric Dumazet consume_skb(frag); 129027b437c8SHerbert Xu frag = nfrag; 129127b437c8SHerbert Xu *fragp = frag; 129227b437c8SHerbert Xu } 129327b437c8SHerbert Xu 129427b437c8SHerbert Xu if (end < len) { 129527b437c8SHerbert Xu offset = end; 129627b437c8SHerbert Xu continue; 129727b437c8SHerbert Xu } 129827b437c8SHerbert Xu 129927b437c8SHerbert Xu if (end > len && 130027b437c8SHerbert Xu unlikely((err = pskb_trim(frag, len - offset)))) 130127b437c8SHerbert Xu return err; 130227b437c8SHerbert Xu 130327b437c8SHerbert Xu if (frag->next) 130427b437c8SHerbert Xu skb_drop_list(&frag->next); 130527b437c8SHerbert Xu break; 130627b437c8SHerbert Xu } 130727b437c8SHerbert Xu 1308f4d26fb3SHerbert Xu done: 130927b437c8SHerbert Xu if (len > skb_headlen(skb)) { 13101da177e4SLinus Torvalds skb->data_len -= skb->len - len; 13111da177e4SLinus Torvalds skb->len = len; 13121da177e4SLinus Torvalds } else { 13131da177e4SLinus Torvalds skb->len = len; 13141da177e4SLinus Torvalds skb->data_len = 0; 131527a884dcSArnaldo Carvalho de Melo skb_set_tail_pointer(skb, len); 13161da177e4SLinus Torvalds } 13171da177e4SLinus Torvalds 13181da177e4SLinus Torvalds return 0; 13191da177e4SLinus Torvalds } 1320b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(___pskb_trim);
13211da177e4SLinus Torvalds 
13221da177e4SLinus Torvalds /**
13231da177e4SLinus Torvalds  *	__pskb_pull_tail - advance tail of skb header
13241da177e4SLinus Torvalds  *	@skb: buffer to reallocate
13251da177e4SLinus Torvalds  *	@delta: number of bytes to advance tail
13261da177e4SLinus Torvalds  *
13271da177e4SLinus Torvalds  *	The function makes sense only on a fragmented &sk_buff:
13281da177e4SLinus Torvalds  *	it expands the header, moving its tail forward and copying the
13291da177e4SLinus Torvalds  *	necessary data from the fragmented part.
13301da177e4SLinus Torvalds  *
13311da177e4SLinus Torvalds  *	&sk_buff MUST have reference count of 1.
13321da177e4SLinus Torvalds  *
13331da177e4SLinus Torvalds  *	Returns %NULL (and &sk_buff does not change) if the pull failed,
13341da177e4SLinus Torvalds  *	or the value of the new tail of the skb in the case of success.
13351da177e4SLinus Torvalds  *
13361da177e4SLinus Torvalds  *	All the pointers pointing into the skb header may change and must be
13371da177e4SLinus Torvalds  *	reloaded after a call to this function.
13381da177e4SLinus Torvalds  */
13391da177e4SLinus Torvalds 
13401da177e4SLinus Torvalds /* Moves the tail of the skb head forward, copying data from the fragmented
13411da177e4SLinus Torvalds  * part when necessary.
13421da177e4SLinus Torvalds  * 1. It may fail due to malloc failure.
13431da177e4SLinus Torvalds  * 2. It may change skb pointers.
13441da177e4SLinus Torvalds  *
13451da177e4SLinus Torvalds  * It is pretty complicated. Luckily, it is called only in exceptional cases.
13461da177e4SLinus Torvalds  */
13471da177e4SLinus Torvalds unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
13481da177e4SLinus Torvalds {
13491da177e4SLinus Torvalds 	/* If the skb does not have enough free space at the tail, get a new
13501da177e4SLinus Torvalds 	 * one plus 128 bytes for future expansions. If we have enough room
13511da177e4SLinus Torvalds 	 * at the tail, reallocate without expansion only if the skb is cloned.
13521da177e4SLinus Torvalds 	 */
13534305b541SArnaldo Carvalho de Melo 	int i, k, eat = (skb->tail + delta) - skb->end;
13541da177e4SLinus Torvalds 
13551da177e4SLinus Torvalds 	if (eat > 0 || skb_cloned(skb)) {
13561da177e4SLinus Torvalds 		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
13571da177e4SLinus Torvalds 				     GFP_ATOMIC))
13581da177e4SLinus Torvalds 			return NULL;
13591da177e4SLinus Torvalds 	}
13601da177e4SLinus Torvalds 
136127a884dcSArnaldo Carvalho de Melo 	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
13621da177e4SLinus Torvalds 		BUG();
13631da177e4SLinus Torvalds 
13641da177e4SLinus Torvalds 	/* Optimization: no fragments, no reasons to preestimate
13651da177e4SLinus Torvalds 	 * size of pulled pages. Superb.
13661da177e4SLinus Torvalds 	 */
136721dc3301SDavid S. Miller 	if (!skb_has_frag_list(skb))
13681da177e4SLinus Torvalds 		goto pull_pages;
13691da177e4SLinus Torvalds 
13701da177e4SLinus Torvalds 	/* Estimate size of pulled pages. */
13711da177e4SLinus Torvalds 	eat = delta;
13721da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
13739e903e08SEric Dumazet 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
13749e903e08SEric Dumazet 
13759e903e08SEric Dumazet 		if (size >= eat)
13761da177e4SLinus Torvalds 			goto pull_pages;
13779e903e08SEric Dumazet 		eat -= size;
13781da177e4SLinus Torvalds 	}
13791da177e4SLinus Torvalds 
13801da177e4SLinus Torvalds 	/* If we need to update the frag list, we are in trouble.
13811da177e4SLinus Torvalds 	 * Certainly, it is possible to add an offset to the skb data,
13821da177e4SLinus Torvalds 	 * but taking into account that pulling is expected to
13831da177e4SLinus Torvalds 	 * be a very rare operation, it is worth fighting against
13841da177e4SLinus Torvalds 	 * further bloating of the skb head and crucifying ourselves here instead.
13851da177e4SLinus Torvalds 	 * Pure masochism, indeed. 8)8)
13861da177e4SLinus Torvalds 	 */
13871da177e4SLinus Torvalds 	if (eat) {
13881da177e4SLinus Torvalds 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
13891da177e4SLinus Torvalds 		struct sk_buff *clone = NULL;
13901da177e4SLinus Torvalds 		struct sk_buff *insp = NULL;
13911da177e4SLinus Torvalds 
13921da177e4SLinus Torvalds 		do {
139309a62660SKris Katterjohn 			BUG_ON(!list);
13941da177e4SLinus Torvalds 
13951da177e4SLinus Torvalds 			if (list->len <= eat) {
13961da177e4SLinus Torvalds 				/* Eaten as whole. */
13971da177e4SLinus Torvalds 				eat -= list->len;
13981da177e4SLinus Torvalds 				list = list->next;
13991da177e4SLinus Torvalds 				insp = list;
14001da177e4SLinus Torvalds 			} else {
14011da177e4SLinus Torvalds 				/* Eaten partially. */
14021da177e4SLinus Torvalds 
14031da177e4SLinus Torvalds 				if (skb_shared(list)) {
14041da177e4SLinus Torvalds 					/* Sucks! We need to fork list. :-( */
14051da177e4SLinus Torvalds 					clone = skb_clone(list, GFP_ATOMIC);
14061da177e4SLinus Torvalds 					if (!clone)
14071da177e4SLinus Torvalds 						return NULL;
14081da177e4SLinus Torvalds 					insp = list->next;
14091da177e4SLinus Torvalds 					list = clone;
14101da177e4SLinus Torvalds 				} else {
14111da177e4SLinus Torvalds 					/* This may be pulled without
14121da177e4SLinus Torvalds 					 * problems. */
14131da177e4SLinus Torvalds 					insp = list;
14141da177e4SLinus Torvalds 				}
14151da177e4SLinus Torvalds 				if (!pskb_pull(list, eat)) {
14161da177e4SLinus Torvalds 					kfree_skb(clone);
14171da177e4SLinus Torvalds 					return NULL;
14181da177e4SLinus Torvalds 				}
14191da177e4SLinus Torvalds 				break;
14201da177e4SLinus Torvalds 			}
14211da177e4SLinus Torvalds 		} while (eat);
14221da177e4SLinus Torvalds 
14231da177e4SLinus Torvalds 		/* Free pulled out fragments. */
14241da177e4SLinus Torvalds 		while ((list = skb_shinfo(skb)->frag_list) != insp) {
14251da177e4SLinus Torvalds 			skb_shinfo(skb)->frag_list = list->next;
14261da177e4SLinus Torvalds 			kfree_skb(list);
14271da177e4SLinus Torvalds 		}
14281da177e4SLinus Torvalds 		/* And insert new clone at head. */
14291da177e4SLinus Torvalds 		if (clone) {
14301da177e4SLinus Torvalds 			clone->next = list;
14311da177e4SLinus Torvalds 			skb_shinfo(skb)->frag_list = clone;
14321da177e4SLinus Torvalds 		}
14331da177e4SLinus Torvalds 	}
14341da177e4SLinus Torvalds 	/* Success! Now we may commit changes to skb data.
*/ 14351da177e4SLinus Torvalds 14361da177e4SLinus Torvalds pull_pages: 14371da177e4SLinus Torvalds eat = delta; 14381da177e4SLinus Torvalds k = 0; 14391da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 14409e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 14419e903e08SEric Dumazet 14429e903e08SEric Dumazet if (size <= eat) { 1443ea2ab693SIan Campbell skb_frag_unref(skb, i); 14449e903e08SEric Dumazet eat -= size; 14451da177e4SLinus Torvalds } else { 14461da177e4SLinus Torvalds skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 14471da177e4SLinus Torvalds if (eat) { 14481da177e4SLinus Torvalds skb_shinfo(skb)->frags[k].page_offset += eat; 14499e903e08SEric Dumazet skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); 14501da177e4SLinus Torvalds eat = 0; 14511da177e4SLinus Torvalds } 14521da177e4SLinus Torvalds k++; 14531da177e4SLinus Torvalds } 14541da177e4SLinus Torvalds } 14551da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = k; 14561da177e4SLinus Torvalds 14571da177e4SLinus Torvalds skb->tail += delta; 14581da177e4SLinus Torvalds skb->data_len -= delta; 14591da177e4SLinus Torvalds 146027a884dcSArnaldo Carvalho de Melo return skb_tail_pointer(skb); 14611da177e4SLinus Torvalds } 1462b4ac530fSDavid S. Miller EXPORT_SYMBOL(__pskb_pull_tail); 14631da177e4SLinus Torvalds 146422019b17SEric Dumazet /** 146522019b17SEric Dumazet * skb_copy_bits - copy bits from skb to kernel buffer 146622019b17SEric Dumazet * @skb: source skb 146722019b17SEric Dumazet * @offset: offset in source 146822019b17SEric Dumazet * @to: destination buffer 146922019b17SEric Dumazet * @len: number of bytes to copy 147022019b17SEric Dumazet * 147122019b17SEric Dumazet * Copy the specified number of bytes from the source skb to the 147222019b17SEric Dumazet * destination buffer. 147322019b17SEric Dumazet * 147422019b17SEric Dumazet * CAUTION ! : 147522019b17SEric Dumazet * If its prototype is ever changed, 147622019b17SEric Dumazet * check arch/{*}/net/{*}.S files, 147722019b17SEric Dumazet * since it is called from BPF assembly code. 147822019b17SEric Dumazet */ 14791da177e4SLinus Torvalds int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 14801da177e4SLinus Torvalds { 14811a028e50SDavid S. Miller int start = skb_headlen(skb); 1482fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 1483fbb398a8SDavid S. Miller int i, copy; 14841da177e4SLinus Torvalds 14851da177e4SLinus Torvalds if (offset > (int)skb->len - len) 14861da177e4SLinus Torvalds goto fault; 14871da177e4SLinus Torvalds 14881da177e4SLinus Torvalds /* Copy header. */ 14891a028e50SDavid S. Miller if ((copy = start - offset) > 0) { 14901da177e4SLinus Torvalds if (copy > len) 14911da177e4SLinus Torvalds copy = len; 1492d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, offset, to, copy); 14931da177e4SLinus Torvalds if ((len -= copy) == 0) 14941da177e4SLinus Torvalds return 0; 14951da177e4SLinus Torvalds offset += copy; 14961da177e4SLinus Torvalds to += copy; 14971da177e4SLinus Torvalds } 14981da177e4SLinus Torvalds 14991da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 15001a028e50SDavid S. Miller int end; 150151c56b00SEric Dumazet skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 15021da177e4SLinus Torvalds 1503547b792cSIlpo Järvinen WARN_ON(start > offset + len); 15041a028e50SDavid S. 
Miller 150551c56b00SEric Dumazet end = start + skb_frag_size(f); 15061da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 15071da177e4SLinus Torvalds u8 *vaddr; 15081da177e4SLinus Torvalds 15091da177e4SLinus Torvalds if (copy > len) 15101da177e4SLinus Torvalds copy = len; 15111da177e4SLinus Torvalds 151251c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(f)); 15131da177e4SLinus Torvalds memcpy(to, 151451c56b00SEric Dumazet vaddr + f->page_offset + offset - start, 151551c56b00SEric Dumazet copy); 151651c56b00SEric Dumazet kunmap_atomic(vaddr); 15171da177e4SLinus Torvalds 15181da177e4SLinus Torvalds if ((len -= copy) == 0) 15191da177e4SLinus Torvalds return 0; 15201da177e4SLinus Torvalds offset += copy; 15211da177e4SLinus Torvalds to += copy; 15221da177e4SLinus Torvalds } 15231a028e50SDavid S. Miller start = end; 15241da177e4SLinus Torvalds } 15251da177e4SLinus Torvalds 1526fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 15271a028e50SDavid S. Miller int end; 15281da177e4SLinus Torvalds 1529547b792cSIlpo Järvinen WARN_ON(start > offset + len); 15301a028e50SDavid S. Miller 1531fbb398a8SDavid S. Miller end = start + frag_iter->len; 15321da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 15331da177e4SLinus Torvalds if (copy > len) 15341da177e4SLinus Torvalds copy = len; 1535fbb398a8SDavid S. Miller if (skb_copy_bits(frag_iter, offset - start, to, copy)) 15361da177e4SLinus Torvalds goto fault; 15371da177e4SLinus Torvalds if ((len -= copy) == 0) 15381da177e4SLinus Torvalds return 0; 15391da177e4SLinus Torvalds offset += copy; 15401da177e4SLinus Torvalds to += copy; 15411da177e4SLinus Torvalds } 15421a028e50SDavid S. Miller start = end; 15431da177e4SLinus Torvalds } 1544a6686f2fSShirley Ma 15451da177e4SLinus Torvalds if (!len) 15461da177e4SLinus Torvalds return 0; 15471da177e4SLinus Torvalds 15481da177e4SLinus Torvalds fault: 15491da177e4SLinus Torvalds return -EFAULT; 15501da177e4SLinus Torvalds } 1551b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_bits); 15521da177e4SLinus Torvalds 15539c55e01cSJens Axboe /* 15549c55e01cSJens Axboe * Callback from splice_to_pipe(), if we need to release some pages 15559c55e01cSJens Axboe * at the end of the spd in case we error'ed out in filling the pipe. 15569c55e01cSJens Axboe */ 15579c55e01cSJens Axboe static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 15589c55e01cSJens Axboe { 15598b9d3728SJarek Poplawski put_page(spd->pages[i]); 15608b9d3728SJarek Poplawski } 15619c55e01cSJens Axboe 1562a108d5f3SDavid S. 
Miller static struct page *linear_to_page(struct page *page, unsigned int *len, 15634fb66994SJarek Poplawski unsigned int *offset, 15647a67e56fSJarek Poplawski struct sk_buff *skb, struct sock *sk) 15658b9d3728SJarek Poplawski { 15664fb66994SJarek Poplawski struct page *p = sk->sk_sndmsg_page; 15674fb66994SJarek Poplawski unsigned int off; 15688b9d3728SJarek Poplawski 15694fb66994SJarek Poplawski if (!p) { 15704fb66994SJarek Poplawski new_page: 15714fb66994SJarek Poplawski p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0); 15728b9d3728SJarek Poplawski if (!p) 15738b9d3728SJarek Poplawski return NULL; 15744fb66994SJarek Poplawski 15754fb66994SJarek Poplawski off = sk->sk_sndmsg_off = 0; 15764fb66994SJarek Poplawski /* hold one ref to this page until it's full */ 15774fb66994SJarek Poplawski } else { 15784fb66994SJarek Poplawski unsigned int mlen; 15794fb66994SJarek Poplawski 1580e66e9a31SEric Dumazet /* If we are the only user of the page, we can reset offset */ 1581e66e9a31SEric Dumazet if (page_count(p) == 1) 1582e66e9a31SEric Dumazet sk->sk_sndmsg_off = 0; 15834fb66994SJarek Poplawski off = sk->sk_sndmsg_off; 15844fb66994SJarek Poplawski mlen = PAGE_SIZE - off; 15854fb66994SJarek Poplawski if (mlen < 64 && mlen < *len) { 15864fb66994SJarek Poplawski put_page(p); 15874fb66994SJarek Poplawski goto new_page; 15884fb66994SJarek Poplawski } 15894fb66994SJarek Poplawski 15904fb66994SJarek Poplawski *len = min_t(unsigned int, *len, mlen); 15914fb66994SJarek Poplawski } 15924fb66994SJarek Poplawski 15934fb66994SJarek Poplawski memcpy(page_address(p) + off, page_address(page) + *offset, *len); 15944fb66994SJarek Poplawski sk->sk_sndmsg_off += *len; 15954fb66994SJarek Poplawski *offset = off; 15968b9d3728SJarek Poplawski 15978b9d3728SJarek Poplawski return p; 15989c55e01cSJens Axboe } 15999c55e01cSJens Axboe 160041c73a0dSEric Dumazet static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 160141c73a0dSEric Dumazet struct page *page, 160241c73a0dSEric Dumazet unsigned int offset) 160341c73a0dSEric Dumazet { 160441c73a0dSEric Dumazet return spd->nr_pages && 160541c73a0dSEric Dumazet spd->pages[spd->nr_pages - 1] == page && 160641c73a0dSEric Dumazet (spd->partial[spd->nr_pages - 1].offset + 160741c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len == offset); 160841c73a0dSEric Dumazet } 160941c73a0dSEric Dumazet 16109c55e01cSJens Axboe /* 16119c55e01cSJens Axboe * Fill page/offset/length into spd, if it can hold more pages. 16129c55e01cSJens Axboe */ 1613a108d5f3SDavid S. Miller static bool spd_fill_page(struct splice_pipe_desc *spd, 161435f3d14dSJens Axboe struct pipe_inode_info *pipe, struct page *page, 16154fb66994SJarek Poplawski unsigned int *len, unsigned int offset, 1616d7ccf7c0SEric Dumazet struct sk_buff *skb, bool linear, 16177a67e56fSJarek Poplawski struct sock *sk) 16189c55e01cSJens Axboe { 161941c73a0dSEric Dumazet if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 1620a108d5f3SDavid S. Miller return true; 16219c55e01cSJens Axboe 16228b9d3728SJarek Poplawski if (linear) { 16237a67e56fSJarek Poplawski page = linear_to_page(page, len, &offset, skb, sk); 16248b9d3728SJarek Poplawski if (!page) 1625a108d5f3SDavid S. Miller return true; 162641c73a0dSEric Dumazet } 162741c73a0dSEric Dumazet if (spd_can_coalesce(spd, page, offset)) { 162841c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len += *len; 1629a108d5f3SDavid S. 
Miller return false; 163041c73a0dSEric Dumazet } 16318b9d3728SJarek Poplawski get_page(page); 16329c55e01cSJens Axboe spd->pages[spd->nr_pages] = page; 16334fb66994SJarek Poplawski spd->partial[spd->nr_pages].len = *len; 16349c55e01cSJens Axboe spd->partial[spd->nr_pages].offset = offset; 16359c55e01cSJens Axboe spd->nr_pages++; 16368b9d3728SJarek Poplawski 1637a108d5f3SDavid S. Miller return false; 16389c55e01cSJens Axboe } 16399c55e01cSJens Axboe 16402870c43dSOctavian Purdila static inline void __segment_seek(struct page **page, unsigned int *poff, 16412870c43dSOctavian Purdila unsigned int *plen, unsigned int off) 16422870c43dSOctavian Purdila { 1643ce3dd395SJarek Poplawski unsigned long n; 1644ce3dd395SJarek Poplawski 16452870c43dSOctavian Purdila *poff += off; 1646ce3dd395SJarek Poplawski n = *poff / PAGE_SIZE; 1647ce3dd395SJarek Poplawski if (n) 1648ce3dd395SJarek Poplawski *page = nth_page(*page, n); 1649ce3dd395SJarek Poplawski 16502870c43dSOctavian Purdila *poff = *poff % PAGE_SIZE; 16512870c43dSOctavian Purdila *plen -= off; 16522870c43dSOctavian Purdila } 16532870c43dSOctavian Purdila 1654a108d5f3SDavid S. Miller static bool __splice_segment(struct page *page, unsigned int poff, 16552870c43dSOctavian Purdila unsigned int plen, unsigned int *off, 16562870c43dSOctavian Purdila unsigned int *len, struct sk_buff *skb, 1657d7ccf7c0SEric Dumazet struct splice_pipe_desc *spd, bool linear, 165835f3d14dSJens Axboe struct sock *sk, 165935f3d14dSJens Axboe struct pipe_inode_info *pipe) 16609c55e01cSJens Axboe { 16612870c43dSOctavian Purdila if (!*len) 1662a108d5f3SDavid S. Miller return true; 16639c55e01cSJens Axboe 16642870c43dSOctavian Purdila /* skip this segment if already processed */ 16652870c43dSOctavian Purdila if (*off >= plen) { 16662870c43dSOctavian Purdila *off -= plen; 1667a108d5f3SDavid S. Miller return false; 16682870c43dSOctavian Purdila } 16692870c43dSOctavian Purdila 16702870c43dSOctavian Purdila /* ignore any bits we already processed */ 16712870c43dSOctavian Purdila if (*off) { 16722870c43dSOctavian Purdila __segment_seek(&page, &poff, &plen, *off); 16732870c43dSOctavian Purdila *off = 0; 16742870c43dSOctavian Purdila } 16752870c43dSOctavian Purdila 16762870c43dSOctavian Purdila do { 16772870c43dSOctavian Purdila unsigned int flen = min(*len, plen); 16782870c43dSOctavian Purdila 16792870c43dSOctavian Purdila /* the linear region may spread across several pages */ 16802870c43dSOctavian Purdila flen = min_t(unsigned int, flen, PAGE_SIZE - poff); 16812870c43dSOctavian Purdila 168235f3d14dSJens Axboe if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk)) 1683a108d5f3SDavid S. Miller return true; 16842870c43dSOctavian Purdila 16852870c43dSOctavian Purdila __segment_seek(&page, &poff, &plen, flen); 16862870c43dSOctavian Purdila *len -= flen; 16872870c43dSOctavian Purdila 16882870c43dSOctavian Purdila } while (*len && plen); 16892870c43dSOctavian Purdila 1690a108d5f3SDavid S. Miller return false; 1691db43a282SOctavian Purdila } 16929c55e01cSJens Axboe 16939c55e01cSJens Axboe /* 1694a108d5f3SDavid S. Miller * Map linear and fragment data from the skb to spd. It reports true if the 16952870c43dSOctavian Purdila * pipe is full or if we already spliced the requested length. 16969c55e01cSJens Axboe */ 1697a108d5f3SDavid S. 
Miller static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 169835f3d14dSJens Axboe unsigned int *offset, unsigned int *len, 169935f3d14dSJens Axboe struct splice_pipe_desc *spd, struct sock *sk) 17002870c43dSOctavian Purdila { 17012870c43dSOctavian Purdila int seg; 17022996d31fSAlexander Duyck bool head_is_locked = !skb->head_frag || skb_cloned(skb); 17039c55e01cSJens Axboe 17041d0c0b32SEric Dumazet /* map the linear part : 17052996d31fSAlexander Duyck * If skb->head_frag is set, this 'linear' part is backed by a 17062996d31fSAlexander Duyck * fragment, and if the head is not shared with any clones then 17072996d31fSAlexander Duyck * we can avoid a copy since we own the head portion of this page. 17089c55e01cSJens Axboe */ 17092870c43dSOctavian Purdila if (__splice_segment(virt_to_page(skb->data), 17102870c43dSOctavian Purdila (unsigned long) skb->data & (PAGE_SIZE - 1), 17112870c43dSOctavian Purdila skb_headlen(skb), 17121d0c0b32SEric Dumazet offset, len, skb, spd, 17132996d31fSAlexander Duyck head_is_locked, 17141d0c0b32SEric Dumazet sk, pipe)) 1715a108d5f3SDavid S. Miller return true; 17169c55e01cSJens Axboe 17179c55e01cSJens Axboe /* 17189c55e01cSJens Axboe * then map the fragments 17199c55e01cSJens Axboe */ 17209c55e01cSJens Axboe for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 17219c55e01cSJens Axboe const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 17229c55e01cSJens Axboe 1723ea2ab693SIan Campbell if (__splice_segment(skb_frag_page(f), 17249e903e08SEric Dumazet f->page_offset, skb_frag_size(f), 1725d7ccf7c0SEric Dumazet offset, len, skb, spd, false, sk, pipe)) 1726a108d5f3SDavid S. Miller return true; 17279c55e01cSJens Axboe } 17289c55e01cSJens Axboe 1729a108d5f3SDavid S. Miller return false; 17309c55e01cSJens Axboe } 17319c55e01cSJens Axboe 17329c55e01cSJens Axboe /* 17339c55e01cSJens Axboe * Map data from the skb to a pipe. Should handle both the linear part, 17349c55e01cSJens Axboe * the fragments, and the frag list. It does NOT handle frag lists within 17359c55e01cSJens Axboe * the frag list, if such a thing exists. We'd probably need to recurse to 17369c55e01cSJens Axboe * handle that cleanly. 17379c55e01cSJens Axboe */ 17388b9d3728SJarek Poplawski int skb_splice_bits(struct sk_buff *skb, unsigned int offset, 17399c55e01cSJens Axboe struct pipe_inode_info *pipe, unsigned int tlen, 17409c55e01cSJens Axboe unsigned int flags) 17419c55e01cSJens Axboe { 174241c73a0dSEric Dumazet struct partial_page partial[MAX_SKB_FRAGS]; 174341c73a0dSEric Dumazet struct page *pages[MAX_SKB_FRAGS]; 17449c55e01cSJens Axboe struct splice_pipe_desc spd = { 17459c55e01cSJens Axboe .pages = pages, 17469c55e01cSJens Axboe .partial = partial, 17479c55e01cSJens Axboe .flags = flags, 17489c55e01cSJens Axboe .ops = &sock_pipe_buf_ops, 17499c55e01cSJens Axboe .spd_release = sock_spd_release, 17509c55e01cSJens Axboe }; 1751fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 17527a67e56fSJarek Poplawski struct sock *sk = skb->sk; 175335f3d14dSJens Axboe int ret = 0; 175435f3d14dSJens Axboe 17559c55e01cSJens Axboe /* 17569c55e01cSJens Axboe * __skb_splice_bits() only fails if the output has no room left, 17579c55e01cSJens Axboe * so no point in going over the frag_list for the error case. 
17589c55e01cSJens Axboe */ 175935f3d14dSJens Axboe if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) 17609c55e01cSJens Axboe goto done; 17619c55e01cSJens Axboe else if (!tlen) 17629c55e01cSJens Axboe goto done; 17639c55e01cSJens Axboe 17649c55e01cSJens Axboe /* 17659c55e01cSJens Axboe * now see if we have a frag_list to map 17669c55e01cSJens Axboe */ 1767fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 1768fbb398a8SDavid S. Miller if (!tlen) 17699c55e01cSJens Axboe break; 177035f3d14dSJens Axboe if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk)) 1771fbb398a8SDavid S. Miller break; 17729c55e01cSJens Axboe } 17739c55e01cSJens Axboe 17749c55e01cSJens Axboe done: 17759c55e01cSJens Axboe if (spd.nr_pages) { 17769c55e01cSJens Axboe /* 17779c55e01cSJens Axboe * Drop the socket lock, otherwise we have reverse 17789c55e01cSJens Axboe * locking dependencies between sk_lock and i_mutex 17799c55e01cSJens Axboe * here as compared to sendfile(). We enter here 17809c55e01cSJens Axboe * with the socket lock held, and splice_to_pipe() will 17819c55e01cSJens Axboe * grab the pipe inode lock. For sendfile() emulation, 17829c55e01cSJens Axboe * we call into ->sendpage() with the i_mutex lock held 17839c55e01cSJens Axboe * and networking will grab the socket lock. 17849c55e01cSJens Axboe */ 1785293ad604SOctavian Purdila release_sock(sk); 17869c55e01cSJens Axboe ret = splice_to_pipe(pipe, &spd); 1787293ad604SOctavian Purdila lock_sock(sk); 17889c55e01cSJens Axboe } 17899c55e01cSJens Axboe 179035f3d14dSJens Axboe return ret; 17919c55e01cSJens Axboe } 17929c55e01cSJens Axboe 1793357b40a1SHerbert Xu /** 1794357b40a1SHerbert Xu * skb_store_bits - store bits from kernel buffer to skb 1795357b40a1SHerbert Xu * @skb: destination buffer 1796357b40a1SHerbert Xu * @offset: offset in destination 1797357b40a1SHerbert Xu * @from: source buffer 1798357b40a1SHerbert Xu * @len: number of bytes to copy 1799357b40a1SHerbert Xu * 1800357b40a1SHerbert Xu * Copy the specified number of bytes from the source buffer to the 1801357b40a1SHerbert Xu * destination skb. This function handles all the messy bits of 1802357b40a1SHerbert Xu * traversing fragment lists and such. 1803357b40a1SHerbert Xu */ 1804357b40a1SHerbert Xu 18050c6fcc8aSStephen Hemminger int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 1806357b40a1SHerbert Xu { 18071a028e50SDavid S. Miller int start = skb_headlen(skb); 1808fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 1809fbb398a8SDavid S. Miller int i, copy; 1810357b40a1SHerbert Xu 1811357b40a1SHerbert Xu if (offset > (int)skb->len - len) 1812357b40a1SHerbert Xu goto fault; 1813357b40a1SHerbert Xu 18141a028e50SDavid S. Miller if ((copy = start - offset) > 0) { 1815357b40a1SHerbert Xu if (copy > len) 1816357b40a1SHerbert Xu copy = len; 181727d7ff46SArnaldo Carvalho de Melo skb_copy_to_linear_data_offset(skb, offset, from, copy); 1818357b40a1SHerbert Xu if ((len -= copy) == 0) 1819357b40a1SHerbert Xu return 0; 1820357b40a1SHerbert Xu offset += copy; 1821357b40a1SHerbert Xu from += copy; 1822357b40a1SHerbert Xu } 1823357b40a1SHerbert Xu 1824357b40a1SHerbert Xu for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1825357b40a1SHerbert Xu skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 18261a028e50SDavid S. Miller int end; 1827357b40a1SHerbert Xu 1828547b792cSIlpo Järvinen WARN_ON(start > offset + len); 18291a028e50SDavid S. 
Miller 18309e903e08SEric Dumazet end = start + skb_frag_size(frag); 1831357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 1832357b40a1SHerbert Xu u8 *vaddr; 1833357b40a1SHerbert Xu 1834357b40a1SHerbert Xu if (copy > len) 1835357b40a1SHerbert Xu copy = len; 1836357b40a1SHerbert Xu 183751c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(frag)); 18381a028e50SDavid S. Miller memcpy(vaddr + frag->page_offset + offset - start, 18391a028e50SDavid S. Miller from, copy); 184051c56b00SEric Dumazet kunmap_atomic(vaddr); 1841357b40a1SHerbert Xu 1842357b40a1SHerbert Xu if ((len -= copy) == 0) 1843357b40a1SHerbert Xu return 0; 1844357b40a1SHerbert Xu offset += copy; 1845357b40a1SHerbert Xu from += copy; 1846357b40a1SHerbert Xu } 18471a028e50SDavid S. Miller start = end; 1848357b40a1SHerbert Xu } 1849357b40a1SHerbert Xu 1850fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 18511a028e50SDavid S. Miller int end; 1852357b40a1SHerbert Xu 1853547b792cSIlpo Järvinen WARN_ON(start > offset + len); 18541a028e50SDavid S. Miller 1855fbb398a8SDavid S. Miller end = start + frag_iter->len; 1856357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 1857357b40a1SHerbert Xu if (copy > len) 1858357b40a1SHerbert Xu copy = len; 1859fbb398a8SDavid S. Miller if (skb_store_bits(frag_iter, offset - start, 18601a028e50SDavid S. Miller from, copy)) 1861357b40a1SHerbert Xu goto fault; 1862357b40a1SHerbert Xu if ((len -= copy) == 0) 1863357b40a1SHerbert Xu return 0; 1864357b40a1SHerbert Xu offset += copy; 1865357b40a1SHerbert Xu from += copy; 1866357b40a1SHerbert Xu } 18671a028e50SDavid S. Miller start = end; 1868357b40a1SHerbert Xu } 1869357b40a1SHerbert Xu if (!len) 1870357b40a1SHerbert Xu return 0; 1871357b40a1SHerbert Xu 1872357b40a1SHerbert Xu fault: 1873357b40a1SHerbert Xu return -EFAULT; 1874357b40a1SHerbert Xu } 1875357b40a1SHerbert Xu EXPORT_SYMBOL(skb_store_bits); 1876357b40a1SHerbert Xu 18771da177e4SLinus Torvalds /* Checksum skb data. */ 18781da177e4SLinus Torvalds 18792bbbc868SAl Viro __wsum skb_checksum(const struct sk_buff *skb, int offset, 18802bbbc868SAl Viro int len, __wsum csum) 18811da177e4SLinus Torvalds { 18821a028e50SDavid S. Miller int start = skb_headlen(skb); 18831a028e50SDavid S. Miller int i, copy = start - offset; 1884fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 18851da177e4SLinus Torvalds int pos = 0; 18861da177e4SLinus Torvalds 18871da177e4SLinus Torvalds /* Checksum header. */ 18881da177e4SLinus Torvalds if (copy > 0) { 18891da177e4SLinus Torvalds if (copy > len) 18901da177e4SLinus Torvalds copy = len; 18911da177e4SLinus Torvalds csum = csum_partial(skb->data + offset, copy, csum); 18921da177e4SLinus Torvalds if ((len -= copy) == 0) 18931da177e4SLinus Torvalds return csum; 18941da177e4SLinus Torvalds offset += copy; 18951da177e4SLinus Torvalds pos = copy; 18961da177e4SLinus Torvalds } 18971da177e4SLinus Torvalds 18981da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 18991a028e50SDavid S. Miller int end; 190051c56b00SEric Dumazet skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 19011da177e4SLinus Torvalds 1902547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19031a028e50SDavid S. 
Miller 190451c56b00SEric Dumazet end = start + skb_frag_size(frag); 19051da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 190644bb9363SAl Viro __wsum csum2; 19071da177e4SLinus Torvalds u8 *vaddr; 19081da177e4SLinus Torvalds 19091da177e4SLinus Torvalds if (copy > len) 19101da177e4SLinus Torvalds copy = len; 191151c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(frag)); 19121a028e50SDavid S. Miller csum2 = csum_partial(vaddr + frag->page_offset + 19131a028e50SDavid S. Miller offset - start, copy, 0); 191451c56b00SEric Dumazet kunmap_atomic(vaddr); 19151da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 19161da177e4SLinus Torvalds if (!(len -= copy)) 19171da177e4SLinus Torvalds return csum; 19181da177e4SLinus Torvalds offset += copy; 19191da177e4SLinus Torvalds pos += copy; 19201da177e4SLinus Torvalds } 19211a028e50SDavid S. Miller start = end; 19221da177e4SLinus Torvalds } 19231da177e4SLinus Torvalds 1924fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 19251a028e50SDavid S. Miller int end; 19261da177e4SLinus Torvalds 1927547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19281a028e50SDavid S. Miller 1929fbb398a8SDavid S. Miller end = start + frag_iter->len; 19301da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 19315f92a738SAl Viro __wsum csum2; 19321da177e4SLinus Torvalds if (copy > len) 19331da177e4SLinus Torvalds copy = len; 1934fbb398a8SDavid S. Miller csum2 = skb_checksum(frag_iter, offset - start, 19351a028e50SDavid S. Miller copy, 0); 19361da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 19371da177e4SLinus Torvalds if ((len -= copy) == 0) 19381da177e4SLinus Torvalds return csum; 19391da177e4SLinus Torvalds offset += copy; 19401da177e4SLinus Torvalds pos += copy; 19411da177e4SLinus Torvalds } 19421a028e50SDavid S. Miller start = end; 19431da177e4SLinus Torvalds } 194409a62660SKris Katterjohn BUG_ON(len); 19451da177e4SLinus Torvalds 19461da177e4SLinus Torvalds return csum; 19471da177e4SLinus Torvalds } 1948b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_checksum); 19491da177e4SLinus Torvalds 19501da177e4SLinus Torvalds /* Both of above in one bottle. */ 19511da177e4SLinus Torvalds 195281d77662SAl Viro __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 195381d77662SAl Viro u8 *to, int len, __wsum csum) 19541da177e4SLinus Torvalds { 19551a028e50SDavid S. Miller int start = skb_headlen(skb); 19561a028e50SDavid S. Miller int i, copy = start - offset; 1957fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 19581da177e4SLinus Torvalds int pos = 0; 19591da177e4SLinus Torvalds 19601da177e4SLinus Torvalds /* Copy header. */ 19611da177e4SLinus Torvalds if (copy > 0) { 19621da177e4SLinus Torvalds if (copy > len) 19631da177e4SLinus Torvalds copy = len; 19641da177e4SLinus Torvalds csum = csum_partial_copy_nocheck(skb->data + offset, to, 19651da177e4SLinus Torvalds copy, csum); 19661da177e4SLinus Torvalds if ((len -= copy) == 0) 19671da177e4SLinus Torvalds return csum; 19681da177e4SLinus Torvalds offset += copy; 19691da177e4SLinus Torvalds to += copy; 19701da177e4SLinus Torvalds pos = copy; 19711da177e4SLinus Torvalds } 19721da177e4SLinus Torvalds 19731da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 19741a028e50SDavid S. Miller int end; 19751da177e4SLinus Torvalds 1976547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19771a028e50SDavid S. 
Miller
19789e903e08SEric Dumazet 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
19791da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
19805084205fSAl Viro 			__wsum csum2;
19811da177e4SLinus Torvalds 			u8 *vaddr;
19821da177e4SLinus Torvalds 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
19831da177e4SLinus Torvalds 
19841da177e4SLinus Torvalds 			if (copy > len)
19851da177e4SLinus Torvalds 				copy = len;
198651c56b00SEric Dumazet 			vaddr = kmap_atomic(skb_frag_page(frag));
19871da177e4SLinus Torvalds 			csum2 = csum_partial_copy_nocheck(vaddr +
19881a028e50SDavid S. Miller 							  frag->page_offset +
19891a028e50SDavid S. Miller 							  offset - start, to,
19901a028e50SDavid S. Miller 							  copy, 0);
199151c56b00SEric Dumazet 			kunmap_atomic(vaddr);
19921da177e4SLinus Torvalds 			csum = csum_block_add(csum, csum2, pos);
19931da177e4SLinus Torvalds 			if (!(len -= copy))
19941da177e4SLinus Torvalds 				return csum;
19951da177e4SLinus Torvalds 			offset += copy;
19961da177e4SLinus Torvalds 			to     += copy;
19971da177e4SLinus Torvalds 			pos    += copy;
19981da177e4SLinus Torvalds 		}
19991a028e50SDavid S. Miller 		start = end;
20001da177e4SLinus Torvalds 	}
20011da177e4SLinus Torvalds 
2002fbb398a8SDavid S. Miller 	skb_walk_frags(skb, frag_iter) {
200381d77662SAl Viro 		__wsum csum2;
20041a028e50SDavid S. Miller 		int end;
20051da177e4SLinus Torvalds 
2006547b792cSIlpo Järvinen 		WARN_ON(start > offset + len);
20071a028e50SDavid S. Miller 
2008fbb398a8SDavid S. Miller 		end = start + frag_iter->len;
20091da177e4SLinus Torvalds 		if ((copy = end - offset) > 0) {
20101da177e4SLinus Torvalds 			if (copy > len)
20111da177e4SLinus Torvalds 				copy = len;
2012fbb398a8SDavid S. Miller 			csum2 = skb_copy_and_csum_bits(frag_iter,
20131a028e50SDavid S. Miller 						       offset - start,
20141da177e4SLinus Torvalds 						       to, copy, 0);
20151da177e4SLinus Torvalds 			csum = csum_block_add(csum, csum2, pos);
20161da177e4SLinus Torvalds 			if ((len -= copy) == 0)
20171da177e4SLinus Torvalds 				return csum;
20181da177e4SLinus Torvalds 			offset += copy;
20191da177e4SLinus Torvalds 			to     += copy;
20201da177e4SLinus Torvalds 			pos    += copy;
20211da177e4SLinus Torvalds 		}
20221a028e50SDavid S. Miller 		start = end;
20231da177e4SLinus Torvalds 	}
202409a62660SKris Katterjohn 	BUG_ON(len);
20251da177e4SLinus Torvalds 	return csum;
20261da177e4SLinus Torvalds }
2027b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_bits);
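/* Illustrative sketch, not part of the original file: how the byte-access
 * helpers above compose for a caller that must not care whether the bytes
 * live in the linear area, the page frags or the frag list. The offset,
 * field width and function name are invented for the example.
 */
#if 0	/* example only */
static int example_peek_and_sum(const struct sk_buff *skb, __wsum *sum)
{
	__be32 field;

	/* returns -EFAULT if the requested range falls outside the buffer */
	if (skb_copy_bits(skb, 0, &field, sizeof(field)))
		return -EFAULT;

	/* checksum the whole payload, starting from a zero seed */
	*sum = skb_checksum(skb, 0, skb->len, 0);
	return 0;
}
#endif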
20281da177e4SLinus Torvalds 
20291da177e4SLinus Torvalds void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
20301da177e4SLinus Torvalds {
2031d3bc23e7SAl Viro 	__wsum csum;
20321da177e4SLinus Torvalds 	long csstart;
20331da177e4SLinus Torvalds 
203484fa7933SPatrick McHardy 	if (skb->ip_summed == CHECKSUM_PARTIAL)
203555508d60SMichał Mirosław 		csstart = skb_checksum_start_offset(skb);
20361da177e4SLinus Torvalds 	else
20371da177e4SLinus Torvalds 		csstart = skb_headlen(skb);
20381da177e4SLinus Torvalds 
203909a62660SKris Katterjohn 	BUG_ON(csstart > skb_headlen(skb));
20401da177e4SLinus Torvalds 
2041d626f62bSArnaldo Carvalho de Melo 	skb_copy_from_linear_data(skb, to, csstart);
20421da177e4SLinus Torvalds 
20431da177e4SLinus Torvalds 	csum = 0;
20441da177e4SLinus Torvalds 	if (csstart != skb->len)
20451da177e4SLinus Torvalds 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
20461da177e4SLinus Torvalds 					      skb->len - csstart, 0);
20471da177e4SLinus Torvalds 
204884fa7933SPatrick McHardy 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2049ff1dcadbSAl Viro 		long csstuff = csstart + skb->csum_offset;
20501da177e4SLinus Torvalds 
2051d3bc23e7SAl Viro 		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
20521da177e4SLinus Torvalds 	}
20531da177e4SLinus Torvalds }
2054b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_dev);
20551da177e4SLinus Torvalds 
20561da177e4SLinus Torvalds /**
20571da177e4SLinus Torvalds  *	skb_dequeue - remove from the head of the queue
20581da177e4SLinus Torvalds  *	@list: list to dequeue from
20591da177e4SLinus Torvalds  *
20601da177e4SLinus Torvalds  *	Remove the head of the list. The list lock is taken so the function
20611da177e4SLinus Torvalds  *	may be used safely with other locking list functions. The head item is
20621da177e4SLinus Torvalds  *	returned or %NULL if the list is empty.
20631da177e4SLinus Torvalds  */
20641da177e4SLinus Torvalds 
20651da177e4SLinus Torvalds struct sk_buff *skb_dequeue(struct sk_buff_head *list)
20661da177e4SLinus Torvalds {
20671da177e4SLinus Torvalds 	unsigned long flags;
20681da177e4SLinus Torvalds 	struct sk_buff *result;
20691da177e4SLinus Torvalds 
20701da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
20711da177e4SLinus Torvalds 	result = __skb_dequeue(list);
20721da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
20731da177e4SLinus Torvalds 	return result;
20741da177e4SLinus Torvalds }
2075b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue);
20761da177e4SLinus Torvalds 
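/* Illustrative sketch, not part of the original file: the usual pairing of
 * skb_queue_tail() (defined further below) with skb_dequeue() above. Both
 * sides take the queue spinlock with IRQs disabled, so producer and
 * consumer may run in different contexts. Function names are invented.
 */
#if 0	/* example only */
static void example_rx(struct sk_buff_head *q, struct sk_buff *skb)
{
	skb_queue_tail(q, skb);			/* producer side */
}

static void example_drain(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(q)) != NULL)	/* consumer side */
		kfree_skb(skb);
}
#endif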
20771da177e4SLinus Torvalds /**
20781da177e4SLinus Torvalds  *	skb_dequeue_tail - remove from the tail of the queue
20791da177e4SLinus Torvalds  *	@list: list to dequeue from
20801da177e4SLinus Torvalds  *
20811da177e4SLinus Torvalds  *	Remove the tail of the list. The list lock is taken so the function
20821da177e4SLinus Torvalds  *	may be used safely with other locking list functions. The tail item is
20831da177e4SLinus Torvalds  *	returned or %NULL if the list is empty.
20841da177e4SLinus Torvalds  */
20851da177e4SLinus Torvalds struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
20861da177e4SLinus Torvalds {
20871da177e4SLinus Torvalds 	unsigned long flags;
20881da177e4SLinus Torvalds 	struct sk_buff *result;
20891da177e4SLinus Torvalds 
20901da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
20911da177e4SLinus Torvalds 	result = __skb_dequeue_tail(list);
20921da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
20931da177e4SLinus Torvalds 	return result;
20941da177e4SLinus Torvalds }
2095b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue_tail);
20961da177e4SLinus Torvalds 
20971da177e4SLinus Torvalds /**
20981da177e4SLinus Torvalds  *	skb_queue_purge - empty a list
20991da177e4SLinus Torvalds  *	@list: list to empty
21001da177e4SLinus Torvalds  *
21011da177e4SLinus Torvalds  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
21021da177e4SLinus Torvalds  *	the list and one reference dropped. This function takes the list
21031da177e4SLinus Torvalds  *	lock and is atomic with respect to other list locking functions.
21041da177e4SLinus Torvalds  */
21051da177e4SLinus Torvalds void skb_queue_purge(struct sk_buff_head *list)
21061da177e4SLinus Torvalds {
21071da177e4SLinus Torvalds 	struct sk_buff *skb;
21081da177e4SLinus Torvalds 	while ((skb = skb_dequeue(list)) != NULL)
21091da177e4SLinus Torvalds 		kfree_skb(skb);
21101da177e4SLinus Torvalds }
2111b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_purge);
21121da177e4SLinus Torvalds 
21131da177e4SLinus Torvalds /**
21141da177e4SLinus Torvalds  *	skb_queue_head - queue a buffer at the list head
21151da177e4SLinus Torvalds  *	@list: list to use
21161da177e4SLinus Torvalds  *	@newsk: buffer to queue
21171da177e4SLinus Torvalds  *
21181da177e4SLinus Torvalds  *	Queue a buffer at the start of the list. This function takes the
21191da177e4SLinus Torvalds  *	list lock and can be used safely with other locking &sk_buff functions.
21211da177e4SLinus Torvalds  *
21221da177e4SLinus Torvalds  *	A buffer cannot be placed on two lists at the same time.
21231da177e4SLinus Torvalds  */
21241da177e4SLinus Torvalds void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
21251da177e4SLinus Torvalds {
21261da177e4SLinus Torvalds 	unsigned long flags;
21271da177e4SLinus Torvalds 
21281da177e4SLinus Torvalds 	spin_lock_irqsave(&list->lock, flags);
21291da177e4SLinus Torvalds 	__skb_queue_head(list, newsk);
21301da177e4SLinus Torvalds 	spin_unlock_irqrestore(&list->lock, flags);
21311da177e4SLinus Torvalds }
2132b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_head);
21331da177e4SLinus Torvalds 
21341da177e4SLinus Torvalds /**
21351da177e4SLinus Torvalds  *	skb_queue_tail - queue a buffer at the list tail
21361da177e4SLinus Torvalds  *	@list: list to use
21371da177e4SLinus Torvalds  *	@newsk: buffer to queue
21381da177e4SLinus Torvalds  *
21391da177e4SLinus Torvalds  *	Queue a buffer at the tail of the list. This function takes the
21401da177e4SLinus Torvalds  *	list lock and can be used safely with other locking &sk_buff functions.
21421da177e4SLinus Torvalds  *
21431da177e4SLinus Torvalds  *	A buffer cannot be placed on two lists at the same time.
21441da177e4SLinus Torvalds */ 21451da177e4SLinus Torvalds void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 21461da177e4SLinus Torvalds { 21471da177e4SLinus Torvalds unsigned long flags; 21481da177e4SLinus Torvalds 21491da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 21501da177e4SLinus Torvalds __skb_queue_tail(list, newsk); 21511da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 21521da177e4SLinus Torvalds } 2153b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_tail); 21548728b834SDavid S. Miller 21551da177e4SLinus Torvalds /** 21561da177e4SLinus Torvalds * skb_unlink - remove a buffer from a list 21571da177e4SLinus Torvalds * @skb: buffer to remove 21588728b834SDavid S. Miller * @list: list to use 21591da177e4SLinus Torvalds * 21608728b834SDavid S. Miller * Remove a packet from a list. The list locks are taken and this 21618728b834SDavid S. Miller * function is atomic with respect to other list locked calls 21621da177e4SLinus Torvalds * 21638728b834SDavid S. Miller * You must know what list the SKB is on. 21641da177e4SLinus Torvalds */ 21658728b834SDavid S. Miller void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 21661da177e4SLinus Torvalds { 21671da177e4SLinus Torvalds unsigned long flags; 21681da177e4SLinus Torvalds 21691da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 21708728b834SDavid S. Miller __skb_unlink(skb, list); 21711da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 21721da177e4SLinus Torvalds } 2173b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_unlink); 21741da177e4SLinus Torvalds 21751da177e4SLinus Torvalds /** 21761da177e4SLinus Torvalds * skb_append - append a buffer 21771da177e4SLinus Torvalds * @old: buffer to insert after 21781da177e4SLinus Torvalds * @newsk: buffer to insert 21798728b834SDavid S. Miller * @list: list to use 21801da177e4SLinus Torvalds * 21811da177e4SLinus Torvalds * Place a packet after a given packet in a list. The list locks are taken 21821da177e4SLinus Torvalds * and this function is atomic with respect to other list locked calls. 21831da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 21841da177e4SLinus Torvalds */ 21858728b834SDavid S. Miller void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 21861da177e4SLinus Torvalds { 21871da177e4SLinus Torvalds unsigned long flags; 21881da177e4SLinus Torvalds 21898728b834SDavid S. Miller spin_lock_irqsave(&list->lock, flags); 21907de6c033SGerrit Renker __skb_queue_after(list, old, newsk); 21918728b834SDavid S. Miller spin_unlock_irqrestore(&list->lock, flags); 21921da177e4SLinus Torvalds } 2193b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append); 21941da177e4SLinus Torvalds 21951da177e4SLinus Torvalds /** 21961da177e4SLinus Torvalds * skb_insert - insert a buffer 21971da177e4SLinus Torvalds * @old: buffer to insert before 21981da177e4SLinus Torvalds * @newsk: buffer to insert 21998728b834SDavid S. Miller * @list: list to use 22001da177e4SLinus Torvalds * 22018728b834SDavid S. Miller * Place a packet before a given packet in a list. The list locks are 22028728b834SDavid S. Miller * taken and this function is atomic with respect to other list locked 22038728b834SDavid S. Miller * calls. 22048728b834SDavid S. Miller * 22051da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 22061da177e4SLinus Torvalds */ 22078728b834SDavid S. 
Miller void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
22081da177e4SLinus Torvalds {
22091da177e4SLinus Torvalds 	unsigned long flags;
22101da177e4SLinus Torvalds 
22118728b834SDavid S. Miller 	spin_lock_irqsave(&list->lock, flags);
22128728b834SDavid S. Miller 	__skb_insert(newsk, old->prev, old, list);
22138728b834SDavid S. Miller 	spin_unlock_irqrestore(&list->lock, flags);
22141da177e4SLinus Torvalds }
2215b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_insert);
22161da177e4SLinus Torvalds 
22171da177e4SLinus Torvalds static inline void skb_split_inside_header(struct sk_buff *skb,
22181da177e4SLinus Torvalds 					   struct sk_buff* skb1,
22191da177e4SLinus Torvalds 					   const u32 len, const int pos)
22201da177e4SLinus Torvalds {
22211da177e4SLinus Torvalds 	int i;
22221da177e4SLinus Torvalds 
2223d626f62bSArnaldo Carvalho de Melo 	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2224d626f62bSArnaldo Carvalho de Melo 					 pos - len);
22251da177e4SLinus Torvalds 	/* And move data appendix as is. */
22261da177e4SLinus Torvalds 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
22271da177e4SLinus Torvalds 		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
22281da177e4SLinus Torvalds 
22291da177e4SLinus Torvalds 	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
22301da177e4SLinus Torvalds 	skb_shinfo(skb)->nr_frags  = 0;
22311da177e4SLinus Torvalds 	skb1->data_len = skb->data_len;
22321da177e4SLinus Torvalds 	skb1->len += skb1->data_len;
22331da177e4SLinus Torvalds 	skb->data_len = 0;
22341da177e4SLinus Torvalds 	skb->len = len;
223527a884dcSArnaldo Carvalho de Melo 	skb_set_tail_pointer(skb, len);
22361da177e4SLinus Torvalds }
22371da177e4SLinus Torvalds 
22381da177e4SLinus Torvalds static inline void skb_split_no_header(struct sk_buff *skb,
22391da177e4SLinus Torvalds 				       struct sk_buff* skb1,
22401da177e4SLinus Torvalds 				       const u32 len, int pos)
22411da177e4SLinus Torvalds {
22421da177e4SLinus Torvalds 	int i, k = 0;
22431da177e4SLinus Torvalds 	const int nfrags = skb_shinfo(skb)->nr_frags;
22441da177e4SLinus Torvalds 
22451da177e4SLinus Torvalds 	skb_shinfo(skb)->nr_frags = 0;
22461da177e4SLinus Torvalds 	skb1->len = skb1->data_len = skb->len - len;
22471da177e4SLinus Torvalds 	skb->len = len;
22481da177e4SLinus Torvalds 	skb->data_len = len - pos;
22491da177e4SLinus Torvalds 
22501da177e4SLinus Torvalds 	for (i = 0; i < nfrags; i++) {
22519e903e08SEric Dumazet 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
22521da177e4SLinus Torvalds 
22531da177e4SLinus Torvalds 		if (pos + size > len) {
22541da177e4SLinus Torvalds 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
22551da177e4SLinus Torvalds 
22561da177e4SLinus Torvalds 			if (pos < len) {
22571da177e4SLinus Torvalds 				/* Split frag.
22581da177e4SLinus Torvalds 				 * We have two variants in this case:
22591da177e4SLinus Torvalds 				 * 1. Move the whole frag to the second
22601da177e4SLinus Torvalds 				 *    part, if it is possible. F.e.
22611da177e4SLinus Torvalds 				 *    this approach is mandatory for TUX,
22621da177e4SLinus Torvalds 				 *    where splitting is expensive.
22631da177e4SLinus Torvalds 				 * 2. Split accurately. That is what we do here.
22641da177e4SLinus Torvalds 				 */
2265ea2ab693SIan Campbell 				skb_frag_ref(skb, i);
22661da177e4SLinus Torvalds 				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
22679e903e08SEric Dumazet 				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
22689e903e08SEric Dumazet 				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
22691da177e4SLinus Torvalds 				skb_shinfo(skb)->nr_frags++;
22701da177e4SLinus Torvalds 			}
22711da177e4SLinus Torvalds 			k++;
22721da177e4SLinus Torvalds 		} else
22731da177e4SLinus Torvalds 			skb_shinfo(skb)->nr_frags++;
22741da177e4SLinus Torvalds 		pos += size;
22751da177e4SLinus Torvalds 	}
22761da177e4SLinus Torvalds 	skb_shinfo(skb1)->nr_frags = k;
22771da177e4SLinus Torvalds }
22781da177e4SLinus Torvalds 
22791da177e4SLinus Torvalds /**
22801da177e4SLinus Torvalds  * skb_split - Split fragmented skb to two parts at length len.
22811da177e4SLinus Torvalds  * @skb: the buffer to split
22821da177e4SLinus Torvalds  * @skb1: the buffer to receive the second part
22831da177e4SLinus Torvalds  * @len: new length for skb
22841da177e4SLinus Torvalds  */
22851da177e4SLinus Torvalds void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
22861da177e4SLinus Torvalds {
22871da177e4SLinus Torvalds 	int pos = skb_headlen(skb);
22881da177e4SLinus Torvalds 
22891da177e4SLinus Torvalds 	if (len < pos)	/* Split line is inside header. */
22901da177e4SLinus Torvalds 		skb_split_inside_header(skb, skb1, len, pos);
22911da177e4SLinus Torvalds 	else		/* Second chunk has no header, nothing to copy. */
22921da177e4SLinus Torvalds 		skb_split_no_header(skb, skb1, len, pos);
22931da177e4SLinus Torvalds }
2294b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_split);
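/* Illustrative sketch, not part of the original file: cutting a packet in
 * two with skb_split() above, in the spirit of TCP segmentation. It assumes
 * @skb carries exactly 200 linear bytes followed by paged data, so the tail
 * moves to @skb1 without copying; sizes and the function name are invented.
 */
#if 0	/* example only */
static struct sk_buff *example_cut_tail(struct sk_buff *skb)
{
	struct sk_buff *skb1 = alloc_skb(0, GFP_ATOMIC);

	if (!skb1)
		return NULL;
	skb_split(skb, skb1, 200);	/* skb keeps bytes [0, 200) */
	return skb1;			/* skb1 holds the remainder */
}
#endif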
2322832d11c5SIlpo Järvinen */ 2323832d11c5SIlpo Järvinen int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 2324832d11c5SIlpo Järvinen { 2325832d11c5SIlpo Järvinen int from, to, merge, todo; 2326832d11c5SIlpo Järvinen struct skb_frag_struct *fragfrom, *fragto; 2327832d11c5SIlpo Järvinen 2328832d11c5SIlpo Järvinen BUG_ON(shiftlen > skb->len); 2329832d11c5SIlpo Järvinen BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ 2330832d11c5SIlpo Järvinen 2331832d11c5SIlpo Järvinen todo = shiftlen; 2332832d11c5SIlpo Järvinen from = 0; 2333832d11c5SIlpo Järvinen to = skb_shinfo(tgt)->nr_frags; 2334832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2335832d11c5SIlpo Järvinen 2336832d11c5SIlpo Järvinen /* Actual merge is delayed until the point when we know we can 2337832d11c5SIlpo Järvinen * commit all, so that we don't have to undo partial changes 2338832d11c5SIlpo Järvinen */ 2339832d11c5SIlpo Järvinen if (!to || 2340ea2ab693SIan Campbell !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 2341ea2ab693SIan Campbell fragfrom->page_offset)) { 2342832d11c5SIlpo Järvinen merge = -1; 2343832d11c5SIlpo Järvinen } else { 2344832d11c5SIlpo Järvinen merge = to - 1; 2345832d11c5SIlpo Järvinen 23469e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 2347832d11c5SIlpo Järvinen if (todo < 0) { 2348832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || 2349832d11c5SIlpo Järvinen skb_prepare_for_shift(tgt)) 2350832d11c5SIlpo Järvinen return 0; 2351832d11c5SIlpo Järvinen 23529f782db3SIlpo Järvinen /* All previous frag pointers might be stale! */ 23539f782db3SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2354832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 2355832d11c5SIlpo Järvinen 23569e903e08SEric Dumazet skb_frag_size_add(fragto, shiftlen); 23579e903e08SEric Dumazet skb_frag_size_sub(fragfrom, shiftlen); 2358832d11c5SIlpo Järvinen fragfrom->page_offset += shiftlen; 2359832d11c5SIlpo Järvinen 2360832d11c5SIlpo Järvinen goto onlymerged; 2361832d11c5SIlpo Järvinen } 2362832d11c5SIlpo Järvinen 2363832d11c5SIlpo Järvinen from++; 2364832d11c5SIlpo Järvinen } 2365832d11c5SIlpo Järvinen 2366832d11c5SIlpo Järvinen /* Skip full, not-fitting skb to avoid expensive operations */ 2367832d11c5SIlpo Järvinen if ((shiftlen == skb->len) && 2368832d11c5SIlpo Järvinen (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2369832d11c5SIlpo Järvinen return 0; 2370832d11c5SIlpo Järvinen 2371832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2372832d11c5SIlpo Järvinen return 0; 2373832d11c5SIlpo Järvinen 2374832d11c5SIlpo Järvinen while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2375832d11c5SIlpo Järvinen if (to == MAX_SKB_FRAGS) 2376832d11c5SIlpo Järvinen return 0; 2377832d11c5SIlpo Järvinen 2378832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2379832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[to]; 2380832d11c5SIlpo Järvinen 23819e903e08SEric Dumazet if (todo >= skb_frag_size(fragfrom)) { 2382832d11c5SIlpo Järvinen *fragto = *fragfrom; 23839e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 2384832d11c5SIlpo Järvinen from++; 2385832d11c5SIlpo Järvinen to++; 2386832d11c5SIlpo Järvinen 2387832d11c5SIlpo Järvinen } else { 2388ea2ab693SIan Campbell __skb_frag_ref(fragfrom); 2389832d11c5SIlpo Järvinen fragto->page = fragfrom->page; 2390832d11c5SIlpo Järvinen fragto->page_offset = fragfrom->page_offset; 23919e903e08SEric Dumazet skb_frag_size_set(fragto, todo); 2392832d11c5SIlpo Järvinen 
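/* Partial fit: tgt just took the first 'todo' bytes of this frag
 * (with its own page reference taken above). The donor frag keeps
 * only its tail, so step the offset past those bytes and shrink
 * the size to match.
 */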
2393832d11c5SIlpo Järvinen fragfrom->page_offset += todo; 23949e903e08SEric Dumazet skb_frag_size_sub(fragfrom, todo); 2395832d11c5SIlpo Järvinen todo = 0; 2396832d11c5SIlpo Järvinen 2397832d11c5SIlpo Järvinen to++; 2398832d11c5SIlpo Järvinen break; 2399832d11c5SIlpo Järvinen } 2400832d11c5SIlpo Järvinen } 2401832d11c5SIlpo Järvinen 2402832d11c5SIlpo Järvinen /* Ready to "commit" this state change to tgt */ 2403832d11c5SIlpo Järvinen skb_shinfo(tgt)->nr_frags = to; 2404832d11c5SIlpo Järvinen 2405832d11c5SIlpo Järvinen if (merge >= 0) { 2406832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[0]; 2407832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 2408832d11c5SIlpo Järvinen 24099e903e08SEric Dumazet skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 2410ea2ab693SIan Campbell __skb_frag_unref(fragfrom); 2411832d11c5SIlpo Järvinen } 2412832d11c5SIlpo Järvinen 2413832d11c5SIlpo Järvinen /* Reposition in the original skb */ 2414832d11c5SIlpo Järvinen to = 0; 2415832d11c5SIlpo Järvinen while (from < skb_shinfo(skb)->nr_frags) 2416832d11c5SIlpo Järvinen skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2417832d11c5SIlpo Järvinen skb_shinfo(skb)->nr_frags = to; 2418832d11c5SIlpo Järvinen 2419832d11c5SIlpo Järvinen BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2420832d11c5SIlpo Järvinen 2421832d11c5SIlpo Järvinen onlymerged: 2422832d11c5SIlpo Järvinen /* Most likely the tgt won't ever need its checksum anymore, skb on 2423832d11c5SIlpo Järvinen * the other hand might need it if it needs to be resent 2424832d11c5SIlpo Järvinen */ 2425832d11c5SIlpo Järvinen tgt->ip_summed = CHECKSUM_PARTIAL; 2426832d11c5SIlpo Järvinen skb->ip_summed = CHECKSUM_PARTIAL; 2427832d11c5SIlpo Järvinen 2428832d11c5SIlpo Järvinen /* Yak, is it really working this way? Some helper please? */ 2429832d11c5SIlpo Järvinen skb->len -= shiftlen; 2430832d11c5SIlpo Järvinen skb->data_len -= shiftlen; 2431832d11c5SIlpo Järvinen skb->truesize -= shiftlen; 2432832d11c5SIlpo Järvinen tgt->len += shiftlen; 2433832d11c5SIlpo Järvinen tgt->data_len += shiftlen; 2434832d11c5SIlpo Järvinen tgt->truesize += shiftlen; 2435832d11c5SIlpo Järvinen 2436832d11c5SIlpo Järvinen return shiftlen; 2437832d11c5SIlpo Järvinen } 2438832d11c5SIlpo Järvinen 2439677e90edSThomas Graf /** 2440677e90edSThomas Graf * skb_prepare_seq_read - Prepare a sequential read of skb data 2441677e90edSThomas Graf * @skb: the buffer to read 2442677e90edSThomas Graf * @from: lower offset of data to be read 2443677e90edSThomas Graf * @to: upper offset of data to be read 2444677e90edSThomas Graf * @st: state variable 2445677e90edSThomas Graf * 2446677e90edSThomas Graf * Initializes the specified state variable. Must be called before 2447677e90edSThomas Graf * invoking skb_seq_read() for the first time. 2448677e90edSThomas Graf */ 2449677e90edSThomas Graf void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 2450677e90edSThomas Graf unsigned int to, struct skb_seq_state *st) 2451677e90edSThomas Graf { 2452677e90edSThomas Graf st->lower_offset = from; 2453677e90edSThomas Graf st->upper_offset = to; 2454677e90edSThomas Graf st->root_skb = st->cur_skb = skb; 2455677e90edSThomas Graf st->frag_idx = st->stepped_offset = 0; 2456677e90edSThomas Graf st->frag_data = NULL; 2457677e90edSThomas Graf } 2458b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_prepare_seq_read); 2459677e90edSThomas Graf 2460677e90edSThomas Graf /** 2461677e90edSThomas Graf * skb_seq_read - Sequentially read skb data 2462677e90edSThomas Graf * @consumed: number of bytes consumed by the caller so far 2463677e90edSThomas Graf * @data: destination pointer for data to be returned 2464677e90edSThomas Graf * @st: state variable 2465677e90edSThomas Graf * 2466677e90edSThomas Graf * Reads a block of skb data at &consumed relative to the 2467677e90edSThomas Graf * lower offset specified to skb_prepare_seq_read(). Assigns 2468677e90edSThomas Graf * the head of the data block to &data and returns the length 2469677e90edSThomas Graf * of the block or 0 if the end of the skb data or the upper 2470677e90edSThomas Graf * offset has been reached. 2471677e90edSThomas Graf * 2472677e90edSThomas Graf * The caller is not required to consume all of the data 2473677e90edSThomas Graf * returned, i.e. &consumed is typically set to the number 2474677e90edSThomas Graf * of bytes already consumed and the next call to 2475677e90edSThomas Graf * skb_seq_read() will return the remaining part of the block. 2476677e90edSThomas Graf * 247725985edcSLucas De Marchi * Note 1: The size of each block of data returned can be arbitrary; 2478677e90edSThomas Graf * this limitation is the cost of zerocopy sequential 2479677e90edSThomas Graf * reads of potentially non-linear data. 2480677e90edSThomas Graf * 2481bc2cda1eSRandy Dunlap * Note 2: Fragment lists within fragments are not implemented 2482677e90edSThomas Graf * at the moment; state->root_skb could be replaced with 2483677e90edSThomas Graf * a stack for this purpose. 2484677e90edSThomas Graf */ 2485677e90edSThomas Graf unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 2486677e90edSThomas Graf struct skb_seq_state *st) 2487677e90edSThomas Graf { 2488677e90edSThomas Graf unsigned int block_limit, abs_offset = consumed + st->lower_offset; 2489677e90edSThomas Graf skb_frag_t *frag; 2490677e90edSThomas Graf 2491677e90edSThomas Graf if (unlikely(abs_offset >= st->upper_offset)) 2492677e90edSThomas Graf return 0; 2493677e90edSThomas Graf 2494677e90edSThomas Graf next_skb: 249595e3b24cSHerbert Xu block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 2496677e90edSThomas Graf 2497995b3379SThomas Chenault if (abs_offset < block_limit && !st->frag_data) { 249895e3b24cSHerbert Xu *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 2499677e90edSThomas Graf return block_limit - abs_offset; 2500677e90edSThomas Graf } 2501677e90edSThomas Graf 2502677e90edSThomas Graf if (st->frag_idx == 0 && !st->frag_data) 2503677e90edSThomas Graf st->stepped_offset += skb_headlen(st->cur_skb); 2504677e90edSThomas Graf 2505677e90edSThomas Graf while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2506677e90edSThomas Graf frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 25079e903e08SEric Dumazet block_limit = skb_frag_size(frag) + st->stepped_offset; 2508677e90edSThomas Graf 2509677e90edSThomas Graf if (abs_offset < block_limit) { 2510677e90edSThomas Graf if (!st->frag_data) 251151c56b00SEric Dumazet st->frag_data = kmap_atomic(skb_frag_page(frag)); 2512677e90edSThomas Graf 2513677e90edSThomas Graf *data = (u8 *) st->frag_data + frag->page_offset + 2514677e90edSThomas Graf (abs_offset - st->stepped_offset); 2515677e90edSThomas Graf 2516677e90edSThomas Graf return block_limit - abs_offset; 2517677e90edSThomas Graf } 2518677e90edSThomas Graf 2519677e90edSThomas Graf if (st->frag_data) { 252051c56b00SEric Dumazet
kunmap_atomic(st->frag_data); 2521677e90edSThomas Graf st->frag_data = NULL; 2522677e90edSThomas Graf } 2523677e90edSThomas Graf 2524677e90edSThomas Graf st->frag_idx++; 25259e903e08SEric Dumazet st->stepped_offset += skb_frag_size(frag); 2526677e90edSThomas Graf } 2527677e90edSThomas Graf 25285b5a60daSOlaf Kirch if (st->frag_data) { 252951c56b00SEric Dumazet kunmap_atomic(st->frag_data); 25305b5a60daSOlaf Kirch st->frag_data = NULL; 25315b5a60daSOlaf Kirch } 25325b5a60daSOlaf Kirch 253321dc3301SDavid S. Miller if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 2534677e90edSThomas Graf st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 253595e3b24cSHerbert Xu st->frag_idx = 0; 2536677e90edSThomas Graf goto next_skb; 253771b3346dSShyam Iyer } else if (st->cur_skb->next) { 253871b3346dSShyam Iyer st->cur_skb = st->cur_skb->next; 253971b3346dSShyam Iyer st->frag_idx = 0; 2540677e90edSThomas Graf goto next_skb; 2541677e90edSThomas Graf } 2542677e90edSThomas Graf 2543677e90edSThomas Graf return 0; 2544677e90edSThomas Graf } 2545b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_seq_read); 2546677e90edSThomas Graf 2547677e90edSThomas Graf /** 2548677e90edSThomas Graf * skb_abort_seq_read - Abort a sequential read of skb data 2549677e90edSThomas Graf * @st: state variable 2550677e90edSThomas Graf * 2551677e90edSThomas Graf * Must be called if the sequential read was abandoned, i.e. if 2552677e90edSThomas Graf * skb_seq_read() was not called repeatedly until it returned 0. 2553677e90edSThomas Graf */ 2554677e90edSThomas Graf void skb_abort_seq_read(struct skb_seq_state *st) 2555677e90edSThomas Graf { 2556677e90edSThomas Graf if (st->frag_data) 255751c56b00SEric Dumazet kunmap_atomic(st->frag_data); 2558677e90edSThomas Graf } 2559b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_abort_seq_read); 2560677e90edSThomas Graf 25613fc7e8a6SThomas Graf #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 25623fc7e8a6SThomas Graf 25633fc7e8a6SThomas Graf static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 25643fc7e8a6SThomas Graf struct ts_config *conf, 25653fc7e8a6SThomas Graf struct ts_state *state) 25663fc7e8a6SThomas Graf { 25673fc7e8a6SThomas Graf return skb_seq_read(offset, text, TS_SKB_CB(state)); 25683fc7e8a6SThomas Graf } 25693fc7e8a6SThomas Graf 25703fc7e8a6SThomas Graf static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 25713fc7e8a6SThomas Graf { 25723fc7e8a6SThomas Graf skb_abort_seq_read(TS_SKB_CB(state)); 25733fc7e8a6SThomas Graf } 25743fc7e8a6SThomas Graf 25753fc7e8a6SThomas Graf /** 25763fc7e8a6SThomas Graf * skb_find_text - Find a text pattern in skb data 25773fc7e8a6SThomas Graf * @skb: the buffer to look in 25783fc7e8a6SThomas Graf * @from: search offset 25793fc7e8a6SThomas Graf * @to: search limit 25803fc7e8a6SThomas Graf * @config: textsearch configuration 25813fc7e8a6SThomas Graf * @state: uninitialized textsearch state variable 25823fc7e8a6SThomas Graf * 25833fc7e8a6SThomas Graf * Finds a pattern in the skb data according to the specified 25843fc7e8a6SThomas Graf * textsearch configuration. Use textsearch_next() to retrieve 25853fc7e8a6SThomas Graf * subsequent occurrences of the pattern. Returns the offset 25863fc7e8a6SThomas Graf * to the first occurrence or UINT_MAX if no match was found.
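 *
 * Minimal usage sketch (illustrative; "needle" is a made-up pattern
 * and error handling is shortened):
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL,
 *				  TS_AUTOLOAD);
 *	if (!IS_ERR(conf)) {
 *		pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *		if (pos != UINT_MAX)
 *			pr_debug("match at offset %u\n", pos);
 *		textsearch_destroy(conf);
 *	}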
25873fc7e8a6SThomas Graf */ 25883fc7e8a6SThomas Graf unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 25893fc7e8a6SThomas Graf unsigned int to, struct ts_config *config, 25903fc7e8a6SThomas Graf struct ts_state *state) 25913fc7e8a6SThomas Graf { 2592f72b948dSPhil Oester unsigned int ret; 2593f72b948dSPhil Oester 25943fc7e8a6SThomas Graf config->get_next_block = skb_ts_get_next_block; 25953fc7e8a6SThomas Graf config->finish = skb_ts_finish; 25963fc7e8a6SThomas Graf 25973fc7e8a6SThomas Graf skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); 25983fc7e8a6SThomas Graf 2599f72b948dSPhil Oester ret = textsearch_find(config, state); 2600f72b948dSPhil Oester return (ret <= to - from ? ret : UINT_MAX); 26013fc7e8a6SThomas Graf } 2602b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_find_text); 26033fc7e8a6SThomas Graf 2604e89e9cf5SAnanda Raju /** 2605e89e9cf5SAnanda Raju * skb_append_datato_frags - append the user data to a skb 2606e89e9cf5SAnanda Raju * @sk: sock structure 2607e89e9cf5SAnanda Raju * @skb: skb structure to be appended with user data. 2608e89e9cf5SAnanda Raju * @getfrag: callback function to be used for getting the user data 2609e89e9cf5SAnanda Raju * @from: pointer to user message iov 2610e89e9cf5SAnanda Raju * @length: length of the iov message 2611e89e9cf5SAnanda Raju * 2612e89e9cf5SAnanda Raju * Description: This procedure appends the user data to the fragment part 2613e89e9cf5SAnanda Raju * of the skb. If any page allocation fails, it returns -ENOMEM. 2614e89e9cf5SAnanda Raju */ 2615e89e9cf5SAnanda Raju int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 2616dab9630fSMartin Waitz int (*getfrag)(void *from, char *to, int offset, 2617e89e9cf5SAnanda Raju int len, int odd, struct sk_buff *skb), 2618e89e9cf5SAnanda Raju void *from, int length) 2619e89e9cf5SAnanda Raju { 2620e89e9cf5SAnanda Raju int frg_cnt = 0; 2621e89e9cf5SAnanda Raju skb_frag_t *frag = NULL; 2622e89e9cf5SAnanda Raju struct page *page = NULL; 2623e89e9cf5SAnanda Raju int copy, left; 2624e89e9cf5SAnanda Raju int offset = 0; 2625e89e9cf5SAnanda Raju int ret; 2626e89e9cf5SAnanda Raju 2627e89e9cf5SAnanda Raju do { 2628e89e9cf5SAnanda Raju /* Return error if we don't have space for new frag */ 2629e89e9cf5SAnanda Raju frg_cnt = skb_shinfo(skb)->nr_frags; 2630e89e9cf5SAnanda Raju if (frg_cnt >= MAX_SKB_FRAGS) 2631e89e9cf5SAnanda Raju return -EFAULT; 2632e89e9cf5SAnanda Raju 2633e89e9cf5SAnanda Raju /* allocate a new page for next frag */ 2634e89e9cf5SAnanda Raju page = alloc_pages(sk->sk_allocation, 0); 2635e89e9cf5SAnanda Raju 2636e89e9cf5SAnanda Raju /* If alloc_page fails just return failure and caller will 2637e89e9cf5SAnanda Raju * free previously allocated pages by doing kfree_skb() 2638e89e9cf5SAnanda Raju */ 2639e89e9cf5SAnanda Raju if (page == NULL) 2640e89e9cf5SAnanda Raju return -ENOMEM; 2641e89e9cf5SAnanda Raju 2642e89e9cf5SAnanda Raju /* initialize the next frag */ 2643e89e9cf5SAnanda Raju skb_fill_page_desc(skb, frg_cnt, page, 0, 0); 2644e89e9cf5SAnanda Raju skb->truesize += PAGE_SIZE; 2645e89e9cf5SAnanda Raju atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc); 2646e89e9cf5SAnanda Raju 2647e89e9cf5SAnanda Raju /* get the new initialized frag */ 2648e89e9cf5SAnanda Raju frg_cnt = skb_shinfo(skb)->nr_frags; 2649e89e9cf5SAnanda Raju frag = &skb_shinfo(skb)->frags[frg_cnt - 1]; 2650e89e9cf5SAnanda Raju 2651e89e9cf5SAnanda Raju /* copy the user data to page */ 2652e89e9cf5SAnanda Raju left = PAGE_SIZE - frag->page_offset; 2653e89e9cf5SAnanda Raju copy = (length > left)?
left : length; 2654e89e9cf5SAnanda Raju 26559e903e08SEric Dumazet ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag), 2656e89e9cf5SAnanda Raju offset, copy, 0, skb); 2657e89e9cf5SAnanda Raju if (ret < 0) 2658e89e9cf5SAnanda Raju return -EFAULT; 2659e89e9cf5SAnanda Raju 2660e89e9cf5SAnanda Raju /* copy was successful so update the size parameters */ 26619e903e08SEric Dumazet skb_frag_size_add(frag, copy); 2662e89e9cf5SAnanda Raju skb->len += copy; 2663e89e9cf5SAnanda Raju skb->data_len += copy; 2664e89e9cf5SAnanda Raju offset += copy; 2665e89e9cf5SAnanda Raju length -= copy; 2666e89e9cf5SAnanda Raju 2667e89e9cf5SAnanda Raju } while (length > 0); 2668e89e9cf5SAnanda Raju 2669e89e9cf5SAnanda Raju return 0; 2670e89e9cf5SAnanda Raju } 2671b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append_datato_frags); 2672e89e9cf5SAnanda Raju 2673cbb042f9SHerbert Xu /** 2674cbb042f9SHerbert Xu * skb_pull_rcsum - pull skb and update receive checksum 2675cbb042f9SHerbert Xu * @skb: buffer to update 2676cbb042f9SHerbert Xu * @len: length of data pulled 2677cbb042f9SHerbert Xu * 2678cbb042f9SHerbert Xu * This function performs an skb_pull on the packet and updates 2679fee54fa5SUrs Thuermann * the CHECKSUM_COMPLETE checksum. It should be used on 268084fa7933SPatrick McHardy * receive path processing instead of skb_pull unless you know 268184fa7933SPatrick McHardy * that the checksum difference is zero (e.g., a valid IP header) 268284fa7933SPatrick McHardy * or you are setting ip_summed to CHECKSUM_NONE. 2683cbb042f9SHerbert Xu */ 2684cbb042f9SHerbert Xu unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 2685cbb042f9SHerbert Xu { 2686cbb042f9SHerbert Xu BUG_ON(len > skb->len); 2687cbb042f9SHerbert Xu skb->len -= len; 2688cbb042f9SHerbert Xu BUG_ON(skb->len < skb->data_len); 2689cbb042f9SHerbert Xu skb_postpull_rcsum(skb, skb->data, len); 2690cbb042f9SHerbert Xu return skb->data += len; 2691cbb042f9SHerbert Xu } 2692f94691acSArnaldo Carvalho de Melo EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2693f94691acSArnaldo Carvalho de Melo 2694f4c50d99SHerbert Xu /** 2695f4c50d99SHerbert Xu * skb_segment - Perform protocol segmentation on skb. 2696f4c50d99SHerbert Xu * @skb: buffer to segment 2697576a30ebSHerbert Xu * @features: features for the output path (see dev->features) 2698f4c50d99SHerbert Xu * 2699f4c50d99SHerbert Xu * This function performs segmentation on the given skb. It returns 27004c821d75SBen Hutchings * a pointer to the first in a list of new skbs for the segments. 27014c821d75SBen Hutchings * In case of error it returns ERR_PTR(err). 
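 *
 * Sketch of the usual caller pattern (modelled loosely on the GSO
 * transmit path; xmit_one() is a hypothetical per-segment handler,
 * and error handling is shortened):
 *
 *	struct sk_buff *segs = skb_segment(skb, features);
 *
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	consume_skb(skb);
 *	while (segs) {
 *		struct sk_buff *next = segs->next;
 *
 *		segs->next = NULL;
 *		xmit_one(segs);
 *		segs = next;
 *	}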
2702f4c50d99SHerbert Xu */ 2703c8f44affSMichał Mirosław struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) 2704f4c50d99SHerbert Xu { 2705f4c50d99SHerbert Xu struct sk_buff *segs = NULL; 2706f4c50d99SHerbert Xu struct sk_buff *tail = NULL; 270789319d38SHerbert Xu struct sk_buff *fskb = skb_shinfo(skb)->frag_list; 2708f4c50d99SHerbert Xu unsigned int mss = skb_shinfo(skb)->gso_size; 270998e399f8SArnaldo Carvalho de Melo unsigned int doffset = skb->data - skb_mac_header(skb); 2710f4c50d99SHerbert Xu unsigned int offset = doffset; 2711f4c50d99SHerbert Xu unsigned int headroom; 2712f4c50d99SHerbert Xu unsigned int len; 271304ed3e74SMichał Mirosław int sg = !!(features & NETIF_F_SG); 2714f4c50d99SHerbert Xu int nfrags = skb_shinfo(skb)->nr_frags; 2715f4c50d99SHerbert Xu int err = -ENOMEM; 2716f4c50d99SHerbert Xu int i = 0; 2717f4c50d99SHerbert Xu int pos; 2718f4c50d99SHerbert Xu 2719f4c50d99SHerbert Xu __skb_push(skb, doffset); 2720f4c50d99SHerbert Xu headroom = skb_headroom(skb); 2721f4c50d99SHerbert Xu pos = skb_headlen(skb); 2722f4c50d99SHerbert Xu 2723f4c50d99SHerbert Xu do { 2724f4c50d99SHerbert Xu struct sk_buff *nskb; 2725f4c50d99SHerbert Xu skb_frag_t *frag; 2726c8884eddSHerbert Xu int hsize; 2727f4c50d99SHerbert Xu int size; 2728f4c50d99SHerbert Xu 2729f4c50d99SHerbert Xu len = skb->len - offset; 2730f4c50d99SHerbert Xu if (len > mss) 2731f4c50d99SHerbert Xu len = mss; 2732f4c50d99SHerbert Xu 2733f4c50d99SHerbert Xu hsize = skb_headlen(skb) - offset; 2734f4c50d99SHerbert Xu if (hsize < 0) 2735f4c50d99SHerbert Xu hsize = 0; 2736c8884eddSHerbert Xu if (hsize > len || !sg) 2737c8884eddSHerbert Xu hsize = len; 2738f4c50d99SHerbert Xu 273989319d38SHerbert Xu if (!hsize && i >= nfrags) { 274089319d38SHerbert Xu BUG_ON(fskb->len != len); 274189319d38SHerbert Xu 274289319d38SHerbert Xu pos += len; 274389319d38SHerbert Xu nskb = skb_clone(fskb, GFP_ATOMIC); 274489319d38SHerbert Xu fskb = fskb->next; 274589319d38SHerbert Xu 2746f4c50d99SHerbert Xu if (unlikely(!nskb)) 2747f4c50d99SHerbert Xu goto err; 2748f4c50d99SHerbert Xu 274989319d38SHerbert Xu hsize = skb_end_pointer(nskb) - nskb->head; 275089319d38SHerbert Xu if (skb_cow_head(nskb, doffset + headroom)) { 275189319d38SHerbert Xu kfree_skb(nskb); 275289319d38SHerbert Xu goto err; 275389319d38SHerbert Xu } 275489319d38SHerbert Xu 275589319d38SHerbert Xu nskb->truesize += skb_end_pointer(nskb) - nskb->head - 275689319d38SHerbert Xu hsize; 275789319d38SHerbert Xu skb_release_head_state(nskb); 275889319d38SHerbert Xu __skb_push(nskb, doffset); 275989319d38SHerbert Xu } else { 276089319d38SHerbert Xu nskb = alloc_skb(hsize + doffset + headroom, 276189319d38SHerbert Xu GFP_ATOMIC); 276289319d38SHerbert Xu 276389319d38SHerbert Xu if (unlikely(!nskb)) 276489319d38SHerbert Xu goto err; 276589319d38SHerbert Xu 276689319d38SHerbert Xu skb_reserve(nskb, headroom); 276789319d38SHerbert Xu __skb_put(nskb, doffset); 276889319d38SHerbert Xu } 276989319d38SHerbert Xu 2770f4c50d99SHerbert Xu if (segs) 2771f4c50d99SHerbert Xu tail->next = nskb; 2772f4c50d99SHerbert Xu else 2773f4c50d99SHerbert Xu segs = nskb; 2774f4c50d99SHerbert Xu tail = nskb; 2775f4c50d99SHerbert Xu 27766f85a124SHerbert Xu __copy_skb_header(nskb, skb); 2777f4c50d99SHerbert Xu nskb->mac_len = skb->mac_len; 2778f4c50d99SHerbert Xu 27793d3be433SEric Dumazet /* nskb and skb might have different headroom */ 27803d3be433SEric Dumazet if (nskb->ip_summed == CHECKSUM_PARTIAL) 27813d3be433SEric Dumazet nskb->csum_start += skb_headroom(nskb) - headroom; 27823d3be433SEric 
Dumazet 2783459a98edSArnaldo Carvalho de Melo skb_reset_mac_header(nskb); 2784ddc7b8e3SArnaldo Carvalho de Melo skb_set_network_header(nskb, skb->mac_len); 2785b0e380b1SArnaldo Carvalho de Melo nskb->transport_header = (nskb->network_header + 2786b0e380b1SArnaldo Carvalho de Melo skb_network_header_len(skb)); 278789319d38SHerbert Xu skb_copy_from_linear_data(skb, nskb->data, doffset); 278889319d38SHerbert Xu 27892f181855SHerbert Xu if (fskb != skb_shinfo(skb)->frag_list) 279089319d38SHerbert Xu continue; 279189319d38SHerbert Xu 2792f4c50d99SHerbert Xu if (!sg) { 27936f85a124SHerbert Xu nskb->ip_summed = CHECKSUM_NONE; 2794f4c50d99SHerbert Xu nskb->csum = skb_copy_and_csum_bits(skb, offset, 2795f4c50d99SHerbert Xu skb_put(nskb, len), 2796f4c50d99SHerbert Xu len, 0); 2797f4c50d99SHerbert Xu continue; 2798f4c50d99SHerbert Xu } 2799f4c50d99SHerbert Xu 2800f4c50d99SHerbert Xu frag = skb_shinfo(nskb)->frags; 2801f4c50d99SHerbert Xu 2802d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, offset, 2803d626f62bSArnaldo Carvalho de Melo skb_put(nskb, hsize), hsize); 2804f4c50d99SHerbert Xu 280589319d38SHerbert Xu while (pos < offset + len && i < nfrags) { 2806f4c50d99SHerbert Xu *frag = skb_shinfo(skb)->frags[i]; 2807ea2ab693SIan Campbell __skb_frag_ref(frag); 28089e903e08SEric Dumazet size = skb_frag_size(frag); 2809f4c50d99SHerbert Xu 2810f4c50d99SHerbert Xu if (pos < offset) { 2811f4c50d99SHerbert Xu frag->page_offset += offset - pos; 28129e903e08SEric Dumazet skb_frag_size_sub(frag, offset - pos); 2813f4c50d99SHerbert Xu } 2814f4c50d99SHerbert Xu 281589319d38SHerbert Xu skb_shinfo(nskb)->nr_frags++; 2816f4c50d99SHerbert Xu 2817f4c50d99SHerbert Xu if (pos + size <= offset + len) { 2818f4c50d99SHerbert Xu i++; 2819f4c50d99SHerbert Xu pos += size; 2820f4c50d99SHerbert Xu } else { 28219e903e08SEric Dumazet skb_frag_size_sub(frag, pos + size - (offset + len)); 282289319d38SHerbert Xu goto skip_fraglist; 2823f4c50d99SHerbert Xu } 2824f4c50d99SHerbert Xu 2825f4c50d99SHerbert Xu frag++; 2826f4c50d99SHerbert Xu } 2827f4c50d99SHerbert Xu 282889319d38SHerbert Xu if (pos < offset + len) { 282989319d38SHerbert Xu struct sk_buff *fskb2 = fskb; 283089319d38SHerbert Xu 283189319d38SHerbert Xu BUG_ON(pos + fskb->len != offset + len); 283289319d38SHerbert Xu 283389319d38SHerbert Xu pos += fskb->len; 283489319d38SHerbert Xu fskb = fskb->next; 283589319d38SHerbert Xu 283689319d38SHerbert Xu if (fskb2->next) { 283789319d38SHerbert Xu fskb2 = skb_clone(fskb2, GFP_ATOMIC); 283889319d38SHerbert Xu if (!fskb2) 283989319d38SHerbert Xu goto err; 284089319d38SHerbert Xu } else 284189319d38SHerbert Xu skb_get(fskb2); 284289319d38SHerbert Xu 2843fbb398a8SDavid S. 
Miller SKB_FRAG_ASSERT(nskb); 284489319d38SHerbert Xu skb_shinfo(nskb)->frag_list = fskb2; 284589319d38SHerbert Xu } 284689319d38SHerbert Xu 284789319d38SHerbert Xu skip_fraglist: 2848f4c50d99SHerbert Xu nskb->data_len = len - hsize; 2849f4c50d99SHerbert Xu nskb->len += nskb->data_len; 2850f4c50d99SHerbert Xu nskb->truesize += nskb->data_len; 2851f4c50d99SHerbert Xu } while ((offset += len) < skb->len); 2852f4c50d99SHerbert Xu 2853f4c50d99SHerbert Xu return segs; 2854f4c50d99SHerbert Xu 2855f4c50d99SHerbert Xu err: 2856f4c50d99SHerbert Xu while ((skb = segs)) { 2857f4c50d99SHerbert Xu segs = skb->next; 2858b08d5840SPatrick McHardy kfree_skb(skb); 2859f4c50d99SHerbert Xu } 2860f4c50d99SHerbert Xu return ERR_PTR(err); 2861f4c50d99SHerbert Xu } 2862f4c50d99SHerbert Xu EXPORT_SYMBOL_GPL(skb_segment); 2863f4c50d99SHerbert Xu 286471d93b39SHerbert Xu int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 286571d93b39SHerbert Xu { 286671d93b39SHerbert Xu struct sk_buff *p = *head; 286771d93b39SHerbert Xu struct sk_buff *nskb; 28689aaa156cSHerbert Xu struct skb_shared_info *skbinfo = skb_shinfo(skb); 28699aaa156cSHerbert Xu struct skb_shared_info *pinfo = skb_shinfo(p); 287071d93b39SHerbert Xu unsigned int headroom; 287186911732SHerbert Xu unsigned int len = skb_gro_len(skb); 287267147ba9SHerbert Xu unsigned int offset = skb_gro_offset(skb); 287367147ba9SHerbert Xu unsigned int headlen = skb_headlen(skb); 287471d93b39SHerbert Xu 287586911732SHerbert Xu if (p->len + len >= 65536) 287671d93b39SHerbert Xu return -E2BIG; 287771d93b39SHerbert Xu 28789aaa156cSHerbert Xu if (pinfo->frag_list) 287971d93b39SHerbert Xu goto merge; 288067147ba9SHerbert Xu else if (headlen <= offset) { 288142da6994SHerbert Xu skb_frag_t *frag; 288266e92fcfSHerbert Xu skb_frag_t *frag2; 28839aaa156cSHerbert Xu int i = skbinfo->nr_frags; 28849aaa156cSHerbert Xu int nr_frags = pinfo->nr_frags + i; 288542da6994SHerbert Xu 288666e92fcfSHerbert Xu offset -= headlen; 288766e92fcfSHerbert Xu 288866e92fcfSHerbert Xu if (nr_frags > MAX_SKB_FRAGS) 288981705ad1SHerbert Xu return -E2BIG; 289081705ad1SHerbert Xu 28919aaa156cSHerbert Xu pinfo->nr_frags = nr_frags; 28929aaa156cSHerbert Xu skbinfo->nr_frags = 0; 2893f5572068SHerbert Xu 28949aaa156cSHerbert Xu frag = pinfo->frags + nr_frags; 28959aaa156cSHerbert Xu frag2 = skbinfo->frags + i; 289666e92fcfSHerbert Xu do { 289766e92fcfSHerbert Xu *--frag = *--frag2; 289866e92fcfSHerbert Xu } while (--i); 289966e92fcfSHerbert Xu 290066e92fcfSHerbert Xu frag->page_offset += offset; 29019e903e08SEric Dumazet skb_frag_size_sub(frag, offset); 290266e92fcfSHerbert Xu 2903f5572068SHerbert Xu skb->truesize -= skb->data_len; 2904f5572068SHerbert Xu skb->len -= skb->data_len; 2905f5572068SHerbert Xu skb->data_len = 0; 2906f5572068SHerbert Xu 29075d38a079SHerbert Xu NAPI_GRO_CB(skb)->free = 1; 29085d38a079SHerbert Xu goto done; 2909d7e8883cSEric Dumazet } else if (skb->head_frag) { 2910d7e8883cSEric Dumazet int nr_frags = pinfo->nr_frags; 2911d7e8883cSEric Dumazet skb_frag_t *frag = pinfo->frags + nr_frags; 2912d7e8883cSEric Dumazet struct page *page = virt_to_head_page(skb->head); 2913d7e8883cSEric Dumazet unsigned int first_size = headlen - offset; 2914d7e8883cSEric Dumazet unsigned int first_offset; 2915d7e8883cSEric Dumazet 2916d7e8883cSEric Dumazet if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 2917d7e8883cSEric Dumazet return -E2BIG; 2918d7e8883cSEric Dumazet 2919d7e8883cSEric Dumazet first_offset = skb->data - 2920d7e8883cSEric Dumazet (unsigned char *)page_address(page) + 
2921d7e8883cSEric Dumazet offset; 2922d7e8883cSEric Dumazet 2923d7e8883cSEric Dumazet pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 2924d7e8883cSEric Dumazet 2925d7e8883cSEric Dumazet frag->page.p = page; 2926d7e8883cSEric Dumazet frag->page_offset = first_offset; 2927d7e8883cSEric Dumazet skb_frag_size_set(frag, first_size); 2928d7e8883cSEric Dumazet 2929d7e8883cSEric Dumazet memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 2930d7e8883cSEric Dumazet /* We dont need to clear skbinfo->nr_frags here */ 2931d7e8883cSEric Dumazet 2932d7e8883cSEric Dumazet NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 2933d7e8883cSEric Dumazet goto done; 293469c0cab1SHerbert Xu } else if (skb_gro_len(p) != pinfo->gso_size) 293569c0cab1SHerbert Xu return -E2BIG; 293671d93b39SHerbert Xu 293771d93b39SHerbert Xu headroom = skb_headroom(p); 29383d3be433SEric Dumazet nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); 293971d93b39SHerbert Xu if (unlikely(!nskb)) 294071d93b39SHerbert Xu return -ENOMEM; 294171d93b39SHerbert Xu 294271d93b39SHerbert Xu __copy_skb_header(nskb, p); 294371d93b39SHerbert Xu nskb->mac_len = p->mac_len; 294471d93b39SHerbert Xu 294571d93b39SHerbert Xu skb_reserve(nskb, headroom); 294686911732SHerbert Xu __skb_put(nskb, skb_gro_offset(p)); 294771d93b39SHerbert Xu 294886911732SHerbert Xu skb_set_mac_header(nskb, skb_mac_header(p) - p->data); 294971d93b39SHerbert Xu skb_set_network_header(nskb, skb_network_offset(p)); 295071d93b39SHerbert Xu skb_set_transport_header(nskb, skb_transport_offset(p)); 295171d93b39SHerbert Xu 295286911732SHerbert Xu __skb_pull(p, skb_gro_offset(p)); 295386911732SHerbert Xu memcpy(skb_mac_header(nskb), skb_mac_header(p), 295486911732SHerbert Xu p->data - skb_mac_header(p)); 295571d93b39SHerbert Xu 295671d93b39SHerbert Xu *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); 295771d93b39SHerbert Xu skb_shinfo(nskb)->frag_list = p; 29589aaa156cSHerbert Xu skb_shinfo(nskb)->gso_size = pinfo->gso_size; 2959622e0ca1SHerbert Xu pinfo->gso_size = 0; 296071d93b39SHerbert Xu skb_header_release(p); 296171d93b39SHerbert Xu nskb->prev = p; 296271d93b39SHerbert Xu 296371d93b39SHerbert Xu nskb->data_len += p->len; 2964de8261c2SEric Dumazet nskb->truesize += p->truesize; 296571d93b39SHerbert Xu nskb->len += p->len; 296671d93b39SHerbert Xu 296771d93b39SHerbert Xu *head = nskb; 296871d93b39SHerbert Xu nskb->next = p->next; 296971d93b39SHerbert Xu p->next = NULL; 297071d93b39SHerbert Xu 297171d93b39SHerbert Xu p = nskb; 297271d93b39SHerbert Xu 297371d93b39SHerbert Xu merge: 2974de8261c2SEric Dumazet p->truesize += skb->truesize - len; 297567147ba9SHerbert Xu if (offset > headlen) { 2976d1dc7abfSMichal Schmidt unsigned int eat = offset - headlen; 2977d1dc7abfSMichal Schmidt 2978d1dc7abfSMichal Schmidt skbinfo->frags[0].page_offset += eat; 29799e903e08SEric Dumazet skb_frag_size_sub(&skbinfo->frags[0], eat); 2980d1dc7abfSMichal Schmidt skb->data_len -= eat; 2981d1dc7abfSMichal Schmidt skb->len -= eat; 298267147ba9SHerbert Xu offset = headlen; 298356035022SHerbert Xu } 298456035022SHerbert Xu 298567147ba9SHerbert Xu __skb_pull(skb, offset); 298656035022SHerbert Xu 298771d93b39SHerbert Xu p->prev->next = skb; 298871d93b39SHerbert Xu p->prev = skb; 298971d93b39SHerbert Xu skb_header_release(skb); 299071d93b39SHerbert Xu 29915d38a079SHerbert Xu done: 29925d38a079SHerbert Xu NAPI_GRO_CB(p)->count++; 299337fe4732SHerbert Xu p->data_len += len; 299437fe4732SHerbert Xu p->truesize += len; 299537fe4732SHerbert Xu p->len += len; 299671d93b39SHerbert Xu 299771d93b39SHerbert Xu 
NAPI_GRO_CB(skb)->same_flow = 1; 299871d93b39SHerbert Xu return 0; 299971d93b39SHerbert Xu } 300071d93b39SHerbert Xu EXPORT_SYMBOL_GPL(skb_gro_receive); 300171d93b39SHerbert Xu 30021da177e4SLinus Torvalds void __init skb_init(void) 30031da177e4SLinus Torvalds { 30041da177e4SLinus Torvalds skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 30051da177e4SLinus Torvalds sizeof(struct sk_buff), 30061da177e4SLinus Torvalds 0, 3007e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 300820c2df83SPaul Mundt NULL); 3009d179cd12SDavid S. Miller skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 3010d179cd12SDavid S. Miller (2*sizeof(struct sk_buff)) + 3011d179cd12SDavid S. Miller sizeof(atomic_t), 3012d179cd12SDavid S. Miller 0, 3013e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 301420c2df83SPaul Mundt NULL); 30151da177e4SLinus Torvalds } 30161da177e4SLinus Torvalds 3017716ea3a7SDavid Howells /** 3018716ea3a7SDavid Howells * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 3019716ea3a7SDavid Howells * @skb: Socket buffer containing the buffers to be mapped 3020716ea3a7SDavid Howells * @sg: The scatter-gather list to map into 3021716ea3a7SDavid Howells * @offset: The offset into the buffer's contents to start mapping 3022716ea3a7SDavid Howells * @len: Length of buffer space to be mapped 3023716ea3a7SDavid Howells * 3024716ea3a7SDavid Howells * Fill the specified scatter-gather list with mappings/pointers into a 3025716ea3a7SDavid Howells * region of the buffer space attached to a socket buffer. 3026716ea3a7SDavid Howells */ 302751c739d1SDavid S. Miller static int 302851c739d1SDavid S. Miller __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3029716ea3a7SDavid Howells { 30301a028e50SDavid S. Miller int start = skb_headlen(skb); 30311a028e50SDavid S. Miller int i, copy = start - offset; 3032fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 3033716ea3a7SDavid Howells int elt = 0; 3034716ea3a7SDavid Howells 3035716ea3a7SDavid Howells if (copy > 0) { 3036716ea3a7SDavid Howells if (copy > len) 3037716ea3a7SDavid Howells copy = len; 3038642f1490SJens Axboe sg_set_buf(sg, skb->data + offset, copy); 3039716ea3a7SDavid Howells elt++; 3040716ea3a7SDavid Howells if ((len -= copy) == 0) 3041716ea3a7SDavid Howells return elt; 3042716ea3a7SDavid Howells offset += copy; 3043716ea3a7SDavid Howells } 3044716ea3a7SDavid Howells 3045716ea3a7SDavid Howells for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 30461a028e50SDavid S. Miller int end; 3047716ea3a7SDavid Howells 3048547b792cSIlpo Järvinen WARN_ON(start > offset + len); 30491a028e50SDavid S. Miller 30509e903e08SEric Dumazet end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3051716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 3052716ea3a7SDavid Howells skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3053716ea3a7SDavid Howells 3054716ea3a7SDavid Howells if (copy > len) 3055716ea3a7SDavid Howells copy = len; 3056ea2ab693SIan Campbell sg_set_page(&sg[elt], skb_frag_page(frag), copy, 3057642f1490SJens Axboe frag->page_offset+offset-start); 3058716ea3a7SDavid Howells elt++; 3059716ea3a7SDavid Howells if (!(len -= copy)) 3060716ea3a7SDavid Howells return elt; 3061716ea3a7SDavid Howells offset += copy; 3062716ea3a7SDavid Howells } 30631a028e50SDavid S. Miller start = end; 3064716ea3a7SDavid Howells } 3065716ea3a7SDavid Howells 3066fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 30671a028e50SDavid S. 
Miller int end; 3068716ea3a7SDavid Howells 3069547b792cSIlpo Järvinen WARN_ON(start > offset + len); 30701a028e50SDavid S. Miller 3071fbb398a8SDavid S. Miller end = start + frag_iter->len; 3072716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 3073716ea3a7SDavid Howells if (copy > len) 3074716ea3a7SDavid Howells copy = len; 3075fbb398a8SDavid S. Miller elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, 307651c739d1SDavid S. Miller copy); 3077716ea3a7SDavid Howells if ((len -= copy) == 0) 3078716ea3a7SDavid Howells return elt; 3079716ea3a7SDavid Howells offset += copy; 3080716ea3a7SDavid Howells } 30811a028e50SDavid S. Miller start = end; 3082716ea3a7SDavid Howells } 3083716ea3a7SDavid Howells BUG_ON(len); 3084716ea3a7SDavid Howells return elt; 3085716ea3a7SDavid Howells } 3086716ea3a7SDavid Howells 308751c739d1SDavid S. Miller int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 308851c739d1SDavid S. Miller { 308951c739d1SDavid S. Miller int nsg = __skb_to_sgvec(skb, sg, offset, len); 309051c739d1SDavid S. Miller 3091c46f2334SJens Axboe sg_mark_end(&sg[nsg - 1]); 309251c739d1SDavid S. Miller 309351c739d1SDavid S. Miller return nsg; 309451c739d1SDavid S. Miller } 3095b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_to_sgvec); 309651c739d1SDavid S. Miller 3097716ea3a7SDavid Howells /** 3098716ea3a7SDavid Howells * skb_cow_data - Check that a socket buffer's data buffers are writable 3099716ea3a7SDavid Howells * @skb: The socket buffer to check. 3100716ea3a7SDavid Howells * @tailbits: Amount of trailing space to be added 3101716ea3a7SDavid Howells * @trailer: Returned pointer to the skb where the @tailbits space begins 3102716ea3a7SDavid Howells * 3103716ea3a7SDavid Howells * Make sure that the data buffers attached to a socket buffer are 3104716ea3a7SDavid Howells * writable. If they are not, private copies are made of the data buffers 3105716ea3a7SDavid Howells * and the socket buffer is set to use these instead. 3106716ea3a7SDavid Howells * 3107716ea3a7SDavid Howells * If @tailbits is given, make sure that there is space to write @tailbits 3108716ea3a7SDavid Howells * bytes of data beyond the current end of the socket buffer. @trailer will be 3109716ea3a7SDavid Howells * set to point to the skb in which this space begins. 3110716ea3a7SDavid Howells * 3111716ea3a7SDavid Howells * The number of scatterlist elements required to completely map the 3112716ea3a7SDavid Howells * COW'd and extended socket buffer will be returned. 3113716ea3a7SDavid Howells */ 3114716ea3a7SDavid Howells int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 3115716ea3a7SDavid Howells { 3116716ea3a7SDavid Howells int copyflag; 3117716ea3a7SDavid Howells int elt; 3118716ea3a7SDavid Howells struct sk_buff *skb1, **skb_p; 3119716ea3a7SDavid Howells 3120716ea3a7SDavid Howells /* If skb is cloned or its head is paged, reallocate 3121716ea3a7SDavid Howells * the head, pulling out all the pages (pages are considered not 3122716ea3a7SDavid Howells * writable at the moment even if they are anonymous). 3123716ea3a7SDavid Howells */ 3124716ea3a7SDavid Howells if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 3125716ea3a7SDavid Howells __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) 3126716ea3a7SDavid Howells return -ENOMEM; 3127716ea3a7SDavid Howells 3128716ea3a7SDavid Howells /* Easy case. Most packets will go this way. */ 312921dc3301SDavid S.
Miller if (!skb_has_frag_list(skb)) { 3130716ea3a7SDavid Howells /* A little trouble: not enough space for the trailer. 3131716ea3a7SDavid Howells * This should not happen when the stack is tuned to generate 3132716ea3a7SDavid Howells * good frames. OK, on a miss we reallocate and reserve even more 3133716ea3a7SDavid Howells * space; 128 bytes is fair. */ 3134716ea3a7SDavid Howells 3135716ea3a7SDavid Howells if (skb_tailroom(skb) < tailbits && 3136716ea3a7SDavid Howells pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 3137716ea3a7SDavid Howells return -ENOMEM; 3138716ea3a7SDavid Howells 3139716ea3a7SDavid Howells /* Voila! */ 3140716ea3a7SDavid Howells *trailer = skb; 3141716ea3a7SDavid Howells return 1; 3142716ea3a7SDavid Howells } 3143716ea3a7SDavid Howells 3144716ea3a7SDavid Howells /* Misery. We are in trouble; going to mince the fragments... */ 3145716ea3a7SDavid Howells 3146716ea3a7SDavid Howells elt = 1; 3147716ea3a7SDavid Howells skb_p = &skb_shinfo(skb)->frag_list; 3148716ea3a7SDavid Howells copyflag = 0; 3149716ea3a7SDavid Howells 3150716ea3a7SDavid Howells while ((skb1 = *skb_p) != NULL) { 3151716ea3a7SDavid Howells int ntail = 0; 3152716ea3a7SDavid Howells 3153716ea3a7SDavid Howells /* The fragment is partially pulled by someone; 3154716ea3a7SDavid Howells * this can happen on input. Copy it and everything 3155716ea3a7SDavid Howells * after it. */ 3156716ea3a7SDavid Howells 3157716ea3a7SDavid Howells if (skb_shared(skb1)) 3158716ea3a7SDavid Howells copyflag = 1; 3159716ea3a7SDavid Howells 3160716ea3a7SDavid Howells /* If the skb is the last, worry about trailer. */ 3161716ea3a7SDavid Howells 3162716ea3a7SDavid Howells if (skb1->next == NULL && tailbits) { 3163716ea3a7SDavid Howells if (skb_shinfo(skb1)->nr_frags || 316421dc3301SDavid S. Miller skb_has_frag_list(skb1) || 3165716ea3a7SDavid Howells skb_tailroom(skb1) < tailbits) 3166716ea3a7SDavid Howells ntail = tailbits + 128; 3167716ea3a7SDavid Howells } 3168716ea3a7SDavid Howells 3169716ea3a7SDavid Howells if (copyflag || 3170716ea3a7SDavid Howells skb_cloned(skb1) || 3171716ea3a7SDavid Howells ntail || 3172716ea3a7SDavid Howells skb_shinfo(skb1)->nr_frags || 317321dc3301SDavid S. Miller skb_has_frag_list(skb1)) { 3174716ea3a7SDavid Howells struct sk_buff *skb2; 3175716ea3a7SDavid Howells 3176716ea3a7SDavid Howells /* Fuck, we are miserable poor guys... */ 3177716ea3a7SDavid Howells if (ntail == 0) 3178716ea3a7SDavid Howells skb2 = skb_copy(skb1, GFP_ATOMIC); 3179716ea3a7SDavid Howells else 3180716ea3a7SDavid Howells skb2 = skb_copy_expand(skb1, 3181716ea3a7SDavid Howells skb_headroom(skb1), 3182716ea3a7SDavid Howells ntail, 3183716ea3a7SDavid Howells GFP_ATOMIC); 3184716ea3a7SDavid Howells if (unlikely(skb2 == NULL)) 3185716ea3a7SDavid Howells return -ENOMEM; 3186716ea3a7SDavid Howells 3187716ea3a7SDavid Howells if (skb1->sk) 3188716ea3a7SDavid Howells skb_set_owner_w(skb2, skb1->sk); 3189716ea3a7SDavid Howells 3190716ea3a7SDavid Howells /* Looking around. Are we still alive?
3191716ea3a7SDavid Howells * OK, link new skb, drop old one */ 3192716ea3a7SDavid Howells 3193716ea3a7SDavid Howells skb2->next = skb1->next; 3194716ea3a7SDavid Howells *skb_p = skb2; 3195716ea3a7SDavid Howells kfree_skb(skb1); 3196716ea3a7SDavid Howells skb1 = skb2; 3197716ea3a7SDavid Howells } 3198716ea3a7SDavid Howells elt++; 3199716ea3a7SDavid Howells *trailer = skb1; 3200716ea3a7SDavid Howells skb_p = &skb1->next; 3201716ea3a7SDavid Howells } 3202716ea3a7SDavid Howells 3203716ea3a7SDavid Howells return elt; 3204716ea3a7SDavid Howells } 3205b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_cow_data); 3206716ea3a7SDavid Howells 3207b1faf566SEric Dumazet static void sock_rmem_free(struct sk_buff *skb) 3208b1faf566SEric Dumazet { 3209b1faf566SEric Dumazet struct sock *sk = skb->sk; 3210b1faf566SEric Dumazet 3211b1faf566SEric Dumazet atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3212b1faf566SEric Dumazet } 3213b1faf566SEric Dumazet 3214b1faf566SEric Dumazet /* 3215b1faf566SEric Dumazet * Note: We dont mem charge error packets (no sk_forward_alloc changes) 3216b1faf566SEric Dumazet */ 3217b1faf566SEric Dumazet int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3218b1faf566SEric Dumazet { 3219110c4330SEric Dumazet int len = skb->len; 3220110c4330SEric Dumazet 3221b1faf566SEric Dumazet if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 322295c96174SEric Dumazet (unsigned int)sk->sk_rcvbuf) 3223b1faf566SEric Dumazet return -ENOMEM; 3224b1faf566SEric Dumazet 3225b1faf566SEric Dumazet skb_orphan(skb); 3226b1faf566SEric Dumazet skb->sk = sk; 3227b1faf566SEric Dumazet skb->destructor = sock_rmem_free; 3228b1faf566SEric Dumazet atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3229b1faf566SEric Dumazet 3230abb57ea4SEric Dumazet /* before exiting rcu section, make sure dst is refcounted */ 3231abb57ea4SEric Dumazet skb_dst_force(skb); 3232abb57ea4SEric Dumazet 3233b1faf566SEric Dumazet skb_queue_tail(&sk->sk_error_queue, skb); 3234b1faf566SEric Dumazet if (!sock_flag(sk, SOCK_DEAD)) 3235110c4330SEric Dumazet sk->sk_data_ready(sk, len); 3236b1faf566SEric Dumazet return 0; 3237b1faf566SEric Dumazet } 3238b1faf566SEric Dumazet EXPORT_SYMBOL(sock_queue_err_skb); 3239b1faf566SEric Dumazet 3240ac45f602SPatrick Ohly void skb_tstamp_tx(struct sk_buff *orig_skb, 3241ac45f602SPatrick Ohly struct skb_shared_hwtstamps *hwtstamps) 3242ac45f602SPatrick Ohly { 3243ac45f602SPatrick Ohly struct sock *sk = orig_skb->sk; 3244ac45f602SPatrick Ohly struct sock_exterr_skb *serr; 3245ac45f602SPatrick Ohly struct sk_buff *skb; 3246ac45f602SPatrick Ohly int err; 3247ac45f602SPatrick Ohly 3248ac45f602SPatrick Ohly if (!sk) 3249ac45f602SPatrick Ohly return; 3250ac45f602SPatrick Ohly 3251ac45f602SPatrick Ohly skb = skb_clone(orig_skb, GFP_ATOMIC); 3252ac45f602SPatrick Ohly if (!skb) 3253ac45f602SPatrick Ohly return; 3254ac45f602SPatrick Ohly 3255ac45f602SPatrick Ohly if (hwtstamps) { 3256ac45f602SPatrick Ohly *skb_hwtstamps(skb) = 3257ac45f602SPatrick Ohly *hwtstamps; 3258ac45f602SPatrick Ohly } else { 3259ac45f602SPatrick Ohly /* 3260ac45f602SPatrick Ohly * no hardware time stamps available, 32612244d07bSOliver Hartkopp * so keep the shared tx_flags and only 3262ac45f602SPatrick Ohly * store software time stamp 3263ac45f602SPatrick Ohly */ 3264ac45f602SPatrick Ohly skb->tstamp = ktime_get_real(); 3265ac45f602SPatrick Ohly } 3266ac45f602SPatrick Ohly 3267ac45f602SPatrick Ohly serr = SKB_EXT_ERR(skb); 3268ac45f602SPatrick Ohly memset(serr, 0, sizeof(*serr)); 3269ac45f602SPatrick Ohly serr->ee.ee_errno = ENOMSG; 
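/* ENOMSG does not signal a failure here: it merely marks this
 * queue entry as an event rather than a real error. Userspace
 * collects it with recvmsg(MSG_ERRQUEUE) and tells timestamp
 * events apart by the ee_origin set below.
 */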
3270ac45f602SPatrick Ohly serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 327129030374SEric Dumazet 3272ac45f602SPatrick Ohly err = sock_queue_err_skb(sk, skb); 327329030374SEric Dumazet 3274ac45f602SPatrick Ohly if (err) 3275ac45f602SPatrick Ohly kfree_skb(skb); 3276ac45f602SPatrick Ohly } 3277ac45f602SPatrick Ohly EXPORT_SYMBOL_GPL(skb_tstamp_tx); 3278ac45f602SPatrick Ohly 32796e3e939fSJohannes Berg void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 32806e3e939fSJohannes Berg { 32816e3e939fSJohannes Berg struct sock *sk = skb->sk; 32826e3e939fSJohannes Berg struct sock_exterr_skb *serr; 32836e3e939fSJohannes Berg int err; 32846e3e939fSJohannes Berg 32856e3e939fSJohannes Berg skb->wifi_acked_valid = 1; 32866e3e939fSJohannes Berg skb->wifi_acked = acked; 32876e3e939fSJohannes Berg 32886e3e939fSJohannes Berg serr = SKB_EXT_ERR(skb); 32896e3e939fSJohannes Berg memset(serr, 0, sizeof(*serr)); 32906e3e939fSJohannes Berg serr->ee.ee_errno = ENOMSG; 32916e3e939fSJohannes Berg serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 32926e3e939fSJohannes Berg 32936e3e939fSJohannes Berg err = sock_queue_err_skb(sk, skb); 32946e3e939fSJohannes Berg if (err) 32956e3e939fSJohannes Berg kfree_skb(skb); 32966e3e939fSJohannes Berg } 32976e3e939fSJohannes Berg EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 32986e3e939fSJohannes Berg 3299ac45f602SPatrick Ohly 3300f35d9d8aSRusty Russell /** 3301f35d9d8aSRusty Russell * skb_partial_csum_set - set up and verify partial csum values for packet 3302f35d9d8aSRusty Russell * @skb: the skb to set 3303f35d9d8aSRusty Russell * @start: the number of bytes after skb->data to start checksumming. 3304f35d9d8aSRusty Russell * @off: the offset from start to place the checksum. 3305f35d9d8aSRusty Russell * 3306f35d9d8aSRusty Russell * For untrusted partially-checksummed packets, we need to make sure the values 3307f35d9d8aSRusty Russell * for skb->csum_start and skb->csum_offset are valid so we don't oops. 3308f35d9d8aSRusty Russell * 3309f35d9d8aSRusty Russell * This function checks and sets those values and skb->ip_summed: if this 3310f35d9d8aSRusty Russell * returns false you should drop the packet. 3311f35d9d8aSRusty Russell */ 3312f35d9d8aSRusty Russell bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 3313f35d9d8aSRusty Russell { 33145ff8dda3SHerbert Xu if (unlikely(start > skb_headlen(skb)) || 33155ff8dda3SHerbert Xu unlikely((int)start + off > skb_headlen(skb) - 2)) { 3316f35d9d8aSRusty Russell if (net_ratelimit()) 3317f35d9d8aSRusty Russell printk(KERN_WARNING 3318f35d9d8aSRusty Russell "bad partial csum: csum=%u/%u len=%u\n", 33195ff8dda3SHerbert Xu start, off, skb_headlen(skb)); 3320f35d9d8aSRusty Russell return false; 3321f35d9d8aSRusty Russell } 3322f35d9d8aSRusty Russell skb->ip_summed = CHECKSUM_PARTIAL; 3323f35d9d8aSRusty Russell skb->csum_start = skb_headroom(skb) + start; 3324f35d9d8aSRusty Russell skb->csum_offset = off; 3325f35d9d8aSRusty Russell return true; 3326f35d9d8aSRusty Russell } 3327b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_partial_csum_set); 3328f35d9d8aSRusty Russell 33294497b076SBen Hutchings void __skb_warn_lro_forwarding(const struct sk_buff *skb) 33304497b076SBen Hutchings { 33314497b076SBen Hutchings if (net_ratelimit()) 33324497b076SBen Hutchings pr_warning("%s: received packets cannot be forwarded" 33334497b076SBen Hutchings " while LRO is enabled\n", skb->dev->name); 33344497b076SBen Hutchings } 33354497b076SBen Hutchings EXPORT_SYMBOL(__skb_warn_lro_forwarding); 3336
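/*
 * Illustrative sketch (not part of the upstream file): the canonical
 * pairing of skb_cow_data() and skb_to_sgvec(), as used by the IPsec
 * ESP output path. skb_cow_data() returns an upper bound on the number
 * of scatterlist entries needed, which then sizes the table that
 * skb_to_sgvec() fills. example_map_skb() and its fixed on-stack table
 * are assumptions made for brevity; real users size the table
 * dynamically.
 */
static int example_map_skb(struct sk_buff *skb, int tailbits)
{
	struct scatterlist sg[16];
	struct sk_buff *trailer;
	int nsg;

	/* Make every buffer writable and learn how many sg entries
	 * we will need; returns -ENOMEM on allocation failure.
	 */
	nsg = skb_cow_data(skb, tailbits, &trailer);
	if (nsg < 0)
		return nsg;
	if (nsg > (int)ARRAY_SIZE(sg))
		return -EMSGSIZE;

	sg_init_table(sg, nsg);
	nsg = skb_to_sgvec(skb, sg, 0, skb->len);

	/* sg[0..nsg-1] now map the writable skb data */
	return nsg;
}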