/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}

/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 __func__, here, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 __func__, here, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_node_track_caller(size, gfp_mask, node);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
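
/*
 * Usage sketch (illustrative, not part of the original file): most
 * callers reach __alloc_skb() through the alloc_skb() wrapper, reserve
 * headroom, then extend the data area; "payload" and "payload_len" are
 * assumed names.
 *
 *	struct sk_buff *skb = alloc_skb(NET_SKB_PAD + payload_len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_SKB_PAD);		(headroom for lower layers)
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 */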

/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of fragment, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes:
 *  Before IO, the driver allocates only the data buffer where the NIC
 *  puts the incoming frame.
 *  The driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 *  After IO, the driver calls build_skb() to allocate the sk_buff and
 *  populate it before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	skb->head_frag = frag_size != 0;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}
EXPORT_SYMBOL(build_skb);
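
/*
 * Usage sketch (illustrative only): the driver-side RX pattern the note
 * above describes, mirroring what __netdev_alloc_skb() below does;
 * "rx_buf_len" and "framelen" are assumed names.
 *
 *	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + rx_buf_len) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *buf = netdev_alloc_frag(truesize);	(posted to the RX ring)
 *
 *	... after the NIC DMAs "framelen" bytes at buf + NET_SKB_PAD ...
 *
 *	struct sk_buff *skb = build_skb(buf, truesize);
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, framelen);
 *	}
 */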

struct netdev_alloc_cache {
	struct page *page;
	unsigned int offset;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	struct netdev_alloc_cache *nc;
	void *data = NULL;
	unsigned long flags;

	local_irq_save(flags);
	nc = &__get_cpu_var(netdev_alloc_cache);
	if (unlikely(!nc->page)) {
refill:
		nc->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
		nc->offset = 0;
	}
	if (likely(nc->page)) {
		if (nc->offset + fragsz > PAGE_SIZE) {
			put_page(nc->page);
			goto refill;
		}
		data = page_address(nc->page) + nc->offset;
		nc->offset += fragsz;
		get_page(nc->page);
	}
	local_irq_restore(flags);
	return data;
}
EXPORT_SYMBOL(netdev_alloc_frag);

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) {
		void *data = netdev_alloc_frag(fragsz);

		if (likely(data)) {
			skb = build_skb(data, fragsz);
			if (unlikely(!skb))
				put_page(virt_to_head_page(data));
		}
	} else {
		skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
	}
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
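
/*
 * Usage sketch (illustrative only): drivers normally call the
 * netdev_alloc_skb() wrapper, which supplies GFP_ATOMIC for them;
 * "rx_buf_len" is an assumed name.
 *
 *	struct sk_buff *skb = netdev_alloc_skb(dev, rx_buf_len);
 *
 *	if (!skb)
 *		return -ENOMEM;		(RX ring refill failed, retry later)
 *	... map skb->data for DMA and post it to the RX ring ...
 */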

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	if (skb->head_frag)
		put_page(virt_to_head_page(skb->head));
	else
		kfree(skb->head);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				skb_frag_unref(skb, i);
		}

		/*
		 * If the skb's buffer is from userspace, we need to notify
		 * the caller that the lower device has finished its DMA.
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			struct ubuf_info *uarg;

			uarg = skb_shinfo(skb)->destructor_arg;
			if (uarg->callback)
				uarg->callback(uarg);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		skb_free_head(skb);
	}
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero. Functions identically to kfree_skb(), but kfree_skb()
 *	assumes that the frame is being dropped after a failure and notes
 *	that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
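
/*
 * Usage sketch (illustrative only): the two free paths differ only in
 * their tracepoints, which matters to drop-monitoring tools such as
 * dropwatch.
 *
 *	if (unlikely(err))
 *		kfree_skb(skb);		(packet dropped: counts as an error event)
 *	else
 *		consume_skb(skb);	(packet left the system normally)
 */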

/**
 *	skb_recycle - clean up an skb for reuse
 *	@skb: buffer
 *
 *	Recycles the skb to be reused as a receive buffer. This
 *	function does any necessary reference count dropping, and
 *	cleans up the skbuff as if it just came from __alloc_skb().
 */
void skb_recycle(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;

	skb_release_head_state(skb);

	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);
}
EXPORT_SYMBOL(skb_recycle);

/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, and
 *	that it is linear and its head portion at least as large as
 *	skb_size so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	if (!skb_is_recycleable(skb, skb_size))
		return false;

	skb_recycle(skb);

	return true;
}
EXPORT_SYMBOL(skb_recycle_check);
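
/*
 * Usage sketch (illustrative only): a driver TX-completion path might
 * try to reuse a just-transmitted buffer for its RX ring instead of
 * freeing it; "rx_buffer_size" and the ring helper are assumed names.
 *
 *	if (skb_recycle_check(skb, rx_buffer_size))
 *		example_post_to_rx_ring(priv, skb);
 *	else
 *		dev_kfree_skb_any(skb);
 */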

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp		= old->tstamp;
	new->dev		= old->dev;
	new->transport_header	= old->transport_header;
	new->network_header	= old->network_header;
	new->mac_header		= old->mac_header;
	skb_dst_copy(new, old);
	new->rxhash		= old->rxhash;
	new->ooo_okay		= old->ooo_okay;
	new->l4_rxhash		= old->l4_rxhash;
	new->no_fcs		= old->no_fcs;
#ifdef CONFIG_XFRM
	new->sp			= secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum		= old->csum;
	new->local_df		= old->local_df;
	new->pkt_type		= old->pkt_type;
	new->ip_summed		= old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority		= old->priority;
#if IS_ENABLED(CONFIG_IP_VS)
	new->ipvs_property	= old->ipvs_property;
#endif
	new->protocol		= old->protocol;
	new->mark		= old->mark;
	new->skb_iif		= old->skb_iif;
	__nf_copy(new, old);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	new->nf_trace		= old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index		= old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd		= old->tc_verd;
#endif
#endif
	new->vlan_tci		= old->vlan_tci;

	skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph - morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on an SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)head->private;
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_atomic(skb_frag_page(f));
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_atomic(vaddr);
		page->private = (unsigned long)head;
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg);

	/* skb frags point to kernel buffers */
	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
		__skb_fill_page_desc(skb, i - 1, head, 0,
				     skb_shinfo(skb)->frags[i - 1].size);
		head = (struct page *)head->private;
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, gfp_mask))
			return NULL;
	}

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
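
/*
 * Usage sketch (illustrative only): cloning is the cheap way to hand
 * the same payload to a second consumer, e.g. queueing a packet to a
 * tap while the original continues down the stack.
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (clone)
 *		netif_rx(clone);	(the clone shares skb's data, not its struct)
 */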

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header   += offset;
	if (skb_mac_header_was_set(new))
		new->mac_header += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
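
/*
 * Quick comparison of the copy flavours (illustrative only):
 *
 *	skb_clone(skb, gfp)	shares data; cheapest, data must stay
 *				read-only
 *	pskb_copy(skb, gfp)	private header, shared frags; for
 *				header-only edits
 *	skb_copy(skb, gfp)	fully private, linearized; for edits
 *				anywhere in the data
 */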

/**
 *	__pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
{
	unsigned int size = skb_headlen(skb) + headroom;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len  = skb->data_len;
	n->len	     = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			if (skb_copy_ubufs(skb, gfp_mask)) {
				kfree_skb(n);
				n = NULL;
				goto out;
			}
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy);

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success or error,
 *	if expansion failed. In the last case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + skb_end_offset(skb) + ntail;
	long off;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
		       gfp_mask);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		/* copy this zero copy skb frags */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			if (skb_copy_ubufs(skb, gfp_mask))
				goto nofrags;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head     = data;
	skb->head_frag = 0;
	skb->data    += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end      = size;
	off           = nhead;
#else
	skb->end      = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail	      += off;
	skb->transport_header += off;
	skb->network_header   += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += nhead;
	skb->cloned   = 0;
	skb->hdr_len  = 0;
	skb->nohdr    = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
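
/*
 * Usage sketch (illustrative only): making sure there is room to push a
 * new header onto a possibly-cloned skb before an encapsulation step;
 * "hdr_len" is an assumed name.
 *
 *	if (skb_headroom(skb) < hdr_len || skb_cloned(skb)) {
 *		if (pskb_expand_head(skb, hdr_len, 0, GFP_ATOMIC))
 *			goto drop;	(reload cached header pointers after this)
 *	}
 *	skb_push(skb, hdr_len);
 */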

/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
	if (n->ip_summed == CHECKSUM_PARTIAL)
		n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header   += off;
	if (skb_mac_header_was_set(skb))
		n->mac_header += off;
#endif

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);
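
/*
 * Usage sketch (illustrative only): copying a packet while making room
 * for a tunnel header in front of it; "tun_hlen" is an assumed name.
 *
 *	struct sk_buff *new = skb_copy_expand(skb, tun_hlen,
 *					      skb_tailroom(skb), GFP_ATOMIC);
 *
 *	if (new)
 *		... skb_push(new, tun_hlen) now cannot run out of headroom ...
 */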
Miller EXPORT_SYMBOL(skb_copy_expand); 11291da177e4SLinus Torvalds 11301da177e4SLinus Torvalds /** 11311da177e4SLinus Torvalds * skb_pad - zero pad the tail of an skb 11321da177e4SLinus Torvalds * @skb: buffer to pad 11331da177e4SLinus Torvalds * @pad: space to pad 11341da177e4SLinus Torvalds * 11351da177e4SLinus Torvalds * Ensure that a buffer is followed by a padding area that is zero 11361da177e4SLinus Torvalds * filled. Used by network drivers which may DMA or transfer data 11371da177e4SLinus Torvalds * beyond the buffer end onto the wire. 11381da177e4SLinus Torvalds * 11395b057c6bSHerbert Xu * May return error in out of memory cases. The skb is freed on error. 11401da177e4SLinus Torvalds */ 11411da177e4SLinus Torvalds 11425b057c6bSHerbert Xu int skb_pad(struct sk_buff *skb, int pad) 11431da177e4SLinus Torvalds { 11445b057c6bSHerbert Xu int err; 11455b057c6bSHerbert Xu int ntail; 11461da177e4SLinus Torvalds 11471da177e4SLinus Torvalds /* If the skbuff is non linear tailroom is always zero.. */ 11485b057c6bSHerbert Xu if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 11491da177e4SLinus Torvalds memset(skb->data+skb->len, 0, pad); 11505b057c6bSHerbert Xu return 0; 11511da177e4SLinus Torvalds } 11521da177e4SLinus Torvalds 11534305b541SArnaldo Carvalho de Melo ntail = skb->data_len + pad - (skb->end - skb->tail); 11545b057c6bSHerbert Xu if (likely(skb_cloned(skb) || ntail > 0)) { 11555b057c6bSHerbert Xu err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 11565b057c6bSHerbert Xu if (unlikely(err)) 11575b057c6bSHerbert Xu goto free_skb; 11585b057c6bSHerbert Xu } 11595b057c6bSHerbert Xu 11605b057c6bSHerbert Xu /* FIXME: The use of this function with non-linear skb's really needs 11615b057c6bSHerbert Xu * to be audited. 11625b057c6bSHerbert Xu */ 11635b057c6bSHerbert Xu err = skb_linearize(skb); 11645b057c6bSHerbert Xu if (unlikely(err)) 11655b057c6bSHerbert Xu goto free_skb; 11665b057c6bSHerbert Xu 11675b057c6bSHerbert Xu memset(skb->data + skb->len, 0, pad); 11685b057c6bSHerbert Xu return 0; 11695b057c6bSHerbert Xu 11705b057c6bSHerbert Xu free_skb: 11711da177e4SLinus Torvalds kfree_skb(skb); 11725b057c6bSHerbert Xu return err; 11731da177e4SLinus Torvalds } 1174b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_pad); 11751da177e4SLinus Torvalds 11760dde3e16SIlpo Järvinen /** 11770dde3e16SIlpo Järvinen * skb_put - add data to a buffer 11780dde3e16SIlpo Järvinen * @skb: buffer to use 11790dde3e16SIlpo Järvinen * @len: amount of data to add 11800dde3e16SIlpo Järvinen * 11810dde3e16SIlpo Järvinen * This function extends the used data area of the buffer. If this would 11820dde3e16SIlpo Järvinen * exceed the total buffer size the kernel will panic. A pointer to the 11830dde3e16SIlpo Järvinen * first byte of the extra data is returned. 
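 *
 * A minimal usage sketch (hedged; the four-byte trailer and its source
 * variable are hypothetical, not taken from this file):
 *
 *	unsigned char *p = skb_put(skb, 4);
 *	memcpy(p, &trailer, 4);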
11840dde3e16SIlpo Järvinen */ 11850dde3e16SIlpo Järvinen unsigned char *skb_put(struct sk_buff *skb, unsigned int len) 11860dde3e16SIlpo Järvinen { 11870dde3e16SIlpo Järvinen unsigned char *tmp = skb_tail_pointer(skb); 11880dde3e16SIlpo Järvinen SKB_LINEAR_ASSERT(skb); 11890dde3e16SIlpo Järvinen skb->tail += len; 11900dde3e16SIlpo Järvinen skb->len += len; 11910dde3e16SIlpo Järvinen if (unlikely(skb->tail > skb->end)) 11920dde3e16SIlpo Järvinen skb_over_panic(skb, len, __builtin_return_address(0)); 11930dde3e16SIlpo Järvinen return tmp; 11940dde3e16SIlpo Järvinen } 11950dde3e16SIlpo Järvinen EXPORT_SYMBOL(skb_put); 11960dde3e16SIlpo Järvinen 11976be8ac2fSIlpo Järvinen /** 1198c2aa270aSIlpo Järvinen * skb_push - add data to the start of a buffer 1199c2aa270aSIlpo Järvinen * @skb: buffer to use 1200c2aa270aSIlpo Järvinen * @len: amount of data to add 1201c2aa270aSIlpo Järvinen * 1202c2aa270aSIlpo Järvinen * This function extends the used data area of the buffer at the buffer 1203c2aa270aSIlpo Järvinen * start. If this would exceed the total buffer headroom the kernel will 1204c2aa270aSIlpo Järvinen * panic. A pointer to the first byte of the extra data is returned. 1205c2aa270aSIlpo Järvinen */ 1206c2aa270aSIlpo Järvinen unsigned char *skb_push(struct sk_buff *skb, unsigned int len) 1207c2aa270aSIlpo Järvinen { 1208c2aa270aSIlpo Järvinen skb->data -= len; 1209c2aa270aSIlpo Järvinen skb->len += len; 1210c2aa270aSIlpo Järvinen if (unlikely(skb->data<skb->head)) 1211c2aa270aSIlpo Järvinen skb_under_panic(skb, len, __builtin_return_address(0)); 1212c2aa270aSIlpo Järvinen return skb->data; 1213c2aa270aSIlpo Järvinen } 1214c2aa270aSIlpo Järvinen EXPORT_SYMBOL(skb_push); 1215c2aa270aSIlpo Järvinen 1216c2aa270aSIlpo Järvinen /** 12176be8ac2fSIlpo Järvinen * skb_pull - remove data from the start of a buffer 12186be8ac2fSIlpo Järvinen * @skb: buffer to use 12196be8ac2fSIlpo Järvinen * @len: amount of data to remove 12206be8ac2fSIlpo Järvinen * 12216be8ac2fSIlpo Järvinen * This function removes data from the start of a buffer, returning 12226be8ac2fSIlpo Järvinen * the memory to the headroom. A pointer to the next data in the buffer 12236be8ac2fSIlpo Järvinen * is returned. Once the data has been pulled future pushes will overwrite 12246be8ac2fSIlpo Järvinen * the old data. 12256be8ac2fSIlpo Järvinen */ 12266be8ac2fSIlpo Järvinen unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) 12276be8ac2fSIlpo Järvinen { 122847d29646SDavid S. Miller return skb_pull_inline(skb, len); 12296be8ac2fSIlpo Järvinen } 12306be8ac2fSIlpo Järvinen EXPORT_SYMBOL(skb_pull); 12316be8ac2fSIlpo Järvinen 1232419ae74eSIlpo Järvinen /** 1233419ae74eSIlpo Järvinen * skb_trim - remove end from a buffer 1234419ae74eSIlpo Järvinen * @skb: buffer to alter 1235419ae74eSIlpo Järvinen * @len: new length 1236419ae74eSIlpo Järvinen * 1237419ae74eSIlpo Järvinen * Cut the length of a buffer down by removing data from the tail. If 1238419ae74eSIlpo Järvinen * the buffer is already under the length specified it is not modified. 1239419ae74eSIlpo Järvinen * The skb must be linear. 1240419ae74eSIlpo Järvinen */ 1241419ae74eSIlpo Järvinen void skb_trim(struct sk_buff *skb, unsigned int len) 1242419ae74eSIlpo Järvinen { 1243419ae74eSIlpo Järvinen if (skb->len > len) 1244419ae74eSIlpo Järvinen __skb_trim(skb, len); 1245419ae74eSIlpo Järvinen } 1246419ae74eSIlpo Järvinen EXPORT_SYMBOL(skb_trim); 1247419ae74eSIlpo Järvinen 12483cc0e873SHerbert Xu /* Trims skb to length len. It can change skb pointers. 
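 * Callers normally reach it through the pskb_trim() wrapper; a hedged
 * sketch, where new_len is a hypothetical target length:
 *
 *	if (pskb_trim(skb, new_len))
 *		goto drop;	(returns -ENOMEM; the caller still owns skb)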
12491da177e4SLinus Torvalds */ 12501da177e4SLinus Torvalds 12513cc0e873SHerbert Xu int ___pskb_trim(struct sk_buff *skb, unsigned int len) 12521da177e4SLinus Torvalds { 125327b437c8SHerbert Xu struct sk_buff **fragp; 125427b437c8SHerbert Xu struct sk_buff *frag; 12551da177e4SLinus Torvalds int offset = skb_headlen(skb); 12561da177e4SLinus Torvalds int nfrags = skb_shinfo(skb)->nr_frags; 12571da177e4SLinus Torvalds int i; 125827b437c8SHerbert Xu int err; 125927b437c8SHerbert Xu 126027b437c8SHerbert Xu if (skb_cloned(skb) && 126127b437c8SHerbert Xu unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 126227b437c8SHerbert Xu return err; 12631da177e4SLinus Torvalds 1264f4d26fb3SHerbert Xu i = 0; 1265f4d26fb3SHerbert Xu if (offset >= len) 1266f4d26fb3SHerbert Xu goto drop_pages; 1267f4d26fb3SHerbert Xu 1268f4d26fb3SHerbert Xu for (; i < nfrags; i++) { 12699e903e08SEric Dumazet int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 127027b437c8SHerbert Xu 127127b437c8SHerbert Xu if (end < len) { 12721da177e4SLinus Torvalds offset = end; 127327b437c8SHerbert Xu continue; 12741da177e4SLinus Torvalds } 12751da177e4SLinus Torvalds 12769e903e08SEric Dumazet skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 127727b437c8SHerbert Xu 1278f4d26fb3SHerbert Xu drop_pages: 127927b437c8SHerbert Xu skb_shinfo(skb)->nr_frags = i; 128027b437c8SHerbert Xu 128127b437c8SHerbert Xu for (; i < nfrags; i++) 1282ea2ab693SIan Campbell skb_frag_unref(skb, i); 128327b437c8SHerbert Xu 128421dc3301SDavid S. Miller if (skb_has_frag_list(skb)) 128527b437c8SHerbert Xu skb_drop_fraglist(skb); 1286f4d26fb3SHerbert Xu goto done; 128727b437c8SHerbert Xu } 128827b437c8SHerbert Xu 128927b437c8SHerbert Xu for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 129027b437c8SHerbert Xu fragp = &frag->next) { 129127b437c8SHerbert Xu int end = offset + frag->len; 129227b437c8SHerbert Xu 129327b437c8SHerbert Xu if (skb_shared(frag)) { 129427b437c8SHerbert Xu struct sk_buff *nfrag; 129527b437c8SHerbert Xu 129627b437c8SHerbert Xu nfrag = skb_clone(frag, GFP_ATOMIC); 129727b437c8SHerbert Xu if (unlikely(!nfrag)) 129827b437c8SHerbert Xu return -ENOMEM; 129927b437c8SHerbert Xu 130027b437c8SHerbert Xu nfrag->next = frag->next; 130185bb2a60SEric Dumazet consume_skb(frag); 130227b437c8SHerbert Xu frag = nfrag; 130327b437c8SHerbert Xu *fragp = frag; 130427b437c8SHerbert Xu } 130527b437c8SHerbert Xu 130627b437c8SHerbert Xu if (end < len) { 130727b437c8SHerbert Xu offset = end; 130827b437c8SHerbert Xu continue; 130927b437c8SHerbert Xu } 131027b437c8SHerbert Xu 131127b437c8SHerbert Xu if (end > len && 131227b437c8SHerbert Xu unlikely((err = pskb_trim(frag, len - offset)))) 131327b437c8SHerbert Xu return err; 131427b437c8SHerbert Xu 131527b437c8SHerbert Xu if (frag->next) 131627b437c8SHerbert Xu skb_drop_list(&frag->next); 131727b437c8SHerbert Xu break; 131827b437c8SHerbert Xu } 131927b437c8SHerbert Xu 1320f4d26fb3SHerbert Xu done: 132127b437c8SHerbert Xu if (len > skb_headlen(skb)) { 13221da177e4SLinus Torvalds skb->data_len -= skb->len - len; 13231da177e4SLinus Torvalds skb->len = len; 13241da177e4SLinus Torvalds } else { 13251da177e4SLinus Torvalds skb->len = len; 13261da177e4SLinus Torvalds skb->data_len = 0; 132727a884dcSArnaldo Carvalho de Melo skb_set_tail_pointer(skb, len); 13281da177e4SLinus Torvalds } 13291da177e4SLinus Torvalds 13301da177e4SLinus Torvalds return 0; 13311da177e4SLinus Torvalds } 1332b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(___pskb_trim); 13331da177e4SLinus Torvalds 13341da177e4SLinus Torvalds /** 13351da177e4SLinus Torvalds * __pskb_pull_tail - advance tail of skb header 13361da177e4SLinus Torvalds * @skb: buffer to reallocate 13371da177e4SLinus Torvalds * @delta: number of bytes to advance tail 13381da177e4SLinus Torvalds * 13391da177e4SLinus Torvalds * The function makes sense only on a fragmented &sk_buff; 13401da177e4SLinus Torvalds * it expands the header, moving its tail forward and copying the 13411da177e4SLinus Torvalds * necessary data from the fragmented part. 13421da177e4SLinus Torvalds * 13431da177e4SLinus Torvalds * The &sk_buff MUST have a reference count of 1. 13441da177e4SLinus Torvalds * 13451da177e4SLinus Torvalds * Returns %NULL (and the &sk_buff does not change) if the pull failed, 13461da177e4SLinus Torvalds * or the value of the new tail of the skb on success. 13471da177e4SLinus Torvalds * 13481da177e4SLinus Torvalds * All the pointers pointing into the skb header may change and must be 13491da177e4SLinus Torvalds * reloaded after a call to this function. 13501da177e4SLinus Torvalds */ 13511da177e4SLinus Torvalds 13521da177e4SLinus Torvalds /* Moves the tail of the skb head forward, copying data from the fragmented 13531da177e4SLinus Torvalds * part when necessary. 13541da177e4SLinus Torvalds * 1. It may fail due to malloc failure. 13551da177e4SLinus Torvalds * 2. It may change skb pointers. 13561da177e4SLinus Torvalds * 13571da177e4SLinus Torvalds * It is pretty complicated. Luckily, it is called only in exceptional cases. 13581da177e4SLinus Torvalds */ 13591da177e4SLinus Torvalds unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) 13601da177e4SLinus Torvalds { 13611da177e4SLinus Torvalds /* If the skb does not have enough free space at the tail, get a new 13621da177e4SLinus Torvalds * one plus 128 bytes for future expansions. If we have enough room at 13631da177e4SLinus Torvalds * the tail, reallocate without expansion only if the skb is cloned. 13641da177e4SLinus Torvalds */ 13654305b541SArnaldo Carvalho de Melo int i, k, eat = (skb->tail + delta) - skb->end; 13661da177e4SLinus Torvalds 13671da177e4SLinus Torvalds if (eat > 0 || skb_cloned(skb)) { 13681da177e4SLinus Torvalds if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 13691da177e4SLinus Torvalds GFP_ATOMIC)) 13701da177e4SLinus Torvalds return NULL; 13711da177e4SLinus Torvalds } 13721da177e4SLinus Torvalds 137327a884dcSArnaldo Carvalho de Melo if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)) 13741da177e4SLinus Torvalds BUG(); 13751da177e4SLinus Torvalds 13761da177e4SLinus Torvalds /* Optimization: no fragments, no reason to preestimate the 13771da177e4SLinus Torvalds * size of the pulled pages. Superb. 13781da177e4SLinus Torvalds */ 137921dc3301SDavid S. Miller if (!skb_has_frag_list(skb)) 13801da177e4SLinus Torvalds goto pull_pages; 13811da177e4SLinus Torvalds 13821da177e4SLinus Torvalds /* Estimate the size of the pulled pages. */ 13831da177e4SLinus Torvalds eat = delta; 13841da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 13859e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 13869e903e08SEric Dumazet 13879e903e08SEric Dumazet if (size >= eat) 13881da177e4SLinus Torvalds goto pull_pages; 13899e903e08SEric Dumazet eat -= size; 13901da177e4SLinus Torvalds } 13911da177e4SLinus Torvalds 13921da177e4SLinus Torvalds /* If we need to update the frag list, we are in trouble.
13931da177e4SLinus Torvalds * Certainly, it is possible to add an offset to the skb data, 13941da177e4SLinus Torvalds * but taking into account that pulling is expected to 13951da177e4SLinus Torvalds * be a very rare operation, it is worth fighting against 13961da177e4SLinus Torvalds * further bloating of the skb head and crucifying ourselves here instead. 13971da177e4SLinus Torvalds * Pure masochism, indeed. 8)8) 13981da177e4SLinus Torvalds */ 13991da177e4SLinus Torvalds if (eat) { 14001da177e4SLinus Torvalds struct sk_buff *list = skb_shinfo(skb)->frag_list; 14011da177e4SLinus Torvalds struct sk_buff *clone = NULL; 14021da177e4SLinus Torvalds struct sk_buff *insp = NULL; 14031da177e4SLinus Torvalds 14041da177e4SLinus Torvalds do { 140509a62660SKris Katterjohn BUG_ON(!list); 14061da177e4SLinus Torvalds 14071da177e4SLinus Torvalds if (list->len <= eat) { 14081da177e4SLinus Torvalds /* Eaten as a whole. */ 14091da177e4SLinus Torvalds eat -= list->len; 14101da177e4SLinus Torvalds list = list->next; 14111da177e4SLinus Torvalds insp = list; 14121da177e4SLinus Torvalds } else { 14131da177e4SLinus Torvalds /* Eaten partially. */ 14141da177e4SLinus Torvalds 14151da177e4SLinus Torvalds if (skb_shared(list)) { 14161da177e4SLinus Torvalds /* Sucks! We need to fork the list. :-( */ 14171da177e4SLinus Torvalds clone = skb_clone(list, GFP_ATOMIC); 14181da177e4SLinus Torvalds if (!clone) 14191da177e4SLinus Torvalds return NULL; 14201da177e4SLinus Torvalds insp = list->next; 14211da177e4SLinus Torvalds list = clone; 14221da177e4SLinus Torvalds } else { 14231da177e4SLinus Torvalds /* This may be pulled without 14241da177e4SLinus Torvalds * problems. */ 14251da177e4SLinus Torvalds insp = list; 14261da177e4SLinus Torvalds } 14271da177e4SLinus Torvalds if (!pskb_pull(list, eat)) { 14281da177e4SLinus Torvalds kfree_skb(clone); 14291da177e4SLinus Torvalds return NULL; 14301da177e4SLinus Torvalds } 14311da177e4SLinus Torvalds break; 14321da177e4SLinus Torvalds } 14331da177e4SLinus Torvalds } while (eat); 14341da177e4SLinus Torvalds 14351da177e4SLinus Torvalds /* Free the pulled-out fragments. */ 14361da177e4SLinus Torvalds while ((list = skb_shinfo(skb)->frag_list) != insp) { 14371da177e4SLinus Torvalds skb_shinfo(skb)->frag_list = list->next; 14381da177e4SLinus Torvalds kfree_skb(list); 14391da177e4SLinus Torvalds } 14401da177e4SLinus Torvalds /* And insert the new clone at the head. */ 14411da177e4SLinus Torvalds if (clone) { 14421da177e4SLinus Torvalds clone->next = list; 14431da177e4SLinus Torvalds skb_shinfo(skb)->frag_list = clone; 14441da177e4SLinus Torvalds } 14451da177e4SLinus Torvalds } 14461da177e4SLinus Torvalds /* Success! Now we may commit changes to skb data.
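 * (The frag list has been trimmed or forked above, so the pull_pages
 * pass below has no remaining failure paths.)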
*/ 14471da177e4SLinus Torvalds 14481da177e4SLinus Torvalds pull_pages: 14491da177e4SLinus Torvalds eat = delta; 14501da177e4SLinus Torvalds k = 0; 14511da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 14529e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 14539e903e08SEric Dumazet 14549e903e08SEric Dumazet if (size <= eat) { 1455ea2ab693SIan Campbell skb_frag_unref(skb, i); 14569e903e08SEric Dumazet eat -= size; 14571da177e4SLinus Torvalds } else { 14581da177e4SLinus Torvalds skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 14591da177e4SLinus Torvalds if (eat) { 14601da177e4SLinus Torvalds skb_shinfo(skb)->frags[k].page_offset += eat; 14619e903e08SEric Dumazet skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); 14621da177e4SLinus Torvalds eat = 0; 14631da177e4SLinus Torvalds } 14641da177e4SLinus Torvalds k++; 14651da177e4SLinus Torvalds } 14661da177e4SLinus Torvalds } 14671da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = k; 14681da177e4SLinus Torvalds 14691da177e4SLinus Torvalds skb->tail += delta; 14701da177e4SLinus Torvalds skb->data_len -= delta; 14711da177e4SLinus Torvalds 147227a884dcSArnaldo Carvalho de Melo return skb_tail_pointer(skb); 14731da177e4SLinus Torvalds } 1474b4ac530fSDavid S. Miller EXPORT_SYMBOL(__pskb_pull_tail); 14751da177e4SLinus Torvalds 147622019b17SEric Dumazet /** 147722019b17SEric Dumazet * skb_copy_bits - copy bits from skb to kernel buffer 147822019b17SEric Dumazet * @skb: source skb 147922019b17SEric Dumazet * @offset: offset in source 148022019b17SEric Dumazet * @to: destination buffer 148122019b17SEric Dumazet * @len: number of bytes to copy 148222019b17SEric Dumazet * 148322019b17SEric Dumazet * Copy the specified number of bytes from the source skb to the 148422019b17SEric Dumazet * destination buffer. 148522019b17SEric Dumazet * 148622019b17SEric Dumazet * CAUTION ! : 148722019b17SEric Dumazet * If its prototype is ever changed, 148822019b17SEric Dumazet * check arch/{*}/net/{*}.S files, 148922019b17SEric Dumazet * since it is called from BPF assembly code. 149022019b17SEric Dumazet */ 14911da177e4SLinus Torvalds int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 14921da177e4SLinus Torvalds { 14931a028e50SDavid S. Miller int start = skb_headlen(skb); 1494fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 1495fbb398a8SDavid S. Miller int i, copy; 14961da177e4SLinus Torvalds 14971da177e4SLinus Torvalds if (offset > (int)skb->len - len) 14981da177e4SLinus Torvalds goto fault; 14991da177e4SLinus Torvalds 15001da177e4SLinus Torvalds /* Copy header. */ 15011a028e50SDavid S. Miller if ((copy = start - offset) > 0) { 15021da177e4SLinus Torvalds if (copy > len) 15031da177e4SLinus Torvalds copy = len; 1504d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, offset, to, copy); 15051da177e4SLinus Torvalds if ((len -= copy) == 0) 15061da177e4SLinus Torvalds return 0; 15071da177e4SLinus Torvalds offset += copy; 15081da177e4SLinus Torvalds to += copy; 15091da177e4SLinus Torvalds } 15101da177e4SLinus Torvalds 15111da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 15121a028e50SDavid S. Miller int end; 151351c56b00SEric Dumazet skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 15141da177e4SLinus Torvalds 1515547b792cSIlpo Järvinen WARN_ON(start > offset + len); 15161a028e50SDavid S. 
Miller 151751c56b00SEric Dumazet end = start + skb_frag_size(f); 15181da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 15191da177e4SLinus Torvalds u8 *vaddr; 15201da177e4SLinus Torvalds 15211da177e4SLinus Torvalds if (copy > len) 15221da177e4SLinus Torvalds copy = len; 15231da177e4SLinus Torvalds 152451c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(f)); 15251da177e4SLinus Torvalds memcpy(to, 152651c56b00SEric Dumazet vaddr + f->page_offset + offset - start, 152751c56b00SEric Dumazet copy); 152851c56b00SEric Dumazet kunmap_atomic(vaddr); 15291da177e4SLinus Torvalds 15301da177e4SLinus Torvalds if ((len -= copy) == 0) 15311da177e4SLinus Torvalds return 0; 15321da177e4SLinus Torvalds offset += copy; 15331da177e4SLinus Torvalds to += copy; 15341da177e4SLinus Torvalds } 15351a028e50SDavid S. Miller start = end; 15361da177e4SLinus Torvalds } 15371da177e4SLinus Torvalds 1538fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 15391a028e50SDavid S. Miller int end; 15401da177e4SLinus Torvalds 1541547b792cSIlpo Järvinen WARN_ON(start > offset + len); 15421a028e50SDavid S. Miller 1543fbb398a8SDavid S. Miller end = start + frag_iter->len; 15441da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 15451da177e4SLinus Torvalds if (copy > len) 15461da177e4SLinus Torvalds copy = len; 1547fbb398a8SDavid S. Miller if (skb_copy_bits(frag_iter, offset - start, to, copy)) 15481da177e4SLinus Torvalds goto fault; 15491da177e4SLinus Torvalds if ((len -= copy) == 0) 15501da177e4SLinus Torvalds return 0; 15511da177e4SLinus Torvalds offset += copy; 15521da177e4SLinus Torvalds to += copy; 15531da177e4SLinus Torvalds } 15541a028e50SDavid S. Miller start = end; 15551da177e4SLinus Torvalds } 1556a6686f2fSShirley Ma 15571da177e4SLinus Torvalds if (!len) 15581da177e4SLinus Torvalds return 0; 15591da177e4SLinus Torvalds 15601da177e4SLinus Torvalds fault: 15611da177e4SLinus Torvalds return -EFAULT; 15621da177e4SLinus Torvalds } 1563b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_bits); 15641da177e4SLinus Torvalds 15659c55e01cSJens Axboe /* 15669c55e01cSJens Axboe * Callback from splice_to_pipe(), if we need to release some pages 15679c55e01cSJens Axboe * at the end of the spd in case we error'ed out in filling the pipe. 15689c55e01cSJens Axboe */ 15699c55e01cSJens Axboe static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 15709c55e01cSJens Axboe { 15718b9d3728SJarek Poplawski put_page(spd->pages[i]); 15728b9d3728SJarek Poplawski } 15739c55e01cSJens Axboe 1574a108d5f3SDavid S. 
Miller static struct page *linear_to_page(struct page *page, unsigned int *len, 15754fb66994SJarek Poplawski unsigned int *offset, 15767a67e56fSJarek Poplawski struct sk_buff *skb, struct sock *sk) 15778b9d3728SJarek Poplawski { 15784fb66994SJarek Poplawski struct page *p = sk->sk_sndmsg_page; 15794fb66994SJarek Poplawski unsigned int off; 15808b9d3728SJarek Poplawski 15814fb66994SJarek Poplawski if (!p) { 15824fb66994SJarek Poplawski new_page: 15834fb66994SJarek Poplawski p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0); 15848b9d3728SJarek Poplawski if (!p) 15858b9d3728SJarek Poplawski return NULL; 15864fb66994SJarek Poplawski 15874fb66994SJarek Poplawski off = sk->sk_sndmsg_off = 0; 15884fb66994SJarek Poplawski /* hold one ref to this page until it's full */ 15894fb66994SJarek Poplawski } else { 15904fb66994SJarek Poplawski unsigned int mlen; 15914fb66994SJarek Poplawski 1592e66e9a31SEric Dumazet /* If we are the only user of the page, we can reset offset */ 1593e66e9a31SEric Dumazet if (page_count(p) == 1) 1594e66e9a31SEric Dumazet sk->sk_sndmsg_off = 0; 15954fb66994SJarek Poplawski off = sk->sk_sndmsg_off; 15964fb66994SJarek Poplawski mlen = PAGE_SIZE - off; 15974fb66994SJarek Poplawski if (mlen < 64 && mlen < *len) { 15984fb66994SJarek Poplawski put_page(p); 15994fb66994SJarek Poplawski goto new_page; 16004fb66994SJarek Poplawski } 16014fb66994SJarek Poplawski 16024fb66994SJarek Poplawski *len = min_t(unsigned int, *len, mlen); 16034fb66994SJarek Poplawski } 16044fb66994SJarek Poplawski 16054fb66994SJarek Poplawski memcpy(page_address(p) + off, page_address(page) + *offset, *len); 16064fb66994SJarek Poplawski sk->sk_sndmsg_off += *len; 16074fb66994SJarek Poplawski *offset = off; 16088b9d3728SJarek Poplawski 16098b9d3728SJarek Poplawski return p; 16109c55e01cSJens Axboe } 16119c55e01cSJens Axboe 161241c73a0dSEric Dumazet static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 161341c73a0dSEric Dumazet struct page *page, 161441c73a0dSEric Dumazet unsigned int offset) 161541c73a0dSEric Dumazet { 161641c73a0dSEric Dumazet return spd->nr_pages && 161741c73a0dSEric Dumazet spd->pages[spd->nr_pages - 1] == page && 161841c73a0dSEric Dumazet (spd->partial[spd->nr_pages - 1].offset + 161941c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len == offset); 162041c73a0dSEric Dumazet } 162141c73a0dSEric Dumazet 16229c55e01cSJens Axboe /* 16239c55e01cSJens Axboe * Fill page/offset/length into spd, if it can hold more pages. 16249c55e01cSJens Axboe */ 1625a108d5f3SDavid S. Miller static bool spd_fill_page(struct splice_pipe_desc *spd, 162635f3d14dSJens Axboe struct pipe_inode_info *pipe, struct page *page, 16274fb66994SJarek Poplawski unsigned int *len, unsigned int offset, 1628d7ccf7c0SEric Dumazet struct sk_buff *skb, bool linear, 16297a67e56fSJarek Poplawski struct sock *sk) 16309c55e01cSJens Axboe { 163141c73a0dSEric Dumazet if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 1632a108d5f3SDavid S. Miller return true; 16339c55e01cSJens Axboe 16348b9d3728SJarek Poplawski if (linear) { 16357a67e56fSJarek Poplawski page = linear_to_page(page, len, &offset, skb, sk); 16368b9d3728SJarek Poplawski if (!page) 1637a108d5f3SDavid S. Miller return true; 163841c73a0dSEric Dumazet } 163941c73a0dSEric Dumazet if (spd_can_coalesce(spd, page, offset)) { 164041c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len += *len; 1641a108d5f3SDavid S. 
Miller return false; 164241c73a0dSEric Dumazet } 16438b9d3728SJarek Poplawski get_page(page); 16449c55e01cSJens Axboe spd->pages[spd->nr_pages] = page; 16454fb66994SJarek Poplawski spd->partial[spd->nr_pages].len = *len; 16469c55e01cSJens Axboe spd->partial[spd->nr_pages].offset = offset; 16479c55e01cSJens Axboe spd->nr_pages++; 16488b9d3728SJarek Poplawski 1649a108d5f3SDavid S. Miller return false; 16509c55e01cSJens Axboe } 16519c55e01cSJens Axboe 16522870c43dSOctavian Purdila static inline void __segment_seek(struct page **page, unsigned int *poff, 16532870c43dSOctavian Purdila unsigned int *plen, unsigned int off) 16542870c43dSOctavian Purdila { 1655ce3dd395SJarek Poplawski unsigned long n; 1656ce3dd395SJarek Poplawski 16572870c43dSOctavian Purdila *poff += off; 1658ce3dd395SJarek Poplawski n = *poff / PAGE_SIZE; 1659ce3dd395SJarek Poplawski if (n) 1660ce3dd395SJarek Poplawski *page = nth_page(*page, n); 1661ce3dd395SJarek Poplawski 16622870c43dSOctavian Purdila *poff = *poff % PAGE_SIZE; 16632870c43dSOctavian Purdila *plen -= off; 16642870c43dSOctavian Purdila } 16652870c43dSOctavian Purdila 1666a108d5f3SDavid S. Miller static bool __splice_segment(struct page *page, unsigned int poff, 16672870c43dSOctavian Purdila unsigned int plen, unsigned int *off, 16682870c43dSOctavian Purdila unsigned int *len, struct sk_buff *skb, 1669d7ccf7c0SEric Dumazet struct splice_pipe_desc *spd, bool linear, 167035f3d14dSJens Axboe struct sock *sk, 167135f3d14dSJens Axboe struct pipe_inode_info *pipe) 16729c55e01cSJens Axboe { 16732870c43dSOctavian Purdila if (!*len) 1674a108d5f3SDavid S. Miller return true; 16759c55e01cSJens Axboe 16762870c43dSOctavian Purdila /* skip this segment if already processed */ 16772870c43dSOctavian Purdila if (*off >= plen) { 16782870c43dSOctavian Purdila *off -= plen; 1679a108d5f3SDavid S. Miller return false; 16802870c43dSOctavian Purdila } 16812870c43dSOctavian Purdila 16822870c43dSOctavian Purdila /* ignore any bits we already processed */ 16832870c43dSOctavian Purdila if (*off) { 16842870c43dSOctavian Purdila __segment_seek(&page, &poff, &plen, *off); 16852870c43dSOctavian Purdila *off = 0; 16862870c43dSOctavian Purdila } 16872870c43dSOctavian Purdila 16882870c43dSOctavian Purdila do { 16892870c43dSOctavian Purdila unsigned int flen = min(*len, plen); 16902870c43dSOctavian Purdila 16912870c43dSOctavian Purdila /* the linear region may spread across several pages */ 16922870c43dSOctavian Purdila flen = min_t(unsigned int, flen, PAGE_SIZE - poff); 16932870c43dSOctavian Purdila 169435f3d14dSJens Axboe if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk)) 1695a108d5f3SDavid S. Miller return true; 16962870c43dSOctavian Purdila 16972870c43dSOctavian Purdila __segment_seek(&page, &poff, &plen, flen); 16982870c43dSOctavian Purdila *len -= flen; 16992870c43dSOctavian Purdila 17002870c43dSOctavian Purdila } while (*len && plen); 17012870c43dSOctavian Purdila 1702a108d5f3SDavid S. Miller return false; 1703db43a282SOctavian Purdila } 17049c55e01cSJens Axboe 17059c55e01cSJens Axboe /* 1706a108d5f3SDavid S. Miller * Map linear and fragment data from the skb to spd. It reports true if the 17072870c43dSOctavian Purdila * pipe is full or if we already spliced the requested length. 17089c55e01cSJens Axboe */ 1709a108d5f3SDavid S. 
Miller static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 171035f3d14dSJens Axboe unsigned int *offset, unsigned int *len, 171135f3d14dSJens Axboe struct splice_pipe_desc *spd, struct sock *sk) 17122870c43dSOctavian Purdila { 17132870c43dSOctavian Purdila int seg; 17149c55e01cSJens Axboe 17151d0c0b32SEric Dumazet /* map the linear part : 17162996d31fSAlexander Duyck * If skb->head_frag is set, this 'linear' part is backed by a 17172996d31fSAlexander Duyck * fragment, and if the head is not shared with any clones then 17182996d31fSAlexander Duyck * we can avoid a copy since we own the head portion of this page. 17199c55e01cSJens Axboe */ 17202870c43dSOctavian Purdila if (__splice_segment(virt_to_page(skb->data), 17212870c43dSOctavian Purdila (unsigned long) skb->data & (PAGE_SIZE - 1), 17222870c43dSOctavian Purdila skb_headlen(skb), 17231d0c0b32SEric Dumazet offset, len, skb, spd, 17243a7c1ee4SAlexander Duyck skb_head_is_locked(skb), 17251d0c0b32SEric Dumazet sk, pipe)) 1726a108d5f3SDavid S. Miller return true; 17279c55e01cSJens Axboe 17289c55e01cSJens Axboe /* 17299c55e01cSJens Axboe * then map the fragments 17309c55e01cSJens Axboe */ 17319c55e01cSJens Axboe for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 17329c55e01cSJens Axboe const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 17339c55e01cSJens Axboe 1734ea2ab693SIan Campbell if (__splice_segment(skb_frag_page(f), 17359e903e08SEric Dumazet f->page_offset, skb_frag_size(f), 1736d7ccf7c0SEric Dumazet offset, len, skb, spd, false, sk, pipe)) 1737a108d5f3SDavid S. Miller return true; 17389c55e01cSJens Axboe } 17399c55e01cSJens Axboe 1740a108d5f3SDavid S. Miller return false; 17419c55e01cSJens Axboe } 17429c55e01cSJens Axboe 17439c55e01cSJens Axboe /* 17449c55e01cSJens Axboe * Map data from the skb to a pipe. Should handle both the linear part, 17459c55e01cSJens Axboe * the fragments, and the frag list. It does NOT handle frag lists within 17469c55e01cSJens Axboe * the frag list, if such a thing exists. We'd probably need to recurse to 17479c55e01cSJens Axboe * handle that cleanly. 17489c55e01cSJens Axboe */ 17498b9d3728SJarek Poplawski int skb_splice_bits(struct sk_buff *skb, unsigned int offset, 17509c55e01cSJens Axboe struct pipe_inode_info *pipe, unsigned int tlen, 17519c55e01cSJens Axboe unsigned int flags) 17529c55e01cSJens Axboe { 175341c73a0dSEric Dumazet struct partial_page partial[MAX_SKB_FRAGS]; 175441c73a0dSEric Dumazet struct page *pages[MAX_SKB_FRAGS]; 17559c55e01cSJens Axboe struct splice_pipe_desc spd = { 17569c55e01cSJens Axboe .pages = pages, 17579c55e01cSJens Axboe .partial = partial, 17589c55e01cSJens Axboe .flags = flags, 17599c55e01cSJens Axboe .ops = &sock_pipe_buf_ops, 17609c55e01cSJens Axboe .spd_release = sock_spd_release, 17619c55e01cSJens Axboe }; 1762fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 17637a67e56fSJarek Poplawski struct sock *sk = skb->sk; 176435f3d14dSJens Axboe int ret = 0; 176535f3d14dSJens Axboe 17669c55e01cSJens Axboe /* 17679c55e01cSJens Axboe * __skb_splice_bits() only fails if the output has no room left, 17689c55e01cSJens Axboe * so no point in going over the frag_list for the error case. 
17699c55e01cSJens Axboe */ 177035f3d14dSJens Axboe if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) 17719c55e01cSJens Axboe goto done; 17729c55e01cSJens Axboe else if (!tlen) 17739c55e01cSJens Axboe goto done; 17749c55e01cSJens Axboe 17759c55e01cSJens Axboe /* 17769c55e01cSJens Axboe * now see if we have a frag_list to map 17779c55e01cSJens Axboe */ 1778fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 1779fbb398a8SDavid S. Miller if (!tlen) 17809c55e01cSJens Axboe break; 178135f3d14dSJens Axboe if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk)) 1782fbb398a8SDavid S. Miller break; 17839c55e01cSJens Axboe } 17849c55e01cSJens Axboe 17859c55e01cSJens Axboe done: 17869c55e01cSJens Axboe if (spd.nr_pages) { 17879c55e01cSJens Axboe /* 17889c55e01cSJens Axboe * Drop the socket lock, otherwise we have reverse 17899c55e01cSJens Axboe * locking dependencies between sk_lock and i_mutex 17909c55e01cSJens Axboe * here as compared to sendfile(). We enter here 17919c55e01cSJens Axboe * with the socket lock held, and splice_to_pipe() will 17929c55e01cSJens Axboe * grab the pipe inode lock. For sendfile() emulation, 17939c55e01cSJens Axboe * we call into ->sendpage() with the i_mutex lock held 17949c55e01cSJens Axboe * and networking will grab the socket lock. 17959c55e01cSJens Axboe */ 1796293ad604SOctavian Purdila release_sock(sk); 17979c55e01cSJens Axboe ret = splice_to_pipe(pipe, &spd); 1798293ad604SOctavian Purdila lock_sock(sk); 17999c55e01cSJens Axboe } 18009c55e01cSJens Axboe 180135f3d14dSJens Axboe return ret; 18029c55e01cSJens Axboe } 18039c55e01cSJens Axboe 1804357b40a1SHerbert Xu /** 1805357b40a1SHerbert Xu * skb_store_bits - store bits from kernel buffer to skb 1806357b40a1SHerbert Xu * @skb: destination buffer 1807357b40a1SHerbert Xu * @offset: offset in destination 1808357b40a1SHerbert Xu * @from: source buffer 1809357b40a1SHerbert Xu * @len: number of bytes to copy 1810357b40a1SHerbert Xu * 1811357b40a1SHerbert Xu * Copy the specified number of bytes from the source buffer to the 1812357b40a1SHerbert Xu * destination skb. This function handles all the messy bits of 1813357b40a1SHerbert Xu * traversing fragment lists and such. 1814357b40a1SHerbert Xu */ 1815357b40a1SHerbert Xu 18160c6fcc8aSStephen Hemminger int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 1817357b40a1SHerbert Xu { 18181a028e50SDavid S. Miller int start = skb_headlen(skb); 1819fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 1820fbb398a8SDavid S. Miller int i, copy; 1821357b40a1SHerbert Xu 1822357b40a1SHerbert Xu if (offset > (int)skb->len - len) 1823357b40a1SHerbert Xu goto fault; 1824357b40a1SHerbert Xu 18251a028e50SDavid S. Miller if ((copy = start - offset) > 0) { 1826357b40a1SHerbert Xu if (copy > len) 1827357b40a1SHerbert Xu copy = len; 182827d7ff46SArnaldo Carvalho de Melo skb_copy_to_linear_data_offset(skb, offset, from, copy); 1829357b40a1SHerbert Xu if ((len -= copy) == 0) 1830357b40a1SHerbert Xu return 0; 1831357b40a1SHerbert Xu offset += copy; 1832357b40a1SHerbert Xu from += copy; 1833357b40a1SHerbert Xu } 1834357b40a1SHerbert Xu 1835357b40a1SHerbert Xu for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1836357b40a1SHerbert Xu skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 18371a028e50SDavid S. Miller int end; 1838357b40a1SHerbert Xu 1839547b792cSIlpo Järvinen WARN_ON(start > offset + len); 18401a028e50SDavid S. 
Miller 18419e903e08SEric Dumazet end = start + skb_frag_size(frag); 1842357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 1843357b40a1SHerbert Xu u8 *vaddr; 1844357b40a1SHerbert Xu 1845357b40a1SHerbert Xu if (copy > len) 1846357b40a1SHerbert Xu copy = len; 1847357b40a1SHerbert Xu 184851c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(frag)); 18491a028e50SDavid S. Miller memcpy(vaddr + frag->page_offset + offset - start, 18501a028e50SDavid S. Miller from, copy); 185151c56b00SEric Dumazet kunmap_atomic(vaddr); 1852357b40a1SHerbert Xu 1853357b40a1SHerbert Xu if ((len -= copy) == 0) 1854357b40a1SHerbert Xu return 0; 1855357b40a1SHerbert Xu offset += copy; 1856357b40a1SHerbert Xu from += copy; 1857357b40a1SHerbert Xu } 18581a028e50SDavid S. Miller start = end; 1859357b40a1SHerbert Xu } 1860357b40a1SHerbert Xu 1861fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 18621a028e50SDavid S. Miller int end; 1863357b40a1SHerbert Xu 1864547b792cSIlpo Järvinen WARN_ON(start > offset + len); 18651a028e50SDavid S. Miller 1866fbb398a8SDavid S. Miller end = start + frag_iter->len; 1867357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 1868357b40a1SHerbert Xu if (copy > len) 1869357b40a1SHerbert Xu copy = len; 1870fbb398a8SDavid S. Miller if (skb_store_bits(frag_iter, offset - start, 18711a028e50SDavid S. Miller from, copy)) 1872357b40a1SHerbert Xu goto fault; 1873357b40a1SHerbert Xu if ((len -= copy) == 0) 1874357b40a1SHerbert Xu return 0; 1875357b40a1SHerbert Xu offset += copy; 1876357b40a1SHerbert Xu from += copy; 1877357b40a1SHerbert Xu } 18781a028e50SDavid S. Miller start = end; 1879357b40a1SHerbert Xu } 1880357b40a1SHerbert Xu if (!len) 1881357b40a1SHerbert Xu return 0; 1882357b40a1SHerbert Xu 1883357b40a1SHerbert Xu fault: 1884357b40a1SHerbert Xu return -EFAULT; 1885357b40a1SHerbert Xu } 1886357b40a1SHerbert Xu EXPORT_SYMBOL(skb_store_bits); 1887357b40a1SHerbert Xu 18881da177e4SLinus Torvalds /* Checksum skb data. */ 18891da177e4SLinus Torvalds 18902bbbc868SAl Viro __wsum skb_checksum(const struct sk_buff *skb, int offset, 18912bbbc868SAl Viro int len, __wsum csum) 18921da177e4SLinus Torvalds { 18931a028e50SDavid S. Miller int start = skb_headlen(skb); 18941a028e50SDavid S. Miller int i, copy = start - offset; 1895fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 18961da177e4SLinus Torvalds int pos = 0; 18971da177e4SLinus Torvalds 18981da177e4SLinus Torvalds /* Checksum header. */ 18991da177e4SLinus Torvalds if (copy > 0) { 19001da177e4SLinus Torvalds if (copy > len) 19011da177e4SLinus Torvalds copy = len; 19021da177e4SLinus Torvalds csum = csum_partial(skb->data + offset, copy, csum); 19031da177e4SLinus Torvalds if ((len -= copy) == 0) 19041da177e4SLinus Torvalds return csum; 19051da177e4SLinus Torvalds offset += copy; 19061da177e4SLinus Torvalds pos = copy; 19071da177e4SLinus Torvalds } 19081da177e4SLinus Torvalds 19091da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 19101a028e50SDavid S. Miller int end; 191151c56b00SEric Dumazet skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 19121da177e4SLinus Torvalds 1913547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19141a028e50SDavid S. 
Miller 191551c56b00SEric Dumazet end = start + skb_frag_size(frag); 19161da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 191744bb9363SAl Viro __wsum csum2; 19181da177e4SLinus Torvalds u8 *vaddr; 19191da177e4SLinus Torvalds 19201da177e4SLinus Torvalds if (copy > len) 19211da177e4SLinus Torvalds copy = len; 192251c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(frag)); 19231a028e50SDavid S. Miller csum2 = csum_partial(vaddr + frag->page_offset + 19241a028e50SDavid S. Miller offset - start, copy, 0); 192551c56b00SEric Dumazet kunmap_atomic(vaddr); 19261da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 19271da177e4SLinus Torvalds if (!(len -= copy)) 19281da177e4SLinus Torvalds return csum; 19291da177e4SLinus Torvalds offset += copy; 19301da177e4SLinus Torvalds pos += copy; 19311da177e4SLinus Torvalds } 19321a028e50SDavid S. Miller start = end; 19331da177e4SLinus Torvalds } 19341da177e4SLinus Torvalds 1935fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 19361a028e50SDavid S. Miller int end; 19371da177e4SLinus Torvalds 1938547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19391a028e50SDavid S. Miller 1940fbb398a8SDavid S. Miller end = start + frag_iter->len; 19411da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 19425f92a738SAl Viro __wsum csum2; 19431da177e4SLinus Torvalds if (copy > len) 19441da177e4SLinus Torvalds copy = len; 1945fbb398a8SDavid S. Miller csum2 = skb_checksum(frag_iter, offset - start, 19461a028e50SDavid S. Miller copy, 0); 19471da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 19481da177e4SLinus Torvalds if ((len -= copy) == 0) 19491da177e4SLinus Torvalds return csum; 19501da177e4SLinus Torvalds offset += copy; 19511da177e4SLinus Torvalds pos += copy; 19521da177e4SLinus Torvalds } 19531a028e50SDavid S. Miller start = end; 19541da177e4SLinus Torvalds } 195509a62660SKris Katterjohn BUG_ON(len); 19561da177e4SLinus Torvalds 19571da177e4SLinus Torvalds return csum; 19581da177e4SLinus Torvalds } 1959b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_checksum); 19601da177e4SLinus Torvalds 19611da177e4SLinus Torvalds /* Both of above in one bottle. */ 19621da177e4SLinus Torvalds 196381d77662SAl Viro __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 196481d77662SAl Viro u8 *to, int len, __wsum csum) 19651da177e4SLinus Torvalds { 19661a028e50SDavid S. Miller int start = skb_headlen(skb); 19671a028e50SDavid S. Miller int i, copy = start - offset; 1968fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 19691da177e4SLinus Torvalds int pos = 0; 19701da177e4SLinus Torvalds 19711da177e4SLinus Torvalds /* Copy header. */ 19721da177e4SLinus Torvalds if (copy > 0) { 19731da177e4SLinus Torvalds if (copy > len) 19741da177e4SLinus Torvalds copy = len; 19751da177e4SLinus Torvalds csum = csum_partial_copy_nocheck(skb->data + offset, to, 19761da177e4SLinus Torvalds copy, csum); 19771da177e4SLinus Torvalds if ((len -= copy) == 0) 19781da177e4SLinus Torvalds return csum; 19791da177e4SLinus Torvalds offset += copy; 19801da177e4SLinus Torvalds to += copy; 19811da177e4SLinus Torvalds pos = copy; 19821da177e4SLinus Torvalds } 19831da177e4SLinus Torvalds 19841da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 19851a028e50SDavid S. Miller int end; 19861da177e4SLinus Torvalds 1987547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19881a028e50SDavid S. 
Miller 19899e903e08SEric Dumazet end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 19901da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 19915084205fSAl Viro __wsum csum2; 19921da177e4SLinus Torvalds u8 *vaddr; 19931da177e4SLinus Torvalds skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 19941da177e4SLinus Torvalds 19951da177e4SLinus Torvalds if (copy > len) 19961da177e4SLinus Torvalds copy = len; 199751c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(frag)); 19981da177e4SLinus Torvalds csum2 = csum_partial_copy_nocheck(vaddr + 19991a028e50SDavid S. Miller frag->page_offset + 20001a028e50SDavid S. Miller offset - start, to, 20011a028e50SDavid S. Miller copy, 0); 200251c56b00SEric Dumazet kunmap_atomic(vaddr); 20031da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 20041da177e4SLinus Torvalds if (!(len -= copy)) 20051da177e4SLinus Torvalds return csum; 20061da177e4SLinus Torvalds offset += copy; 20071da177e4SLinus Torvalds to += copy; 20081da177e4SLinus Torvalds pos += copy; 20091da177e4SLinus Torvalds } 20101a028e50SDavid S. Miller start = end; 20111da177e4SLinus Torvalds } 20121da177e4SLinus Torvalds 2013fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 201481d77662SAl Viro __wsum csum2; 20151a028e50SDavid S. Miller int end; 20161da177e4SLinus Torvalds 2017547b792cSIlpo Järvinen WARN_ON(start > offset + len); 20181a028e50SDavid S. Miller 2019fbb398a8SDavid S. Miller end = start + frag_iter->len; 20201da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 20211da177e4SLinus Torvalds if (copy > len) 20221da177e4SLinus Torvalds copy = len; 2023fbb398a8SDavid S. Miller csum2 = skb_copy_and_csum_bits(frag_iter, 20241a028e50SDavid S. Miller offset - start, 20251da177e4SLinus Torvalds to, copy, 0); 20261da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 20271da177e4SLinus Torvalds if ((len -= copy) == 0) 20281da177e4SLinus Torvalds return csum; 20291da177e4SLinus Torvalds offset += copy; 20301da177e4SLinus Torvalds to += copy; 20311da177e4SLinus Torvalds pos += copy; 20321da177e4SLinus Torvalds } 20331a028e50SDavid S. Miller start = end; 20341da177e4SLinus Torvalds } 203509a62660SKris Katterjohn BUG_ON(len); 20361da177e4SLinus Torvalds return csum; 20371da177e4SLinus Torvalds } 2038b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_copy_and_csum_bits); 20391da177e4SLinus Torvalds 20401da177e4SLinus Torvalds void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 20411da177e4SLinus Torvalds { 2042d3bc23e7SAl Viro __wsum csum; 20431da177e4SLinus Torvalds long csstart; 20441da177e4SLinus Torvalds 204584fa7933SPatrick McHardy if (skb->ip_summed == CHECKSUM_PARTIAL) 204655508d60SMichał Mirosław csstart = skb_checksum_start_offset(skb); 20471da177e4SLinus Torvalds else 20481da177e4SLinus Torvalds csstart = skb_headlen(skb); 20491da177e4SLinus Torvalds 205009a62660SKris Katterjohn BUG_ON(csstart > skb_headlen(skb)); 20511da177e4SLinus Torvalds 2052d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data(skb, to, csstart); 20531da177e4SLinus Torvalds 20541da177e4SLinus Torvalds csum = 0; 20551da177e4SLinus Torvalds if (csstart != skb->len) 20561da177e4SLinus Torvalds csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 20571da177e4SLinus Torvalds skb->len - csstart, 0); 20581da177e4SLinus Torvalds 205984fa7933SPatrick McHardy if (skb->ip_summed == CHECKSUM_PARTIAL) { 2060ff1dcadbSAl Viro long csstuff = csstart + skb->csum_offset; 20611da177e4SLinus Torvalds 2062d3bc23e7SAl Viro *((__sum16 *)(to + csstuff)) = csum_fold(csum); 20631da177e4SLinus Torvalds } 20641da177e4SLinus Torvalds } 2065b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_dev); 20661da177e4SLinus Torvalds 20671da177e4SLinus Torvalds /** 20681da177e4SLinus Torvalds * skb_dequeue - remove from the head of the queue 20691da177e4SLinus Torvalds * @list: list to dequeue from 20701da177e4SLinus Torvalds * 20711da177e4SLinus Torvalds * Remove the head of the list. The list lock is taken so the function 20721da177e4SLinus Torvalds * may be used safely with other locking list functions. The head item is 20731da177e4SLinus Torvalds * returned or %NULL if the list is empty. 20741da177e4SLinus Torvalds */ 20751da177e4SLinus Torvalds 20761da177e4SLinus Torvalds struct sk_buff *skb_dequeue(struct sk_buff_head *list) 20771da177e4SLinus Torvalds { 20781da177e4SLinus Torvalds unsigned long flags; 20791da177e4SLinus Torvalds struct sk_buff *result; 20801da177e4SLinus Torvalds 20811da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 20821da177e4SLinus Torvalds result = __skb_dequeue(list); 20831da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 20841da177e4SLinus Torvalds return result; 20851da177e4SLinus Torvalds } 2086b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue); 20871da177e4SLinus Torvalds 20881da177e4SLinus Torvalds /** 20891da177e4SLinus Torvalds * skb_dequeue_tail - remove from the tail of the queue 20901da177e4SLinus Torvalds * @list: list to dequeue from 20911da177e4SLinus Torvalds * 20921da177e4SLinus Torvalds * Remove the tail of the list. The list lock is taken so the function 20931da177e4SLinus Torvalds * may be used safely with other locking list functions. The tail item is 20941da177e4SLinus Torvalds * returned or %NULL if the list is empty. 
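 *
 * A hedged drain-loop sketch (the queue variable is illustrative):
 *
 *	struct sk_buff *skb;
 *
 *	while ((skb = skb_dequeue_tail(&queue)) != NULL)
 *		kfree_skb(skb);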
20951da177e4SLinus Torvalds */ 20961da177e4SLinus Torvalds struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 20971da177e4SLinus Torvalds { 20981da177e4SLinus Torvalds unsigned long flags; 20991da177e4SLinus Torvalds struct sk_buff *result; 21001da177e4SLinus Torvalds 21011da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 21021da177e4SLinus Torvalds result = __skb_dequeue_tail(list); 21031da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 21041da177e4SLinus Torvalds return result; 21051da177e4SLinus Torvalds } 2106b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue_tail); 21071da177e4SLinus Torvalds 21081da177e4SLinus Torvalds /** 21091da177e4SLinus Torvalds * skb_queue_purge - empty a list 21101da177e4SLinus Torvalds * @list: list to empty 21111da177e4SLinus Torvalds * 21121da177e4SLinus Torvalds * Delete all buffers on an &sk_buff list. Each buffer is removed from 21131da177e4SLinus Torvalds * the list and one reference dropped. This function takes the list 21141da177e4SLinus Torvalds * lock and is atomic with respect to other list locking functions. 21151da177e4SLinus Torvalds */ 21161da177e4SLinus Torvalds void skb_queue_purge(struct sk_buff_head *list) 21171da177e4SLinus Torvalds { 21181da177e4SLinus Torvalds struct sk_buff *skb; 21191da177e4SLinus Torvalds while ((skb = skb_dequeue(list)) != NULL) 21201da177e4SLinus Torvalds kfree_skb(skb); 21211da177e4SLinus Torvalds } 2122b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_purge); 21231da177e4SLinus Torvalds 21241da177e4SLinus Torvalds /** 21251da177e4SLinus Torvalds * skb_queue_head - queue a buffer at the list head 21261da177e4SLinus Torvalds * @list: list to use 21271da177e4SLinus Torvalds * @newsk: buffer to queue 21281da177e4SLinus Torvalds * 21291da177e4SLinus Torvalds * Queue a buffer at the start of the list. This function takes the 21301da177e4SLinus Torvalds * list lock and can be used safely with other locking &sk_buff 21311da177e4SLinus Torvalds * functions. 21321da177e4SLinus Torvalds * 21331da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 21341da177e4SLinus Torvalds */ 21351da177e4SLinus Torvalds void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 21361da177e4SLinus Torvalds { 21371da177e4SLinus Torvalds unsigned long flags; 21381da177e4SLinus Torvalds 21391da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 21401da177e4SLinus Torvalds __skb_queue_head(list, newsk); 21411da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 21421da177e4SLinus Torvalds } 2143b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_head); 21441da177e4SLinus Torvalds 21451da177e4SLinus Torvalds /** 21461da177e4SLinus Torvalds * skb_queue_tail - queue a buffer at the list tail 21471da177e4SLinus Torvalds * @list: list to use 21481da177e4SLinus Torvalds * @newsk: buffer to queue 21491da177e4SLinus Torvalds * 21501da177e4SLinus Torvalds * Queue a buffer at the tail of the list. This function takes the 21511da177e4SLinus Torvalds * list lock and can be used safely with other locking &sk_buff 21521da177e4SLinus Torvalds * functions. 21531da177e4SLinus Torvalds * 21541da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time.
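 *
 * A minimal producer/consumer sketch, assuming a queue initialized
 * elsewhere with skb_queue_head_init() (names are illustrative):
 *
 *	skb_queue_tail(&queue, skb);	   (producer)
 *	skb = skb_dequeue(&queue);	   (consumer; %NULL when empty)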
21551da177e4SLinus Torvalds */ 21561da177e4SLinus Torvalds void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 21571da177e4SLinus Torvalds { 21581da177e4SLinus Torvalds unsigned long flags; 21591da177e4SLinus Torvalds 21601da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 21611da177e4SLinus Torvalds __skb_queue_tail(list, newsk); 21621da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 21631da177e4SLinus Torvalds } 2164b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_tail); 21658728b834SDavid S. Miller 21661da177e4SLinus Torvalds /** 21671da177e4SLinus Torvalds * skb_unlink - remove a buffer from a list 21681da177e4SLinus Torvalds * @skb: buffer to remove 21698728b834SDavid S. Miller * @list: list to use 21701da177e4SLinus Torvalds * 21718728b834SDavid S. Miller * Remove a packet from a list. The list locks are taken and this 21728728b834SDavid S. Miller * function is atomic with respect to other locked list calls. 21731da177e4SLinus Torvalds * 21748728b834SDavid S. Miller * You must know what list the SKB is on. 21751da177e4SLinus Torvalds */ 21768728b834SDavid S. Miller void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 21771da177e4SLinus Torvalds { 21781da177e4SLinus Torvalds unsigned long flags; 21791da177e4SLinus Torvalds 21801da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 21818728b834SDavid S. Miller __skb_unlink(skb, list); 21821da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 21831da177e4SLinus Torvalds } 2184b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_unlink); 21851da177e4SLinus Torvalds 21861da177e4SLinus Torvalds /** 21871da177e4SLinus Torvalds * skb_append - append a buffer 21881da177e4SLinus Torvalds * @old: buffer to insert after 21891da177e4SLinus Torvalds * @newsk: buffer to insert 21908728b834SDavid S. Miller * @list: list to use 21911da177e4SLinus Torvalds * 21921da177e4SLinus Torvalds * Place a packet after a given packet in a list. The list locks are taken 21931da177e4SLinus Torvalds * and this function is atomic with respect to other locked list calls. 21941da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 21951da177e4SLinus Torvalds */ 21968728b834SDavid S. Miller void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 21971da177e4SLinus Torvalds { 21981da177e4SLinus Torvalds unsigned long flags; 21991da177e4SLinus Torvalds 22008728b834SDavid S. Miller spin_lock_irqsave(&list->lock, flags); 22017de6c033SGerrit Renker __skb_queue_after(list, old, newsk); 22028728b834SDavid S. Miller spin_unlock_irqrestore(&list->lock, flags); 22031da177e4SLinus Torvalds } 2204b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append); 22051da177e4SLinus Torvalds 22061da177e4SLinus Torvalds /** 22071da177e4SLinus Torvalds * skb_insert - insert a buffer 22081da177e4SLinus Torvalds * @old: buffer to insert before 22091da177e4SLinus Torvalds * @newsk: buffer to insert 22108728b834SDavid S. Miller * @list: list to use 22111da177e4SLinus Torvalds * 22128728b834SDavid S. Miller * Place a packet before a given packet in a list. The list locks are 22138728b834SDavid S. Miller * taken and this function is atomic with respect to other locked list 22148728b834SDavid S. Miller * calls. 22158728b834SDavid S. Miller * 22161da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 22171da177e4SLinus Torvalds */ 22188728b834SDavid S.
Miller void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 22191da177e4SLinus Torvalds { 22201da177e4SLinus Torvalds unsigned long flags; 22211da177e4SLinus Torvalds 22228728b834SDavid S. Miller spin_lock_irqsave(&list->lock, flags); 22238728b834SDavid S. Miller __skb_insert(newsk, old->prev, old, list); 22248728b834SDavid S. Miller spin_unlock_irqrestore(&list->lock, flags); 22251da177e4SLinus Torvalds } 2226b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_insert); 22271da177e4SLinus Torvalds 22281da177e4SLinus Torvalds static inline void skb_split_inside_header(struct sk_buff *skb, 22291da177e4SLinus Torvalds struct sk_buff* skb1, 22301da177e4SLinus Torvalds const u32 len, const int pos) 22311da177e4SLinus Torvalds { 22321da177e4SLinus Torvalds int i; 22331da177e4SLinus Torvalds 2234d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 2235d626f62bSArnaldo Carvalho de Melo pos - len); 22361da177e4SLinus Torvalds /* And move the data appendix as is. */ 22371da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 22381da177e4SLinus Torvalds skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 22391da177e4SLinus Torvalds 22401da177e4SLinus Torvalds skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 22411da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = 0; 22421da177e4SLinus Torvalds skb1->data_len = skb->data_len; 22431da177e4SLinus Torvalds skb1->len += skb1->data_len; 22441da177e4SLinus Torvalds skb->data_len = 0; 22451da177e4SLinus Torvalds skb->len = len; 224627a884dcSArnaldo Carvalho de Melo skb_set_tail_pointer(skb, len); 22471da177e4SLinus Torvalds } 22481da177e4SLinus Torvalds 22491da177e4SLinus Torvalds static inline void skb_split_no_header(struct sk_buff *skb, 22501da177e4SLinus Torvalds struct sk_buff* skb1, 22511da177e4SLinus Torvalds const u32 len, int pos) 22521da177e4SLinus Torvalds { 22531da177e4SLinus Torvalds int i, k = 0; 22541da177e4SLinus Torvalds const int nfrags = skb_shinfo(skb)->nr_frags; 22551da177e4SLinus Torvalds 22561da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = 0; 22571da177e4SLinus Torvalds skb1->len = skb1->data_len = skb->len - len; 22581da177e4SLinus Torvalds skb->len = len; 22591da177e4SLinus Torvalds skb->data_len = len - pos; 22601da177e4SLinus Torvalds 22611da177e4SLinus Torvalds for (i = 0; i < nfrags; i++) { 22629e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 22631da177e4SLinus Torvalds 22641da177e4SLinus Torvalds if (pos + size > len) { 22651da177e4SLinus Torvalds skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 22661da177e4SLinus Torvalds 22671da177e4SLinus Torvalds if (pos < len) { 22681da177e4SLinus Torvalds /* Split frag. 22691da177e4SLinus Torvalds * We have two variants in this case: 22701da177e4SLinus Torvalds * 1. Move the whole frag to the second 22711da177e4SLinus Torvalds * part, if it is possible. E.g. 22721da177e4SLinus Torvalds * this approach is mandatory for TUX, 22731da177e4SLinus Torvalds * where splitting is expensive. 22741da177e4SLinus Torvalds * 2. Split accurately. This is what we do.
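 * (Variant 2 costs one extra page reference: the skb_frag_ref() below
 * lets both halves keep pointing at the same page.)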
22751da177e4SLinus Torvalds */ 2276ea2ab693SIan Campbell skb_frag_ref(skb, i); 22771da177e4SLinus Torvalds skb_shinfo(skb1)->frags[0].page_offset += len - pos; 22789e903e08SEric Dumazet skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 22799e903e08SEric Dumazet skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 22801da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags++; 22811da177e4SLinus Torvalds } 22821da177e4SLinus Torvalds k++; 22831da177e4SLinus Torvalds } else 22841da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags++; 22851da177e4SLinus Torvalds pos += size; 22861da177e4SLinus Torvalds } 22871da177e4SLinus Torvalds skb_shinfo(skb1)->nr_frags = k; 22881da177e4SLinus Torvalds } 22891da177e4SLinus Torvalds 22901da177e4SLinus Torvalds /** 22911da177e4SLinus Torvalds * skb_split - Split fragmented skb to two parts at length len. 22921da177e4SLinus Torvalds * @skb: the buffer to split 22931da177e4SLinus Torvalds * @skb1: the buffer to receive the second part 22941da177e4SLinus Torvalds * @len: new length for skb 22951da177e4SLinus Torvalds */ 22961da177e4SLinus Torvalds void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 22971da177e4SLinus Torvalds { 22981da177e4SLinus Torvalds int pos = skb_headlen(skb); 22991da177e4SLinus Torvalds 23001da177e4SLinus Torvalds if (len < pos) /* Split line is inside header. */ 23011da177e4SLinus Torvalds skb_split_inside_header(skb, skb1, len, pos); 23021da177e4SLinus Torvalds else /* Second chunk has no header, nothing to copy. */ 23031da177e4SLinus Torvalds skb_split_no_header(skb, skb1, len, pos); 23041da177e4SLinus Torvalds } 2305b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_split); 23061da177e4SLinus Torvalds 23079f782db3SIlpo Järvinen /* Shifting from/to a cloned skb is a no-go. 23089f782db3SIlpo Järvinen * 23099f782db3SIlpo Järvinen * Caller cannot keep skb_shinfo related pointers past calling here! 23109f782db3SIlpo Järvinen */ 2311832d11c5SIlpo Järvinen static int skb_prepare_for_shift(struct sk_buff *skb) 2312832d11c5SIlpo Järvinen { 23130ace2856SIlpo Järvinen return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2314832d11c5SIlpo Järvinen } 2315832d11c5SIlpo Järvinen 2316832d11c5SIlpo Järvinen /** 2317832d11c5SIlpo Järvinen * skb_shift - Shifts paged data partially from skb to another 2318832d11c5SIlpo Järvinen * @tgt: buffer into which tail data gets added 2319832d11c5SIlpo Järvinen * @skb: buffer from which the paged data comes 2320832d11c5SIlpo Järvinen * @shiftlen: shift up to this many bytes 2321832d11c5SIlpo Järvinen * 2322832d11c5SIlpo Järvinen * Attempts to shift up to shiftlen worth of bytes, which may be less than 232320e994a0SFeng King * the length of the skb, from skb to tgt. Returns the number of bytes shifted. 2324832d11c5SIlpo Järvinen * It's up to the caller to free skb if everything was shifted. 2325832d11c5SIlpo Järvinen * 2326832d11c5SIlpo Järvinen * If @tgt runs out of frags, the whole operation is aborted. 2327832d11c5SIlpo Järvinen * 2328832d11c5SIlpo Järvinen * The skb cannot include anything but paged data, while tgt is allowed 2329832d11c5SIlpo Järvinen * to have non-paged data as well. 2330832d11c5SIlpo Järvinen * 2331832d11c5SIlpo Järvinen * TODO: a full-sized shift could be optimized, but that would need a 2332832d11c5SIlpo Järvinen * specialized skb free'er to handle frags without up-to-date nr_frags.
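 *
 * A hedged caller sketch (prev and the follow-up handling are
 * illustrative; shifting everything leaves skb empty):
 *
 *	int shifted = skb_shift(prev, skb, skb->len);
 *
 *	if (shifted == skb->len)
 *		... skb now holds no data and the caller may free it ...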
2333832d11c5SIlpo Järvinen */ 2334832d11c5SIlpo Järvinen int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 2335832d11c5SIlpo Järvinen { 2336832d11c5SIlpo Järvinen int from, to, merge, todo; 2337832d11c5SIlpo Järvinen struct skb_frag_struct *fragfrom, *fragto; 2338832d11c5SIlpo Järvinen 2339832d11c5SIlpo Järvinen BUG_ON(shiftlen > skb->len); 2340832d11c5SIlpo Järvinen BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ 2341832d11c5SIlpo Järvinen 2342832d11c5SIlpo Järvinen todo = shiftlen; 2343832d11c5SIlpo Järvinen from = 0; 2344832d11c5SIlpo Järvinen to = skb_shinfo(tgt)->nr_frags; 2345832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2346832d11c5SIlpo Järvinen 2347832d11c5SIlpo Järvinen /* Actual merge is delayed until the point when we know we can 2348832d11c5SIlpo Järvinen * commit all, so that we don't have to undo partial changes 2349832d11c5SIlpo Järvinen */ 2350832d11c5SIlpo Järvinen if (!to || 2351ea2ab693SIan Campbell !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 2352ea2ab693SIan Campbell fragfrom->page_offset)) { 2353832d11c5SIlpo Järvinen merge = -1; 2354832d11c5SIlpo Järvinen } else { 2355832d11c5SIlpo Järvinen merge = to - 1; 2356832d11c5SIlpo Järvinen 23579e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 2358832d11c5SIlpo Järvinen if (todo < 0) { 2359832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || 2360832d11c5SIlpo Järvinen skb_prepare_for_shift(tgt)) 2361832d11c5SIlpo Järvinen return 0; 2362832d11c5SIlpo Järvinen 23639f782db3SIlpo Järvinen /* All previous frag pointers might be stale! */ 23649f782db3SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2365832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 2366832d11c5SIlpo Järvinen 23679e903e08SEric Dumazet skb_frag_size_add(fragto, shiftlen); 23689e903e08SEric Dumazet skb_frag_size_sub(fragfrom, shiftlen); 2369832d11c5SIlpo Järvinen fragfrom->page_offset += shiftlen; 2370832d11c5SIlpo Järvinen 2371832d11c5SIlpo Järvinen goto onlymerged; 2372832d11c5SIlpo Järvinen } 2373832d11c5SIlpo Järvinen 2374832d11c5SIlpo Järvinen from++; 2375832d11c5SIlpo Järvinen } 2376832d11c5SIlpo Järvinen 2377832d11c5SIlpo Järvinen /* Skip full, not-fitting skb to avoid expensive operations */ 2378832d11c5SIlpo Järvinen if ((shiftlen == skb->len) && 2379832d11c5SIlpo Järvinen (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2380832d11c5SIlpo Järvinen return 0; 2381832d11c5SIlpo Järvinen 2382832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2383832d11c5SIlpo Järvinen return 0; 2384832d11c5SIlpo Järvinen 2385832d11c5SIlpo Järvinen while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2386832d11c5SIlpo Järvinen if (to == MAX_SKB_FRAGS) 2387832d11c5SIlpo Järvinen return 0; 2388832d11c5SIlpo Järvinen 2389832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2390832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[to]; 2391832d11c5SIlpo Järvinen 23929e903e08SEric Dumazet if (todo >= skb_frag_size(fragfrom)) { 2393832d11c5SIlpo Järvinen *fragto = *fragfrom; 23949e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 2395832d11c5SIlpo Järvinen from++; 2396832d11c5SIlpo Järvinen to++; 2397832d11c5SIlpo Järvinen 2398832d11c5SIlpo Järvinen } else { 2399ea2ab693SIan Campbell __skb_frag_ref(fragfrom); 2400832d11c5SIlpo Järvinen fragto->page = fragfrom->page; 2401832d11c5SIlpo Järvinen fragto->page_offset = fragfrom->page_offset; 24029e903e08SEric Dumazet skb_frag_size_set(fragto, todo); 2403832d11c5SIlpo Järvinen 
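			/* The source frag fits only partially: tgt has
			 * taken its first 'todo' bytes, so advance the
			 * source frag past them and stop shifting.
			 */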
2404832d11c5SIlpo Järvinen fragfrom->page_offset += todo; 24059e903e08SEric Dumazet skb_frag_size_sub(fragfrom, todo); 2406832d11c5SIlpo Järvinen todo = 0; 2407832d11c5SIlpo Järvinen 2408832d11c5SIlpo Järvinen to++; 2409832d11c5SIlpo Järvinen break; 2410832d11c5SIlpo Järvinen } 2411832d11c5SIlpo Järvinen } 2412832d11c5SIlpo Järvinen 2413832d11c5SIlpo Järvinen /* Ready to "commit" this state change to tgt */ 2414832d11c5SIlpo Järvinen skb_shinfo(tgt)->nr_frags = to; 2415832d11c5SIlpo Järvinen 2416832d11c5SIlpo Järvinen if (merge >= 0) { 2417832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[0]; 2418832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 2419832d11c5SIlpo Järvinen 24209e903e08SEric Dumazet skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 2421ea2ab693SIan Campbell __skb_frag_unref(fragfrom); 2422832d11c5SIlpo Järvinen } 2423832d11c5SIlpo Järvinen 2424832d11c5SIlpo Järvinen /* Reposition in the original skb */ 2425832d11c5SIlpo Järvinen to = 0; 2426832d11c5SIlpo Järvinen while (from < skb_shinfo(skb)->nr_frags) 2427832d11c5SIlpo Järvinen skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2428832d11c5SIlpo Järvinen skb_shinfo(skb)->nr_frags = to; 2429832d11c5SIlpo Järvinen 2430832d11c5SIlpo Järvinen BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2431832d11c5SIlpo Järvinen 2432832d11c5SIlpo Järvinen onlymerged: 2433832d11c5SIlpo Järvinen /* Most likely the tgt won't ever need its checksum anymore, skb on 2434832d11c5SIlpo Järvinen * the other hand might need it if it needs to be resent 2435832d11c5SIlpo Järvinen */ 2436832d11c5SIlpo Järvinen tgt->ip_summed = CHECKSUM_PARTIAL; 2437832d11c5SIlpo Järvinen skb->ip_summed = CHECKSUM_PARTIAL; 2438832d11c5SIlpo Järvinen 2439832d11c5SIlpo Järvinen /* Yak, is it really working this way? Some helper please? */ 2440832d11c5SIlpo Järvinen skb->len -= shiftlen; 2441832d11c5SIlpo Järvinen skb->data_len -= shiftlen; 2442832d11c5SIlpo Järvinen skb->truesize -= shiftlen; 2443832d11c5SIlpo Järvinen tgt->len += shiftlen; 2444832d11c5SIlpo Järvinen tgt->data_len += shiftlen; 2445832d11c5SIlpo Järvinen tgt->truesize += shiftlen; 2446832d11c5SIlpo Järvinen 2447832d11c5SIlpo Järvinen return shiftlen; 2448832d11c5SIlpo Järvinen } 2449832d11c5SIlpo Järvinen 2450677e90edSThomas Graf /** 2451677e90edSThomas Graf * skb_prepare_seq_read - Prepare a sequential read of skb data 2452677e90edSThomas Graf * @skb: the buffer to read 2453677e90edSThomas Graf * @from: lower offset of data to be read 2454677e90edSThomas Graf * @to: upper offset of data to be read 2455677e90edSThomas Graf * @st: state variable 2456677e90edSThomas Graf * 2457677e90edSThomas Graf * Initializes the specified state variable. Must be called before 2458677e90edSThomas Graf * invoking skb_seq_read() for the first time. 2459677e90edSThomas Graf */ 2460677e90edSThomas Graf void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 2461677e90edSThomas Graf unsigned int to, struct skb_seq_state *st) 2462677e90edSThomas Graf { 2463677e90edSThomas Graf st->lower_offset = from; 2464677e90edSThomas Graf st->upper_offset = to; 2465677e90edSThomas Graf st->root_skb = st->cur_skb = skb; 2466677e90edSThomas Graf st->frag_idx = st->stepped_offset = 0; 2467677e90edSThomas Graf st->frag_data = NULL; 2468677e90edSThomas Graf } 2469b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_prepare_seq_read); 2470677e90edSThomas Graf 2471677e90edSThomas Graf /** 2472677e90edSThomas Graf * skb_seq_read - Sequentially read skb data 2473677e90edSThomas Graf * @consumed: number of bytes consumed by the caller so far 2474677e90edSThomas Graf * @data: destination pointer for data to be returned 2475677e90edSThomas Graf * @st: state variable 2476677e90edSThomas Graf * 2477677e90edSThomas Graf * Reads a block of skb data at @consumed relative to the 2478677e90edSThomas Graf * lower offset specified to skb_prepare_seq_read(). Assigns 2479677e90edSThomas Graf * the head of the data block to @data and returns the length 2480677e90edSThomas Graf * of the block or 0 if the end of the skb data or the upper 2481677e90edSThomas Graf * offset has been reached. 2482677e90edSThomas Graf * 2483677e90edSThomas Graf * The caller is not required to consume all of the data 2484677e90edSThomas Graf * returned, i.e. @consumed is typically set to the number 2485677e90edSThomas Graf * of bytes already consumed and the next call to 2486677e90edSThomas Graf * skb_seq_read() will return the remaining part of the block. 2487677e90edSThomas Graf * 248825985edcSLucas De Marchi * Note 1: The size of each block of data returned can be arbitrary; 2489677e90edSThomas Graf * this limitation is the cost for zerocopy sequential 2490677e90edSThomas Graf * reads of potentially non-linear data. 2491677e90edSThomas Graf * 2492bc2cda1eSRandy Dunlap * Note 2: Fragment lists within fragments are not implemented 2493677e90edSThomas Graf * at the moment, state->root_skb could be replaced with 2494677e90edSThomas Graf * a stack for this purpose. 2495677e90edSThomas Graf */ 2496677e90edSThomas Graf unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 2497677e90edSThomas Graf struct skb_seq_state *st) 2498677e90edSThomas Graf { 2499677e90edSThomas Graf unsigned int block_limit, abs_offset = consumed + st->lower_offset; 2500677e90edSThomas Graf skb_frag_t *frag; 2501677e90edSThomas Graf 2502677e90edSThomas Graf if (unlikely(abs_offset >= st->upper_offset)) 2503677e90edSThomas Graf return 0; 2504677e90edSThomas Graf 2505677e90edSThomas Graf next_skb: 250695e3b24cSHerbert Xu block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 2507677e90edSThomas Graf 2508995b3379SThomas Chenault if (abs_offset < block_limit && !st->frag_data) { 250995e3b24cSHerbert Xu *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 2510677e90edSThomas Graf return block_limit - abs_offset; 2511677e90edSThomas Graf } 2512677e90edSThomas Graf 2513677e90edSThomas Graf if (st->frag_idx == 0 && !st->frag_data) 2514677e90edSThomas Graf st->stepped_offset += skb_headlen(st->cur_skb); 2515677e90edSThomas Graf 2516677e90edSThomas Graf while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2517677e90edSThomas Graf frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 25189e903e08SEric Dumazet block_limit = skb_frag_size(frag) + st->stepped_offset; 2519677e90edSThomas Graf 2520677e90edSThomas Graf if (abs_offset < block_limit) { 2521677e90edSThomas Graf if (!st->frag_data) 252251c56b00SEric Dumazet st->frag_data = kmap_atomic(skb_frag_page(frag)); 2523677e90edSThomas Graf 2524677e90edSThomas Graf *data = (u8 *) st->frag_data + frag->page_offset + 2525677e90edSThomas Graf (abs_offset - st->stepped_offset); 2526677e90edSThomas Graf 2527677e90edSThomas Graf return block_limit - abs_offset; 2528677e90edSThomas Graf } 2529677e90edSThomas Graf 2530677e90edSThomas Graf if (st->frag_data) { 253151c56b00SEric Dumazet
kunmap_atomic(st->frag_data); 2532677e90edSThomas Graf st->frag_data = NULL; 2533677e90edSThomas Graf } 2534677e90edSThomas Graf 2535677e90edSThomas Graf st->frag_idx++; 25369e903e08SEric Dumazet st->stepped_offset += skb_frag_size(frag); 2537677e90edSThomas Graf } 2538677e90edSThomas Graf 25395b5a60daSOlaf Kirch if (st->frag_data) { 254051c56b00SEric Dumazet kunmap_atomic(st->frag_data); 25415b5a60daSOlaf Kirch st->frag_data = NULL; 25425b5a60daSOlaf Kirch } 25435b5a60daSOlaf Kirch 254421dc3301SDavid S. Miller if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 2545677e90edSThomas Graf st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 254695e3b24cSHerbert Xu st->frag_idx = 0; 2547677e90edSThomas Graf goto next_skb; 254871b3346dSShyam Iyer } else if (st->cur_skb->next) { 254971b3346dSShyam Iyer st->cur_skb = st->cur_skb->next; 255071b3346dSShyam Iyer st->frag_idx = 0; 2551677e90edSThomas Graf goto next_skb; 2552677e90edSThomas Graf } 2553677e90edSThomas Graf 2554677e90edSThomas Graf return 0; 2555677e90edSThomas Graf } 2556b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_seq_read); 2557677e90edSThomas Graf 2558677e90edSThomas Graf /** 2559677e90edSThomas Graf * skb_abort_seq_read - Abort a sequential read of skb data 2560677e90edSThomas Graf * @st: state variable 2561677e90edSThomas Graf * 2562677e90edSThomas Graf * Must be called if the read was aborted, i.e. if skb_seq_read() 2563677e90edSThomas Graf * was not called repeatedly until it returned 0. 2564677e90edSThomas Graf */ 2565677e90edSThomas Graf void skb_abort_seq_read(struct skb_seq_state *st) 2566677e90edSThomas Graf { 2567677e90edSThomas Graf if (st->frag_data) 256851c56b00SEric Dumazet kunmap_atomic(st->frag_data); 2569677e90edSThomas Graf } 2570b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_abort_seq_read); 2571677e90edSThomas Graf 25723fc7e8a6SThomas Graf #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 25733fc7e8a6SThomas Graf 25743fc7e8a6SThomas Graf static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 25753fc7e8a6SThomas Graf struct ts_config *conf, 25763fc7e8a6SThomas Graf struct ts_state *state) 25773fc7e8a6SThomas Graf { 25783fc7e8a6SThomas Graf return skb_seq_read(offset, text, TS_SKB_CB(state)); 25793fc7e8a6SThomas Graf } 25803fc7e8a6SThomas Graf 25813fc7e8a6SThomas Graf static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 25823fc7e8a6SThomas Graf { 25833fc7e8a6SThomas Graf skb_abort_seq_read(TS_SKB_CB(state)); 25843fc7e8a6SThomas Graf } 25853fc7e8a6SThomas Graf 25863fc7e8a6SThomas Graf /** 25873fc7e8a6SThomas Graf * skb_find_text - Find a text pattern in skb data 25883fc7e8a6SThomas Graf * @skb: the buffer to look in 25893fc7e8a6SThomas Graf * @from: search offset 25903fc7e8a6SThomas Graf * @to: search limit 25913fc7e8a6SThomas Graf * @config: textsearch configuration 25923fc7e8a6SThomas Graf * @state: uninitialized textsearch state variable 25933fc7e8a6SThomas Graf * 25943fc7e8a6SThomas Graf * Finds a pattern in the skb data according to the specified 25953fc7e8a6SThomas Graf * textsearch configuration. Use textsearch_next() to retrieve 25963fc7e8a6SThomas Graf * subsequent occurrences of the pattern. Returns the offset 25973fc7e8a6SThomas Graf * to the first occurrence or UINT_MAX if no match was found.
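 *
 * Minimal usage sketch (the algorithm name, pattern and offsets
 * below are made up for illustration):
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "HTTP", 4, GFP_KERNEL,
 *				  TS_AUTOLOAD);
 *	if (!IS_ERR(conf)) {
 *		pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *		if (pos != UINT_MAX)
 *			pr_info("match at offset %u\n", pos);
 *		textsearch_destroy(conf);
 *	}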
25983fc7e8a6SThomas Graf */ 25993fc7e8a6SThomas Graf unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 26003fc7e8a6SThomas Graf unsigned int to, struct ts_config *config, 26013fc7e8a6SThomas Graf struct ts_state *state) 26023fc7e8a6SThomas Graf { 2603f72b948dSPhil Oester unsigned int ret; 2604f72b948dSPhil Oester 26053fc7e8a6SThomas Graf config->get_next_block = skb_ts_get_next_block; 26063fc7e8a6SThomas Graf config->finish = skb_ts_finish; 26073fc7e8a6SThomas Graf 26083fc7e8a6SThomas Graf skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); 26093fc7e8a6SThomas Graf 2610f72b948dSPhil Oester ret = textsearch_find(config, state); 2611f72b948dSPhil Oester return (ret <= to - from ? ret : UINT_MAX); 26123fc7e8a6SThomas Graf } 2613b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_find_text); 26143fc7e8a6SThomas Graf 2615e89e9cf5SAnanda Raju /** 2616e89e9cf5SAnanda Raju * skb_append_datato_frags - append the user data to a skb 2617e89e9cf5SAnanda Raju * @sk: sock structure 2618e89e9cf5SAnanda Raju * @skb: skb structure to be appended with user data. 2619e89e9cf5SAnanda Raju * @getfrag: callback function to be used for getting the user data 2620e89e9cf5SAnanda Raju * @from: pointer to user message iov 2621e89e9cf5SAnanda Raju * @length: length of the iov message 2622e89e9cf5SAnanda Raju * 2623e89e9cf5SAnanda Raju * Description: This procedure appends the user data in the fragment part 2624e89e9cf5SAnanda Raju * of the skb. If any page alloc fails, this procedure returns -ENOMEM. 2625e89e9cf5SAnanda Raju */ 2626e89e9cf5SAnanda Raju int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 2627dab9630fSMartin Waitz int (*getfrag)(void *from, char *to, int offset, 2628e89e9cf5SAnanda Raju int len, int odd, struct sk_buff *skb), 2629e89e9cf5SAnanda Raju void *from, int length) 2630e89e9cf5SAnanda Raju { 2631e89e9cf5SAnanda Raju int frg_cnt = 0; 2632e89e9cf5SAnanda Raju skb_frag_t *frag = NULL; 2633e89e9cf5SAnanda Raju struct page *page = NULL; 2634e89e9cf5SAnanda Raju int copy, left; 2635e89e9cf5SAnanda Raju int offset = 0; 2636e89e9cf5SAnanda Raju int ret; 2637e89e9cf5SAnanda Raju 2638e89e9cf5SAnanda Raju do { 2639e89e9cf5SAnanda Raju /* Return error if we don't have space for new frag */ 2640e89e9cf5SAnanda Raju frg_cnt = skb_shinfo(skb)->nr_frags; 2641e89e9cf5SAnanda Raju if (frg_cnt >= MAX_SKB_FRAGS) 2642e89e9cf5SAnanda Raju return -EFAULT; 2643e89e9cf5SAnanda Raju 2644e89e9cf5SAnanda Raju /* allocate a new page for next frag */ 2645e89e9cf5SAnanda Raju page = alloc_pages(sk->sk_allocation, 0); 2646e89e9cf5SAnanda Raju 2647e89e9cf5SAnanda Raju /* If alloc_page fails just return failure and caller will 2648e89e9cf5SAnanda Raju * free previous allocated pages by doing kfree_skb() 2649e89e9cf5SAnanda Raju */ 2650e89e9cf5SAnanda Raju if (page == NULL) 2651e89e9cf5SAnanda Raju return -ENOMEM; 2652e89e9cf5SAnanda Raju 2653e89e9cf5SAnanda Raju /* initialize the next frag */ 2654e89e9cf5SAnanda Raju skb_fill_page_desc(skb, frg_cnt, page, 0, 0); 2655e89e9cf5SAnanda Raju skb->truesize += PAGE_SIZE; 2656e89e9cf5SAnanda Raju atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc); 2657e89e9cf5SAnanda Raju 2658e89e9cf5SAnanda Raju /* get the new initialized frag */ 2659e89e9cf5SAnanda Raju frg_cnt = skb_shinfo(skb)->nr_frags; 2660e89e9cf5SAnanda Raju frag = &skb_shinfo(skb)->frags[frg_cnt - 1]; 2661e89e9cf5SAnanda Raju 2662e89e9cf5SAnanda Raju /* copy the user data to page */ 2663e89e9cf5SAnanda Raju left = PAGE_SIZE - frag->page_offset; 2664e89e9cf5SAnanda Raju copy = (length > left)?
left : length; 2665e89e9cf5SAnanda Raju 26669e903e08SEric Dumazet ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag), 2667e89e9cf5SAnanda Raju offset, copy, 0, skb); 2668e89e9cf5SAnanda Raju if (ret < 0) 2669e89e9cf5SAnanda Raju return -EFAULT; 2670e89e9cf5SAnanda Raju 2671e89e9cf5SAnanda Raju /* copy was successful so update the size parameters */ 26729e903e08SEric Dumazet skb_frag_size_add(frag, copy); 2673e89e9cf5SAnanda Raju skb->len += copy; 2674e89e9cf5SAnanda Raju skb->data_len += copy; 2675e89e9cf5SAnanda Raju offset += copy; 2676e89e9cf5SAnanda Raju length -= copy; 2677e89e9cf5SAnanda Raju 2678e89e9cf5SAnanda Raju } while (length > 0); 2679e89e9cf5SAnanda Raju 2680e89e9cf5SAnanda Raju return 0; 2681e89e9cf5SAnanda Raju } 2682b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append_datato_frags); 2683e89e9cf5SAnanda Raju 2684cbb042f9SHerbert Xu /** 2685cbb042f9SHerbert Xu * skb_pull_rcsum - pull skb and update receive checksum 2686cbb042f9SHerbert Xu * @skb: buffer to update 2687cbb042f9SHerbert Xu * @len: length of data pulled 2688cbb042f9SHerbert Xu * 2689cbb042f9SHerbert Xu * This function performs an skb_pull on the packet and updates 2690fee54fa5SUrs Thuermann * the CHECKSUM_COMPLETE checksum. It should be used on 269184fa7933SPatrick McHardy * receive path processing instead of skb_pull unless you know 269284fa7933SPatrick McHardy * that the checksum difference is zero (e.g., a valid IP header) 269384fa7933SPatrick McHardy * or you are setting ip_summed to CHECKSUM_NONE. 2694cbb042f9SHerbert Xu */ 2695cbb042f9SHerbert Xu unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 2696cbb042f9SHerbert Xu { 2697cbb042f9SHerbert Xu BUG_ON(len > skb->len); 2698cbb042f9SHerbert Xu skb->len -= len; 2699cbb042f9SHerbert Xu BUG_ON(skb->len < skb->data_len); 2700cbb042f9SHerbert Xu skb_postpull_rcsum(skb, skb->data, len); 2701cbb042f9SHerbert Xu return skb->data += len; 2702cbb042f9SHerbert Xu } 2703f94691acSArnaldo Carvalho de Melo EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2704f94691acSArnaldo Carvalho de Melo 2705f4c50d99SHerbert Xu /** 2706f4c50d99SHerbert Xu * skb_segment - Perform protocol segmentation on skb. 2707f4c50d99SHerbert Xu * @skb: buffer to segment 2708576a30ebSHerbert Xu * @features: features for the output path (see dev->features) 2709f4c50d99SHerbert Xu * 2710f4c50d99SHerbert Xu * This function performs segmentation on the given skb. It returns 27114c821d75SBen Hutchings * a pointer to the first in a list of new skbs for the segments. 27124c821d75SBen Hutchings * In case of error it returns ERR_PTR(err). 
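 *
 * Sketch of the typical caller (the protocol gso_segment handlers
 * use it in this spirit; error handling abbreviated):
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return segs;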
2713f4c50d99SHerbert Xu */ 2714c8f44affSMichał Mirosław struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) 2715f4c50d99SHerbert Xu { 2716f4c50d99SHerbert Xu struct sk_buff *segs = NULL; 2717f4c50d99SHerbert Xu struct sk_buff *tail = NULL; 271889319d38SHerbert Xu struct sk_buff *fskb = skb_shinfo(skb)->frag_list; 2719f4c50d99SHerbert Xu unsigned int mss = skb_shinfo(skb)->gso_size; 272098e399f8SArnaldo Carvalho de Melo unsigned int doffset = skb->data - skb_mac_header(skb); 2721f4c50d99SHerbert Xu unsigned int offset = doffset; 2722f4c50d99SHerbert Xu unsigned int headroom; 2723f4c50d99SHerbert Xu unsigned int len; 272404ed3e74SMichał Mirosław int sg = !!(features & NETIF_F_SG); 2725f4c50d99SHerbert Xu int nfrags = skb_shinfo(skb)->nr_frags; 2726f4c50d99SHerbert Xu int err = -ENOMEM; 2727f4c50d99SHerbert Xu int i = 0; 2728f4c50d99SHerbert Xu int pos; 2729f4c50d99SHerbert Xu 2730f4c50d99SHerbert Xu __skb_push(skb, doffset); 2731f4c50d99SHerbert Xu headroom = skb_headroom(skb); 2732f4c50d99SHerbert Xu pos = skb_headlen(skb); 2733f4c50d99SHerbert Xu 2734f4c50d99SHerbert Xu do { 2735f4c50d99SHerbert Xu struct sk_buff *nskb; 2736f4c50d99SHerbert Xu skb_frag_t *frag; 2737c8884eddSHerbert Xu int hsize; 2738f4c50d99SHerbert Xu int size; 2739f4c50d99SHerbert Xu 2740f4c50d99SHerbert Xu len = skb->len - offset; 2741f4c50d99SHerbert Xu if (len > mss) 2742f4c50d99SHerbert Xu len = mss; 2743f4c50d99SHerbert Xu 2744f4c50d99SHerbert Xu hsize = skb_headlen(skb) - offset; 2745f4c50d99SHerbert Xu if (hsize < 0) 2746f4c50d99SHerbert Xu hsize = 0; 2747c8884eddSHerbert Xu if (hsize > len || !sg) 2748c8884eddSHerbert Xu hsize = len; 2749f4c50d99SHerbert Xu 275089319d38SHerbert Xu if (!hsize && i >= nfrags) { 275189319d38SHerbert Xu BUG_ON(fskb->len != len); 275289319d38SHerbert Xu 275389319d38SHerbert Xu pos += len; 275489319d38SHerbert Xu nskb = skb_clone(fskb, GFP_ATOMIC); 275589319d38SHerbert Xu fskb = fskb->next; 275689319d38SHerbert Xu 2757f4c50d99SHerbert Xu if (unlikely(!nskb)) 2758f4c50d99SHerbert Xu goto err; 2759f4c50d99SHerbert Xu 2760ec47ea82SAlexander Duyck hsize = skb_end_offset(nskb); 276189319d38SHerbert Xu if (skb_cow_head(nskb, doffset + headroom)) { 276289319d38SHerbert Xu kfree_skb(nskb); 276389319d38SHerbert Xu goto err; 276489319d38SHerbert Xu } 276589319d38SHerbert Xu 2766ec47ea82SAlexander Duyck nskb->truesize += skb_end_offset(nskb) - hsize; 276789319d38SHerbert Xu skb_release_head_state(nskb); 276889319d38SHerbert Xu __skb_push(nskb, doffset); 276989319d38SHerbert Xu } else { 277089319d38SHerbert Xu nskb = alloc_skb(hsize + doffset + headroom, 277189319d38SHerbert Xu GFP_ATOMIC); 277289319d38SHerbert Xu 277389319d38SHerbert Xu if (unlikely(!nskb)) 277489319d38SHerbert Xu goto err; 277589319d38SHerbert Xu 277689319d38SHerbert Xu skb_reserve(nskb, headroom); 277789319d38SHerbert Xu __skb_put(nskb, doffset); 277889319d38SHerbert Xu } 277989319d38SHerbert Xu 2780f4c50d99SHerbert Xu if (segs) 2781f4c50d99SHerbert Xu tail->next = nskb; 2782f4c50d99SHerbert Xu else 2783f4c50d99SHerbert Xu segs = nskb; 2784f4c50d99SHerbert Xu tail = nskb; 2785f4c50d99SHerbert Xu 27866f85a124SHerbert Xu __copy_skb_header(nskb, skb); 2787f4c50d99SHerbert Xu nskb->mac_len = skb->mac_len; 2788f4c50d99SHerbert Xu 27893d3be433SEric Dumazet /* nskb and skb might have different headroom */ 27903d3be433SEric Dumazet if (nskb->ip_summed == CHECKSUM_PARTIAL) 27913d3be433SEric Dumazet nskb->csum_start += skb_headroom(nskb) - headroom; 27923d3be433SEric Dumazet 2793459a98edSArnaldo Carvalho de Melo 
skb_reset_mac_header(nskb); 2794ddc7b8e3SArnaldo Carvalho de Melo skb_set_network_header(nskb, skb->mac_len); 2795b0e380b1SArnaldo Carvalho de Melo nskb->transport_header = (nskb->network_header + 2796b0e380b1SArnaldo Carvalho de Melo skb_network_header_len(skb)); 279789319d38SHerbert Xu skb_copy_from_linear_data(skb, nskb->data, doffset); 279889319d38SHerbert Xu 27992f181855SHerbert Xu if (fskb != skb_shinfo(skb)->frag_list) 280089319d38SHerbert Xu continue; 280189319d38SHerbert Xu 2802f4c50d99SHerbert Xu if (!sg) { 28036f85a124SHerbert Xu nskb->ip_summed = CHECKSUM_NONE; 2804f4c50d99SHerbert Xu nskb->csum = skb_copy_and_csum_bits(skb, offset, 2805f4c50d99SHerbert Xu skb_put(nskb, len), 2806f4c50d99SHerbert Xu len, 0); 2807f4c50d99SHerbert Xu continue; 2808f4c50d99SHerbert Xu } 2809f4c50d99SHerbert Xu 2810f4c50d99SHerbert Xu frag = skb_shinfo(nskb)->frags; 2811f4c50d99SHerbert Xu 2812d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, offset, 2813d626f62bSArnaldo Carvalho de Melo skb_put(nskb, hsize), hsize); 2814f4c50d99SHerbert Xu 281589319d38SHerbert Xu while (pos < offset + len && i < nfrags) { 2816f4c50d99SHerbert Xu *frag = skb_shinfo(skb)->frags[i]; 2817ea2ab693SIan Campbell __skb_frag_ref(frag); 28189e903e08SEric Dumazet size = skb_frag_size(frag); 2819f4c50d99SHerbert Xu 2820f4c50d99SHerbert Xu if (pos < offset) { 2821f4c50d99SHerbert Xu frag->page_offset += offset - pos; 28229e903e08SEric Dumazet skb_frag_size_sub(frag, offset - pos); 2823f4c50d99SHerbert Xu } 2824f4c50d99SHerbert Xu 282589319d38SHerbert Xu skb_shinfo(nskb)->nr_frags++; 2826f4c50d99SHerbert Xu 2827f4c50d99SHerbert Xu if (pos + size <= offset + len) { 2828f4c50d99SHerbert Xu i++; 2829f4c50d99SHerbert Xu pos += size; 2830f4c50d99SHerbert Xu } else { 28319e903e08SEric Dumazet skb_frag_size_sub(frag, pos + size - (offset + len)); 283289319d38SHerbert Xu goto skip_fraglist; 2833f4c50d99SHerbert Xu } 2834f4c50d99SHerbert Xu 2835f4c50d99SHerbert Xu frag++; 2836f4c50d99SHerbert Xu } 2837f4c50d99SHerbert Xu 283889319d38SHerbert Xu if (pos < offset + len) { 283989319d38SHerbert Xu struct sk_buff *fskb2 = fskb; 284089319d38SHerbert Xu 284189319d38SHerbert Xu BUG_ON(pos + fskb->len != offset + len); 284289319d38SHerbert Xu 284389319d38SHerbert Xu pos += fskb->len; 284489319d38SHerbert Xu fskb = fskb->next; 284589319d38SHerbert Xu 284689319d38SHerbert Xu if (fskb2->next) { 284789319d38SHerbert Xu fskb2 = skb_clone(fskb2, GFP_ATOMIC); 284889319d38SHerbert Xu if (!fskb2) 284989319d38SHerbert Xu goto err; 285089319d38SHerbert Xu } else 285189319d38SHerbert Xu skb_get(fskb2); 285289319d38SHerbert Xu 2853fbb398a8SDavid S. 
Miller SKB_FRAG_ASSERT(nskb); 285489319d38SHerbert Xu skb_shinfo(nskb)->frag_list = fskb2; 285589319d38SHerbert Xu } 285689319d38SHerbert Xu 285789319d38SHerbert Xu skip_fraglist: 2858f4c50d99SHerbert Xu nskb->data_len = len - hsize; 2859f4c50d99SHerbert Xu nskb->len += nskb->data_len; 2860f4c50d99SHerbert Xu nskb->truesize += nskb->data_len; 2861f4c50d99SHerbert Xu } while ((offset += len) < skb->len); 2862f4c50d99SHerbert Xu 2863f4c50d99SHerbert Xu return segs; 2864f4c50d99SHerbert Xu 2865f4c50d99SHerbert Xu err: 2866f4c50d99SHerbert Xu while ((skb = segs)) { 2867f4c50d99SHerbert Xu segs = skb->next; 2868b08d5840SPatrick McHardy kfree_skb(skb); 2869f4c50d99SHerbert Xu } 2870f4c50d99SHerbert Xu return ERR_PTR(err); 2871f4c50d99SHerbert Xu } 2872f4c50d99SHerbert Xu EXPORT_SYMBOL_GPL(skb_segment); 2873f4c50d99SHerbert Xu 287471d93b39SHerbert Xu int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 287571d93b39SHerbert Xu { 287671d93b39SHerbert Xu struct sk_buff *p = *head; 287771d93b39SHerbert Xu struct sk_buff *nskb; 28789aaa156cSHerbert Xu struct skb_shared_info *skbinfo = skb_shinfo(skb); 28799aaa156cSHerbert Xu struct skb_shared_info *pinfo = skb_shinfo(p); 288071d93b39SHerbert Xu unsigned int headroom; 288186911732SHerbert Xu unsigned int len = skb_gro_len(skb); 288267147ba9SHerbert Xu unsigned int offset = skb_gro_offset(skb); 288367147ba9SHerbert Xu unsigned int headlen = skb_headlen(skb); 2884715dc1f3SEric Dumazet unsigned int delta_truesize; 288571d93b39SHerbert Xu 288686911732SHerbert Xu if (p->len + len >= 65536) 288771d93b39SHerbert Xu return -E2BIG; 288871d93b39SHerbert Xu 28899aaa156cSHerbert Xu if (pinfo->frag_list) 289071d93b39SHerbert Xu goto merge; 289167147ba9SHerbert Xu else if (headlen <= offset) { 289242da6994SHerbert Xu skb_frag_t *frag; 289366e92fcfSHerbert Xu skb_frag_t *frag2; 28949aaa156cSHerbert Xu int i = skbinfo->nr_frags; 28959aaa156cSHerbert Xu int nr_frags = pinfo->nr_frags + i; 289642da6994SHerbert Xu 289766e92fcfSHerbert Xu offset -= headlen; 289866e92fcfSHerbert Xu 289966e92fcfSHerbert Xu if (nr_frags > MAX_SKB_FRAGS) 290081705ad1SHerbert Xu return -E2BIG; 290181705ad1SHerbert Xu 29029aaa156cSHerbert Xu pinfo->nr_frags = nr_frags; 29039aaa156cSHerbert Xu skbinfo->nr_frags = 0; 2904f5572068SHerbert Xu 29059aaa156cSHerbert Xu frag = pinfo->frags + nr_frags; 29069aaa156cSHerbert Xu frag2 = skbinfo->frags + i; 290766e92fcfSHerbert Xu do { 290866e92fcfSHerbert Xu *--frag = *--frag2; 290966e92fcfSHerbert Xu } while (--i); 291066e92fcfSHerbert Xu 291166e92fcfSHerbert Xu frag->page_offset += offset; 29129e903e08SEric Dumazet skb_frag_size_sub(frag, offset); 291366e92fcfSHerbert Xu 2914715dc1f3SEric Dumazet /* all fragments truesize : remove (head size + sk_buff) */ 2915ec47ea82SAlexander Duyck delta_truesize = skb->truesize - 2916ec47ea82SAlexander Duyck SKB_TRUESIZE(skb_end_offset(skb)); 2917715dc1f3SEric Dumazet 2918f5572068SHerbert Xu skb->truesize -= skb->data_len; 2919f5572068SHerbert Xu skb->len -= skb->data_len; 2920f5572068SHerbert Xu skb->data_len = 0; 2921f5572068SHerbert Xu 2922715dc1f3SEric Dumazet NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 29235d38a079SHerbert Xu goto done; 2924d7e8883cSEric Dumazet } else if (skb->head_frag) { 2925d7e8883cSEric Dumazet int nr_frags = pinfo->nr_frags; 2926d7e8883cSEric Dumazet skb_frag_t *frag = pinfo->frags + nr_frags; 2927d7e8883cSEric Dumazet struct page *page = virt_to_head_page(skb->head); 2928d7e8883cSEric Dumazet unsigned int first_size = headlen - offset; 2929d7e8883cSEric Dumazet unsigned int 
first_offset; 2930d7e8883cSEric Dumazet 2931d7e8883cSEric Dumazet if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 2932d7e8883cSEric Dumazet return -E2BIG; 2933d7e8883cSEric Dumazet 2934d7e8883cSEric Dumazet first_offset = skb->data - 2935d7e8883cSEric Dumazet (unsigned char *)page_address(page) + 2936d7e8883cSEric Dumazet offset; 2937d7e8883cSEric Dumazet 2938d7e8883cSEric Dumazet pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 2939d7e8883cSEric Dumazet 2940d7e8883cSEric Dumazet frag->page.p = page; 2941d7e8883cSEric Dumazet frag->page_offset = first_offset; 2942d7e8883cSEric Dumazet skb_frag_size_set(frag, first_size); 2943d7e8883cSEric Dumazet 2944d7e8883cSEric Dumazet memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 2945d7e8883cSEric Dumazet /* We don't need to clear skbinfo->nr_frags here */ 2946d7e8883cSEric Dumazet 2947715dc1f3SEric Dumazet delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 2948d7e8883cSEric Dumazet NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 2949d7e8883cSEric Dumazet goto done; 295069c0cab1SHerbert Xu } else if (skb_gro_len(p) != pinfo->gso_size) 295169c0cab1SHerbert Xu return -E2BIG; 295271d93b39SHerbert Xu 295371d93b39SHerbert Xu headroom = skb_headroom(p); 29543d3be433SEric Dumazet nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); 295571d93b39SHerbert Xu if (unlikely(!nskb)) 295671d93b39SHerbert Xu return -ENOMEM; 295771d93b39SHerbert Xu 295871d93b39SHerbert Xu __copy_skb_header(nskb, p); 295971d93b39SHerbert Xu nskb->mac_len = p->mac_len; 296071d93b39SHerbert Xu 296171d93b39SHerbert Xu skb_reserve(nskb, headroom); 296286911732SHerbert Xu __skb_put(nskb, skb_gro_offset(p)); 296371d93b39SHerbert Xu 296486911732SHerbert Xu skb_set_mac_header(nskb, skb_mac_header(p) - p->data); 296571d93b39SHerbert Xu skb_set_network_header(nskb, skb_network_offset(p)); 296671d93b39SHerbert Xu skb_set_transport_header(nskb, skb_transport_offset(p)); 296771d93b39SHerbert Xu 296886911732SHerbert Xu __skb_pull(p, skb_gro_offset(p)); 296986911732SHerbert Xu memcpy(skb_mac_header(nskb), skb_mac_header(p), 297086911732SHerbert Xu p->data - skb_mac_header(p)); 297171d93b39SHerbert Xu 297271d93b39SHerbert Xu *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); 297371d93b39SHerbert Xu skb_shinfo(nskb)->frag_list = p; 29749aaa156cSHerbert Xu skb_shinfo(nskb)->gso_size = pinfo->gso_size; 2975622e0ca1SHerbert Xu pinfo->gso_size = 0; 297671d93b39SHerbert Xu skb_header_release(p); 297771d93b39SHerbert Xu nskb->prev = p; 297871d93b39SHerbert Xu 297971d93b39SHerbert Xu nskb->data_len += p->len; 2980de8261c2SEric Dumazet nskb->truesize += p->truesize; 298171d93b39SHerbert Xu nskb->len += p->len; 298271d93b39SHerbert Xu 298371d93b39SHerbert Xu *head = nskb; 298471d93b39SHerbert Xu nskb->next = p->next; 298571d93b39SHerbert Xu p->next = NULL; 298671d93b39SHerbert Xu 298771d93b39SHerbert Xu p = nskb; 298871d93b39SHerbert Xu 298971d93b39SHerbert Xu merge: 2990715dc1f3SEric Dumazet delta_truesize = skb->truesize; 299167147ba9SHerbert Xu if (offset > headlen) { 2992d1dc7abfSMichal Schmidt unsigned int eat = offset - headlen; 2993d1dc7abfSMichal Schmidt 2994d1dc7abfSMichal Schmidt skbinfo->frags[0].page_offset += eat; 29959e903e08SEric Dumazet skb_frag_size_sub(&skbinfo->frags[0], eat); 2996d1dc7abfSMichal Schmidt skb->data_len -= eat; 2997d1dc7abfSMichal Schmidt skb->len -= eat; 299867147ba9SHerbert Xu offset = headlen; 299956035022SHerbert Xu } 300056035022SHerbert Xu 300167147ba9SHerbert Xu __skb_pull(skb, offset); 300256035022SHerbert Xu
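	/* Append skb to the frag_list chain hanging off p; p->prev
	 * tracks the tail of that chain, so the append below is O(1).
	 */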
300371d93b39SHerbert Xu p->prev->next = skb; 300471d93b39SHerbert Xu p->prev = skb; 300571d93b39SHerbert Xu skb_header_release(skb); 300671d93b39SHerbert Xu 30075d38a079SHerbert Xu done: 30085d38a079SHerbert Xu NAPI_GRO_CB(p)->count++; 300937fe4732SHerbert Xu p->data_len += len; 3010715dc1f3SEric Dumazet p->truesize += delta_truesize; 301137fe4732SHerbert Xu p->len += len; 301271d93b39SHerbert Xu 301371d93b39SHerbert Xu NAPI_GRO_CB(skb)->same_flow = 1; 301471d93b39SHerbert Xu return 0; 301571d93b39SHerbert Xu } 301671d93b39SHerbert Xu EXPORT_SYMBOL_GPL(skb_gro_receive); 301771d93b39SHerbert Xu 30181da177e4SLinus Torvalds void __init skb_init(void) 30191da177e4SLinus Torvalds { 30201da177e4SLinus Torvalds skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 30211da177e4SLinus Torvalds sizeof(struct sk_buff), 30221da177e4SLinus Torvalds 0, 3023e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 302420c2df83SPaul Mundt NULL); 3025d179cd12SDavid S. Miller skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 3026d179cd12SDavid S. Miller (2*sizeof(struct sk_buff)) + 3027d179cd12SDavid S. Miller sizeof(atomic_t), 3028d179cd12SDavid S. Miller 0, 3029e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 303020c2df83SPaul Mundt NULL); 30311da177e4SLinus Torvalds } 30321da177e4SLinus Torvalds 3033716ea3a7SDavid Howells /** 3034716ea3a7SDavid Howells * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 3035716ea3a7SDavid Howells * @skb: Socket buffer containing the buffers to be mapped 3036716ea3a7SDavid Howells * @sg: The scatter-gather list to map into 3037716ea3a7SDavid Howells * @offset: The offset into the buffer's contents to start mapping 3038716ea3a7SDavid Howells * @len: Length of buffer space to be mapped 3039716ea3a7SDavid Howells * 3040716ea3a7SDavid Howells * Fill the specified scatter-gather list with mappings/pointers into a 3041716ea3a7SDavid Howells * region of the buffer space attached to a socket buffer. 3042716ea3a7SDavid Howells */ 304351c739d1SDavid S. Miller static int 304451c739d1SDavid S. Miller __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3045716ea3a7SDavid Howells { 30461a028e50SDavid S. Miller int start = skb_headlen(skb); 30471a028e50SDavid S. Miller int i, copy = start - offset; 3048fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 3049716ea3a7SDavid Howells int elt = 0; 3050716ea3a7SDavid Howells 3051716ea3a7SDavid Howells if (copy > 0) { 3052716ea3a7SDavid Howells if (copy > len) 3053716ea3a7SDavid Howells copy = len; 3054642f1490SJens Axboe sg_set_buf(sg, skb->data + offset, copy); 3055716ea3a7SDavid Howells elt++; 3056716ea3a7SDavid Howells if ((len -= copy) == 0) 3057716ea3a7SDavid Howells return elt; 3058716ea3a7SDavid Howells offset += copy; 3059716ea3a7SDavid Howells } 3060716ea3a7SDavid Howells 3061716ea3a7SDavid Howells for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 30621a028e50SDavid S. Miller int end; 3063716ea3a7SDavid Howells 3064547b792cSIlpo Järvinen WARN_ON(start > offset + len); 30651a028e50SDavid S. 
Miller 30669e903e08SEric Dumazet end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3067716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 3068716ea3a7SDavid Howells skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3069716ea3a7SDavid Howells 3070716ea3a7SDavid Howells if (copy > len) 3071716ea3a7SDavid Howells copy = len; 3072ea2ab693SIan Campbell sg_set_page(&sg[elt], skb_frag_page(frag), copy, 3073642f1490SJens Axboe frag->page_offset+offset-start); 3074716ea3a7SDavid Howells elt++; 3075716ea3a7SDavid Howells if (!(len -= copy)) 3076716ea3a7SDavid Howells return elt; 3077716ea3a7SDavid Howells offset += copy; 3078716ea3a7SDavid Howells } 30791a028e50SDavid S. Miller start = end; 3080716ea3a7SDavid Howells } 3081716ea3a7SDavid Howells 3082fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 30831a028e50SDavid S. Miller int end; 3084716ea3a7SDavid Howells 3085547b792cSIlpo Järvinen WARN_ON(start > offset + len); 30861a028e50SDavid S. Miller 3087fbb398a8SDavid S. Miller end = start + frag_iter->len; 3088716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 3089716ea3a7SDavid Howells if (copy > len) 3090716ea3a7SDavid Howells copy = len; 3091fbb398a8SDavid S. Miller elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, 309251c739d1SDavid S. Miller copy); 3093716ea3a7SDavid Howells if ((len -= copy) == 0) 3094716ea3a7SDavid Howells return elt; 3095716ea3a7SDavid Howells offset += copy; 3096716ea3a7SDavid Howells } 30971a028e50SDavid S. Miller start = end; 3098716ea3a7SDavid Howells } 3099716ea3a7SDavid Howells BUG_ON(len); 3100716ea3a7SDavid Howells return elt; 3101716ea3a7SDavid Howells } 3102716ea3a7SDavid Howells 310351c739d1SDavid S. Miller int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 310451c739d1SDavid S. Miller { 310551c739d1SDavid S. Miller int nsg = __skb_to_sgvec(skb, sg, offset, len); 310651c739d1SDavid S. Miller 3107c46f2334SJens Axboe sg_mark_end(&sg[nsg - 1]); 310851c739d1SDavid S. Miller 310951c739d1SDavid S. Miller return nsg; 311051c739d1SDavid S. Miller } 3111b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_to_sgvec); 311251c739d1SDavid S. Miller 3113716ea3a7SDavid Howells /** 3114716ea3a7SDavid Howells * skb_cow_data - Check that a socket buffer's data buffers are writable 3115716ea3a7SDavid Howells * @skb: The socket buffer to check. 3116716ea3a7SDavid Howells * @tailbits: Amount of trailing space to be added 3117716ea3a7SDavid Howells * @trailer: Returned pointer to the skb where the @tailbits space begins 3118716ea3a7SDavid Howells * 3119716ea3a7SDavid Howells * Make sure that the data buffers attached to a socket buffer are 3120716ea3a7SDavid Howells * writable. If they are not, private copies are made of the data buffers 3121716ea3a7SDavid Howells * and the socket buffer is set to use these instead. 3122716ea3a7SDavid Howells * 3123716ea3a7SDavid Howells * If @tailbits is given, make sure that there is space to write @tailbits 3124716ea3a7SDavid Howells * bytes of data beyond current end of socket buffer. @trailer will be 3125716ea3a7SDavid Howells * set to point to the skb in which this space begins. 3126716ea3a7SDavid Howells * 3127716ea3a7SDavid Howells * The number of scatterlist elements required to completely map the 3128716ea3a7SDavid Howells * COW'd and extended socket buffer will be returned. 
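 *
 * Usage sketch, modelled on the IPsec ESP callers ('sg', 'tailbits'
 * and 'trailer' are the caller's; error handling trimmed):
 *
 *	nfrags = skb_cow_data(skb, tailbits, &trailer);
 *	if (nfrags < 0)
 *		return nfrags;
 *	sg_init_table(sg, nfrags);
 *	skb_to_sgvec(skb, sg, 0, skb->len);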
3129716ea3a7SDavid Howells */ 3130716ea3a7SDavid Howells int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 3131716ea3a7SDavid Howells { 3132716ea3a7SDavid Howells int copyflag; 3133716ea3a7SDavid Howells int elt; 3134716ea3a7SDavid Howells struct sk_buff *skb1, **skb_p; 3135716ea3a7SDavid Howells 3136716ea3a7SDavid Howells /* If skb is cloned or its head is paged, reallocate 3137716ea3a7SDavid Howells * head pulling out all the pages (pages are considered not writable 3138716ea3a7SDavid Howells * at the moment even if they are anonymous). 3139716ea3a7SDavid Howells */ 3140716ea3a7SDavid Howells if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 3141716ea3a7SDavid Howells __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) 3142716ea3a7SDavid Howells return -ENOMEM; 3143716ea3a7SDavid Howells 3144716ea3a7SDavid Howells /* Easy case. Most packets will go this way. */ 314521dc3301SDavid S. Miller if (!skb_has_frag_list(skb)) { 3146716ea3a7SDavid Howells /* A little trouble: not enough space for the trailer. 3147716ea3a7SDavid Howells * This should not happen when the stack is tuned to generate 3148716ea3a7SDavid Howells * good frames. OK, on a miss we reallocate and reserve even more 3149716ea3a7SDavid Howells * space, 128 bytes is fair. */ 3150716ea3a7SDavid Howells 3151716ea3a7SDavid Howells if (skb_tailroom(skb) < tailbits && 3152716ea3a7SDavid Howells pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 3153716ea3a7SDavid Howells return -ENOMEM; 3154716ea3a7SDavid Howells 3155716ea3a7SDavid Howells /* Voila! */ 3156716ea3a7SDavid Howells *trailer = skb; 3157716ea3a7SDavid Howells return 1; 3158716ea3a7SDavid Howells } 3159716ea3a7SDavid Howells 3160716ea3a7SDavid Howells /* Misery. We are in trouble, going to mince fragments... */ 3161716ea3a7SDavid Howells 3162716ea3a7SDavid Howells elt = 1; 3163716ea3a7SDavid Howells skb_p = &skb_shinfo(skb)->frag_list; 3164716ea3a7SDavid Howells copyflag = 0; 3165716ea3a7SDavid Howells 3166716ea3a7SDavid Howells while ((skb1 = *skb_p) != NULL) { 3167716ea3a7SDavid Howells int ntail = 0; 3168716ea3a7SDavid Howells 3169716ea3a7SDavid Howells /* The fragment is partially pulled by someone, 3170716ea3a7SDavid Howells * this can happen on input. Copy it and everything 3171716ea3a7SDavid Howells * after it. */ 3172716ea3a7SDavid Howells 3173716ea3a7SDavid Howells if (skb_shared(skb1)) 3174716ea3a7SDavid Howells copyflag = 1; 3175716ea3a7SDavid Howells 3176716ea3a7SDavid Howells /* If the skb is the last, worry about trailer. */ 3177716ea3a7SDavid Howells 3178716ea3a7SDavid Howells if (skb1->next == NULL && tailbits) { 3179716ea3a7SDavid Howells if (skb_shinfo(skb1)->nr_frags || 318021dc3301SDavid S. Miller skb_has_frag_list(skb1) || 3181716ea3a7SDavid Howells skb_tailroom(skb1) < tailbits) 3182716ea3a7SDavid Howells ntail = tailbits + 128; 3183716ea3a7SDavid Howells } 3184716ea3a7SDavid Howells 3185716ea3a7SDavid Howells if (copyflag || 3186716ea3a7SDavid Howells skb_cloned(skb1) || 3187716ea3a7SDavid Howells ntail || 3188716ea3a7SDavid Howells skb_shinfo(skb1)->nr_frags || 318921dc3301SDavid S. Miller skb_has_frag_list(skb1)) { 3190716ea3a7SDavid Howells struct sk_buff *skb2; 3191716ea3a7SDavid Howells 3192716ea3a7SDavid Howells /* Fuck, we are miserable poor guys...
*/ 3193716ea3a7SDavid Howells if (ntail == 0) 3194716ea3a7SDavid Howells skb2 = skb_copy(skb1, GFP_ATOMIC); 3195716ea3a7SDavid Howells else 3196716ea3a7SDavid Howells skb2 = skb_copy_expand(skb1, 3197716ea3a7SDavid Howells skb_headroom(skb1), 3198716ea3a7SDavid Howells ntail, 3199716ea3a7SDavid Howells GFP_ATOMIC); 3200716ea3a7SDavid Howells if (unlikely(skb2 == NULL)) 3201716ea3a7SDavid Howells return -ENOMEM; 3202716ea3a7SDavid Howells 3203716ea3a7SDavid Howells if (skb1->sk) 3204716ea3a7SDavid Howells skb_set_owner_w(skb2, skb1->sk); 3205716ea3a7SDavid Howells 3206716ea3a7SDavid Howells /* Looking around. Are we still alive? 3207716ea3a7SDavid Howells * OK, link new skb, drop old one */ 3208716ea3a7SDavid Howells 3209716ea3a7SDavid Howells skb2->next = skb1->next; 3210716ea3a7SDavid Howells *skb_p = skb2; 3211716ea3a7SDavid Howells kfree_skb(skb1); 3212716ea3a7SDavid Howells skb1 = skb2; 3213716ea3a7SDavid Howells } 3214716ea3a7SDavid Howells elt++; 3215716ea3a7SDavid Howells *trailer = skb1; 3216716ea3a7SDavid Howells skb_p = &skb1->next; 3217716ea3a7SDavid Howells } 3218716ea3a7SDavid Howells 3219716ea3a7SDavid Howells return elt; 3220716ea3a7SDavid Howells } 3221b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_cow_data); 3222716ea3a7SDavid Howells 3223b1faf566SEric Dumazet static void sock_rmem_free(struct sk_buff *skb) 3224b1faf566SEric Dumazet { 3225b1faf566SEric Dumazet struct sock *sk = skb->sk; 3226b1faf566SEric Dumazet 3227b1faf566SEric Dumazet atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3228b1faf566SEric Dumazet } 3229b1faf566SEric Dumazet 3230b1faf566SEric Dumazet /* 3231b1faf566SEric Dumazet * Note: We don't mem charge error packets (no sk_forward_alloc changes) 3232b1faf566SEric Dumazet */ 3233b1faf566SEric Dumazet int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3234b1faf566SEric Dumazet { 3235110c4330SEric Dumazet int len = skb->len; 3236110c4330SEric Dumazet 3237b1faf566SEric Dumazet if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 323895c96174SEric Dumazet (unsigned int)sk->sk_rcvbuf) 3239b1faf566SEric Dumazet return -ENOMEM; 3240b1faf566SEric Dumazet 3241b1faf566SEric Dumazet skb_orphan(skb); 3242b1faf566SEric Dumazet skb->sk = sk; 3243b1faf566SEric Dumazet skb->destructor = sock_rmem_free; 3244b1faf566SEric Dumazet atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3245b1faf566SEric Dumazet 3246abb57ea4SEric Dumazet /* before exiting rcu section, make sure dst is refcounted */ 3247abb57ea4SEric Dumazet skb_dst_force(skb); 3248abb57ea4SEric Dumazet 3249b1faf566SEric Dumazet skb_queue_tail(&sk->sk_error_queue, skb); 3250b1faf566SEric Dumazet if (!sock_flag(sk, SOCK_DEAD)) 3251110c4330SEric Dumazet sk->sk_data_ready(sk, len); 3252b1faf566SEric Dumazet return 0; 3253b1faf566SEric Dumazet } 3254b1faf566SEric Dumazet EXPORT_SYMBOL(sock_queue_err_skb); 3255b1faf566SEric Dumazet 3256ac45f602SPatrick Ohly void skb_tstamp_tx(struct sk_buff *orig_skb, 3257ac45f602SPatrick Ohly struct skb_shared_hwtstamps *hwtstamps) 3258ac45f602SPatrick Ohly { 3259ac45f602SPatrick Ohly struct sock *sk = orig_skb->sk; 3260ac45f602SPatrick Ohly struct sock_exterr_skb *serr; 3261ac45f602SPatrick Ohly struct sk_buff *skb; 3262ac45f602SPatrick Ohly int err; 3263ac45f602SPatrick Ohly 3264ac45f602SPatrick Ohly if (!sk) 3265ac45f602SPatrick Ohly return; 3266ac45f602SPatrick Ohly 3267ac45f602SPatrick Ohly skb = skb_clone(orig_skb, GFP_ATOMIC); 3268ac45f602SPatrick Ohly if (!skb) 3269ac45f602SPatrick Ohly return; 3270ac45f602SPatrick Ohly 3271ac45f602SPatrick Ohly if (hwtstamps) {
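		/* A hardware timestamp supplied by the driver takes
		 * precedence over the software timestamp taken below.
		 */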
3272ac45f602SPatrick Ohly *skb_hwtstamps(skb) = 3273ac45f602SPatrick Ohly *hwtstamps; 3274ac45f602SPatrick Ohly } else { 3275ac45f602SPatrick Ohly /* 3276ac45f602SPatrick Ohly * no hardware time stamps available, 32772244d07bSOliver Hartkopp * so keep the shared tx_flags and only 3278ac45f602SPatrick Ohly * store software time stamp 3279ac45f602SPatrick Ohly */ 3280ac45f602SPatrick Ohly skb->tstamp = ktime_get_real(); 3281ac45f602SPatrick Ohly } 3282ac45f602SPatrick Ohly 3283ac45f602SPatrick Ohly serr = SKB_EXT_ERR(skb); 3284ac45f602SPatrick Ohly memset(serr, 0, sizeof(*serr)); 3285ac45f602SPatrick Ohly serr->ee.ee_errno = ENOMSG; 3286ac45f602SPatrick Ohly serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 328729030374SEric Dumazet 3288ac45f602SPatrick Ohly err = sock_queue_err_skb(sk, skb); 328929030374SEric Dumazet 3290ac45f602SPatrick Ohly if (err) 3291ac45f602SPatrick Ohly kfree_skb(skb); 3292ac45f602SPatrick Ohly } 3293ac45f602SPatrick Ohly EXPORT_SYMBOL_GPL(skb_tstamp_tx); 3294ac45f602SPatrick Ohly 32956e3e939fSJohannes Berg void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 32966e3e939fSJohannes Berg { 32976e3e939fSJohannes Berg struct sock *sk = skb->sk; 32986e3e939fSJohannes Berg struct sock_exterr_skb *serr; 32996e3e939fSJohannes Berg int err; 33006e3e939fSJohannes Berg 33016e3e939fSJohannes Berg skb->wifi_acked_valid = 1; 33026e3e939fSJohannes Berg skb->wifi_acked = acked; 33036e3e939fSJohannes Berg 33046e3e939fSJohannes Berg serr = SKB_EXT_ERR(skb); 33056e3e939fSJohannes Berg memset(serr, 0, sizeof(*serr)); 33066e3e939fSJohannes Berg serr->ee.ee_errno = ENOMSG; 33076e3e939fSJohannes Berg serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 33086e3e939fSJohannes Berg 33096e3e939fSJohannes Berg err = sock_queue_err_skb(sk, skb); 33106e3e939fSJohannes Berg if (err) 33116e3e939fSJohannes Berg kfree_skb(skb); 33126e3e939fSJohannes Berg } 33136e3e939fSJohannes Berg EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 33146e3e939fSJohannes Berg 3315ac45f602SPatrick Ohly 3316f35d9d8aSRusty Russell /** 3317f35d9d8aSRusty Russell * skb_partial_csum_set - set up and verify partial csum values for packet 3318f35d9d8aSRusty Russell * @skb: the skb to set 3319f35d9d8aSRusty Russell * @start: the number of bytes after skb->data to start checksumming. 3320f35d9d8aSRusty Russell * @off: the offset from start to place the checksum. 3321f35d9d8aSRusty Russell * 3322f35d9d8aSRusty Russell * For untrusted partially-checksummed packets, we need to make sure the values 3323f35d9d8aSRusty Russell * for skb->csum_start and skb->csum_offset are valid so we don't oops. 3324f35d9d8aSRusty Russell * 3325f35d9d8aSRusty Russell * This function checks and sets those values and skb->ip_summed: if this 3326f35d9d8aSRusty Russell * returns false, you should drop the packet.
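 *
 * Sketch of a typical caller (virtio_net style; the csum_start and
 * csum_offset variables are illustrative):
 *
 *	if (!skb_partial_csum_set(skb, csum_start, csum_offset))
 *		goto frame_err;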
3327f35d9d8aSRusty Russell */ 3328f35d9d8aSRusty Russell bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 3329f35d9d8aSRusty Russell { 33305ff8dda3SHerbert Xu if (unlikely(start > skb_headlen(skb)) || 33315ff8dda3SHerbert Xu unlikely((int)start + off > skb_headlen(skb) - 2)) { 3332e87cc472SJoe Perches net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", 33335ff8dda3SHerbert Xu start, off, skb_headlen(skb)); 3334f35d9d8aSRusty Russell return false; 3335f35d9d8aSRusty Russell } 3336f35d9d8aSRusty Russell skb->ip_summed = CHECKSUM_PARTIAL; 3337f35d9d8aSRusty Russell skb->csum_start = skb_headroom(skb) + start; 3338f35d9d8aSRusty Russell skb->csum_offset = off; 3339f35d9d8aSRusty Russell return true; 3340f35d9d8aSRusty Russell } 3341b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_partial_csum_set); 3342f35d9d8aSRusty Russell 33434497b076SBen Hutchings void __skb_warn_lro_forwarding(const struct sk_buff *skb) 33444497b076SBen Hutchings { 3345e87cc472SJoe Perches net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 3346e87cc472SJoe Perches skb->dev->name); 33474497b076SBen Hutchings } 33484497b076SBen Hutchings EXPORT_SYMBOL(__skb_warn_lro_forwarding);