/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

/**
 * skb_panic - private function for out-of-line support
 * @skb:	buffer
 * @sz:		size
 * @addr:	address
 * @msg:	skb_over_panic or skb_under_panic
 *
 * Out-of-line support for skb_put() and skb_push().
 * Called via the wrapper skb_over_panic() or skb_under_panic().
 * Keep out of line to prevent kernel bloat.
 * __builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
                      const char msg[])
{
        pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
                 msg, addr, skb->len, sz, skb->head, skb->data,
                 (unsigned long)skb->tail, (unsigned long)skb->end,
                 skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
        skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
        skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
        __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
                               unsigned long ip, bool *pfmemalloc)
{
        void *obj;
        bool ret_pfmemalloc = false;

        /*
         * Try a regular allocation, when that fails and we're not entitled
         * to the reserves, fail.
         */
        obj = kmalloc_node_track_caller(size,
                                        flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
                                        node);
        if (obj || !(gfp_pfmemalloc_allowed(flags)))
                goto out;

        /* Try again but now we are using pfmemalloc reserves */
        ret_pfmemalloc = true;
        obj = kmalloc_node_track_caller(size, flags, node);

out:
        if (pfmemalloc)
                *pfmemalloc = ret_pfmemalloc;

        return obj;
}

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 */

struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
{
        struct sk_buff *skb;

        /* Get the HEAD */
        skb = kmem_cache_alloc_node(skbuff_head_cache,
                                    gfp_mask & ~__GFP_DMA, node);
        if (!skb)
                goto out;

        /*
         * Only clear those fields we need to clear, not those that we will
         * actually initialise below. Hence, don't put any more fields after
         * the tail pointer in struct sk_buff!
         */
        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->head = NULL;
        skb->truesize = sizeof(struct sk_buff);
        atomic_set(&skb->users, 1);

        skb->mac_header = (typeof(skb->mac_header))~0U;
out:
        return skb;
}

/**
 * __alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @gfp_mask: allocation mask
 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *	instead of head cache and allocate a cloned (child) skb.
 *	If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *	allocations in case the data is required for writeback
 * @node: numa node to allocate memory on
 *
 * Allocate a new &sk_buff. The returned buffer has no headroom and a
 * tail room of at least size bytes. The object has a reference count
 * of one. The return is the buffer. On a failure the return is %NULL.
 *
 * Buffers may only be allocated from interrupts using a @gfp_mask of
 * %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                            int flags, int node)
{
        struct kmem_cache *cache;
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        u8 *data;
        bool pfmemalloc;

        cache = (flags & SKB_ALLOC_FCLONE)
                ? skbuff_fclone_cache : skbuff_head_cache;

        if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
                gfp_mask |= __GFP_MEMALLOC;

        /* Get the HEAD */
        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
        if (!skb)
                goto out;
        prefetchw(skb);

        /* We do our best to align skb_shared_info on a separate cache
         * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
         * aligned memory blocks, unless SLUB/SLAB debug is enabled.
         * Both skb->head and skb_shared_info are cache line aligned.
         */
        size = SKB_DATA_ALIGN(size);
        size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
        if (!data)
                goto nodata;
        /* kmalloc(size) might give us more room than requested.
         * Put skb_shared_info exactly at the end of allocated zone,
         * to allow max possible filling before reallocation.
         */
        size = SKB_WITH_OVERHEAD(ksize(data));
        prefetchw(data + size);

        /*
         * Only clear those fields we need to clear, not those that we will
         * actually initialise below. Hence, don't put any more fields after
         * the tail pointer in struct sk_buff!
         */
        memset(skb, 0, offsetof(struct sk_buff, tail));
        /* Account for allocated memory : skb + skb->head */
        skb->truesize = SKB_TRUESIZE(size);
        skb->pfmemalloc = pfmemalloc;
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
        skb->mac_header = (typeof(skb->mac_header))~0U;
        skb->transport_header = (typeof(skb->transport_header))~0U;

        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
        kmemcheck_annotate_variable(shinfo->destructor_arg);

        if (flags & SKB_ALLOC_FCLONE) {
                struct sk_buff *child = skb + 1;
                atomic_t *fclone_ref = (atomic_t *) (child + 1);

                kmemcheck_annotate_bitfield(child, flags1);
                kmemcheck_annotate_bitfield(child, flags2);
                skb->fclone = SKB_FCLONE_ORIG;
                atomic_set(fclone_ref, 1);

                child->fclone = SKB_FCLONE_UNAVAILABLE;
                child->pfmemalloc = pfmemalloc;
        }
out:
        return skb;
nodata:
        kmem_cache_free(cache, skb);
        skb = NULL;
        goto out;
}
EXPORT_SYMBOL(__alloc_skb);
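
/*
 * Example (an illustrative sketch, not part of the original file): most
 * code reaches __alloc_skb() through the alloc_skb() wrapper from
 * <linux/skbuff.h>. A typical transmit-side pattern, with hlen, dlen and
 * payload standing in for caller-supplied values:
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);
 *	memcpy(skb_put(skb, dlen), payload, dlen);
 */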

/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of fragment, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator.
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        unsigned int size = frag_size ? : ksize(data);

        skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
        if (!skb)
                return NULL;

        size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->truesize = SKB_TRUESIZE(size);
        skb->head_frag = frag_size != 0;
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
        skb->mac_header = (typeof(skb->mac_header))~0U;
        skb->transport_header = (typeof(skb->transport_header))~0U;

        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
        kmemcheck_annotate_variable(shinfo->destructor_arg);

        return skb;
}
EXPORT_SYMBOL(build_skb);
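
/*
 * Example (an illustrative sketch, not part of the original file): with a
 * kmalloc()ed head, @frag_size must be 0 so build_skb() sizes the buffer
 * via ksize(); data and len are hypothetical driver-side variables:
 *
 *	data = kmalloc(SKB_DATA_ALIGN(NET_SKB_PAD + len) +
 *		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
 *		       GFP_ATOMIC);
 *	skb = build_skb(data, 0);
 *	if (!skb)
 *		kfree(data);
 *	else
 *		skb_reserve(skb, NET_SKB_PAD);
 */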

struct netdev_alloc_cache {
        struct page_frag frag;
        /* we maintain a pagecount bias, so that we don't dirty cache line
         * containing page->_count every time we allocate a fragment.
         */
        unsigned int pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
        struct netdev_alloc_cache *nc;
        void *data = NULL;
        int order;
        unsigned long flags;

        local_irq_save(flags);
        nc = &__get_cpu_var(netdev_alloc_cache);
        if (unlikely(!nc->frag.page)) {
refill:
                for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
                        gfp_t gfp = gfp_mask;

                        if (order)
                                gfp |= __GFP_COMP | __GFP_NOWARN;
                        nc->frag.page = alloc_pages(gfp, order);
                        if (likely(nc->frag.page))
                                break;
                        if (--order < 0)
                                goto end;
                }
                nc->frag.size = PAGE_SIZE << order;
recycle:
                atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
                nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
                nc->frag.offset = 0;
        }

        if (nc->frag.offset + fragsz > nc->frag.size) {
                /* avoid unnecessary locked operations if possible */
                if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
                    atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
                        goto recycle;
                goto refill;
        }

        data = page_address(nc->frag.page) + nc->frag.offset;
        nc->frag.offset += fragsz;
        nc->pagecnt_bias--;
end:
        local_irq_restore(flags);
        return data;
}

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
        return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(netdev_alloc_frag);
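
/*
 * Example (an illustrative sketch, not part of the original file): pairing
 * netdev_alloc_frag() with build_skb(), as __netdev_alloc_skb() below does
 * internally; buflen is a hypothetical driver-side value:
 *
 *	fragsz = SKB_DATA_ALIGN(NET_SKB_PAD + buflen) +
 *		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	data = netdev_alloc_frag(fragsz);
 *	if (data) {
 *		skb = build_skb(data, fragsz);
 *		if (!skb)
 *			put_page(virt_to_head_page(data));
 *	}
 */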
The 4048af27456SChristoph Hellwig * buffer has unspecified headroom built in. Users should allocate 4058af27456SChristoph Hellwig * the headroom they think they need without accounting for the 4068af27456SChristoph Hellwig * built in space. The built in space is used for optimisations. 4078af27456SChristoph Hellwig * 4088af27456SChristoph Hellwig * %NULL is returned if there is no free memory. 4098af27456SChristoph Hellwig */ 4108af27456SChristoph Hellwig struct sk_buff *__netdev_alloc_skb(struct net_device *dev, 4118af27456SChristoph Hellwig unsigned int length, gfp_t gfp_mask) 4128af27456SChristoph Hellwig { 4136f532612SEric Dumazet struct sk_buff *skb = NULL; 414a1c7fff7SEric Dumazet unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) + 415a1c7fff7SEric Dumazet SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 4168af27456SChristoph Hellwig 417310e158cSEric Dumazet if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) { 418c93bdd0eSMel Gorman void *data; 419c93bdd0eSMel Gorman 420c93bdd0eSMel Gorman if (sk_memalloc_socks()) 421c93bdd0eSMel Gorman gfp_mask |= __GFP_MEMALLOC; 422c93bdd0eSMel Gorman 423c93bdd0eSMel Gorman data = __netdev_alloc_frag(fragsz, gfp_mask); 424a1c7fff7SEric Dumazet 4256f532612SEric Dumazet if (likely(data)) { 4266f532612SEric Dumazet skb = build_skb(data, fragsz); 4276f532612SEric Dumazet if (unlikely(!skb)) 4286f532612SEric Dumazet put_page(virt_to_head_page(data)); 429a1c7fff7SEric Dumazet } 430a1c7fff7SEric Dumazet } else { 431c93bdd0eSMel Gorman skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 432c93bdd0eSMel Gorman SKB_ALLOC_RX, NUMA_NO_NODE); 433a1c7fff7SEric Dumazet } 4347b2e497aSChristoph Hellwig if (likely(skb)) { 4358af27456SChristoph Hellwig skb_reserve(skb, NET_SKB_PAD); 4367b2e497aSChristoph Hellwig skb->dev = dev; 4377b2e497aSChristoph Hellwig } 4388af27456SChristoph Hellwig return skb; 4398af27456SChristoph Hellwig } 440b4ac530fSDavid S. 

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
                     int size, unsigned int truesize)
{
        skb_fill_page_desc(skb, i, page, off, size);
        skb->len += size;
        skb->data_len += size;
        skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
                          unsigned int truesize)
{
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

        skb_frag_size_add(frag, size);
        skb->len += size;
        skb->data_len += size;
        skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);
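
/*
 * Example (an illustrative sketch, not part of the original file): a driver
 * attaching a received page to an skb with skb_add_rx_frag(); page, off and
 * frag_len are hypothetical, and truesize should reflect the memory the
 * fragment really consumes (here a whole page):
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off,
 *			frag_len, PAGE_SIZE);
 */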

static void skb_drop_list(struct sk_buff **listp)
{
        kfree_skb_list(*listp);
        *listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
        skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
        struct sk_buff *list;

        skb_walk_frags(skb, list)
                skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
        if (skb->head_frag)
                put_page(virt_to_head_page(skb->head));
        else
                kfree(skb->head);
}

static void skb_release_data(struct sk_buff *skb)
{
        if (!skb->cloned ||
            !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
                               &skb_shinfo(skb)->dataref)) {
                if (skb_shinfo(skb)->nr_frags) {
                        int i;
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                                skb_frag_unref(skb, i);
                }

                /*
                 * If skb buf is from userspace, we need to notify the caller
                 * that the lower device DMA is done;
                 */
                if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
                        struct ubuf_info *uarg;

                        uarg = skb_shinfo(skb)->destructor_arg;
                        if (uarg->callback)
                                uarg->callback(uarg, true);
                }

                if (skb_has_frag_list(skb))
                        skb_drop_fraglist(skb);

                skb_free_head(skb);
        }
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
        struct sk_buff *other;
        atomic_t *fclone_ref;

        switch (skb->fclone) {
        case SKB_FCLONE_UNAVAILABLE:
                kmem_cache_free(skbuff_head_cache, skb);
                break;

        case SKB_FCLONE_ORIG:
                fclone_ref = (atomic_t *) (skb + 2);
                if (atomic_dec_and_test(fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, skb);
                break;

        case SKB_FCLONE_CLONE:
                fclone_ref = (atomic_t *) (skb + 1);
                other = skb - 1;

                /* The clone portion is available for
                 * fast-cloning again.
                 */
                skb->fclone = SKB_FCLONE_UNAVAILABLE;

                if (atomic_dec_and_test(fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, other);
                break;
        }
}

static void skb_release_head_state(struct sk_buff *skb)
{
        skb_dst_drop(skb);
#ifdef CONFIG_XFRM
        secpath_put(skb->sp);
#endif
        if (skb->destructor) {
                WARN_ON(in_irq());
                skb->destructor(skb);
        }
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        nf_conntrack_put(skb->nfct);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
        skb_release_head_state(skb);
        if (likely(skb->head))
                skb_release_data(skb);
}

/**
 * __kfree_skb - private function
 * @skb: buffer
 *
 * Free an sk_buff. Release anything attached to the buffer.
 * Clean the state. This is an internal helper function. Users should
 * always call kfree_skb().
 */
void __kfree_skb(struct sk_buff *skb)
{
        skb_release_all(skb);
        kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 * kfree_skb - free an sk_buff
 * @skb: buffer to free
 *
 * Drop a reference to the buffer and free it if the usage count has
 * hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
        if (unlikely(!skb))
                return;
        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
        trace_kfree_skb(skb, __builtin_return_address(0));
        __kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
        while (segs) {
                struct sk_buff *next = segs->next;

                kfree_skb(segs);
                segs = next;
        }
}
EXPORT_SYMBOL(kfree_skb_list);

/**
 * skb_tx_error - report an sk_buff xmit error
 * @skb: buffer that triggered an error
 *
 * Report xmit error if a device callback is tracking this skb.
 * skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
        if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
                struct ubuf_info *uarg;

                uarg = skb_shinfo(skb)->destructor_arg;
                if (uarg->callback)
                        uarg->callback(uarg, false);
                skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
        }
}
EXPORT_SYMBOL(skb_tx_error);
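
/*
 * Example (an illustrative sketch, not part of the original file): a
 * zerocopy-aware driver failing a transmit after it has taken ownership
 * of the skb; the mapping-error test is hypothetical:
 *
 *	if (dma_mapping_error(dev, dma_addr)) {
 *		skb_tx_error(skb);
 *		dev_kfree_skb_any(skb);
 *		return NETDEV_TX_OK;
 *	}
 */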

/**
 * consume_skb - free an skbuff
 * @skb: buffer to free
 *
 * Drop a ref to the buffer and free it if the usage count has hit zero.
 * Functions identically to kfree_skb, but kfree_skb assumes that the frame
 * is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
        if (unlikely(!skb))
                return;
        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
        trace_consume_skb(skb);
        __kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
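
/*
 * Example (an illustrative sketch, not part of the original file): a TX
 * completion handler should use consume_skb() so that drop-monitoring
 * tools only see kfree_skb() for genuine drops; tx_ok is a hypothetical
 * per-descriptor status flag:
 *
 *	if (tx_ok)
 *		consume_skb(skb);
 *	else
 *		kfree_skb(skb);
 */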

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
        new->tstamp = old->tstamp;
        new->dev = old->dev;
        new->transport_header = old->transport_header;
        new->network_header = old->network_header;
        new->mac_header = old->mac_header;
        new->inner_protocol = old->inner_protocol;
        new->inner_transport_header = old->inner_transport_header;
        new->inner_network_header = old->inner_network_header;
        new->inner_mac_header = old->inner_mac_header;
        skb_dst_copy(new, old);
        skb_copy_hash(new, old);
        new->ooo_okay = old->ooo_okay;
        new->no_fcs = old->no_fcs;
        new->encapsulation = old->encapsulation;
#ifdef CONFIG_XFRM
        new->sp = secpath_get(old->sp);
#endif
        memcpy(new->cb, old->cb, sizeof(old->cb));
        new->csum = old->csum;
        new->local_df = old->local_df;
        new->pkt_type = old->pkt_type;
        new->ip_summed = old->ip_summed;
        skb_copy_queue_mapping(new, old);
        new->priority = old->priority;
#if IS_ENABLED(CONFIG_IP_VS)
        new->ipvs_property = old->ipvs_property;
#endif
        new->pfmemalloc = old->pfmemalloc;
        new->protocol = old->protocol;
        new->mark = old->mark;
        new->skb_iif = old->skb_iif;
        __nf_copy(new, old);
#ifdef CONFIG_NET_SCHED
        new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
        new->tc_verd = old->tc_verd;
#endif
#endif
        new->vlan_proto = old->vlan_proto;
        new->vlan_tci = old->vlan_tci;

        skb_copy_secmark(new, old);

#ifdef CONFIG_NET_RX_BUSY_POLL
        new->napi_id = old->napi_id;
#endif
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

        n->next = n->prev = NULL;
        n->sk = NULL;
        __copy_skb_header(n, skb);

        C(len);
        C(data_len);
        C(mac_len);
        n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
        n->cloned = 1;
        n->nohdr = 0;
        n->destructor = NULL;
        C(tail);
        C(end);
        C(head);
        C(head_frag);
        C(data);
        C(truesize);
        atomic_set(&n->users, 1);

        atomic_inc(&(skb_shinfo(skb)->dataref));
        skb->cloned = 1;

        return n;
#undef C
}

/**
 * skb_morph - morph one skb into another
 * @dst: the skb to receive the contents
 * @src: the skb to supply the contents
 *
 * This is identical to skb_clone except that the target skb is
 * supplied by the user.
 *
 * The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
        skb_release_all(dst);
        return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
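
/*
 * Example (an illustrative sketch, not part of the original file):
 * reassembly-style code can morph a caller-owned shell so that it becomes
 * a clone of another buffer; head and frags are hypothetical. After the
 * morph, head shares frags' data (dataref was bumped), so releasing frags
 * is safe:
 *
 *	skb_morph(head, frags);
 *	consume_skb(frags);
 */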

/**
 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
 * @skb: the skb to modify
 * @gfp_mask: allocation priority
 *
 * This must be called on SKBTX_DEV_ZEROCOPY skb.
 * It will copy all frags into kernel and drop the reference
 * to userspace pages.
 *
 * If this function is called from an interrupt gfp_mask() must be
 * %GFP_ATOMIC.
 *
 * Returns 0 on success or a negative error code on failure
 * to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
        int i;
        int num_frags = skb_shinfo(skb)->nr_frags;
        struct page *page, *head = NULL;
        struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

        for (i = 0; i < num_frags; i++) {
                u8 *vaddr;
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];

                page = alloc_page(gfp_mask);
                if (!page) {
                        while (head) {
                                struct page *next = (struct page *)page_private(head);
                                put_page(head);
                                head = next;
                        }
                        return -ENOMEM;
                }
                vaddr = kmap_atomic(skb_frag_page(f));
                memcpy(page_address(page),
                       vaddr + f->page_offset, skb_frag_size(f));
                kunmap_atomic(vaddr);
                set_page_private(page, (unsigned long)head);
                head = page;
        }

        /* skb frags release userspace buffers */
        for (i = 0; i < num_frags; i++)
                skb_frag_unref(skb, i);

        uarg->callback(uarg, false);

        /* skb frags point to kernel buffers */
        for (i = num_frags - 1; i >= 0; i--) {
                __skb_fill_page_desc(skb, i, head, 0,
                                     skb_shinfo(skb)->frags[i].size);
                head = (struct page *)page_private(head);
        }

        skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
        return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);
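
/*
 * Example (an illustrative sketch, not part of the original file): paths
 * that may hold onto frags indefinitely flatten userspace (zerocopy)
 * pages first, mirroring what the skb_orphan_frags() helper does
 * internally:
 *
 *	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 *		if (skb_copy_ubufs(skb, GFP_ATOMIC))
 *			goto drop;
 *	}
 */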

/**
 * skb_clone - duplicate an sk_buff
 * @skb: buffer to clone
 * @gfp_mask: allocation priority
 *
 * Duplicate an &sk_buff. The new one is not owned by a socket. Both
 * copies share the same packet data but not structure. The new
 * buffer has a reference count of 1. If the allocation fails the
 * function returns %NULL otherwise the new buffer is returned.
 *
 * If this function is called from an interrupt gfp_mask() must be
 * %GFP_ATOMIC.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
        struct sk_buff *n;

        if (skb_orphan_frags(skb, gfp_mask))
                return NULL;

        n = skb + 1;
        if (skb->fclone == SKB_FCLONE_ORIG &&
            n->fclone == SKB_FCLONE_UNAVAILABLE) {
                atomic_t *fclone_ref = (atomic_t *) (n + 1);
                n->fclone = SKB_FCLONE_CLONE;
                atomic_inc(fclone_ref);
        } else {
                if (skb_pfmemalloc(skb))
                        gfp_mask |= __GFP_MEMALLOC;

                n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
                if (!n)
                        return NULL;

                kmemcheck_annotate_bitfield(n, flags1);
                kmemcheck_annotate_bitfield(n, flags2);
                n->fclone = SKB_FCLONE_UNAVAILABLE;
        }

        return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

static void skb_headers_offset_update(struct sk_buff *skb, int off)
{
        /* Only adjust this if it actually is csum_start rather than csum */
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                skb->csum_start += off;
        /* {transport,network,mac}_header and tail are relative to skb->head */
        skb->transport_header += off;
        skb->network_header += off;
        if (skb_mac_header_was_set(skb))
                skb->mac_header += off;
        skb->inner_transport_header += off;
        skb->inner_network_header += off;
        skb->inner_mac_header += off;
}

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
        __copy_skb_header(new, old);

        skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
        skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
        skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
        if (skb_pfmemalloc(skb))
                return SKB_ALLOC_RX;
        return 0;
}
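
/*
 * Example (an illustrative sketch, not part of the original file):
 * duplicating a buffer header cheaply to hand one copy to a second
 * consumer; deliver_to_tap() is a hypothetical function:
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (nskb)
 *		deliver_to_tap(nskb);
 */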

/**
 * skb_copy - create private copy of an sk_buff
 * @skb: buffer to copy
 * @gfp_mask: allocation priority
 *
 * Make a copy of both an &sk_buff and its data. This is used when the
 * caller wishes to modify the data and needs a private copy of the
 * data to alter. Returns %NULL on failure or the pointer to the buffer
 * on success. The returned buffer has a reference count of 1.
 *
 * As a by-product this function converts the non-linear &sk_buff to linear
 * one, so that &sk_buff becomes completely private and caller is allowed
 * to modify all the data of returned buffer. This means that this
 * function is not recommended for use in circumstances when only
 * header is going to be modified. Use pskb_copy() instead.
 */
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
        int headerlen = skb_headroom(skb);
        unsigned int size = skb_end_offset(skb) + skb->data_len;
        struct sk_buff *n = __alloc_skb(size, gfp_mask,
                                        skb_alloc_rx_flag(skb), NUMA_NO_NODE);

        if (!n)
                return NULL;

        /* Set the data pointer */
        skb_reserve(n, headerlen);
        /* Set the tail pointer and length */
        skb_put(n, skb->len);

        if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
                BUG();

        copy_skb_header(n, skb);
        return n;
}
EXPORT_SYMBOL(skb_copy);
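
/*
 * Example (an illustrative sketch, not part of the original file): taking
 * a fully private, linear copy before rewriting payload bytes in place;
 * mangle_payload() is a hypothetical function:
 *
 *	struct sk_buff *priv = skb_copy(skb, GFP_ATOMIC);
 *
 *	if (!priv)
 *		return -ENOMEM;
 *	mangle_payload(priv->data, priv->len);
 *
 * priv is safe to modify because nothing else references its data.
 */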

/**
 * __pskb_copy - create copy of an sk_buff with private head.
 * @skb: buffer to copy
 * @headroom: headroom of new skb
 * @gfp_mask: allocation priority
 *
 * Make a copy of both an &sk_buff and part of its data, located
 * in header. Fragmented data remain shared. This is used when
 * the caller wishes to modify only header of &sk_buff and needs
 * private copy of the header to alter. Returns %NULL on failure
 * or the pointer to the buffer on success.
 * The returned buffer has a reference count of 1.
 */
struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
{
        unsigned int size = skb_headlen(skb) + headroom;
        struct sk_buff *n = __alloc_skb(size, gfp_mask,
                                        skb_alloc_rx_flag(skb), NUMA_NO_NODE);

        if (!n)
                goto out;

        /* Set the data pointer */
        skb_reserve(n, headroom);
        /* Set the tail pointer and length */
        skb_put(n, skb_headlen(skb));
        /* Copy the bytes */
        skb_copy_from_linear_data(skb, n->data, n->len);

        n->truesize += skb->data_len;
        n->data_len = skb->data_len;
        n->len = skb->len;

        if (skb_shinfo(skb)->nr_frags) {
                int i;

                if (skb_orphan_frags(skb, gfp_mask)) {
                        kfree_skb(n);
                        n = NULL;
                        goto out;
                }
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
                        skb_frag_ref(skb, i);
                }
                skb_shinfo(n)->nr_frags = i;
        }

        if (skb_has_frag_list(skb)) {
                skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
                skb_clone_fraglist(n);
        }

        copy_skb_header(n, skb);
out:
        return n;
}
EXPORT_SYMBOL(__pskb_copy);
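
/*
 * Example (an illustrative sketch, not part of the original file): the
 * common entry point is the pskb_copy() wrapper from <linux/skbuff.h>,
 * which preserves the original headroom; modify_headers() is hypothetical:
 *
 *	struct sk_buff *n = pskb_copy(skb, GFP_ATOMIC);
 *
 *	if (n)
 *		modify_headers(n);
 *
 * Only the header copy is private; the paged fragments remain shared.
 */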

/**
 * pskb_expand_head - reallocate header of &sk_buff
 * @skb: buffer to reallocate
 * @nhead: room to add at head
 * @ntail: room to add at tail
 * @gfp_mask: allocation priority
 *
 * Expands (or creates identical copy, if @nhead and @ntail are zero)
 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 * reference count of 1. Returns zero in the case of success or error,
 * if expansion failed. In the last case, &sk_buff is not changed.
 *
 * All the pointers pointing into skb header may change and must be
 * reloaded after call to this function.
 */
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
                     gfp_t gfp_mask)
{
        int i;
        u8 *data;
        int size = nhead + skb_end_offset(skb) + ntail;
        long off;

        BUG_ON(nhead < 0);

        if (skb_shared(skb))
                BUG();

        size = SKB_DATA_ALIGN(size);

        if (skb_pfmemalloc(skb))
                gfp_mask |= __GFP_MEMALLOC;
        data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
                               gfp_mask, NUMA_NO_NODE, NULL);
        if (!data)
                goto nodata;
        size = SKB_WITH_OVERHEAD(ksize(data));

        /* Copy only real data... and, alas, header. This should be
         * optimized for the cases when header is void.
         */
        memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

        memcpy((struct skb_shared_info *)(data + size),
               skb_shinfo(skb),
               offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

        /*
         * if shinfo is shared we must drop the old head gracefully, but if it
         * is not we can just drop the old head and let the existing refcount
         * be since all we did is relocate the values
         */
        if (skb_cloned(skb)) {
                /* copy this zero copy skb frags */
                if (skb_orphan_frags(skb, gfp_mask))
                        goto nofrags;
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                        skb_frag_ref(skb, i);

                if (skb_has_frag_list(skb))
                        skb_clone_fraglist(skb);

                skb_release_data(skb);
        } else {
                skb_free_head(skb);
        }
        off = (data + nhead) - skb->head;

        skb->head = data;
        skb->head_frag = 0;
        skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
        skb->end = size;
        off = nhead;
#else
        skb->end = skb->head + size;
#endif
        skb->tail += off;
        skb_headers_offset_update(skb, nhead);
        skb->cloned = 0;
        skb->hdr_len = 0;
        skb->nohdr = 0;
        atomic_set(&skb_shinfo(skb)->dataref, 1);
        return 0;

nofrags:
        kfree(data);
nodata:
        return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
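
/*
 * Example (an illustrative sketch, not part of the original file): growing
 * headroom on an unshared skb before pushing an extra header; hdr_len is
 * hypothetical:
 *
 *	if (skb_headroom(skb) < hdr_len &&
 *	    pskb_expand_head(skb, hdr_len - skb_headroom(skb), 0, GFP_ATOMIC))
 *		goto drop;
 *	skb_push(skb, hdr_len);
 */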

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
        struct sk_buff *skb2;
        int delta = headroom - skb_headroom(skb);

        if (delta <= 0)
                skb2 = pskb_copy(skb, GFP_ATOMIC);
        else {
                skb2 = skb_clone(skb, GFP_ATOMIC);
                if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
                                             GFP_ATOMIC)) {
                        kfree_skb(skb2);
                        skb2 = NULL;
                }
        }
        return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
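
/*
 * Example (an illustrative sketch, not part of the original file): the
 * pattern used on output paths when the device needs more link-layer
 * headroom:
 *
 *	struct sk_buff *skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
 *
 *	if (!skb2) {
 *		kfree_skb(skb);
 *		return -ENOMEM;
 *	}
 *	consume_skb(skb);
 *	skb = skb2;
 */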
11411da177e4SLinus Torvalds * 11421da177e4SLinus Torvalds * You must pass %GFP_ATOMIC as the allocation priority if this function 11431da177e4SLinus Torvalds * is called from an interrupt. 11441da177e4SLinus Torvalds */ 11451da177e4SLinus Torvalds struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 114686a76cafSVictor Fusco int newheadroom, int newtailroom, 1147dd0fc66fSAl Viro gfp_t gfp_mask) 11481da177e4SLinus Torvalds { 11491da177e4SLinus Torvalds /* 11501da177e4SLinus Torvalds * Allocate the copy buffer 11511da177e4SLinus Torvalds */ 1152c93bdd0eSMel Gorman struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, 1153c93bdd0eSMel Gorman gfp_mask, skb_alloc_rx_flag(skb), 1154c93bdd0eSMel Gorman NUMA_NO_NODE); 1155efd1e8d5SPatrick McHardy int oldheadroom = skb_headroom(skb); 11561da177e4SLinus Torvalds int head_copy_len, head_copy_off; 11571da177e4SLinus Torvalds 11581da177e4SLinus Torvalds if (!n) 11591da177e4SLinus Torvalds return NULL; 11601da177e4SLinus Torvalds 11611da177e4SLinus Torvalds skb_reserve(n, newheadroom); 11621da177e4SLinus Torvalds 11631da177e4SLinus Torvalds /* Set the tail pointer and length */ 11641da177e4SLinus Torvalds skb_put(n, skb->len); 11651da177e4SLinus Torvalds 1166efd1e8d5SPatrick McHardy head_copy_len = oldheadroom; 11671da177e4SLinus Torvalds head_copy_off = 0; 11681da177e4SLinus Torvalds if (newheadroom <= head_copy_len) 11691da177e4SLinus Torvalds head_copy_len = newheadroom; 11701da177e4SLinus Torvalds else 11711da177e4SLinus Torvalds head_copy_off = newheadroom - head_copy_len; 11721da177e4SLinus Torvalds 11731da177e4SLinus Torvalds /* Copy the linear header and data. */ 11741da177e4SLinus Torvalds if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 11751da177e4SLinus Torvalds skb->len + head_copy_len)) 11761da177e4SLinus Torvalds BUG(); 11771da177e4SLinus Torvalds 11781da177e4SLinus Torvalds copy_skb_header(n, skb); 11791da177e4SLinus Torvalds 1180030737bcSEric Dumazet skb_headers_offset_update(n, newheadroom - oldheadroom); 1181efd1e8d5SPatrick McHardy 11821da177e4SLinus Torvalds return n; 11831da177e4SLinus Torvalds } 1184b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_expand); 11851da177e4SLinus Torvalds 11861da177e4SLinus Torvalds /** 11871da177e4SLinus Torvalds * skb_pad - zero pad the tail of an skb 11881da177e4SLinus Torvalds * @skb: buffer to pad 11891da177e4SLinus Torvalds * @pad: space to pad 11901da177e4SLinus Torvalds * 11911da177e4SLinus Torvalds * Ensure that a buffer is followed by a padding area that is zero 11921da177e4SLinus Torvalds * filled. Used by network drivers which may DMA or transfer data 11931da177e4SLinus Torvalds * beyond the buffer end onto the wire. 11941da177e4SLinus Torvalds * 11955b057c6bSHerbert Xu * May return error in out of memory cases. The skb is freed on error. 11961da177e4SLinus Torvalds */ 11971da177e4SLinus Torvalds 11985b057c6bSHerbert Xu int skb_pad(struct sk_buff *skb, int pad) 11991da177e4SLinus Torvalds { 12005b057c6bSHerbert Xu int err; 12015b057c6bSHerbert Xu int ntail; 12021da177e4SLinus Torvalds 12031da177e4SLinus Torvalds /* If the skbuff is non linear tailroom is always zero.. 
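 * We can therefore pad in place only when the skb is not cloned and the
 * existing tailroom already covers @pad; otherwise we reallocate and/or
 * linearize below.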
*/ 12045b057c6bSHerbert Xu if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 12051da177e4SLinus Torvalds memset(skb->data+skb->len, 0, pad); 12065b057c6bSHerbert Xu return 0; 12071da177e4SLinus Torvalds } 12081da177e4SLinus Torvalds 12094305b541SArnaldo Carvalho de Melo ntail = skb->data_len + pad - (skb->end - skb->tail); 12105b057c6bSHerbert Xu if (likely(skb_cloned(skb) || ntail > 0)) { 12115b057c6bSHerbert Xu err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 12125b057c6bSHerbert Xu if (unlikely(err)) 12135b057c6bSHerbert Xu goto free_skb; 12145b057c6bSHerbert Xu } 12155b057c6bSHerbert Xu 12165b057c6bSHerbert Xu /* FIXME: The use of this function with non-linear skb's really needs 12175b057c6bSHerbert Xu * to be audited. 12185b057c6bSHerbert Xu */ 12195b057c6bSHerbert Xu err = skb_linearize(skb); 12205b057c6bSHerbert Xu if (unlikely(err)) 12215b057c6bSHerbert Xu goto free_skb; 12225b057c6bSHerbert Xu 12235b057c6bSHerbert Xu memset(skb->data + skb->len, 0, pad); 12245b057c6bSHerbert Xu return 0; 12255b057c6bSHerbert Xu 12265b057c6bSHerbert Xu free_skb: 12271da177e4SLinus Torvalds kfree_skb(skb); 12285b057c6bSHerbert Xu return err; 12291da177e4SLinus Torvalds } 1230b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_pad); 12311da177e4SLinus Torvalds 12320dde3e16SIlpo Järvinen /** 12330c7ddf36SMathias Krause * pskb_put - add data to the tail of a potentially fragmented buffer 12340c7ddf36SMathias Krause * @skb: start of the buffer to use 12350c7ddf36SMathias Krause * @tail: tail fragment of the buffer to use 12360c7ddf36SMathias Krause * @len: amount of data to add 12370c7ddf36SMathias Krause * 12380c7ddf36SMathias Krause * This function extends the used data area of the potentially 12390c7ddf36SMathias Krause * fragmented buffer. @tail must be the last fragment of @skb -- or 12400c7ddf36SMathias Krause * @skb itself. If this would exceed the total buffer size the kernel 12410c7ddf36SMathias Krause * will panic. A pointer to the first byte of the extra data is 12420c7ddf36SMathias Krause * returned. 12430c7ddf36SMathias Krause */ 12440c7ddf36SMathias Krause 12450c7ddf36SMathias Krause unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) 12460c7ddf36SMathias Krause { 12470c7ddf36SMathias Krause if (tail != skb) { 12480c7ddf36SMathias Krause skb->data_len += len; 12490c7ddf36SMathias Krause skb->len += len; 12500c7ddf36SMathias Krause } 12510c7ddf36SMathias Krause return skb_put(tail, len); 12520c7ddf36SMathias Krause } 12530c7ddf36SMathias Krause EXPORT_SYMBOL_GPL(pskb_put); 12540c7ddf36SMathias Krause 12550c7ddf36SMathias Krause /** 12560dde3e16SIlpo Järvinen * skb_put - add data to a buffer 12570dde3e16SIlpo Järvinen * @skb: buffer to use 12580dde3e16SIlpo Järvinen * @len: amount of data to add 12590dde3e16SIlpo Järvinen * 12600dde3e16SIlpo Järvinen * This function extends the used data area of the buffer. If this would 12610dde3e16SIlpo Järvinen * exceed the total buffer size the kernel will panic. A pointer to the 12620dde3e16SIlpo Järvinen * first byte of the extra data is returned. 
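 *
 * A minimal usage sketch (illustrative only; the 64-byte size is
 * hypothetical):
 *
 *	struct sk_buff *skb = alloc_skb(64, GFP_ATOMIC);
 *
 *	if (skb)
 *		memset(skb_put(skb, 64), 0, 64);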
12630dde3e16SIlpo Järvinen */ 12640dde3e16SIlpo Järvinen unsigned char *skb_put(struct sk_buff *skb, unsigned int len) 12650dde3e16SIlpo Järvinen { 12660dde3e16SIlpo Järvinen unsigned char *tmp = skb_tail_pointer(skb); 12670dde3e16SIlpo Järvinen SKB_LINEAR_ASSERT(skb); 12680dde3e16SIlpo Järvinen skb->tail += len; 12690dde3e16SIlpo Järvinen skb->len += len; 12700dde3e16SIlpo Järvinen if (unlikely(skb->tail > skb->end)) 12710dde3e16SIlpo Järvinen skb_over_panic(skb, len, __builtin_return_address(0)); 12720dde3e16SIlpo Järvinen return tmp; 12730dde3e16SIlpo Järvinen } 12740dde3e16SIlpo Järvinen EXPORT_SYMBOL(skb_put); 12750dde3e16SIlpo Järvinen 12766be8ac2fSIlpo Järvinen /** 1277c2aa270aSIlpo Järvinen * skb_push - add data to the start of a buffer 1278c2aa270aSIlpo Järvinen * @skb: buffer to use 1279c2aa270aSIlpo Järvinen * @len: amount of data to add 1280c2aa270aSIlpo Järvinen * 1281c2aa270aSIlpo Järvinen * This function extends the used data area of the buffer at the buffer 1282c2aa270aSIlpo Järvinen * start. If this would exceed the total buffer headroom the kernel will 1283c2aa270aSIlpo Järvinen * panic. A pointer to the first byte of the extra data is returned. 1284c2aa270aSIlpo Järvinen */ 1285c2aa270aSIlpo Järvinen unsigned char *skb_push(struct sk_buff *skb, unsigned int len) 1286c2aa270aSIlpo Järvinen { 1287c2aa270aSIlpo Järvinen skb->data -= len; 1288c2aa270aSIlpo Järvinen skb->len += len; 1289c2aa270aSIlpo Järvinen if (unlikely(skb->data<skb->head)) 1290c2aa270aSIlpo Järvinen skb_under_panic(skb, len, __builtin_return_address(0)); 1291c2aa270aSIlpo Järvinen return skb->data; 1292c2aa270aSIlpo Järvinen } 1293c2aa270aSIlpo Järvinen EXPORT_SYMBOL(skb_push); 1294c2aa270aSIlpo Järvinen 1295c2aa270aSIlpo Järvinen /** 12966be8ac2fSIlpo Järvinen * skb_pull - remove data from the start of a buffer 12976be8ac2fSIlpo Järvinen * @skb: buffer to use 12986be8ac2fSIlpo Järvinen * @len: amount of data to remove 12996be8ac2fSIlpo Järvinen * 13006be8ac2fSIlpo Järvinen * This function removes data from the start of a buffer, returning 13016be8ac2fSIlpo Järvinen * the memory to the headroom. A pointer to the next data in the buffer 13026be8ac2fSIlpo Järvinen * is returned. Once the data has been pulled future pushes will overwrite 13036be8ac2fSIlpo Järvinen * the old data. 13046be8ac2fSIlpo Järvinen */ 13056be8ac2fSIlpo Järvinen unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) 13066be8ac2fSIlpo Järvinen { 130747d29646SDavid S. Miller return skb_pull_inline(skb, len); 13086be8ac2fSIlpo Järvinen } 13096be8ac2fSIlpo Järvinen EXPORT_SYMBOL(skb_pull); 13106be8ac2fSIlpo Järvinen 1311419ae74eSIlpo Järvinen /** 1312419ae74eSIlpo Järvinen * skb_trim - remove end from a buffer 1313419ae74eSIlpo Järvinen * @skb: buffer to alter 1314419ae74eSIlpo Järvinen * @len: new length 1315419ae74eSIlpo Järvinen * 1316419ae74eSIlpo Järvinen * Cut the length of a buffer down by removing data from the tail. If 1317419ae74eSIlpo Järvinen * the buffer is already under the length specified it is not modified. 1318419ae74eSIlpo Järvinen * The skb must be linear. 1319419ae74eSIlpo Järvinen */ 1320419ae74eSIlpo Järvinen void skb_trim(struct sk_buff *skb, unsigned int len) 1321419ae74eSIlpo Järvinen { 1322419ae74eSIlpo Järvinen if (skb->len > len) 1323419ae74eSIlpo Järvinen __skb_trim(skb, len); 1324419ae74eSIlpo Järvinen } 1325419ae74eSIlpo Järvinen EXPORT_SYMBOL(skb_trim); 1326419ae74eSIlpo Järvinen 13273cc0e873SHerbert Xu /* Trims skb to length len. It can change skb pointers. 
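 * Callers normally reach this through the pskb_trim() wrapper; this is
 * the slow path for buffers that carry paged fragments or a frag list.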
13281da177e4SLinus Torvalds */ 13291da177e4SLinus Torvalds 13303cc0e873SHerbert Xu int ___pskb_trim(struct sk_buff *skb, unsigned int len) 13311da177e4SLinus Torvalds { 133227b437c8SHerbert Xu struct sk_buff **fragp; 133327b437c8SHerbert Xu struct sk_buff *frag; 13341da177e4SLinus Torvalds int offset = skb_headlen(skb); 13351da177e4SLinus Torvalds int nfrags = skb_shinfo(skb)->nr_frags; 13361da177e4SLinus Torvalds int i; 133727b437c8SHerbert Xu int err; 133827b437c8SHerbert Xu 133927b437c8SHerbert Xu if (skb_cloned(skb) && 134027b437c8SHerbert Xu unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 134127b437c8SHerbert Xu return err; 13421da177e4SLinus Torvalds 1343f4d26fb3SHerbert Xu i = 0; 1344f4d26fb3SHerbert Xu if (offset >= len) 1345f4d26fb3SHerbert Xu goto drop_pages; 1346f4d26fb3SHerbert Xu 1347f4d26fb3SHerbert Xu for (; i < nfrags; i++) { 13489e903e08SEric Dumazet int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 134927b437c8SHerbert Xu 135027b437c8SHerbert Xu if (end < len) { 13511da177e4SLinus Torvalds offset = end; 135227b437c8SHerbert Xu continue; 13531da177e4SLinus Torvalds } 13541da177e4SLinus Torvalds 13559e903e08SEric Dumazet skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 135627b437c8SHerbert Xu 1357f4d26fb3SHerbert Xu drop_pages: 135827b437c8SHerbert Xu skb_shinfo(skb)->nr_frags = i; 135927b437c8SHerbert Xu 136027b437c8SHerbert Xu for (; i < nfrags; i++) 1361ea2ab693SIan Campbell skb_frag_unref(skb, i); 136227b437c8SHerbert Xu 136321dc3301SDavid S. Miller if (skb_has_frag_list(skb)) 136427b437c8SHerbert Xu skb_drop_fraglist(skb); 1365f4d26fb3SHerbert Xu goto done; 136627b437c8SHerbert Xu } 136727b437c8SHerbert Xu 136827b437c8SHerbert Xu for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 136927b437c8SHerbert Xu fragp = &frag->next) { 137027b437c8SHerbert Xu int end = offset + frag->len; 137127b437c8SHerbert Xu 137227b437c8SHerbert Xu if (skb_shared(frag)) { 137327b437c8SHerbert Xu struct sk_buff *nfrag; 137427b437c8SHerbert Xu 137527b437c8SHerbert Xu nfrag = skb_clone(frag, GFP_ATOMIC); 137627b437c8SHerbert Xu if (unlikely(!nfrag)) 137727b437c8SHerbert Xu return -ENOMEM; 137827b437c8SHerbert Xu 137927b437c8SHerbert Xu nfrag->next = frag->next; 138085bb2a60SEric Dumazet consume_skb(frag); 138127b437c8SHerbert Xu frag = nfrag; 138227b437c8SHerbert Xu *fragp = frag; 138327b437c8SHerbert Xu } 138427b437c8SHerbert Xu 138527b437c8SHerbert Xu if (end < len) { 138627b437c8SHerbert Xu offset = end; 138727b437c8SHerbert Xu continue; 138827b437c8SHerbert Xu } 138927b437c8SHerbert Xu 139027b437c8SHerbert Xu if (end > len && 139127b437c8SHerbert Xu unlikely((err = pskb_trim(frag, len - offset)))) 139227b437c8SHerbert Xu return err; 139327b437c8SHerbert Xu 139427b437c8SHerbert Xu if (frag->next) 139527b437c8SHerbert Xu skb_drop_list(&frag->next); 139627b437c8SHerbert Xu break; 139727b437c8SHerbert Xu } 139827b437c8SHerbert Xu 1399f4d26fb3SHerbert Xu done: 140027b437c8SHerbert Xu if (len > skb_headlen(skb)) { 14011da177e4SLinus Torvalds skb->data_len -= skb->len - len; 14021da177e4SLinus Torvalds skb->len = len; 14031da177e4SLinus Torvalds } else { 14041da177e4SLinus Torvalds skb->len = len; 14051da177e4SLinus Torvalds skb->data_len = 0; 140627a884dcSArnaldo Carvalho de Melo skb_set_tail_pointer(skb, len); 14071da177e4SLinus Torvalds } 14081da177e4SLinus Torvalds 14091da177e4SLinus Torvalds return 0; 14101da177e4SLinus Torvalds } 1411b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(___pskb_trim); 14121da177e4SLinus Torvalds 14131da177e4SLinus Torvalds /** 14141da177e4SLinus Torvalds * __pskb_pull_tail - advance tail of skb header 14151da177e4SLinus Torvalds * @skb: buffer to reallocate 14161da177e4SLinus Torvalds * @delta: number of bytes to advance tail 14171da177e4SLinus Torvalds * 14181da177e4SLinus Torvalds * This function makes sense only on a fragmented &sk_buff; 14191da177e4SLinus Torvalds * it expands the header, moving its tail forward and copying the 14201da177e4SLinus Torvalds * necessary data from the fragmented part. 14211da177e4SLinus Torvalds * 14221da177e4SLinus Torvalds * The &sk_buff MUST have a reference count of 1. 14231da177e4SLinus Torvalds * 14241da177e4SLinus Torvalds * Returns %NULL (and the &sk_buff is unchanged) if the pull failed, 14251da177e4SLinus Torvalds * or the value of the new tail of the skb on success. 14261da177e4SLinus Torvalds * 14271da177e4SLinus Torvalds * All the pointers pointing into the skb header may change and must be 14281da177e4SLinus Torvalds * reloaded after a call to this function. 14291da177e4SLinus Torvalds */ 14301da177e4SLinus Torvalds 14311da177e4SLinus Torvalds /* Moves the tail of the skb head forward, copying data from the 14321da177e4SLinus Torvalds * fragmented part when necessary. 14331da177e4SLinus Torvalds * 1. It may fail due to malloc failure. 14341da177e4SLinus Torvalds * 2. It may change skb pointers. 14351da177e4SLinus Torvalds * 14361da177e4SLinus Torvalds * It is pretty complicated. Luckily, it is called only in exceptional cases. 14371da177e4SLinus Torvalds */ 14381da177e4SLinus Torvalds unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) 14391da177e4SLinus Torvalds { 14401da177e4SLinus Torvalds /* If the skb does not have enough free space at the tail, get a new 14411da177e4SLinus Torvalds * one plus 128 bytes for future expansions. If we have enough 14421da177e4SLinus Torvalds * room at the tail, reallocate without expansion only if the skb is cloned. 14431da177e4SLinus Torvalds */ 14444305b541SArnaldo Carvalho de Melo int i, k, eat = (skb->tail + delta) - skb->end; 14451da177e4SLinus Torvalds 14461da177e4SLinus Torvalds if (eat > 0 || skb_cloned(skb)) { 14471da177e4SLinus Torvalds if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 14481da177e4SLinus Torvalds GFP_ATOMIC)) 14491da177e4SLinus Torvalds return NULL; 14501da177e4SLinus Torvalds } 14511da177e4SLinus Torvalds 145227a884dcSArnaldo Carvalho de Melo if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)) 14531da177e4SLinus Torvalds BUG(); 14541da177e4SLinus Torvalds 14551da177e4SLinus Torvalds /* Optimization: no fragments, no reason to preestimate the 14561da177e4SLinus Torvalds * size of the pulled pages. Superb. 14571da177e4SLinus Torvalds */ 145821dc3301SDavid S. Miller if (!skb_has_frag_list(skb)) 14591da177e4SLinus Torvalds goto pull_pages; 14601da177e4SLinus Torvalds 14611da177e4SLinus Torvalds /* Estimate the size of the pulled pages. */ 14621da177e4SLinus Torvalds eat = delta; 14631da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 14649e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 14659e903e08SEric Dumazet 14669e903e08SEric Dumazet if (size >= eat) 14671da177e4SLinus Torvalds goto pull_pages; 14689e903e08SEric Dumazet eat -= size; 14691da177e4SLinus Torvalds } 14701da177e4SLinus Torvalds 14711da177e4SLinus Torvalds /* If we need to update the frag list, we are in trouble.
14721da177e4SLinus Torvalds * Certainly, it is possible to add an offset to the skb data, 14731da177e4SLinus Torvalds * but taking into account that pulling is expected to 14741da177e4SLinus Torvalds * be a very rare operation, it is worth fighting against 14751da177e4SLinus Torvalds * further bloating of the skb head and crucifying ourselves here instead. 14761da177e4SLinus Torvalds * Pure masochism, indeed. 8)8) 14771da177e4SLinus Torvalds */ 14781da177e4SLinus Torvalds if (eat) { 14791da177e4SLinus Torvalds struct sk_buff *list = skb_shinfo(skb)->frag_list; 14801da177e4SLinus Torvalds struct sk_buff *clone = NULL; 14811da177e4SLinus Torvalds struct sk_buff *insp = NULL; 14821da177e4SLinus Torvalds 14831da177e4SLinus Torvalds do { 148409a62660SKris Katterjohn BUG_ON(!list); 14851da177e4SLinus Torvalds 14861da177e4SLinus Torvalds if (list->len <= eat) { 14871da177e4SLinus Torvalds /* Eaten as whole. */ 14881da177e4SLinus Torvalds eat -= list->len; 14891da177e4SLinus Torvalds list = list->next; 14901da177e4SLinus Torvalds insp = list; 14911da177e4SLinus Torvalds } else { 14921da177e4SLinus Torvalds /* Eaten partially. */ 14931da177e4SLinus Torvalds 14941da177e4SLinus Torvalds if (skb_shared(list)) { 14951da177e4SLinus Torvalds /* Sucks! We need to fork the list. :-( */ 14961da177e4SLinus Torvalds clone = skb_clone(list, GFP_ATOMIC); 14971da177e4SLinus Torvalds if (!clone) 14981da177e4SLinus Torvalds return NULL; 14991da177e4SLinus Torvalds insp = list->next; 15001da177e4SLinus Torvalds list = clone; 15011da177e4SLinus Torvalds } else { 15021da177e4SLinus Torvalds /* This may be pulled without 15031da177e4SLinus Torvalds * problems. */ 15041da177e4SLinus Torvalds insp = list; 15051da177e4SLinus Torvalds } 15061da177e4SLinus Torvalds if (!pskb_pull(list, eat)) { 15071da177e4SLinus Torvalds kfree_skb(clone); 15081da177e4SLinus Torvalds return NULL; 15091da177e4SLinus Torvalds } 15101da177e4SLinus Torvalds break; 15111da177e4SLinus Torvalds } 15121da177e4SLinus Torvalds } while (eat); 15131da177e4SLinus Torvalds 15141da177e4SLinus Torvalds /* Free the pulled-out fragments. */ 15151da177e4SLinus Torvalds while ((list = skb_shinfo(skb)->frag_list) != insp) { 15161da177e4SLinus Torvalds skb_shinfo(skb)->frag_list = list->next; 15171da177e4SLinus Torvalds kfree_skb(list); 15181da177e4SLinus Torvalds } 15191da177e4SLinus Torvalds /* And insert the new clone at the head. */ 15201da177e4SLinus Torvalds if (clone) { 15211da177e4SLinus Torvalds clone->next = list; 15221da177e4SLinus Torvalds skb_shinfo(skb)->frag_list = clone; 15231da177e4SLinus Torvalds } 15241da177e4SLinus Torvalds } 15251da177e4SLinus Torvalds /* Success! Now we may commit changes to the skb data.
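 * That is: advance the tail pointer, shrink data_len and compact the
 * frag array in the pull_pages loop below.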
*/ 15261da177e4SLinus Torvalds 15271da177e4SLinus Torvalds pull_pages: 15281da177e4SLinus Torvalds eat = delta; 15291da177e4SLinus Torvalds k = 0; 15301da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 15319e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 15329e903e08SEric Dumazet 15339e903e08SEric Dumazet if (size <= eat) { 1534ea2ab693SIan Campbell skb_frag_unref(skb, i); 15359e903e08SEric Dumazet eat -= size; 15361da177e4SLinus Torvalds } else { 15371da177e4SLinus Torvalds skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 15381da177e4SLinus Torvalds if (eat) { 15391da177e4SLinus Torvalds skb_shinfo(skb)->frags[k].page_offset += eat; 15409e903e08SEric Dumazet skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); 15411da177e4SLinus Torvalds eat = 0; 15421da177e4SLinus Torvalds } 15431da177e4SLinus Torvalds k++; 15441da177e4SLinus Torvalds } 15451da177e4SLinus Torvalds } 15461da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = k; 15471da177e4SLinus Torvalds 15481da177e4SLinus Torvalds skb->tail += delta; 15491da177e4SLinus Torvalds skb->data_len -= delta; 15501da177e4SLinus Torvalds 155127a884dcSArnaldo Carvalho de Melo return skb_tail_pointer(skb); 15521da177e4SLinus Torvalds } 1553b4ac530fSDavid S. Miller EXPORT_SYMBOL(__pskb_pull_tail); 15541da177e4SLinus Torvalds 155522019b17SEric Dumazet /** 155622019b17SEric Dumazet * skb_copy_bits - copy bits from skb to kernel buffer 155722019b17SEric Dumazet * @skb: source skb 155822019b17SEric Dumazet * @offset: offset in source 155922019b17SEric Dumazet * @to: destination buffer 156022019b17SEric Dumazet * @len: number of bytes to copy 156122019b17SEric Dumazet * 156222019b17SEric Dumazet * Copy the specified number of bytes from the source skb to the 156322019b17SEric Dumazet * destination buffer. 156422019b17SEric Dumazet * 156522019b17SEric Dumazet * CAUTION ! : 156622019b17SEric Dumazet * If its prototype is ever changed, 156722019b17SEric Dumazet * check arch/{*}/net/{*}.S files, 156822019b17SEric Dumazet * since it is called from BPF assembly code. 156922019b17SEric Dumazet */ 15701da177e4SLinus Torvalds int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 15711da177e4SLinus Torvalds { 15721a028e50SDavid S. Miller int start = skb_headlen(skb); 1573fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 1574fbb398a8SDavid S. Miller int i, copy; 15751da177e4SLinus Torvalds 15761da177e4SLinus Torvalds if (offset > (int)skb->len - len) 15771da177e4SLinus Torvalds goto fault; 15781da177e4SLinus Torvalds 15791da177e4SLinus Torvalds /* Copy header. */ 15801a028e50SDavid S. Miller if ((copy = start - offset) > 0) { 15811da177e4SLinus Torvalds if (copy > len) 15821da177e4SLinus Torvalds copy = len; 1583d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, offset, to, copy); 15841da177e4SLinus Torvalds if ((len -= copy) == 0) 15851da177e4SLinus Torvalds return 0; 15861da177e4SLinus Torvalds offset += copy; 15871da177e4SLinus Torvalds to += copy; 15881da177e4SLinus Torvalds } 15891da177e4SLinus Torvalds 15901da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 15911a028e50SDavid S. Miller int end; 159251c56b00SEric Dumazet skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 15931da177e4SLinus Torvalds 1594547b792cSIlpo Järvinen WARN_ON(start > offset + len); 15951a028e50SDavid S. 
Miller 159651c56b00SEric Dumazet end = start + skb_frag_size(f); 15971da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 15981da177e4SLinus Torvalds u8 *vaddr; 15991da177e4SLinus Torvalds 16001da177e4SLinus Torvalds if (copy > len) 16011da177e4SLinus Torvalds copy = len; 16021da177e4SLinus Torvalds 160351c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(f)); 16041da177e4SLinus Torvalds memcpy(to, 160551c56b00SEric Dumazet vaddr + f->page_offset + offset - start, 160651c56b00SEric Dumazet copy); 160751c56b00SEric Dumazet kunmap_atomic(vaddr); 16081da177e4SLinus Torvalds 16091da177e4SLinus Torvalds if ((len -= copy) == 0) 16101da177e4SLinus Torvalds return 0; 16111da177e4SLinus Torvalds offset += copy; 16121da177e4SLinus Torvalds to += copy; 16131da177e4SLinus Torvalds } 16141a028e50SDavid S. Miller start = end; 16151da177e4SLinus Torvalds } 16161da177e4SLinus Torvalds 1617fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 16181a028e50SDavid S. Miller int end; 16191da177e4SLinus Torvalds 1620547b792cSIlpo Järvinen WARN_ON(start > offset + len); 16211a028e50SDavid S. Miller 1622fbb398a8SDavid S. Miller end = start + frag_iter->len; 16231da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 16241da177e4SLinus Torvalds if (copy > len) 16251da177e4SLinus Torvalds copy = len; 1626fbb398a8SDavid S. Miller if (skb_copy_bits(frag_iter, offset - start, to, copy)) 16271da177e4SLinus Torvalds goto fault; 16281da177e4SLinus Torvalds if ((len -= copy) == 0) 16291da177e4SLinus Torvalds return 0; 16301da177e4SLinus Torvalds offset += copy; 16311da177e4SLinus Torvalds to += copy; 16321da177e4SLinus Torvalds } 16331a028e50SDavid S. Miller start = end; 16341da177e4SLinus Torvalds } 1635a6686f2fSShirley Ma 16361da177e4SLinus Torvalds if (!len) 16371da177e4SLinus Torvalds return 0; 16381da177e4SLinus Torvalds 16391da177e4SLinus Torvalds fault: 16401da177e4SLinus Torvalds return -EFAULT; 16411da177e4SLinus Torvalds } 1642b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_bits); 16431da177e4SLinus Torvalds 16449c55e01cSJens Axboe /* 16459c55e01cSJens Axboe * Callback from splice_to_pipe(), if we need to release some pages 16469c55e01cSJens Axboe * at the end of the spd in case we error'ed out in filling the pipe. 16479c55e01cSJens Axboe */ 16489c55e01cSJens Axboe static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 16499c55e01cSJens Axboe { 16508b9d3728SJarek Poplawski put_page(spd->pages[i]); 16518b9d3728SJarek Poplawski } 16529c55e01cSJens Axboe 1653a108d5f3SDavid S. 
Miller static struct page *linear_to_page(struct page *page, unsigned int *len, 16544fb66994SJarek Poplawski unsigned int *offset, 165518aafc62SEric Dumazet struct sock *sk) 16568b9d3728SJarek Poplawski { 16575640f768SEric Dumazet struct page_frag *pfrag = sk_page_frag(sk); 16588b9d3728SJarek Poplawski 16595640f768SEric Dumazet if (!sk_page_frag_refill(sk, pfrag)) 16608b9d3728SJarek Poplawski return NULL; 16614fb66994SJarek Poplawski 16625640f768SEric Dumazet *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 16634fb66994SJarek Poplawski 16645640f768SEric Dumazet memcpy(page_address(pfrag->page) + pfrag->offset, 16655640f768SEric Dumazet page_address(page) + *offset, *len); 16665640f768SEric Dumazet *offset = pfrag->offset; 16675640f768SEric Dumazet pfrag->offset += *len; 16684fb66994SJarek Poplawski 16695640f768SEric Dumazet return pfrag->page; 16709c55e01cSJens Axboe } 16719c55e01cSJens Axboe 167241c73a0dSEric Dumazet static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 167341c73a0dSEric Dumazet struct page *page, 167441c73a0dSEric Dumazet unsigned int offset) 167541c73a0dSEric Dumazet { 167641c73a0dSEric Dumazet return spd->nr_pages && 167741c73a0dSEric Dumazet spd->pages[spd->nr_pages - 1] == page && 167841c73a0dSEric Dumazet (spd->partial[spd->nr_pages - 1].offset + 167941c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len == offset); 168041c73a0dSEric Dumazet } 168141c73a0dSEric Dumazet 16829c55e01cSJens Axboe /* 16839c55e01cSJens Axboe * Fill page/offset/length into spd, if it can hold more pages. 16849c55e01cSJens Axboe */ 1685a108d5f3SDavid S. Miller static bool spd_fill_page(struct splice_pipe_desc *spd, 168635f3d14dSJens Axboe struct pipe_inode_info *pipe, struct page *page, 16874fb66994SJarek Poplawski unsigned int *len, unsigned int offset, 168818aafc62SEric Dumazet bool linear, 16897a67e56fSJarek Poplawski struct sock *sk) 16909c55e01cSJens Axboe { 169141c73a0dSEric Dumazet if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 1692a108d5f3SDavid S. Miller return true; 16939c55e01cSJens Axboe 16948b9d3728SJarek Poplawski if (linear) { 169518aafc62SEric Dumazet page = linear_to_page(page, len, &offset, sk); 16968b9d3728SJarek Poplawski if (!page) 1697a108d5f3SDavid S. Miller return true; 169841c73a0dSEric Dumazet } 169941c73a0dSEric Dumazet if (spd_can_coalesce(spd, page, offset)) { 170041c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len += *len; 1701a108d5f3SDavid S. Miller return false; 170241c73a0dSEric Dumazet } 17038b9d3728SJarek Poplawski get_page(page); 17049c55e01cSJens Axboe spd->pages[spd->nr_pages] = page; 17054fb66994SJarek Poplawski spd->partial[spd->nr_pages].len = *len; 17069c55e01cSJens Axboe spd->partial[spd->nr_pages].offset = offset; 17079c55e01cSJens Axboe spd->nr_pages++; 17088b9d3728SJarek Poplawski 1709a108d5f3SDavid S. Miller return false; 17109c55e01cSJens Axboe } 17119c55e01cSJens Axboe 1712a108d5f3SDavid S. Miller static bool __splice_segment(struct page *page, unsigned int poff, 17132870c43dSOctavian Purdila unsigned int plen, unsigned int *off, 171418aafc62SEric Dumazet unsigned int *len, 1715d7ccf7c0SEric Dumazet struct splice_pipe_desc *spd, bool linear, 171635f3d14dSJens Axboe struct sock *sk, 171735f3d14dSJens Axboe struct pipe_inode_info *pipe) 17189c55e01cSJens Axboe { 17192870c43dSOctavian Purdila if (!*len) 1720a108d5f3SDavid S. 
Miller return true; 17219c55e01cSJens Axboe 17222870c43dSOctavian Purdila /* skip this segment if already processed */ 17232870c43dSOctavian Purdila if (*off >= plen) { 17242870c43dSOctavian Purdila *off -= plen; 1725a108d5f3SDavid S. Miller return false; 17262870c43dSOctavian Purdila } 17272870c43dSOctavian Purdila 17282870c43dSOctavian Purdila /* ignore any bits we already processed */ 17299ca1b22dSEric Dumazet poff += *off; 17309ca1b22dSEric Dumazet plen -= *off; 17312870c43dSOctavian Purdila *off = 0; 17322870c43dSOctavian Purdila 173318aafc62SEric Dumazet do { 173418aafc62SEric Dumazet unsigned int flen = min(*len, plen); 17352870c43dSOctavian Purdila 173618aafc62SEric Dumazet if (spd_fill_page(spd, pipe, page, &flen, poff, 173718aafc62SEric Dumazet linear, sk)) 1738a108d5f3SDavid S. Miller return true; 173918aafc62SEric Dumazet poff += flen; 174018aafc62SEric Dumazet plen -= flen; 17412870c43dSOctavian Purdila *len -= flen; 174218aafc62SEric Dumazet } while (*len && plen); 17432870c43dSOctavian Purdila 1744a108d5f3SDavid S. Miller return false; 1745db43a282SOctavian Purdila } 17469c55e01cSJens Axboe 17479c55e01cSJens Axboe /* 1748a108d5f3SDavid S. Miller * Map linear and fragment data from the skb to spd. It reports true if the 17492870c43dSOctavian Purdila * pipe is full or if we already spliced the requested length. 17509c55e01cSJens Axboe */ 1751a108d5f3SDavid S. Miller static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 175235f3d14dSJens Axboe unsigned int *offset, unsigned int *len, 175335f3d14dSJens Axboe struct splice_pipe_desc *spd, struct sock *sk) 17542870c43dSOctavian Purdila { 17552870c43dSOctavian Purdila int seg; 17569c55e01cSJens Axboe 17571d0c0b32SEric Dumazet /* map the linear part : 17582996d31fSAlexander Duyck * If skb->head_frag is set, this 'linear' part is backed by a 17592996d31fSAlexander Duyck * fragment, and if the head is not shared with any clones then 17602996d31fSAlexander Duyck * we can avoid a copy since we own the head portion of this page. 17619c55e01cSJens Axboe */ 17622870c43dSOctavian Purdila if (__splice_segment(virt_to_page(skb->data), 17632870c43dSOctavian Purdila (unsigned long) skb->data & (PAGE_SIZE - 1), 17642870c43dSOctavian Purdila skb_headlen(skb), 176518aafc62SEric Dumazet offset, len, spd, 17663a7c1ee4SAlexander Duyck skb_head_is_locked(skb), 17671d0c0b32SEric Dumazet sk, pipe)) 1768a108d5f3SDavid S. Miller return true; 17699c55e01cSJens Axboe 17709c55e01cSJens Axboe /* 17719c55e01cSJens Axboe * then map the fragments 17729c55e01cSJens Axboe */ 17739c55e01cSJens Axboe for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 17749c55e01cSJens Axboe const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 17759c55e01cSJens Axboe 1776ea2ab693SIan Campbell if (__splice_segment(skb_frag_page(f), 17779e903e08SEric Dumazet f->page_offset, skb_frag_size(f), 177818aafc62SEric Dumazet offset, len, spd, false, sk, pipe)) 1779a108d5f3SDavid S. Miller return true; 17809c55e01cSJens Axboe } 17819c55e01cSJens Axboe 1782a108d5f3SDavid S. Miller return false; 17839c55e01cSJens Axboe } 17849c55e01cSJens Axboe 17859c55e01cSJens Axboe /* 17869c55e01cSJens Axboe * Map data from the skb to a pipe. Should handle both the linear part, 17879c55e01cSJens Axboe * the fragments, and the frag list. It does NOT handle frag lists within 17889c55e01cSJens Axboe * the frag list, if such a thing exists. We'd probably need to recurse to 17899c55e01cSJens Axboe * handle that cleanly. 
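 *
 * Illustrative call pattern (a sketch, not taken from a real caller):
 * a protocol's splice_read path hands each receive-queue skb to this
 * helper, roughly as
 *
 *	ret = skb_splice_bits(skb, offset, pipe, len, flags);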
17909c55e01cSJens Axboe */ 17918b9d3728SJarek Poplawski int skb_splice_bits(struct sk_buff *skb, unsigned int offset, 17929c55e01cSJens Axboe struct pipe_inode_info *pipe, unsigned int tlen, 17939c55e01cSJens Axboe unsigned int flags) 17949c55e01cSJens Axboe { 179541c73a0dSEric Dumazet struct partial_page partial[MAX_SKB_FRAGS]; 179641c73a0dSEric Dumazet struct page *pages[MAX_SKB_FRAGS]; 17979c55e01cSJens Axboe struct splice_pipe_desc spd = { 17989c55e01cSJens Axboe .pages = pages, 17999c55e01cSJens Axboe .partial = partial, 1800047fe360SEric Dumazet .nr_pages_max = MAX_SKB_FRAGS, 18019c55e01cSJens Axboe .flags = flags, 180228a625cbSMiklos Szeredi .ops = &nosteal_pipe_buf_ops, 18039c55e01cSJens Axboe .spd_release = sock_spd_release, 18049c55e01cSJens Axboe }; 1805fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 18067a67e56fSJarek Poplawski struct sock *sk = skb->sk; 180735f3d14dSJens Axboe int ret = 0; 180835f3d14dSJens Axboe 18099c55e01cSJens Axboe /* 18109c55e01cSJens Axboe * __skb_splice_bits() only fails if the output has no room left, 18119c55e01cSJens Axboe * so no point in going over the frag_list for the error case. 18129c55e01cSJens Axboe */ 181335f3d14dSJens Axboe if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) 18149c55e01cSJens Axboe goto done; 18159c55e01cSJens Axboe else if (!tlen) 18169c55e01cSJens Axboe goto done; 18179c55e01cSJens Axboe 18189c55e01cSJens Axboe /* 18199c55e01cSJens Axboe * now see if we have a frag_list to map 18209c55e01cSJens Axboe */ 1821fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 1822fbb398a8SDavid S. Miller if (!tlen) 18239c55e01cSJens Axboe break; 182435f3d14dSJens Axboe if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk)) 1825fbb398a8SDavid S. Miller break; 18269c55e01cSJens Axboe } 18279c55e01cSJens Axboe 18289c55e01cSJens Axboe done: 18299c55e01cSJens Axboe if (spd.nr_pages) { 18309c55e01cSJens Axboe /* 18319c55e01cSJens Axboe * Drop the socket lock, otherwise we have reverse 18329c55e01cSJens Axboe * locking dependencies between sk_lock and i_mutex 18339c55e01cSJens Axboe * here as compared to sendfile(). We enter here 18349c55e01cSJens Axboe * with the socket lock held, and splice_to_pipe() will 18359c55e01cSJens Axboe * grab the pipe inode lock. For sendfile() emulation, 18369c55e01cSJens Axboe * we call into ->sendpage() with the i_mutex lock held 18379c55e01cSJens Axboe * and networking will grab the socket lock. 18389c55e01cSJens Axboe */ 1839293ad604SOctavian Purdila release_sock(sk); 18409c55e01cSJens Axboe ret = splice_to_pipe(pipe, &spd); 1841293ad604SOctavian Purdila lock_sock(sk); 18429c55e01cSJens Axboe } 18439c55e01cSJens Axboe 184435f3d14dSJens Axboe return ret; 18459c55e01cSJens Axboe } 18469c55e01cSJens Axboe 1847357b40a1SHerbert Xu /** 1848357b40a1SHerbert Xu * skb_store_bits - store bits from kernel buffer to skb 1849357b40a1SHerbert Xu * @skb: destination buffer 1850357b40a1SHerbert Xu * @offset: offset in destination 1851357b40a1SHerbert Xu * @from: source buffer 1852357b40a1SHerbert Xu * @len: number of bytes to copy 1853357b40a1SHerbert Xu * 1854357b40a1SHerbert Xu * Copy the specified number of bytes from the source buffer to the 1855357b40a1SHerbert Xu * destination skb. This function handles all the messy bits of 1856357b40a1SHerbert Xu * traversing fragment lists and such. 1857357b40a1SHerbert Xu */ 1858357b40a1SHerbert Xu 18590c6fcc8aSStephen Hemminger int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 1860357b40a1SHerbert Xu { 18611a028e50SDavid S. 
Miller int start = skb_headlen(skb); 1862fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 1863fbb398a8SDavid S. Miller int i, copy; 1864357b40a1SHerbert Xu 1865357b40a1SHerbert Xu if (offset > (int)skb->len - len) 1866357b40a1SHerbert Xu goto fault; 1867357b40a1SHerbert Xu 18681a028e50SDavid S. Miller if ((copy = start - offset) > 0) { 1869357b40a1SHerbert Xu if (copy > len) 1870357b40a1SHerbert Xu copy = len; 187127d7ff46SArnaldo Carvalho de Melo skb_copy_to_linear_data_offset(skb, offset, from, copy); 1872357b40a1SHerbert Xu if ((len -= copy) == 0) 1873357b40a1SHerbert Xu return 0; 1874357b40a1SHerbert Xu offset += copy; 1875357b40a1SHerbert Xu from += copy; 1876357b40a1SHerbert Xu } 1877357b40a1SHerbert Xu 1878357b40a1SHerbert Xu for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1879357b40a1SHerbert Xu skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 18801a028e50SDavid S. Miller int end; 1881357b40a1SHerbert Xu 1882547b792cSIlpo Järvinen WARN_ON(start > offset + len); 18831a028e50SDavid S. Miller 18849e903e08SEric Dumazet end = start + skb_frag_size(frag); 1885357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 1886357b40a1SHerbert Xu u8 *vaddr; 1887357b40a1SHerbert Xu 1888357b40a1SHerbert Xu if (copy > len) 1889357b40a1SHerbert Xu copy = len; 1890357b40a1SHerbert Xu 189151c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(frag)); 18921a028e50SDavid S. Miller memcpy(vaddr + frag->page_offset + offset - start, 18931a028e50SDavid S. Miller from, copy); 189451c56b00SEric Dumazet kunmap_atomic(vaddr); 1895357b40a1SHerbert Xu 1896357b40a1SHerbert Xu if ((len -= copy) == 0) 1897357b40a1SHerbert Xu return 0; 1898357b40a1SHerbert Xu offset += copy; 1899357b40a1SHerbert Xu from += copy; 1900357b40a1SHerbert Xu } 19011a028e50SDavid S. Miller start = end; 1902357b40a1SHerbert Xu } 1903357b40a1SHerbert Xu 1904fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 19051a028e50SDavid S. Miller int end; 1906357b40a1SHerbert Xu 1907547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19081a028e50SDavid S. Miller 1909fbb398a8SDavid S. Miller end = start + frag_iter->len; 1910357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 1911357b40a1SHerbert Xu if (copy > len) 1912357b40a1SHerbert Xu copy = len; 1913fbb398a8SDavid S. Miller if (skb_store_bits(frag_iter, offset - start, 19141a028e50SDavid S. Miller from, copy)) 1915357b40a1SHerbert Xu goto fault; 1916357b40a1SHerbert Xu if ((len -= copy) == 0) 1917357b40a1SHerbert Xu return 0; 1918357b40a1SHerbert Xu offset += copy; 1919357b40a1SHerbert Xu from += copy; 1920357b40a1SHerbert Xu } 19211a028e50SDavid S. Miller start = end; 1922357b40a1SHerbert Xu } 1923357b40a1SHerbert Xu if (!len) 1924357b40a1SHerbert Xu return 0; 1925357b40a1SHerbert Xu 1926357b40a1SHerbert Xu fault: 1927357b40a1SHerbert Xu return -EFAULT; 1928357b40a1SHerbert Xu } 1929357b40a1SHerbert Xu EXPORT_SYMBOL(skb_store_bits); 1930357b40a1SHerbert Xu 19311da177e4SLinus Torvalds /* Checksum skb data. */ 19322817a336SDaniel Borkmann __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 19332817a336SDaniel Borkmann __wsum csum, const struct skb_checksum_ops *ops) 19341da177e4SLinus Torvalds { 19351a028e50SDavid S. Miller int start = skb_headlen(skb); 19361a028e50SDavid S. Miller int i, copy = start - offset; 1937fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 19381da177e4SLinus Torvalds int pos = 0; 19391da177e4SLinus Torvalds 19401da177e4SLinus Torvalds /* Checksum header. 
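 * (i.e. the linear part of the buffer; the paged frags and the
 * frag list are handled below)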
*/ 19411da177e4SLinus Torvalds if (copy > 0) { 19421da177e4SLinus Torvalds if (copy > len) 19431da177e4SLinus Torvalds copy = len; 19442817a336SDaniel Borkmann csum = ops->update(skb->data + offset, copy, csum); 19451da177e4SLinus Torvalds if ((len -= copy) == 0) 19461da177e4SLinus Torvalds return csum; 19471da177e4SLinus Torvalds offset += copy; 19481da177e4SLinus Torvalds pos = copy; 19491da177e4SLinus Torvalds } 19501da177e4SLinus Torvalds 19511da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 19521a028e50SDavid S. Miller int end; 195351c56b00SEric Dumazet skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 19541da177e4SLinus Torvalds 1955547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19561a028e50SDavid S. Miller 195751c56b00SEric Dumazet end = start + skb_frag_size(frag); 19581da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 195944bb9363SAl Viro __wsum csum2; 19601da177e4SLinus Torvalds u8 *vaddr; 19611da177e4SLinus Torvalds 19621da177e4SLinus Torvalds if (copy > len) 19631da177e4SLinus Torvalds copy = len; 196451c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(frag)); 19652817a336SDaniel Borkmann csum2 = ops->update(vaddr + frag->page_offset + 19661a028e50SDavid S. Miller offset - start, copy, 0); 196751c56b00SEric Dumazet kunmap_atomic(vaddr); 19682817a336SDaniel Borkmann csum = ops->combine(csum, csum2, pos, copy); 19691da177e4SLinus Torvalds if (!(len -= copy)) 19701da177e4SLinus Torvalds return csum; 19711da177e4SLinus Torvalds offset += copy; 19721da177e4SLinus Torvalds pos += copy; 19731da177e4SLinus Torvalds } 19741a028e50SDavid S. Miller start = end; 19751da177e4SLinus Torvalds } 19761da177e4SLinus Torvalds 1977fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 19781a028e50SDavid S. Miller int end; 19791da177e4SLinus Torvalds 1980547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19811a028e50SDavid S. Miller 1982fbb398a8SDavid S. Miller end = start + frag_iter->len; 19831da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 19845f92a738SAl Viro __wsum csum2; 19851da177e4SLinus Torvalds if (copy > len) 19861da177e4SLinus Torvalds copy = len; 19872817a336SDaniel Borkmann csum2 = __skb_checksum(frag_iter, offset - start, 19882817a336SDaniel Borkmann copy, 0, ops); 19892817a336SDaniel Borkmann csum = ops->combine(csum, csum2, pos, copy); 19901da177e4SLinus Torvalds if ((len -= copy) == 0) 19911da177e4SLinus Torvalds return csum; 19921da177e4SLinus Torvalds offset += copy; 19931da177e4SLinus Torvalds pos += copy; 19941da177e4SLinus Torvalds } 19951a028e50SDavid S. Miller start = end; 19961da177e4SLinus Torvalds } 199709a62660SKris Katterjohn BUG_ON(len); 19981da177e4SLinus Torvalds 19991da177e4SLinus Torvalds return csum; 20001da177e4SLinus Torvalds } 20012817a336SDaniel Borkmann EXPORT_SYMBOL(__skb_checksum); 20022817a336SDaniel Borkmann 20032817a336SDaniel Borkmann __wsum skb_checksum(const struct sk_buff *skb, int offset, 20042817a336SDaniel Borkmann int len, __wsum csum) 20052817a336SDaniel Borkmann { 20062817a336SDaniel Borkmann const struct skb_checksum_ops ops = { 2007cea80ea8SDaniel Borkmann .update = csum_partial_ext, 20082817a336SDaniel Borkmann .combine = csum_block_add_ext, 20092817a336SDaniel Borkmann }; 20102817a336SDaniel Borkmann 20112817a336SDaniel Borkmann return __skb_checksum(skb, offset, len, csum, &ops); 20122817a336SDaniel Borkmann } 2013b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_checksum); 20141da177e4SLinus Torvalds 20151da177e4SLinus Torvalds /* Both of above in one bottle. 
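 * That is, copy like skb_copy_bits() and checksum like skb_checksum()
 * in a single pass over the buffer.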
*/ 20161da177e4SLinus Torvalds 201781d77662SAl Viro __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 201881d77662SAl Viro u8 *to, int len, __wsum csum) 20191da177e4SLinus Torvalds { 20201a028e50SDavid S. Miller int start = skb_headlen(skb); 20211a028e50SDavid S. Miller int i, copy = start - offset; 2022fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 20231da177e4SLinus Torvalds int pos = 0; 20241da177e4SLinus Torvalds 20251da177e4SLinus Torvalds /* Copy header. */ 20261da177e4SLinus Torvalds if (copy > 0) { 20271da177e4SLinus Torvalds if (copy > len) 20281da177e4SLinus Torvalds copy = len; 20291da177e4SLinus Torvalds csum = csum_partial_copy_nocheck(skb->data + offset, to, 20301da177e4SLinus Torvalds copy, csum); 20311da177e4SLinus Torvalds if ((len -= copy) == 0) 20321da177e4SLinus Torvalds return csum; 20331da177e4SLinus Torvalds offset += copy; 20341da177e4SLinus Torvalds to += copy; 20351da177e4SLinus Torvalds pos = copy; 20361da177e4SLinus Torvalds } 20371da177e4SLinus Torvalds 20381da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 20391a028e50SDavid S. Miller int end; 20401da177e4SLinus Torvalds 2041547b792cSIlpo Järvinen WARN_ON(start > offset + len); 20421a028e50SDavid S. Miller 20439e903e08SEric Dumazet end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 20441da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 20455084205fSAl Viro __wsum csum2; 20461da177e4SLinus Torvalds u8 *vaddr; 20471da177e4SLinus Torvalds skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 20481da177e4SLinus Torvalds 20491da177e4SLinus Torvalds if (copy > len) 20501da177e4SLinus Torvalds copy = len; 205151c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(frag)); 20521da177e4SLinus Torvalds csum2 = csum_partial_copy_nocheck(vaddr + 20531a028e50SDavid S. Miller frag->page_offset + 20541a028e50SDavid S. Miller offset - start, to, 20551a028e50SDavid S. Miller copy, 0); 205651c56b00SEric Dumazet kunmap_atomic(vaddr); 20571da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 20581da177e4SLinus Torvalds if (!(len -= copy)) 20591da177e4SLinus Torvalds return csum; 20601da177e4SLinus Torvalds offset += copy; 20611da177e4SLinus Torvalds to += copy; 20621da177e4SLinus Torvalds pos += copy; 20631da177e4SLinus Torvalds } 20641a028e50SDavid S. Miller start = end; 20651da177e4SLinus Torvalds } 20661da177e4SLinus Torvalds 2067fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 206881d77662SAl Viro __wsum csum2; 20691a028e50SDavid S. Miller int end; 20701da177e4SLinus Torvalds 2071547b792cSIlpo Järvinen WARN_ON(start > offset + len); 20721a028e50SDavid S. Miller 2073fbb398a8SDavid S. Miller end = start + frag_iter->len; 20741da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 20751da177e4SLinus Torvalds if (copy > len) 20761da177e4SLinus Torvalds copy = len; 2077fbb398a8SDavid S. Miller csum2 = skb_copy_and_csum_bits(frag_iter, 20781a028e50SDavid S. Miller offset - start, 20791da177e4SLinus Torvalds to, copy, 0); 20801da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 20811da177e4SLinus Torvalds if ((len -= copy) == 0) 20821da177e4SLinus Torvalds return csum; 20831da177e4SLinus Torvalds offset += copy; 20841da177e4SLinus Torvalds to += copy; 20851da177e4SLinus Torvalds pos += copy; 20861da177e4SLinus Torvalds } 20871a028e50SDavid S. Miller start = end; 20881da177e4SLinus Torvalds } 208909a62660SKris Katterjohn BUG_ON(len); 20901da177e4SLinus Torvalds return csum; 20911da177e4SLinus Torvalds } 2092b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_copy_and_csum_bits); 20931da177e4SLinus Torvalds 2094af2806f8SThomas Graf /** 2095af2806f8SThomas Graf * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 2096af2806f8SThomas Graf * @from: source buffer 2097af2806f8SThomas Graf * 2098af2806f8SThomas Graf * Calculates the amount of linear headroom needed in the 'to' skb passed 2099af2806f8SThomas Graf * into skb_zerocopy(). 2100af2806f8SThomas Graf */ 2101af2806f8SThomas Graf unsigned int 2102af2806f8SThomas Graf skb_zerocopy_headlen(const struct sk_buff *from) 2103af2806f8SThomas Graf { 2104af2806f8SThomas Graf unsigned int hlen = 0; 2105af2806f8SThomas Graf 2106af2806f8SThomas Graf if (!from->head_frag || 2107af2806f8SThomas Graf skb_headlen(from) < L1_CACHE_BYTES || 2108af2806f8SThomas Graf skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 2109af2806f8SThomas Graf hlen = skb_headlen(from); 2110af2806f8SThomas Graf 2111af2806f8SThomas Graf if (skb_has_frag_list(from)) 2112af2806f8SThomas Graf hlen = from->len; 2113af2806f8SThomas Graf 2114af2806f8SThomas Graf return hlen; 2115af2806f8SThomas Graf } 2116af2806f8SThomas Graf EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 2117af2806f8SThomas Graf 2118af2806f8SThomas Graf /** 2119af2806f8SThomas Graf * skb_zerocopy - Zero copy skb to skb 2120af2806f8SThomas Graf * @to: destination buffer 21217fceb4deSMasanari Iida * @from: source buffer 2122af2806f8SThomas Graf * @len: number of bytes to copy from source buffer 2123af2806f8SThomas Graf * @hlen: size of linear headroom in destination buffer 2124af2806f8SThomas Graf * 2125af2806f8SThomas Graf * Copies up to `len` bytes from `from` to `to` by creating references 2126af2806f8SThomas Graf * to the frags in the source buffer. 2127af2806f8SThomas Graf * 2128af2806f8SThomas Graf * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 2129af2806f8SThomas Graf * headroom in the `to` buffer. 
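 *
 * Illustrative pairing with skb_zerocopy_headlen() (a sketch added for
 * clarity; the plain alloc_skb() sizing is an assumption, real callers
 * may reserve extra space):
 *
 *	hlen = skb_zerocopy_headlen(from);
 *	to = alloc_skb(hlen, GFP_ATOMIC);
 *	if (to)
 *		err = skb_zerocopy(to, from, from->len, hlen);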
213036d5fe6aSZoltan Kiss * 213136d5fe6aSZoltan Kiss * Return value: 213236d5fe6aSZoltan Kiss * 0: everything is OK 213336d5fe6aSZoltan Kiss * -ENOMEM: couldn't orphan frags of @from due to lack of memory 213436d5fe6aSZoltan Kiss * -EFAULT: skb_copy_bits() found some problem with skb geometry 2135af2806f8SThomas Graf */ 213636d5fe6aSZoltan Kiss int 213736d5fe6aSZoltan Kiss skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 2138af2806f8SThomas Graf { 2139af2806f8SThomas Graf int i, j = 0; 2140af2806f8SThomas Graf int plen = 0; /* length of skb->head fragment */ 214136d5fe6aSZoltan Kiss int ret; 2142af2806f8SThomas Graf struct page *page; 2143af2806f8SThomas Graf unsigned int offset; 2144af2806f8SThomas Graf 2145af2806f8SThomas Graf BUG_ON(!from->head_frag && !hlen); 2146af2806f8SThomas Graf 2147af2806f8SThomas Graf /* dont bother with small payloads */ 214836d5fe6aSZoltan Kiss if (len <= skb_tailroom(to)) 214936d5fe6aSZoltan Kiss return skb_copy_bits(from, 0, skb_put(to, len), len); 2150af2806f8SThomas Graf 2151af2806f8SThomas Graf if (hlen) { 215236d5fe6aSZoltan Kiss ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 215336d5fe6aSZoltan Kiss if (unlikely(ret)) 215436d5fe6aSZoltan Kiss return ret; 2155af2806f8SThomas Graf len -= hlen; 2156af2806f8SThomas Graf } else { 2157af2806f8SThomas Graf plen = min_t(int, skb_headlen(from), len); 2158af2806f8SThomas Graf if (plen) { 2159af2806f8SThomas Graf page = virt_to_head_page(from->head); 2160af2806f8SThomas Graf offset = from->data - (unsigned char *)page_address(page); 2161af2806f8SThomas Graf __skb_fill_page_desc(to, 0, page, offset, plen); 2162af2806f8SThomas Graf get_page(page); 2163af2806f8SThomas Graf j = 1; 2164af2806f8SThomas Graf len -= plen; 2165af2806f8SThomas Graf } 2166af2806f8SThomas Graf } 2167af2806f8SThomas Graf 2168af2806f8SThomas Graf to->truesize += len + plen; 2169af2806f8SThomas Graf to->len += len + plen; 2170af2806f8SThomas Graf to->data_len += len + plen; 2171af2806f8SThomas Graf 217236d5fe6aSZoltan Kiss if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 217336d5fe6aSZoltan Kiss skb_tx_error(from); 217436d5fe6aSZoltan Kiss return -ENOMEM; 217536d5fe6aSZoltan Kiss } 217636d5fe6aSZoltan Kiss 2177af2806f8SThomas Graf for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 2178af2806f8SThomas Graf if (!len) 2179af2806f8SThomas Graf break; 2180af2806f8SThomas Graf skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 2181af2806f8SThomas Graf skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len); 2182af2806f8SThomas Graf len -= skb_shinfo(to)->frags[j].size; 2183af2806f8SThomas Graf skb_frag_ref(to, j); 2184af2806f8SThomas Graf j++; 2185af2806f8SThomas Graf } 2186af2806f8SThomas Graf skb_shinfo(to)->nr_frags = j; 218736d5fe6aSZoltan Kiss 218836d5fe6aSZoltan Kiss return 0; 2189af2806f8SThomas Graf } 2190af2806f8SThomas Graf EXPORT_SYMBOL_GPL(skb_zerocopy); 2191af2806f8SThomas Graf 21921da177e4SLinus Torvalds void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 21931da177e4SLinus Torvalds { 2194d3bc23e7SAl Viro __wsum csum; 21951da177e4SLinus Torvalds long csstart; 21961da177e4SLinus Torvalds 219784fa7933SPatrick McHardy if (skb->ip_summed == CHECKSUM_PARTIAL) 219855508d60SMichał Mirosław csstart = skb_checksum_start_offset(skb); 21991da177e4SLinus Torvalds else 22001da177e4SLinus Torvalds csstart = skb_headlen(skb); 22011da177e4SLinus Torvalds 220209a62660SKris Katterjohn BUG_ON(csstart > skb_headlen(skb)); 22031da177e4SLinus Torvalds 2204d626f62bSArnaldo Carvalho de Melo 
skb_copy_from_linear_data(skb, to, csstart); 22051da177e4SLinus Torvalds 22061da177e4SLinus Torvalds csum = 0; 22071da177e4SLinus Torvalds if (csstart != skb->len) 22081da177e4SLinus Torvalds csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 22091da177e4SLinus Torvalds skb->len - csstart, 0); 22101da177e4SLinus Torvalds 221184fa7933SPatrick McHardy if (skb->ip_summed == CHECKSUM_PARTIAL) { 2212ff1dcadbSAl Viro long csstuff = csstart + skb->csum_offset; 22131da177e4SLinus Torvalds 2214d3bc23e7SAl Viro *((__sum16 *)(to + csstuff)) = csum_fold(csum); 22151da177e4SLinus Torvalds } 22161da177e4SLinus Torvalds } 2217b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_dev); 22181da177e4SLinus Torvalds 22191da177e4SLinus Torvalds /** 22201da177e4SLinus Torvalds * skb_dequeue - remove from the head of the queue 22211da177e4SLinus Torvalds * @list: list to dequeue from 22221da177e4SLinus Torvalds * 22231da177e4SLinus Torvalds * Remove the head of the list. The list lock is taken so the function 22241da177e4SLinus Torvalds * may be used safely with other locking list functions. The head item is 22251da177e4SLinus Torvalds * returned or %NULL if the list is empty. 22261da177e4SLinus Torvalds */ 22271da177e4SLinus Torvalds 22281da177e4SLinus Torvalds struct sk_buff *skb_dequeue(struct sk_buff_head *list) 22291da177e4SLinus Torvalds { 22301da177e4SLinus Torvalds unsigned long flags; 22311da177e4SLinus Torvalds struct sk_buff *result; 22321da177e4SLinus Torvalds 22331da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 22341da177e4SLinus Torvalds result = __skb_dequeue(list); 22351da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 22361da177e4SLinus Torvalds return result; 22371da177e4SLinus Torvalds } 2238b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue); 22391da177e4SLinus Torvalds 22401da177e4SLinus Torvalds /** 22411da177e4SLinus Torvalds * skb_dequeue_tail - remove from the tail of the queue 22421da177e4SLinus Torvalds * @list: list to dequeue from 22431da177e4SLinus Torvalds * 22441da177e4SLinus Torvalds * Remove the tail of the list. The list lock is taken so the function 22451da177e4SLinus Torvalds * may be used safely with other locking list functions. The tail item is 22461da177e4SLinus Torvalds * returned or %NULL if the list is empty. 22471da177e4SLinus Torvalds */ 22481da177e4SLinus Torvalds struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 22491da177e4SLinus Torvalds { 22501da177e4SLinus Torvalds unsigned long flags; 22511da177e4SLinus Torvalds struct sk_buff *result; 22521da177e4SLinus Torvalds 22531da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 22541da177e4SLinus Torvalds result = __skb_dequeue_tail(list); 22551da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 22561da177e4SLinus Torvalds return result; 22571da177e4SLinus Torvalds } 2258b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue_tail); 22591da177e4SLinus Torvalds 22601da177e4SLinus Torvalds /** 22611da177e4SLinus Torvalds * skb_queue_purge - empty a list 22621da177e4SLinus Torvalds * @list: list to empty 22631da177e4SLinus Torvalds * 22641da177e4SLinus Torvalds * Delete all buffers on an &sk_buff list. Each buffer is removed from 22651da177e4SLinus Torvalds * the list and one reference dropped. This function takes the list 22661da177e4SLinus Torvalds * lock and is atomic with respect to other list locking functions. 
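 *
 * A typical use (illustrative) is flushing a socket's receive queue on
 * teardown:
 *
 *	skb_queue_purge(&sk->sk_receive_queue);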
22671da177e4SLinus Torvalds */ 22681da177e4SLinus Torvalds void skb_queue_purge(struct sk_buff_head *list) 22691da177e4SLinus Torvalds { 22701da177e4SLinus Torvalds struct sk_buff *skb; 22711da177e4SLinus Torvalds while ((skb = skb_dequeue(list)) != NULL) 22721da177e4SLinus Torvalds kfree_skb(skb); 22731da177e4SLinus Torvalds } 2274b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_purge); 22751da177e4SLinus Torvalds 22761da177e4SLinus Torvalds /** 22771da177e4SLinus Torvalds * skb_queue_head - queue a buffer at the list head 22781da177e4SLinus Torvalds * @list: list to use 22791da177e4SLinus Torvalds * @newsk: buffer to queue 22801da177e4SLinus Torvalds * 22811da177e4SLinus Torvalds * Queue a buffer at the start of the list. This function takes the 22821da177e4SLinus Torvalds * list lock and can be used safely with other locking &sk_buff 22831da177e4SLinus Torvalds * functions. 22841da177e4SLinus Torvalds * 22851da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 22861da177e4SLinus Torvalds */ 22871da177e4SLinus Torvalds void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 22881da177e4SLinus Torvalds { 22891da177e4SLinus Torvalds unsigned long flags; 22901da177e4SLinus Torvalds 22911da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 22921da177e4SLinus Torvalds __skb_queue_head(list, newsk); 22931da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 22941da177e4SLinus Torvalds } 2295b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_head); 22961da177e4SLinus Torvalds 22971da177e4SLinus Torvalds /** 22981da177e4SLinus Torvalds * skb_queue_tail - queue a buffer at the list tail 22991da177e4SLinus Torvalds * @list: list to use 23001da177e4SLinus Torvalds * @newsk: buffer to queue 23011da177e4SLinus Torvalds * 23021da177e4SLinus Torvalds * Queue a buffer at the tail of the list. This function takes the 23031da177e4SLinus Torvalds * list lock and can be used safely with other locking &sk_buff 23041da177e4SLinus Torvalds * functions. 23051da177e4SLinus Torvalds * 23061da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 23071da177e4SLinus Torvalds */ 23081da177e4SLinus Torvalds void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 23091da177e4SLinus Torvalds { 23101da177e4SLinus Torvalds unsigned long flags; 23111da177e4SLinus Torvalds 23121da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 23131da177e4SLinus Torvalds __skb_queue_tail(list, newsk); 23141da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 23151da177e4SLinus Torvalds } 2316b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_tail); 23178728b834SDavid S. Miller 23181da177e4SLinus Torvalds /** 23191da177e4SLinus Torvalds * skb_unlink - remove a buffer from a list 23201da177e4SLinus Torvalds * @skb: buffer to remove 23218728b834SDavid S. Miller * @list: list to use 23221da177e4SLinus Torvalds * 23238728b834SDavid S. Miller * Remove a packet from a list. The list locks are taken and this 23248728b834SDavid S. Miller * function is atomic with respect to other locked list calls. 23251da177e4SLinus Torvalds * 23268728b834SDavid S. Miller * You must know what list the SKB is on. 23271da177e4SLinus Torvalds */ 23288728b834SDavid S. Miller void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 23291da177e4SLinus Torvalds { 23301da177e4SLinus Torvalds unsigned long flags; 23311da177e4SLinus Torvalds 23321da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 23338728b834SDavid S.
Miller __skb_unlink(skb, list); 23341da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 23351da177e4SLinus Torvalds } 2336b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_unlink); 23371da177e4SLinus Torvalds 23381da177e4SLinus Torvalds /** 23391da177e4SLinus Torvalds * skb_append - append a buffer 23401da177e4SLinus Torvalds * @old: buffer to insert after 23411da177e4SLinus Torvalds * @newsk: buffer to insert 23428728b834SDavid S. Miller * @list: list to use 23431da177e4SLinus Torvalds * 23441da177e4SLinus Torvalds * Place a packet after a given packet in a list. The list locks are taken 23451da177e4SLinus Torvalds * and this function is atomic with respect to other list locked calls. 23461da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 23471da177e4SLinus Torvalds */ 23488728b834SDavid S. Miller void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 23491da177e4SLinus Torvalds { 23501da177e4SLinus Torvalds unsigned long flags; 23511da177e4SLinus Torvalds 23528728b834SDavid S. Miller spin_lock_irqsave(&list->lock, flags); 23537de6c033SGerrit Renker __skb_queue_after(list, old, newsk); 23548728b834SDavid S. Miller spin_unlock_irqrestore(&list->lock, flags); 23551da177e4SLinus Torvalds } 2356b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append); 23571da177e4SLinus Torvalds 23581da177e4SLinus Torvalds /** 23591da177e4SLinus Torvalds * skb_insert - insert a buffer 23601da177e4SLinus Torvalds * @old: buffer to insert before 23611da177e4SLinus Torvalds * @newsk: buffer to insert 23628728b834SDavid S. Miller * @list: list to use 23631da177e4SLinus Torvalds * 23648728b834SDavid S. Miller * Place a packet before a given packet in a list. The list locks are 23658728b834SDavid S. Miller * taken and this function is atomic with respect to other list locked 23668728b834SDavid S. Miller * calls. 23678728b834SDavid S. Miller * 23681da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 23691da177e4SLinus Torvalds */ 23708728b834SDavid S. Miller void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 23711da177e4SLinus Torvalds { 23721da177e4SLinus Torvalds unsigned long flags; 23731da177e4SLinus Torvalds 23748728b834SDavid S. Miller spin_lock_irqsave(&list->lock, flags); 23758728b834SDavid S. Miller __skb_insert(newsk, old->prev, old, list); 23768728b834SDavid S. Miller spin_unlock_irqrestore(&list->lock, flags); 23771da177e4SLinus Torvalds } 2378b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_insert); 23791da177e4SLinus Torvalds 23801da177e4SLinus Torvalds static inline void skb_split_inside_header(struct sk_buff *skb, 23811da177e4SLinus Torvalds struct sk_buff* skb1, 23821da177e4SLinus Torvalds const u32 len, const int pos) 23831da177e4SLinus Torvalds { 23841da177e4SLinus Torvalds int i; 23851da177e4SLinus Torvalds 2386d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 2387d626f62bSArnaldo Carvalho de Melo pos - len); 23881da177e4SLinus Torvalds /* And move data appendix as is. 
*/ 23891da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 23901da177e4SLinus Torvalds skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 23911da177e4SLinus Torvalds 23921da177e4SLinus Torvalds skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 23931da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = 0; 23941da177e4SLinus Torvalds skb1->data_len = skb->data_len; 23951da177e4SLinus Torvalds skb1->len += skb1->data_len; 23961da177e4SLinus Torvalds skb->data_len = 0; 23971da177e4SLinus Torvalds skb->len = len; 239827a884dcSArnaldo Carvalho de Melo skb_set_tail_pointer(skb, len); 23991da177e4SLinus Torvalds } 24001da177e4SLinus Torvalds 24011da177e4SLinus Torvalds static inline void skb_split_no_header(struct sk_buff *skb, 24021da177e4SLinus Torvalds struct sk_buff* skb1, 24031da177e4SLinus Torvalds const u32 len, int pos) 24041da177e4SLinus Torvalds { 24051da177e4SLinus Torvalds int i, k = 0; 24061da177e4SLinus Torvalds const int nfrags = skb_shinfo(skb)->nr_frags; 24071da177e4SLinus Torvalds 24081da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = 0; 24091da177e4SLinus Torvalds skb1->len = skb1->data_len = skb->len - len; 24101da177e4SLinus Torvalds skb->len = len; 24111da177e4SLinus Torvalds skb->data_len = len - pos; 24121da177e4SLinus Torvalds 24131da177e4SLinus Torvalds for (i = 0; i < nfrags; i++) { 24149e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 24151da177e4SLinus Torvalds 24161da177e4SLinus Torvalds if (pos + size > len) { 24171da177e4SLinus Torvalds skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 24181da177e4SLinus Torvalds 24191da177e4SLinus Torvalds if (pos < len) { 24201da177e4SLinus Torvalds /* Split frag. 24211da177e4SLinus Torvalds * We have two variants in this case: 24221da177e4SLinus Torvalds * 1. Move the whole frag to the second 24231da177e4SLinus Torvalds * part, if it is possible. E.g. 24241da177e4SLinus Torvalds * this approach is mandatory for TUX, 24251da177e4SLinus Torvalds * where splitting is expensive. 24261da177e4SLinus Torvalds * 2. Split accurately. This is what we do here. 24271da177e4SLinus Torvalds */ 2428ea2ab693SIan Campbell skb_frag_ref(skb, i); 24291da177e4SLinus Torvalds skb_shinfo(skb1)->frags[0].page_offset += len - pos; 24309e903e08SEric Dumazet skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 24319e903e08SEric Dumazet skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 24321da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags++; 24331da177e4SLinus Torvalds } 24341da177e4SLinus Torvalds k++; 24351da177e4SLinus Torvalds } else 24361da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags++; 24371da177e4SLinus Torvalds pos += size; 24381da177e4SLinus Torvalds } 24391da177e4SLinus Torvalds skb_shinfo(skb1)->nr_frags = k; 24401da177e4SLinus Torvalds } 24411da177e4SLinus Torvalds 24421da177e4SLinus Torvalds /** 24431da177e4SLinus Torvalds * skb_split - Split fragmented skb into two parts at length len.
24441da177e4SLinus Torvalds * @skb: the buffer to split 24451da177e4SLinus Torvalds * @skb1: the buffer to receive the second part 24461da177e4SLinus Torvalds * @len: new length for skb 24471da177e4SLinus Torvalds */ 24481da177e4SLinus Torvalds void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 24491da177e4SLinus Torvalds { 24501da177e4SLinus Torvalds int pos = skb_headlen(skb); 24511da177e4SLinus Torvalds 245268534c68SAmerigo Wang skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; 24531da177e4SLinus Torvalds if (len < pos) /* Split line is inside header. */ 24541da177e4SLinus Torvalds skb_split_inside_header(skb, skb1, len, pos); 24551da177e4SLinus Torvalds else /* Second chunk has no header, nothing to copy. */ 24561da177e4SLinus Torvalds skb_split_no_header(skb, skb1, len, pos); 24571da177e4SLinus Torvalds } 2458b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_split); 24591da177e4SLinus Torvalds
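/*
 * Usage sketch for skb_split() (hypothetical helper, not from this file):
 * carve everything past @mss off into a fresh skb.  The second buffer is
 * sized so the inside-header case has enough linear room; real callers
 * (e.g. TCP) also reserve headroom and rebuild the tail's headers.
 */
static struct sk_buff *example_split_at(struct sk_buff *skb, u32 mss)
{
	struct sk_buff *tail;

	if (skb->len <= mss)
		return NULL;
	tail = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
	if (!tail)
		return NULL;
	skb_split(skb, tail, mss);	/* skb keeps the first mss bytes */
	return tail;
}
24609f782db3SIlpo Järvinen /* Shifting from/to a cloned skb is a no-go. 24619f782db3SIlpo Järvinen * 24629f782db3SIlpo Järvinen * Caller cannot keep skb_shinfo related pointers past calling here! 24639f782db3SIlpo Järvinen */ 2464832d11c5SIlpo Järvinen static int skb_prepare_for_shift(struct sk_buff *skb) 2465832d11c5SIlpo Järvinen { 24660ace2856SIlpo Järvinen return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2467832d11c5SIlpo Järvinen } 2468832d11c5SIlpo Järvinen 2469832d11c5SIlpo Järvinen /** 2470832d11c5SIlpo Järvinen * skb_shift - Shifts paged data partially from skb to another 2471832d11c5SIlpo Järvinen * @tgt: buffer into which tail data gets added 2472832d11c5SIlpo Järvinen * @skb: buffer from which the paged data comes from 2473832d11c5SIlpo Järvinen * @shiftlen: shift up to this many bytes 2474832d11c5SIlpo Järvinen * 2475832d11c5SIlpo Järvinen * Attempts to shift up to shiftlen worth of bytes, which may be less than 247620e994a0SFeng King * the length of the skb, from skb to tgt. Returns the number of bytes shifted. 2477832d11c5SIlpo Järvinen * It's up to the caller to free skb if everything was shifted. 2478832d11c5SIlpo Järvinen * 2479832d11c5SIlpo Järvinen * If @tgt runs out of frags, the whole operation is aborted. 2480832d11c5SIlpo Järvinen * 2481832d11c5SIlpo Järvinen * Skb cannot include anything else but paged data while tgt is allowed 2482832d11c5SIlpo Järvinen * to have non-paged data as well. 2483832d11c5SIlpo Järvinen * 2484832d11c5SIlpo Järvinen * TODO: full sized shift could be optimized but that would need 2485832d11c5SIlpo Järvinen * specialized skb free'er to handle frags without up-to-date nr_frags.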
2486832d11c5SIlpo Järvinen */ 2487832d11c5SIlpo Järvinen int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 2488832d11c5SIlpo Järvinen { 2489832d11c5SIlpo Järvinen int from, to, merge, todo; 2490832d11c5SIlpo Järvinen struct skb_frag_struct *fragfrom, *fragto; 2491832d11c5SIlpo Järvinen 2492832d11c5SIlpo Järvinen BUG_ON(shiftlen > skb->len); 2493832d11c5SIlpo Järvinen BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ 2494832d11c5SIlpo Järvinen 2495832d11c5SIlpo Järvinen todo = shiftlen; 2496832d11c5SIlpo Järvinen from = 0; 2497832d11c5SIlpo Järvinen to = skb_shinfo(tgt)->nr_frags; 2498832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2499832d11c5SIlpo Järvinen 2500832d11c5SIlpo Järvinen /* Actual merge is delayed until the point when we know we can 2501832d11c5SIlpo Järvinen * commit all, so that we don't have to undo partial changes 2502832d11c5SIlpo Järvinen */ 2503832d11c5SIlpo Järvinen if (!to || 2504ea2ab693SIan Campbell !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 2505ea2ab693SIan Campbell fragfrom->page_offset)) { 2506832d11c5SIlpo Järvinen merge = -1; 2507832d11c5SIlpo Järvinen } else { 2508832d11c5SIlpo Järvinen merge = to - 1; 2509832d11c5SIlpo Järvinen 25109e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 2511832d11c5SIlpo Järvinen if (todo < 0) { 2512832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || 2513832d11c5SIlpo Järvinen skb_prepare_for_shift(tgt)) 2514832d11c5SIlpo Järvinen return 0; 2515832d11c5SIlpo Järvinen 25169f782db3SIlpo Järvinen /* All previous frag pointers might be stale! */ 25179f782db3SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2518832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 2519832d11c5SIlpo Järvinen 25209e903e08SEric Dumazet skb_frag_size_add(fragto, shiftlen); 25219e903e08SEric Dumazet skb_frag_size_sub(fragfrom, shiftlen); 2522832d11c5SIlpo Järvinen fragfrom->page_offset += shiftlen; 2523832d11c5SIlpo Järvinen 2524832d11c5SIlpo Järvinen goto onlymerged; 2525832d11c5SIlpo Järvinen } 2526832d11c5SIlpo Järvinen 2527832d11c5SIlpo Järvinen from++; 2528832d11c5SIlpo Järvinen } 2529832d11c5SIlpo Järvinen 2530832d11c5SIlpo Järvinen /* Skip full, not-fitting skb to avoid expensive operations */ 2531832d11c5SIlpo Järvinen if ((shiftlen == skb->len) && 2532832d11c5SIlpo Järvinen (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2533832d11c5SIlpo Järvinen return 0; 2534832d11c5SIlpo Järvinen 2535832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2536832d11c5SIlpo Järvinen return 0; 2537832d11c5SIlpo Järvinen 2538832d11c5SIlpo Järvinen while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2539832d11c5SIlpo Järvinen if (to == MAX_SKB_FRAGS) 2540832d11c5SIlpo Järvinen return 0; 2541832d11c5SIlpo Järvinen 2542832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2543832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[to]; 2544832d11c5SIlpo Järvinen 25459e903e08SEric Dumazet if (todo >= skb_frag_size(fragfrom)) { 2546832d11c5SIlpo Järvinen *fragto = *fragfrom; 25479e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 2548832d11c5SIlpo Järvinen from++; 2549832d11c5SIlpo Järvinen to++; 2550832d11c5SIlpo Järvinen 2551832d11c5SIlpo Järvinen } else { 2552ea2ab693SIan Campbell __skb_frag_ref(fragfrom); 2553832d11c5SIlpo Järvinen fragto->page = fragfrom->page; 2554832d11c5SIlpo Järvinen fragto->page_offset = fragfrom->page_offset; 25559e903e08SEric Dumazet skb_frag_size_set(fragto, todo); 2556832d11c5SIlpo Järvinen 
2557832d11c5SIlpo Järvinen fragfrom->page_offset += todo; 25589e903e08SEric Dumazet skb_frag_size_sub(fragfrom, todo); 2559832d11c5SIlpo Järvinen todo = 0; 2560832d11c5SIlpo Järvinen 2561832d11c5SIlpo Järvinen to++; 2562832d11c5SIlpo Järvinen break; 2563832d11c5SIlpo Järvinen } 2564832d11c5SIlpo Järvinen } 2565832d11c5SIlpo Järvinen 2566832d11c5SIlpo Järvinen /* Ready to "commit" this state change to tgt */ 2567832d11c5SIlpo Järvinen skb_shinfo(tgt)->nr_frags = to; 2568832d11c5SIlpo Järvinen 2569832d11c5SIlpo Järvinen if (merge >= 0) { 2570832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[0]; 2571832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 2572832d11c5SIlpo Järvinen 25739e903e08SEric Dumazet skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 2574ea2ab693SIan Campbell __skb_frag_unref(fragfrom); 2575832d11c5SIlpo Järvinen } 2576832d11c5SIlpo Järvinen 2577832d11c5SIlpo Järvinen /* Reposition in the original skb */ 2578832d11c5SIlpo Järvinen to = 0; 2579832d11c5SIlpo Järvinen while (from < skb_shinfo(skb)->nr_frags) 2580832d11c5SIlpo Järvinen skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2581832d11c5SIlpo Järvinen skb_shinfo(skb)->nr_frags = to; 2582832d11c5SIlpo Järvinen 2583832d11c5SIlpo Järvinen BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2584832d11c5SIlpo Järvinen 2585832d11c5SIlpo Järvinen onlymerged: 2586832d11c5SIlpo Järvinen /* Most likely the tgt won't ever need its checksum anymore; the skb, 2587832d11c5SIlpo Järvinen * on the other hand, might need it if it has to be resent. 2588832d11c5SIlpo Järvinen */ 2589832d11c5SIlpo Järvinen tgt->ip_summed = CHECKSUM_PARTIAL; 2590832d11c5SIlpo Järvinen skb->ip_summed = CHECKSUM_PARTIAL; 2591832d11c5SIlpo Järvinen 2592832d11c5SIlpo Järvinen /* Yak, is it really working this way? Some helper please? */ 2593832d11c5SIlpo Järvinen skb->len -= shiftlen; 2594832d11c5SIlpo Järvinen skb->data_len -= shiftlen; 2595832d11c5SIlpo Järvinen skb->truesize -= shiftlen; 2596832d11c5SIlpo Järvinen tgt->len += shiftlen; 2597832d11c5SIlpo Järvinen tgt->data_len += shiftlen; 2598832d11c5SIlpo Järvinen tgt->truesize += shiftlen; 2599832d11c5SIlpo Järvinen 2600832d11c5SIlpo Järvinen return shiftlen; 2601832d11c5SIlpo Järvinen } 2602832d11c5SIlpo Järvinen
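/*
 * Usage sketch for skb_shift() (hypothetical helper, in the spirit of the
 * TCP collapse code): try to move the whole paged payload of @extra into
 * @head.  @extra is assumed to carry paged data only (skb_headlen() == 0),
 * as the BUG_ON above requires.
 */
static bool example_absorb(struct sk_buff *head, struct sk_buff *extra)
{
	int want = extra->len;

	if (skb_shift(head, extra, want) < want)
		return false;	/* partial shift; @extra keeps the rest */
	consume_skb(extra);	/* fully shifted, safe to free it now */
	return true;
}
2603677e90edSThomas Graf /** 2604677e90edSThomas Graf * skb_prepare_seq_read - Prepare a sequential read of skb data 2605677e90edSThomas Graf * @skb: the buffer to read 2606677e90edSThomas Graf * @from: lower offset of data to be read 2607677e90edSThomas Graf * @to: upper offset of data to be read 2608677e90edSThomas Graf * @st: state variable 2609677e90edSThomas Graf * 2610677e90edSThomas Graf * Initializes the specified state variable. Must be called before 2611677e90edSThomas Graf * invoking skb_seq_read() for the first time. 2612677e90edSThomas Graf */ 2613677e90edSThomas Graf void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 2614677e90edSThomas Graf unsigned int to, struct skb_seq_state *st) 2615677e90edSThomas Graf { 2616677e90edSThomas Graf st->lower_offset = from; 2617677e90edSThomas Graf st->upper_offset = to; 2618677e90edSThomas Graf st->root_skb = st->cur_skb = skb; 2619677e90edSThomas Graf st->frag_idx = st->stepped_offset = 0; 2620677e90edSThomas Graf st->frag_data = NULL; 2621677e90edSThomas Graf } 2622b4ac530fSDavid S.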
Miller EXPORT_SYMBOL(skb_prepare_seq_read); 2623677e90edSThomas Graf 2624677e90edSThomas Graf /** 2625677e90edSThomas Graf * skb_seq_read - Sequentially read skb data 2626677e90edSThomas Graf * @consumed: number of bytes consumed by the caller so far 2627677e90edSThomas Graf * @data: destination pointer for data to be returned 2628677e90edSThomas Graf * @st: state variable 2629677e90edSThomas Graf * 2630bc32383cSMathias Krause * Reads a block of skb data at @consumed relative to the 2631677e90edSThomas Graf * lower offset specified to skb_prepare_seq_read(). Assigns 2632bc32383cSMathias Krause * the head of the data block to @data and returns the length 2633677e90edSThomas Graf * of the block or 0 if the end of the skb data or the upper 2634677e90edSThomas Graf * offset has been reached. 2635677e90edSThomas Graf * 2636677e90edSThomas Graf * The caller is not required to consume all of the data 2637bc32383cSMathias Krause * returned, i.e. @consumed is typically set to the number 2638677e90edSThomas Graf * of bytes already consumed and the next call to 2639677e90edSThomas Graf * skb_seq_read() will return the remaining part of the block. 2640677e90edSThomas Graf * 264125985edcSLucas De Marchi * Note 1: The size of each block of data returned can be arbitrary; 2642677e90edSThomas Graf * this limitation is the cost of zerocopy sequential 2643677e90edSThomas Graf * reads of potentially non-linear data. 2644677e90edSThomas Graf * 2645bc2cda1eSRandy Dunlap * Note 2: Fragment lists within fragments are not implemented 2646677e90edSThomas Graf * at the moment; state->root_skb could be replaced with 2647677e90edSThomas Graf * a stack for this purpose. 2648677e90edSThomas Graf */ 2649677e90edSThomas Graf unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 2650677e90edSThomas Graf struct skb_seq_state *st) 2651677e90edSThomas Graf { 2652677e90edSThomas Graf unsigned int block_limit, abs_offset = consumed + st->lower_offset; 2653677e90edSThomas Graf skb_frag_t *frag; 2654677e90edSThomas Graf 2655aeb193eaSWedson Almeida Filho if (unlikely(abs_offset >= st->upper_offset)) { 2656aeb193eaSWedson Almeida Filho if (st->frag_data) { 2657aeb193eaSWedson Almeida Filho kunmap_atomic(st->frag_data); 2658aeb193eaSWedson Almeida Filho st->frag_data = NULL; 2659aeb193eaSWedson Almeida Filho } 2660677e90edSThomas Graf return 0; 2661aeb193eaSWedson Almeida Filho } 2662677e90edSThomas Graf 2663677e90edSThomas Graf next_skb: 266495e3b24cSHerbert Xu block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 2665677e90edSThomas Graf 2666995b3379SThomas Chenault if (abs_offset < block_limit && !st->frag_data) { 266795e3b24cSHerbert Xu *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 2668677e90edSThomas Graf return block_limit - abs_offset; 2669677e90edSThomas Graf } 2670677e90edSThomas Graf 2671677e90edSThomas Graf if (st->frag_idx == 0 && !st->frag_data) 2672677e90edSThomas Graf st->stepped_offset += skb_headlen(st->cur_skb); 2673677e90edSThomas Graf 2674677e90edSThomas Graf while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2675677e90edSThomas Graf frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 26769e903e08SEric Dumazet block_limit = skb_frag_size(frag) + st->stepped_offset; 2677677e90edSThomas Graf 2678677e90edSThomas Graf if (abs_offset < block_limit) { 2679677e90edSThomas Graf if (!st->frag_data) 268051c56b00SEric Dumazet st->frag_data = kmap_atomic(skb_frag_page(frag)); 2681677e90edSThomas Graf 2682677e90edSThomas Graf *data = (u8 *) st->frag_data + frag->page_offset +
2683677e90edSThomas Graf (abs_offset - st->stepped_offset); 2684677e90edSThomas Graf 2685677e90edSThomas Graf return block_limit - abs_offset; 2686677e90edSThomas Graf } 2687677e90edSThomas Graf 2688677e90edSThomas Graf if (st->frag_data) { 268951c56b00SEric Dumazet kunmap_atomic(st->frag_data); 2690677e90edSThomas Graf st->frag_data = NULL; 2691677e90edSThomas Graf } 2692677e90edSThomas Graf 2693677e90edSThomas Graf st->frag_idx++; 26949e903e08SEric Dumazet st->stepped_offset += skb_frag_size(frag); 2695677e90edSThomas Graf } 2696677e90edSThomas Graf 26975b5a60daSOlaf Kirch if (st->frag_data) { 269851c56b00SEric Dumazet kunmap_atomic(st->frag_data); 26995b5a60daSOlaf Kirch st->frag_data = NULL; 27005b5a60daSOlaf Kirch } 27015b5a60daSOlaf Kirch 270221dc3301SDavid S. Miller if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 2703677e90edSThomas Graf st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 270495e3b24cSHerbert Xu st->frag_idx = 0; 2705677e90edSThomas Graf goto next_skb; 270671b3346dSShyam Iyer } else if (st->cur_skb->next) { 270771b3346dSShyam Iyer st->cur_skb = st->cur_skb->next; 270871b3346dSShyam Iyer st->frag_idx = 0; 2709677e90edSThomas Graf goto next_skb; 2710677e90edSThomas Graf } 2711677e90edSThomas Graf 2712677e90edSThomas Graf return 0; 2713677e90edSThomas Graf } 2714b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_seq_read); 2715677e90edSThomas Graf 2716677e90edSThomas Graf /** 2717677e90edSThomas Graf * skb_abort_seq_read - Abort a sequential read of skb data 2718677e90edSThomas Graf * @st: state variable 2719677e90edSThomas Graf * 2720677e90edSThomas Graf * Must be called if the sequential read is abandoned before 2721677e90edSThomas Graf * skb_seq_read() has returned 0. 2722677e90edSThomas Graf */ 2723677e90edSThomas Graf void skb_abort_seq_read(struct skb_seq_state *st) 2724677e90edSThomas Graf { 2725677e90edSThomas Graf if (st->frag_data) 272651c56b00SEric Dumazet kunmap_atomic(st->frag_data); 2727677e90edSThomas Graf } 2728b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_abort_seq_read); 2729677e90edSThomas Graf 27303fc7e8a6SThomas Graf #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 27313fc7e8a6SThomas Graf 27323fc7e8a6SThomas Graf static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 27333fc7e8a6SThomas Graf struct ts_config *conf, 27343fc7e8a6SThomas Graf struct ts_state *state) 27353fc7e8a6SThomas Graf { 27363fc7e8a6SThomas Graf return skb_seq_read(offset, text, TS_SKB_CB(state)); 27373fc7e8a6SThomas Graf } 27383fc7e8a6SThomas Graf 27393fc7e8a6SThomas Graf static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 27403fc7e8a6SThomas Graf { 27413fc7e8a6SThomas Graf skb_abort_seq_read(TS_SKB_CB(state)); 27423fc7e8a6SThomas Graf } 27433fc7e8a6SThomas Graf
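/*
 * Usage sketch of the sequential-read pattern documented above
 * (hypothetical helper): XOR every byte of a possibly non-linear skb
 * without linearizing it.  Since the loop runs until skb_seq_read()
 * returns 0, the state is already clean and no skb_abort_seq_read()
 * is needed.
 */
static u8 example_xor_bytes(struct sk_buff *skb)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len, i;
	const u8 *data;
	u8 acc = 0;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		for (i = 0; i < len; i++)
			acc ^= data[i];
		consumed += len;	/* consume the whole block */
	}
	return acc;
}
27443fc7e8a6SThomas Graf /** 27453fc7e8a6SThomas Graf * skb_find_text - Find a text pattern in skb data 27463fc7e8a6SThomas Graf * @skb: the buffer to look in 27473fc7e8a6SThomas Graf * @from: search offset 27483fc7e8a6SThomas Graf * @to: search limit 27493fc7e8a6SThomas Graf * @config: textsearch configuration 27503fc7e8a6SThomas Graf * @state: uninitialized textsearch state variable 27513fc7e8a6SThomas Graf * 27523fc7e8a6SThomas Graf * Finds a pattern in the skb data according to the specified 27533fc7e8a6SThomas Graf * textsearch configuration. Use textsearch_next() to retrieve 27543fc7e8a6SThomas Graf * subsequent occurrences of the pattern. Returns the offset 27553fc7e8a6SThomas Graf * to the first occurrence or UINT_MAX if no match was found.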
27563fc7e8a6SThomas Graf */ 27573fc7e8a6SThomas Graf unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 27583fc7e8a6SThomas Graf unsigned int to, struct ts_config *config, 27593fc7e8a6SThomas Graf struct ts_state *state) 27603fc7e8a6SThomas Graf { 2761f72b948dSPhil Oester unsigned int ret; 2762f72b948dSPhil Oester 27633fc7e8a6SThomas Graf config->get_next_block = skb_ts_get_next_block; 27643fc7e8a6SThomas Graf config->finish = skb_ts_finish; 27653fc7e8a6SThomas Graf 27663fc7e8a6SThomas Graf skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); 27673fc7e8a6SThomas Graf 2768f72b948dSPhil Oester ret = textsearch_find(config, state); 2769f72b948dSPhil Oester return (ret <= to - from ? ret : UINT_MAX); 27703fc7e8a6SThomas Graf } 2771b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_find_text); 27723fc7e8a6SThomas Graf 2773e89e9cf5SAnanda Raju /** 27742c53040fSBen Hutchings * skb_append_datato_frags - append the user data to a skb 2775e89e9cf5SAnanda Raju * @sk: sock structure 2776e89e9cf5SAnanda Raju * @skb: skb structure to be appended with user data. 2777e89e9cf5SAnanda Raju * @getfrag: call back function to be used for getting the user data 2778e89e9cf5SAnanda Raju * @from: pointer to user message iov 2779e89e9cf5SAnanda Raju * @length: length of the iov message 2780e89e9cf5SAnanda Raju * 2781e89e9cf5SAnanda Raju * Description: This procedure appends the user data to the fragment part 2782e89e9cf5SAnanda Raju * of the skb. If any page allocation fails, it returns -ENOMEM. 2783e89e9cf5SAnanda Raju */ 2784e89e9cf5SAnanda Raju int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 2785dab9630fSMartin Waitz int (*getfrag)(void *from, char *to, int offset, 2786e89e9cf5SAnanda Raju int len, int odd, struct sk_buff *skb), 2787e89e9cf5SAnanda Raju void *from, int length) 2788e89e9cf5SAnanda Raju { 2789b2111724SEric Dumazet int frg_cnt = skb_shinfo(skb)->nr_frags; 2790b2111724SEric Dumazet int copy; 2791e89e9cf5SAnanda Raju int offset = 0; 2792e89e9cf5SAnanda Raju int ret; 2793b2111724SEric Dumazet struct page_frag *pfrag = &current->task_frag; 2794e89e9cf5SAnanda Raju 2795e89e9cf5SAnanda Raju do { 2796e89e9cf5SAnanda Raju /* Return error if we don't have space for new frag */ 2797e89e9cf5SAnanda Raju if (frg_cnt >= MAX_SKB_FRAGS) 2798b2111724SEric Dumazet return -EMSGSIZE; 2799e89e9cf5SAnanda Raju 2800b2111724SEric Dumazet if (!sk_page_frag_refill(sk, pfrag)) 2801e89e9cf5SAnanda Raju return -ENOMEM; 2802e89e9cf5SAnanda Raju 2803e89e9cf5SAnanda Raju /* copy the user data to page */ 2804b2111724SEric Dumazet copy = min_t(int, length, pfrag->size - pfrag->offset); 2805e89e9cf5SAnanda Raju 2806b2111724SEric Dumazet ret = getfrag(from, page_address(pfrag->page) + pfrag->offset, 2807e89e9cf5SAnanda Raju offset, copy, 0, skb); 2808e89e9cf5SAnanda Raju if (ret < 0) 2809e89e9cf5SAnanda Raju return -EFAULT; 2810e89e9cf5SAnanda Raju 2811e89e9cf5SAnanda Raju /* copy was successful so update the size parameters */ 2812b2111724SEric Dumazet skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset, 2813b2111724SEric Dumazet copy); 2814b2111724SEric Dumazet frg_cnt++; 2815b2111724SEric Dumazet pfrag->offset += copy; 2816b2111724SEric Dumazet get_page(pfrag->page); 2817b2111724SEric Dumazet 2818b2111724SEric Dumazet skb->truesize += copy; 2819b2111724SEric Dumazet atomic_add(copy, &sk->sk_wmem_alloc); 2820e89e9cf5SAnanda Raju skb->len += copy; 2821e89e9cf5SAnanda Raju skb->data_len += copy; 2822e89e9cf5SAnanda Raju offset += copy; 2823e89e9cf5SAnanda Raju length -= copy; 2824e89e9cf5SAnanda
Raju 2825e89e9cf5SAnanda Raju } while (length > 0); 2826e89e9cf5SAnanda Raju 2827e89e9cf5SAnanda Raju return 0; 2828e89e9cf5SAnanda Raju } 2829b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append_datato_frags); 2830e89e9cf5SAnanda Raju 2831cbb042f9SHerbert Xu /** 2832cbb042f9SHerbert Xu * skb_pull_rcsum - pull skb and update receive checksum 2833cbb042f9SHerbert Xu * @skb: buffer to update 2834cbb042f9SHerbert Xu * @len: length of data pulled 2835cbb042f9SHerbert Xu * 2836cbb042f9SHerbert Xu * This function performs an skb_pull on the packet and updates 2837fee54fa5SUrs Thuermann * the CHECKSUM_COMPLETE checksum. It should be used in 283884fa7933SPatrick McHardy * receive path processing instead of skb_pull unless you know 283984fa7933SPatrick McHardy * that the checksum difference is zero (e.g., a valid IP header) 284084fa7933SPatrick McHardy * or you are setting ip_summed to CHECKSUM_NONE. 2841cbb042f9SHerbert Xu */ 2842cbb042f9SHerbert Xu unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 2843cbb042f9SHerbert Xu { 2844cbb042f9SHerbert Xu BUG_ON(len > skb->len); 2845cbb042f9SHerbert Xu skb->len -= len; 2846cbb042f9SHerbert Xu BUG_ON(skb->len < skb->data_len); 2847cbb042f9SHerbert Xu skb_postpull_rcsum(skb, skb->data, len); 2848cbb042f9SHerbert Xu return skb->data += len; 2849cbb042f9SHerbert Xu } 2850f94691acSArnaldo Carvalho de Melo EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2851f94691acSArnaldo Carvalho de Melo
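/*
 * Usage sketch (hypothetical tunnel input path, not from this file):
 * strip an 8-byte encapsulation header while keeping a CHECKSUM_COMPLETE
 * value valid for the inner packet.
 */
static int example_decap(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, 8))
		return -EINVAL;
	skb_pull_rcsum(skb, 8);		/* adjusts skb->csum as it pulls */
	return 0;
}
2852f4c50d99SHerbert Xu /** 2853f4c50d99SHerbert Xu * skb_segment - Perform protocol segmentation on skb. 2854df5771ffSMichael S. Tsirkin * @head_skb: buffer to segment 2855576a30ebSHerbert Xu * @features: features for the output path (see dev->features) 2856f4c50d99SHerbert Xu * 2857f4c50d99SHerbert Xu * This function performs segmentation on the given skb. It returns 28584c821d75SBen Hutchings * a pointer to the first in a list of new skbs for the segments. 28594c821d75SBen Hutchings * In case of error it returns ERR_PTR(err). 2860f4c50d99SHerbert Xu */ 2861df5771ffSMichael S. Tsirkin struct sk_buff *skb_segment(struct sk_buff *head_skb, 2862df5771ffSMichael S. Tsirkin netdev_features_t features) 2863f4c50d99SHerbert Xu { 2864f4c50d99SHerbert Xu struct sk_buff *segs = NULL; 2865f4c50d99SHerbert Xu struct sk_buff *tail = NULL; 28661a4cedafSMichael S. Tsirkin struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 2867df5771ffSMichael S. Tsirkin skb_frag_t *frag = skb_shinfo(head_skb)->frags; 2868df5771ffSMichael S. Tsirkin unsigned int mss = skb_shinfo(head_skb)->gso_size; 2869df5771ffSMichael S. Tsirkin unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 28701fd819ecSMichael S. Tsirkin struct sk_buff *frag_skb = head_skb; 2871f4c50d99SHerbert Xu unsigned int offset = doffset; 2872df5771ffSMichael S. Tsirkin unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 2873f4c50d99SHerbert Xu unsigned int headroom; 2874f4c50d99SHerbert Xu unsigned int len; 2875ec5f0615SPravin B Shelar __be16 proto; 2876ec5f0615SPravin B Shelar bool csum; 287704ed3e74SMichał Mirosław int sg = !!(features & NETIF_F_SG); 2878df5771ffSMichael S. Tsirkin int nfrags = skb_shinfo(head_skb)->nr_frags; 2879f4c50d99SHerbert Xu int err = -ENOMEM; 2880f4c50d99SHerbert Xu int i = 0; 2881f4c50d99SHerbert Xu int pos; 2882f4c50d99SHerbert Xu 2883df5771ffSMichael S.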
Tsirkin proto = skb_network_protocol(head_skb); 2884ec5f0615SPravin B Shelar if (unlikely(!proto)) 2885ec5f0615SPravin B Shelar return ERR_PTR(-EINVAL); 2886ec5f0615SPravin B Shelar 2887ec5f0615SPravin B Shelar csum = !!can_checksum_protocol(features, proto); 2888df5771ffSMichael S. Tsirkin __skb_push(head_skb, doffset); 2889df5771ffSMichael S. Tsirkin headroom = skb_headroom(head_skb); 2890df5771ffSMichael S. Tsirkin pos = skb_headlen(head_skb); 2891f4c50d99SHerbert Xu 2892f4c50d99SHerbert Xu do { 2893f4c50d99SHerbert Xu struct sk_buff *nskb; 28948cb19905SMichael S. Tsirkin skb_frag_t *nskb_frag; 2895c8884eddSHerbert Xu int hsize; 2896f4c50d99SHerbert Xu int size; 2897f4c50d99SHerbert Xu 2898df5771ffSMichael S. Tsirkin len = head_skb->len - offset; 2899f4c50d99SHerbert Xu if (len > mss) 2900f4c50d99SHerbert Xu len = mss; 2901f4c50d99SHerbert Xu 2902df5771ffSMichael S. Tsirkin hsize = skb_headlen(head_skb) - offset; 2903f4c50d99SHerbert Xu if (hsize < 0) 2904f4c50d99SHerbert Xu hsize = 0; 2905c8884eddSHerbert Xu if (hsize > len || !sg) 2906c8884eddSHerbert Xu hsize = len; 2907f4c50d99SHerbert Xu 29081a4cedafSMichael S. Tsirkin if (!hsize && i >= nfrags && skb_headlen(list_skb) && 29091a4cedafSMichael S. Tsirkin (skb_headlen(list_skb) == len || sg)) { 29101a4cedafSMichael S. Tsirkin BUG_ON(skb_headlen(list_skb) > len); 291189319d38SHerbert Xu 29129d8506ccSHerbert Xu i = 0; 29131a4cedafSMichael S. Tsirkin nfrags = skb_shinfo(list_skb)->nr_frags; 29141a4cedafSMichael S. Tsirkin frag = skb_shinfo(list_skb)->frags; 29151fd819ecSMichael S. Tsirkin frag_skb = list_skb; 29161a4cedafSMichael S. Tsirkin pos += skb_headlen(list_skb); 29179d8506ccSHerbert Xu 29189d8506ccSHerbert Xu while (pos < offset + len) { 29199d8506ccSHerbert Xu BUG_ON(i >= nfrags); 29209d8506ccSHerbert Xu 29214e1beba1SMichael S. Tsirkin size = skb_frag_size(frag); 29229d8506ccSHerbert Xu if (pos + size > offset + len) 29239d8506ccSHerbert Xu break; 29249d8506ccSHerbert Xu 29259d8506ccSHerbert Xu i++; 29269d8506ccSHerbert Xu pos += size; 29274e1beba1SMichael S. Tsirkin frag++; 29289d8506ccSHerbert Xu } 29299d8506ccSHerbert Xu 29301a4cedafSMichael S. Tsirkin nskb = skb_clone(list_skb, GFP_ATOMIC); 29311a4cedafSMichael S. Tsirkin list_skb = list_skb->next; 293289319d38SHerbert Xu 2933f4c50d99SHerbert Xu if (unlikely(!nskb)) 2934f4c50d99SHerbert Xu goto err; 2935f4c50d99SHerbert Xu 29369d8506ccSHerbert Xu if (unlikely(pskb_trim(nskb, len))) { 29379d8506ccSHerbert Xu kfree_skb(nskb); 29389d8506ccSHerbert Xu goto err; 29399d8506ccSHerbert Xu } 29409d8506ccSHerbert Xu 2941ec47ea82SAlexander Duyck hsize = skb_end_offset(nskb); 294289319d38SHerbert Xu if (skb_cow_head(nskb, doffset + headroom)) { 294389319d38SHerbert Xu kfree_skb(nskb); 294489319d38SHerbert Xu goto err; 294589319d38SHerbert Xu } 294689319d38SHerbert Xu 2947ec47ea82SAlexander Duyck nskb->truesize += skb_end_offset(nskb) - hsize; 294889319d38SHerbert Xu skb_release_head_state(nskb); 294989319d38SHerbert Xu __skb_push(nskb, doffset); 295089319d38SHerbert Xu } else { 2951c93bdd0eSMel Gorman nskb = __alloc_skb(hsize + doffset + headroom, 2952df5771ffSMichael S. 
Tsirkin GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 2953c93bdd0eSMel Gorman NUMA_NO_NODE); 295489319d38SHerbert Xu 295589319d38SHerbert Xu if (unlikely(!nskb)) 295689319d38SHerbert Xu goto err; 295789319d38SHerbert Xu 295889319d38SHerbert Xu skb_reserve(nskb, headroom); 295989319d38SHerbert Xu __skb_put(nskb, doffset); 296089319d38SHerbert Xu } 296189319d38SHerbert Xu 2962f4c50d99SHerbert Xu if (segs) 2963f4c50d99SHerbert Xu tail->next = nskb; 2964f4c50d99SHerbert Xu else 2965f4c50d99SHerbert Xu segs = nskb; 2966f4c50d99SHerbert Xu tail = nskb; 2967f4c50d99SHerbert Xu 2968df5771ffSMichael S. Tsirkin __copy_skb_header(nskb, head_skb); 2969df5771ffSMichael S. Tsirkin nskb->mac_len = head_skb->mac_len; 2970f4c50d99SHerbert Xu 2971030737bcSEric Dumazet skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 297268c33163SPravin B Shelar 2973df5771ffSMichael S. Tsirkin skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 297468c33163SPravin B Shelar nskb->data - tnl_hlen, 297568c33163SPravin B Shelar doffset + tnl_hlen); 297689319d38SHerbert Xu 29779d8506ccSHerbert Xu if (nskb->len == len + doffset) 29781cdbcb79SSimon Horman goto perform_csum_check; 297989319d38SHerbert Xu 2980f4c50d99SHerbert Xu if (!sg) { 29816f85a124SHerbert Xu nskb->ip_summed = CHECKSUM_NONE; 2982df5771ffSMichael S. Tsirkin nskb->csum = skb_copy_and_csum_bits(head_skb, offset, 2983f4c50d99SHerbert Xu skb_put(nskb, len), 2984f4c50d99SHerbert Xu len, 0); 2985f4c50d99SHerbert Xu continue; 2986f4c50d99SHerbert Xu } 2987f4c50d99SHerbert Xu 29888cb19905SMichael S. Tsirkin nskb_frag = skb_shinfo(nskb)->frags; 2989f4c50d99SHerbert Xu 2990df5771ffSMichael S. Tsirkin skb_copy_from_linear_data_offset(head_skb, offset, 2991d626f62bSArnaldo Carvalho de Melo skb_put(nskb, hsize), hsize); 2992f4c50d99SHerbert Xu 2993df5771ffSMichael S. Tsirkin skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags & 2994df5771ffSMichael S. Tsirkin SKBTX_SHARED_FRAG; 2995cef401deSEric Dumazet 29969d8506ccSHerbert Xu while (pos < offset + len) { 29979d8506ccSHerbert Xu if (i >= nfrags) { 29981a4cedafSMichael S. Tsirkin BUG_ON(skb_headlen(list_skb)); 29999d8506ccSHerbert Xu 30009d8506ccSHerbert Xu i = 0; 30011a4cedafSMichael S. Tsirkin nfrags = skb_shinfo(list_skb)->nr_frags; 30021a4cedafSMichael S. Tsirkin frag = skb_shinfo(list_skb)->frags; 30031fd819ecSMichael S. Tsirkin frag_skb = list_skb; 30049d8506ccSHerbert Xu 30059d8506ccSHerbert Xu BUG_ON(!nfrags); 30069d8506ccSHerbert Xu 30071a4cedafSMichael S. Tsirkin list_skb = list_skb->next; 30089d8506ccSHerbert Xu } 30099d8506ccSHerbert Xu 30109d8506ccSHerbert Xu if (unlikely(skb_shinfo(nskb)->nr_frags >= 30119d8506ccSHerbert Xu MAX_SKB_FRAGS)) { 30129d8506ccSHerbert Xu net_warn_ratelimited( 30139d8506ccSHerbert Xu "skb_segment: too many frags: %u %u\n", 30149d8506ccSHerbert Xu pos, mss); 30159d8506ccSHerbert Xu goto err; 30169d8506ccSHerbert Xu } 30179d8506ccSHerbert Xu 30181fd819ecSMichael S. Tsirkin if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC))) 30191fd819ecSMichael S. Tsirkin goto err; 30201fd819ecSMichael S. Tsirkin 30214e1beba1SMichael S. Tsirkin *nskb_frag = *frag; 30228cb19905SMichael S. Tsirkin __skb_frag_ref(nskb_frag); 30238cb19905SMichael S. Tsirkin size = skb_frag_size(nskb_frag); 3024f4c50d99SHerbert Xu 3025f4c50d99SHerbert Xu if (pos < offset) { 30268cb19905SMichael S. Tsirkin nskb_frag->page_offset += offset - pos; 30278cb19905SMichael S. 
Tsirkin skb_frag_size_sub(nskb_frag, offset - pos); 3028f4c50d99SHerbert Xu } 3029f4c50d99SHerbert Xu 303089319d38SHerbert Xu skb_shinfo(nskb)->nr_frags++; 3031f4c50d99SHerbert Xu 3032f4c50d99SHerbert Xu if (pos + size <= offset + len) { 3033f4c50d99SHerbert Xu i++; 30344e1beba1SMichael S. Tsirkin frag++; 3035f4c50d99SHerbert Xu pos += size; 3036f4c50d99SHerbert Xu } else { 30378cb19905SMichael S. Tsirkin skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); 303889319d38SHerbert Xu goto skip_fraglist; 3039f4c50d99SHerbert Xu } 3040f4c50d99SHerbert Xu 30418cb19905SMichael S. Tsirkin nskb_frag++; 3042f4c50d99SHerbert Xu } 3043f4c50d99SHerbert Xu 304489319d38SHerbert Xu skip_fraglist: 3045f4c50d99SHerbert Xu nskb->data_len = len - hsize; 3046f4c50d99SHerbert Xu nskb->len += nskb->data_len; 3047f4c50d99SHerbert Xu nskb->truesize += nskb->data_len; 3048ec5f0615SPravin B Shelar 30491cdbcb79SSimon Horman perform_csum_check: 3050ec5f0615SPravin B Shelar if (!csum) { 3051ec5f0615SPravin B Shelar nskb->csum = skb_checksum(nskb, doffset, 3052ec5f0615SPravin B Shelar nskb->len - doffset, 0); 3053ec5f0615SPravin B Shelar nskb->ip_summed = CHECKSUM_NONE; 3054ec5f0615SPravin B Shelar } 3055df5771ffSMichael S. Tsirkin } while ((offset += len) < head_skb->len); 3056f4c50d99SHerbert Xu 3057f4c50d99SHerbert Xu return segs; 3058f4c50d99SHerbert Xu 3059f4c50d99SHerbert Xu err: 3060289dccbeSEric Dumazet kfree_skb_list(segs); 3061f4c50d99SHerbert Xu return ERR_PTR(err); 3062f4c50d99SHerbert Xu } 3063f4c50d99SHerbert Xu EXPORT_SYMBOL_GPL(skb_segment); 3064f4c50d99SHerbert Xu 306571d93b39SHerbert Xu int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 306671d93b39SHerbert Xu { 30678a29111cSEric Dumazet struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); 306867147ba9SHerbert Xu unsigned int offset = skb_gro_offset(skb); 306967147ba9SHerbert Xu unsigned int headlen = skb_headlen(skb); 30708a29111cSEric Dumazet struct sk_buff *nskb, *lp, *p = *head; 30718a29111cSEric Dumazet unsigned int len = skb_gro_len(skb); 3072715dc1f3SEric Dumazet unsigned int delta_truesize; 30738a29111cSEric Dumazet unsigned int headroom; 307471d93b39SHerbert Xu 30758a29111cSEric Dumazet if (unlikely(p->len + len >= 65536)) 307671d93b39SHerbert Xu return -E2BIG; 307771d93b39SHerbert Xu 30788a29111cSEric Dumazet lp = NAPI_GRO_CB(p)->last ?: p; 30798a29111cSEric Dumazet pinfo = skb_shinfo(lp); 30808a29111cSEric Dumazet 30818a29111cSEric Dumazet if (headlen <= offset) { 308242da6994SHerbert Xu skb_frag_t *frag; 308366e92fcfSHerbert Xu skb_frag_t *frag2; 30849aaa156cSHerbert Xu int i = skbinfo->nr_frags; 30859aaa156cSHerbert Xu int nr_frags = pinfo->nr_frags + i; 308642da6994SHerbert Xu 308766e92fcfSHerbert Xu if (nr_frags > MAX_SKB_FRAGS) 30888a29111cSEric Dumazet goto merge; 308981705ad1SHerbert Xu 30908a29111cSEric Dumazet offset -= headlen; 30919aaa156cSHerbert Xu pinfo->nr_frags = nr_frags; 30929aaa156cSHerbert Xu skbinfo->nr_frags = 0; 3093f5572068SHerbert Xu 30949aaa156cSHerbert Xu frag = pinfo->frags + nr_frags; 30959aaa156cSHerbert Xu frag2 = skbinfo->frags + i; 309666e92fcfSHerbert Xu do { 309766e92fcfSHerbert Xu *--frag = *--frag2; 309866e92fcfSHerbert Xu } while (--i); 309966e92fcfSHerbert Xu 310066e92fcfSHerbert Xu frag->page_offset += offset; 31019e903e08SEric Dumazet skb_frag_size_sub(frag, offset); 310266e92fcfSHerbert Xu 3103715dc1f3SEric Dumazet /* all fragments truesize : remove (head size + sk_buff) */ 3104ec47ea82SAlexander Duyck delta_truesize = skb->truesize - 3105ec47ea82SAlexander Duyck 
SKB_TRUESIZE(skb_end_offset(skb)); 3106715dc1f3SEric Dumazet 3107f5572068SHerbert Xu skb->truesize -= skb->data_len; 3108f5572068SHerbert Xu skb->len -= skb->data_len; 3109f5572068SHerbert Xu skb->data_len = 0; 3110f5572068SHerbert Xu 3111715dc1f3SEric Dumazet NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 31125d38a079SHerbert Xu goto done; 3113d7e8883cSEric Dumazet } else if (skb->head_frag) { 3114d7e8883cSEric Dumazet int nr_frags = pinfo->nr_frags; 3115d7e8883cSEric Dumazet skb_frag_t *frag = pinfo->frags + nr_frags; 3116d7e8883cSEric Dumazet struct page *page = virt_to_head_page(skb->head); 3117d7e8883cSEric Dumazet unsigned int first_size = headlen - offset; 3118d7e8883cSEric Dumazet unsigned int first_offset; 3119d7e8883cSEric Dumazet 3120d7e8883cSEric Dumazet if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 31218a29111cSEric Dumazet goto merge; 3122d7e8883cSEric Dumazet 3123d7e8883cSEric Dumazet first_offset = skb->data - 3124d7e8883cSEric Dumazet (unsigned char *)page_address(page) + 3125d7e8883cSEric Dumazet offset; 3126d7e8883cSEric Dumazet 3127d7e8883cSEric Dumazet pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 3128d7e8883cSEric Dumazet 3129d7e8883cSEric Dumazet frag->page.p = page; 3130d7e8883cSEric Dumazet frag->page_offset = first_offset; 3131d7e8883cSEric Dumazet skb_frag_size_set(frag, first_size); 3132d7e8883cSEric Dumazet 3133d7e8883cSEric Dumazet memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 3134d7e8883cSEric Dumazet /* We don't need to clear skbinfo->nr_frags here */ 3135d7e8883cSEric Dumazet 3136715dc1f3SEric Dumazet delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 3137d7e8883cSEric Dumazet NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 3138d7e8883cSEric Dumazet goto done; 31398a29111cSEric Dumazet } 31408a29111cSEric Dumazet if (pinfo->frag_list) 31418a29111cSEric Dumazet goto merge; 31428a29111cSEric Dumazet if (skb_gro_len(p) != pinfo->gso_size) 314369c0cab1SHerbert Xu return -E2BIG; 314471d93b39SHerbert Xu 314571d93b39SHerbert Xu headroom = skb_headroom(p); 31463d3be433SEric Dumazet nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); 314771d93b39SHerbert Xu if (unlikely(!nskb)) 314871d93b39SHerbert Xu return -ENOMEM; 314971d93b39SHerbert Xu 315071d93b39SHerbert Xu __copy_skb_header(nskb, p); 315171d93b39SHerbert Xu nskb->mac_len = p->mac_len; 315271d93b39SHerbert Xu 315371d93b39SHerbert Xu skb_reserve(nskb, headroom); 315486911732SHerbert Xu __skb_put(nskb, skb_gro_offset(p)); 315571d93b39SHerbert Xu 315686911732SHerbert Xu skb_set_mac_header(nskb, skb_mac_header(p) - p->data); 315771d93b39SHerbert Xu skb_set_network_header(nskb, skb_network_offset(p)); 315871d93b39SHerbert Xu skb_set_transport_header(nskb, skb_transport_offset(p)); 315971d93b39SHerbert Xu 316086911732SHerbert Xu __skb_pull(p, skb_gro_offset(p)); 316186911732SHerbert Xu memcpy(skb_mac_header(nskb), skb_mac_header(p), 316286911732SHerbert Xu p->data - skb_mac_header(p)); 316371d93b39SHerbert Xu 316471d93b39SHerbert Xu skb_shinfo(nskb)->frag_list = p; 31659aaa156cSHerbert Xu skb_shinfo(nskb)->gso_size = pinfo->gso_size; 3166622e0ca1SHerbert Xu pinfo->gso_size = 0; 316771d93b39SHerbert Xu skb_header_release(p); 3168c3c7c254SEric Dumazet NAPI_GRO_CB(nskb)->last = p; 316971d93b39SHerbert Xu 317071d93b39SHerbert Xu nskb->data_len += p->len; 3171de8261c2SEric Dumazet nskb->truesize += p->truesize; 317271d93b39SHerbert Xu nskb->len += p->len; 317371d93b39SHerbert Xu 317471d93b39SHerbert Xu *head = nskb; 317571d93b39SHerbert Xu nskb->next =
p->next; 317671d93b39SHerbert Xu p->next = NULL; 317771d93b39SHerbert Xu 317871d93b39SHerbert Xu p = nskb; 317971d93b39SHerbert Xu 318071d93b39SHerbert Xu merge: 3181715dc1f3SEric Dumazet delta_truesize = skb->truesize; 318267147ba9SHerbert Xu if (offset > headlen) { 3183d1dc7abfSMichal Schmidt unsigned int eat = offset - headlen; 3184d1dc7abfSMichal Schmidt 3185d1dc7abfSMichal Schmidt skbinfo->frags[0].page_offset += eat; 31869e903e08SEric Dumazet skb_frag_size_sub(&skbinfo->frags[0], eat); 3187d1dc7abfSMichal Schmidt skb->data_len -= eat; 3188d1dc7abfSMichal Schmidt skb->len -= eat; 318967147ba9SHerbert Xu offset = headlen; 319056035022SHerbert Xu } 319156035022SHerbert Xu 319267147ba9SHerbert Xu __skb_pull(skb, offset); 319356035022SHerbert Xu 31948a29111cSEric Dumazet if (!NAPI_GRO_CB(p)->last) 31958a29111cSEric Dumazet skb_shinfo(p)->frag_list = skb; 31968a29111cSEric Dumazet else 3197c3c7c254SEric Dumazet NAPI_GRO_CB(p)->last->next = skb; 3198c3c7c254SEric Dumazet NAPI_GRO_CB(p)->last = skb; 319971d93b39SHerbert Xu skb_header_release(skb); 32008a29111cSEric Dumazet lp = p; 320171d93b39SHerbert Xu 32025d38a079SHerbert Xu done: 32035d38a079SHerbert Xu NAPI_GRO_CB(p)->count++; 320437fe4732SHerbert Xu p->data_len += len; 3205715dc1f3SEric Dumazet p->truesize += delta_truesize; 320637fe4732SHerbert Xu p->len += len; 32078a29111cSEric Dumazet if (lp != p) { 32088a29111cSEric Dumazet lp->data_len += len; 32098a29111cSEric Dumazet lp->truesize += delta_truesize; 32108a29111cSEric Dumazet lp->len += len; 32118a29111cSEric Dumazet } 321271d93b39SHerbert Xu NAPI_GRO_CB(skb)->same_flow = 1; 321371d93b39SHerbert Xu return 0; 321471d93b39SHerbert Xu } 321571d93b39SHerbert Xu EXPORT_SYMBOL_GPL(skb_gro_receive); 321671d93b39SHerbert Xu 32171da177e4SLinus Torvalds void __init skb_init(void) 32181da177e4SLinus Torvalds { 32191da177e4SLinus Torvalds skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 32201da177e4SLinus Torvalds sizeof(struct sk_buff), 32211da177e4SLinus Torvalds 0, 3222e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 322320c2df83SPaul Mundt NULL); 3224d179cd12SDavid S. Miller skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 3225d179cd12SDavid S. Miller (2*sizeof(struct sk_buff)) + 3226d179cd12SDavid S. Miller sizeof(atomic_t), 3227d179cd12SDavid S. Miller 0, 3228e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 322920c2df83SPaul Mundt NULL); 32301da177e4SLinus Torvalds } 32311da177e4SLinus Torvalds 3232716ea3a7SDavid Howells /** 3233716ea3a7SDavid Howells * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 3234716ea3a7SDavid Howells * @skb: Socket buffer containing the buffers to be mapped 3235716ea3a7SDavid Howells * @sg: The scatter-gather list to map into 3236716ea3a7SDavid Howells * @offset: The offset into the buffer's contents to start mapping 3237716ea3a7SDavid Howells * @len: Length of buffer space to be mapped 3238716ea3a7SDavid Howells * 3239716ea3a7SDavid Howells * Fill the specified scatter-gather list with mappings/pointers into a 3240716ea3a7SDavid Howells * region of the buffer space attached to a socket buffer. 3241716ea3a7SDavid Howells */ 324251c739d1SDavid S. Miller static int 324351c739d1SDavid S. Miller __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3244716ea3a7SDavid Howells { 32451a028e50SDavid S. Miller int start = skb_headlen(skb); 32461a028e50SDavid S. Miller int i, copy = start - offset; 3247fbb398a8SDavid S. 
Miller struct sk_buff *frag_iter; 3248716ea3a7SDavid Howells int elt = 0; 3249716ea3a7SDavid Howells 3250716ea3a7SDavid Howells if (copy > 0) { 3251716ea3a7SDavid Howells if (copy > len) 3252716ea3a7SDavid Howells copy = len; 3253642f1490SJens Axboe sg_set_buf(sg, skb->data + offset, copy); 3254716ea3a7SDavid Howells elt++; 3255716ea3a7SDavid Howells if ((len -= copy) == 0) 3256716ea3a7SDavid Howells return elt; 3257716ea3a7SDavid Howells offset += copy; 3258716ea3a7SDavid Howells } 3259716ea3a7SDavid Howells 3260716ea3a7SDavid Howells for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 32611a028e50SDavid S. Miller int end; 3262716ea3a7SDavid Howells 3263547b792cSIlpo Järvinen WARN_ON(start > offset + len); 32641a028e50SDavid S. Miller 32659e903e08SEric Dumazet end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3266716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 3267716ea3a7SDavid Howells skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3268716ea3a7SDavid Howells 3269716ea3a7SDavid Howells if (copy > len) 3270716ea3a7SDavid Howells copy = len; 3271ea2ab693SIan Campbell sg_set_page(&sg[elt], skb_frag_page(frag), copy, 3272642f1490SJens Axboe frag->page_offset+offset-start); 3273716ea3a7SDavid Howells elt++; 3274716ea3a7SDavid Howells if (!(len -= copy)) 3275716ea3a7SDavid Howells return elt; 3276716ea3a7SDavid Howells offset += copy; 3277716ea3a7SDavid Howells } 32781a028e50SDavid S. Miller start = end; 3279716ea3a7SDavid Howells } 3280716ea3a7SDavid Howells 3281fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 32821a028e50SDavid S. Miller int end; 3283716ea3a7SDavid Howells 3284547b792cSIlpo Järvinen WARN_ON(start > offset + len); 32851a028e50SDavid S. Miller 3286fbb398a8SDavid S. Miller end = start + frag_iter->len; 3287716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 3288716ea3a7SDavid Howells if (copy > len) 3289716ea3a7SDavid Howells copy = len; 3290fbb398a8SDavid S. Miller elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, 329151c739d1SDavid S. Miller copy); 3292716ea3a7SDavid Howells if ((len -= copy) == 0) 3293716ea3a7SDavid Howells return elt; 3294716ea3a7SDavid Howells offset += copy; 3295716ea3a7SDavid Howells } 32961a028e50SDavid S. Miller start = end; 3297716ea3a7SDavid Howells } 3298716ea3a7SDavid Howells BUG_ON(len); 3299716ea3a7SDavid Howells return elt; 3300716ea3a7SDavid Howells } 3301716ea3a7SDavid Howells 330251c739d1SDavid S. Miller int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 330351c739d1SDavid S. Miller { 330451c739d1SDavid S. Miller int nsg = __skb_to_sgvec(skb, sg, offset, len); 330551c739d1SDavid S. Miller 3306c46f2334SJens Axboe sg_mark_end(&sg[nsg - 1]); 330751c739d1SDavid S. Miller 330851c739d1SDavid S. Miller return nsg; 330951c739d1SDavid S. Miller } 3310b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_to_sgvec); 331151c739d1SDavid S. Miller
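/*
 * Usage sketch (hypothetical IPsec-style caller, not from this file):
 * map the first @len bytes of an skb into a fixed scatterlist.
 * EXAMPLE_MAX_SGS is an invented bound; real users size the table from
 * skb_cow_data()'s return value below.  Assumes len <= skb->len and at
 * most EXAMPLE_MAX_SGS segments.
 */
#define EXAMPLE_MAX_SGS 16
static int example_map_for_crypto(struct sk_buff *skb, int len)
{
	struct scatterlist sgl[EXAMPLE_MAX_SGS];

	sg_init_table(sgl, EXAMPLE_MAX_SGS);
	return skb_to_sgvec(skb, sgl, 0, len);	/* number of entries used */
}
3312716ea3a7SDavid Howells /** 3313716ea3a7SDavid Howells * skb_cow_data - Check that a socket buffer's data buffers are writable 3314716ea3a7SDavid Howells * @skb: The socket buffer to check. 3315716ea3a7SDavid Howells * @tailbits: Amount of trailing space to be added 3316716ea3a7SDavid Howells * @trailer: Returned pointer to the skb where the @tailbits space begins 3317716ea3a7SDavid Howells * 3318716ea3a7SDavid Howells * Make sure that the data buffers attached to a socket buffer are 3319716ea3a7SDavid Howells * writable.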
If they are not, private copies are made of the data buffers 3320716ea3a7SDavid Howells * and the socket buffer is set to use these instead. 3321716ea3a7SDavid Howells * 3322716ea3a7SDavid Howells * If @tailbits is given, make sure that there is space to write @tailbits 3323716ea3a7SDavid Howells * bytes of data beyond the current end of the socket buffer. @trailer will be 3324716ea3a7SDavid Howells * set to point to the skb in which this space begins. 3325716ea3a7SDavid Howells * 3326716ea3a7SDavid Howells * The number of scatterlist elements required to completely map the 3327716ea3a7SDavid Howells * COW'd and extended socket buffer will be returned. 3328716ea3a7SDavid Howells */ 3329716ea3a7SDavid Howells int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 3330716ea3a7SDavid Howells { 3331716ea3a7SDavid Howells int copyflag; 3332716ea3a7SDavid Howells int elt; 3333716ea3a7SDavid Howells struct sk_buff *skb1, **skb_p; 3334716ea3a7SDavid Howells 3335716ea3a7SDavid Howells /* If skb is cloned or its head is paged, reallocate 3336716ea3a7SDavid Howells * head pulling out all the pages (pages are considered not writable 3337716ea3a7SDavid Howells * at the moment even if they are anonymous). 3338716ea3a7SDavid Howells */ 3339716ea3a7SDavid Howells if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 3340716ea3a7SDavid Howells __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) 3341716ea3a7SDavid Howells return -ENOMEM; 3342716ea3a7SDavid Howells 3343716ea3a7SDavid Howells /* Easy case. Most packets will go this way. */ 334421dc3301SDavid S. Miller if (!skb_has_frag_list(skb)) { 3345716ea3a7SDavid Howells /* A little trouble: not enough space for the trailer. 3346716ea3a7SDavid Howells * This should not happen when the stack is tuned to generate 3347716ea3a7SDavid Howells * good frames. OK, on a miss we reallocate and reserve even more 3348716ea3a7SDavid Howells * space; 128 bytes is fair. */ 3349716ea3a7SDavid Howells 3350716ea3a7SDavid Howells if (skb_tailroom(skb) < tailbits && 3351716ea3a7SDavid Howells pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 3352716ea3a7SDavid Howells return -ENOMEM; 3353716ea3a7SDavid Howells 3354716ea3a7SDavid Howells /* Voila! */ 3355716ea3a7SDavid Howells *trailer = skb; 3356716ea3a7SDavid Howells return 1; 3357716ea3a7SDavid Howells } 3358716ea3a7SDavid Howells 3359716ea3a7SDavid Howells /* Misery. We are in trouble, going to mince the fragments... */ 3360716ea3a7SDavid Howells 3361716ea3a7SDavid Howells elt = 1; 3362716ea3a7SDavid Howells skb_p = &skb_shinfo(skb)->frag_list; 3363716ea3a7SDavid Howells copyflag = 0; 3364716ea3a7SDavid Howells 3365716ea3a7SDavid Howells while ((skb1 = *skb_p) != NULL) { 3366716ea3a7SDavid Howells int ntail = 0; 3367716ea3a7SDavid Howells 3368716ea3a7SDavid Howells /* The fragment is partially pulled by someone; 3369716ea3a7SDavid Howells * this can happen on input. Copy it and everything 3370716ea3a7SDavid Howells * after it. */ 3371716ea3a7SDavid Howells 3372716ea3a7SDavid Howells if (skb_shared(skb1)) 3373716ea3a7SDavid Howells copyflag = 1; 3374716ea3a7SDavid Howells 3375716ea3a7SDavid Howells /* If the skb is the last, worry about trailer. */ 3376716ea3a7SDavid Howells 3377716ea3a7SDavid Howells if (skb1->next == NULL && tailbits) { 3378716ea3a7SDavid Howells if (skb_shinfo(skb1)->nr_frags || 337921dc3301SDavid S.
Miller skb_has_frag_list(skb1) || 3380716ea3a7SDavid Howells skb_tailroom(skb1) < tailbits) 3381716ea3a7SDavid Howells ntail = tailbits + 128; 3382716ea3a7SDavid Howells } 3383716ea3a7SDavid Howells 3384716ea3a7SDavid Howells if (copyflag || 3385716ea3a7SDavid Howells skb_cloned(skb1) || 3386716ea3a7SDavid Howells ntail || 3387716ea3a7SDavid Howells skb_shinfo(skb1)->nr_frags || 338821dc3301SDavid S. Miller skb_has_frag_list(skb1)) { 3389716ea3a7SDavid Howells struct sk_buff *skb2; 3390716ea3a7SDavid Howells 3391716ea3a7SDavid Howells /* Fuck, we are miserable poor guys... */ 3392716ea3a7SDavid Howells if (ntail == 0) 3393716ea3a7SDavid Howells skb2 = skb_copy(skb1, GFP_ATOMIC); 3394716ea3a7SDavid Howells else 3395716ea3a7SDavid Howells skb2 = skb_copy_expand(skb1, 3396716ea3a7SDavid Howells skb_headroom(skb1), 3397716ea3a7SDavid Howells ntail, 3398716ea3a7SDavid Howells GFP_ATOMIC); 3399716ea3a7SDavid Howells if (unlikely(skb2 == NULL)) 3400716ea3a7SDavid Howells return -ENOMEM; 3401716ea3a7SDavid Howells 3402716ea3a7SDavid Howells if (skb1->sk) 3403716ea3a7SDavid Howells skb_set_owner_w(skb2, skb1->sk); 3404716ea3a7SDavid Howells 3405716ea3a7SDavid Howells /* Looking around. Are we still alive? 3406716ea3a7SDavid Howells * OK, link new skb, drop old one */ 3407716ea3a7SDavid Howells 3408716ea3a7SDavid Howells skb2->next = skb1->next; 3409716ea3a7SDavid Howells *skb_p = skb2; 3410716ea3a7SDavid Howells kfree_skb(skb1); 3411716ea3a7SDavid Howells skb1 = skb2; 3412716ea3a7SDavid Howells } 3413716ea3a7SDavid Howells elt++; 3414716ea3a7SDavid Howells *trailer = skb1; 3415716ea3a7SDavid Howells skb_p = &skb1->next; 3416716ea3a7SDavid Howells } 3417716ea3a7SDavid Howells 3418716ea3a7SDavid Howells return elt; 3419716ea3a7SDavid Howells } 3420b4ac530fSDavid S. 
Miller EXPORT_SYMBOL_GPL(skb_cow_data); 3421716ea3a7SDavid Howells 3422b1faf566SEric Dumazet static void sock_rmem_free(struct sk_buff *skb) 3423b1faf566SEric Dumazet { 3424b1faf566SEric Dumazet struct sock *sk = skb->sk; 3425b1faf566SEric Dumazet 3426b1faf566SEric Dumazet atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3427b1faf566SEric Dumazet } 3428b1faf566SEric Dumazet 3429b1faf566SEric Dumazet /* 3430b1faf566SEric Dumazet * Note: We don't mem charge error packets (no sk_forward_alloc changes) 3431b1faf566SEric Dumazet */ 3432b1faf566SEric Dumazet int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3433b1faf566SEric Dumazet { 3434110c4330SEric Dumazet int len = skb->len; 3435110c4330SEric Dumazet 3436b1faf566SEric Dumazet if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 343795c96174SEric Dumazet (unsigned int)sk->sk_rcvbuf) 3438b1faf566SEric Dumazet return -ENOMEM; 3439b1faf566SEric Dumazet 3440b1faf566SEric Dumazet skb_orphan(skb); 3441b1faf566SEric Dumazet skb->sk = sk; 3442b1faf566SEric Dumazet skb->destructor = sock_rmem_free; 3443b1faf566SEric Dumazet atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3444b1faf566SEric Dumazet 3445abb57ea4SEric Dumazet /* before exiting rcu section, make sure dst is refcounted */ 3446abb57ea4SEric Dumazet skb_dst_force(skb); 3447abb57ea4SEric Dumazet 3448b1faf566SEric Dumazet skb_queue_tail(&sk->sk_error_queue, skb); 3449b1faf566SEric Dumazet if (!sock_flag(sk, SOCK_DEAD)) 3450110c4330SEric Dumazet sk->sk_data_ready(sk, len); 3451b1faf566SEric Dumazet return 0; 3452b1faf566SEric Dumazet } 3453b1faf566SEric Dumazet EXPORT_SYMBOL(sock_queue_err_skb); 3454b1faf566SEric Dumazet 3455ac45f602SPatrick Ohly void skb_tstamp_tx(struct sk_buff *orig_skb, 3456ac45f602SPatrick Ohly struct skb_shared_hwtstamps *hwtstamps) 3457ac45f602SPatrick Ohly { 3458ac45f602SPatrick Ohly struct sock *sk = orig_skb->sk; 3459ac45f602SPatrick Ohly struct sock_exterr_skb *serr; 3460ac45f602SPatrick Ohly struct sk_buff *skb; 3461ac45f602SPatrick Ohly int err; 3462ac45f602SPatrick Ohly 3463ac45f602SPatrick Ohly if (!sk) 3464ac45f602SPatrick Ohly return; 3465ac45f602SPatrick Ohly 3466ac45f602SPatrick Ohly if (hwtstamps) { 34672e31396fSWillem de Bruijn *skb_hwtstamps(orig_skb) = 3468ac45f602SPatrick Ohly *hwtstamps; 3469ac45f602SPatrick Ohly } else { 3470ac45f602SPatrick Ohly /* 3471ac45f602SPatrick Ohly * no hardware time stamps available, 34722244d07bSOliver Hartkopp * so keep the shared tx_flags and only 3473ac45f602SPatrick Ohly * store software time stamp 3474ac45f602SPatrick Ohly */ 34752e31396fSWillem de Bruijn orig_skb->tstamp = ktime_get_real(); 3476ac45f602SPatrick Ohly } 3477ac45f602SPatrick Ohly 34782e31396fSWillem de Bruijn skb = skb_clone(orig_skb, GFP_ATOMIC); 34792e31396fSWillem de Bruijn if (!skb) 34802e31396fSWillem de Bruijn return; 34812e31396fSWillem de Bruijn 3482ac45f602SPatrick Ohly serr = SKB_EXT_ERR(skb); 3483ac45f602SPatrick Ohly memset(serr, 0, sizeof(*serr)); 3484ac45f602SPatrick Ohly serr->ee.ee_errno = ENOMSG; 3485ac45f602SPatrick Ohly serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 348629030374SEric Dumazet 3487ac45f602SPatrick Ohly err = sock_queue_err_skb(sk, skb); 348829030374SEric Dumazet 3489ac45f602SPatrick Ohly if (err) 3490ac45f602SPatrick Ohly kfree_skb(skb); 3491ac45f602SPatrick Ohly } 3492ac45f602SPatrick Ohly EXPORT_SYMBOL_GPL(skb_tstamp_tx); 3493ac45f602SPatrick Ohly 34946e3e939fSJohannes Berg void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 34956e3e939fSJohannes Berg { 34966e3e939fSJohannes Berg struct sock *sk =
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps)
{
	struct sock *sk = orig_skb->sk;
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	int err;

	if (!sk)
		return;

	if (hwtstamps) {
		*skb_hwtstamps(orig_skb) = *hwtstamps;
	} else {
		/*
		 * no hardware time stamps available,
		 * so keep the shared tx_flags and only
		 * store software time stamp
		 */
		orig_skb->tstamp = ktime_get_real();
	}

	skb = skb_clone(orig_skb, GFP_ATOMIC);
	if (!skb)
		return;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;

	err = sock_queue_err_skb(sk, skb);

	if (err)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_tstamp_tx);
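/*
 * Editor's note: hypothetical driver-side sketch (not from this file) of
 * the usual skb_tstamp_tx() call site: a NIC's TX-completion handler
 * turning a raw hardware cycle count into a hardware timestamp. The
 * example_* names and the cycles-as-nanoseconds conversion are stand-ins
 * for driver-specific code.
 */
static ktime_t example_cycles_to_ktime(u64 cycles)
{
	/* placeholder: a real driver would convert through its
	 * timecounter/cyclecounter setup
	 */
	return ns_to_ktime(cycles);
}

static void example_tx_complete(struct sk_buff *skb, u64 hw_cycles)
{
	/* Only packets marked as having a hardware timestamp in
	 * progress are reported back to the socket.
	 */
	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
		struct skb_shared_hwtstamps hwts;

		memset(&hwts, 0, sizeof(hwts));
		hwts.hwtstamp = example_cycles_to_ktime(hw_cycles);
		skb_tstamp_tx(skb, &hwts);	/* clone goes to sk_error_queue */
	}
	dev_kfree_skb_any(skb);
}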
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
{
	struct sock *sk = skb->sk;
	struct sock_exterr_skb *serr;
	int err;

	skb->wifi_acked_valid = 1;
	skb->wifi_acked = acked;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;

	err = sock_queue_err_skb(sk, skb);
	if (err)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);

/**
 * skb_partial_csum_set - set up and verify partial csum values for packet
 * @skb: the skb to set
 * @start: the number of bytes after skb->data to start checksumming.
 * @off: the offset from start to place the checksum.
 *
 * For untrusted partially-checksummed packets, we need to make sure the values
 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
 *
 * This function checks and sets those values and skb->ip_summed: if this
 * returns false you should drop the packet.
 */
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
	if (unlikely(start > skb_headlen(skb)) ||
	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
		net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
				     start, off, skb_headlen(skb));
		return false;
	}
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_headroom(skb) + start;
	skb->csum_offset = off;
	skb_set_transport_header(skb, start);
	return true;
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);
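/*
 * Editor's note: hypothetical sketch of validating checksum metadata
 * received from an untrusted source (a virtio_net-style csum_start/
 * csum_offset pair; the function name and parameters are illustrative).
 */
static int example_set_untrusted_csum(struct sk_buff *skb,
				      u16 csum_start, u16 csum_offset)
{
	/* Reject offsets that would point outside the linear header area;
	 * on success the skb is marked CHECKSUM_PARTIAL and the device
	 * (or skb_checksum_help()) fills in the checksum later.
	 */
	if (!skb_partial_csum_set(skb, csum_start, csum_offset))
		return -EINVAL;
	return 0;
}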
static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
			       unsigned int max)
{
	if (skb_headlen(skb) >= len)
		return 0;

	/* If we need to pull up, pull up to the max so we
	 * won't need to do it again.
	 */
	if (max > skb->len)
		max = skb->len;

	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
		return -ENOMEM;

	if (skb_headlen(skb) < len)
		return -EPROTO;

	return 0;
}

/* This value should be large enough to cover a tagged ethernet header plus
 * maximally sized IP and TCP or UDP headers.
 */
#define MAX_IP_HDR_LEN 128

static int skb_checksum_setup_ip(struct sk_buff *skb, bool recalculate)
{
	unsigned int off;
	bool fragment;
	int err;

	fragment = false;

	err = skb_maybe_pull_tail(skb,
				  sizeof(struct iphdr),
				  MAX_IP_HDR_LEN);
	if (err < 0)
		goto out;

	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
		fragment = true;

	off = ip_hdrlen(skb);

	err = -EPROTO;

	if (fragment)
		goto out;

	switch (ip_hdr(skb)->protocol) {
	case IPPROTO_TCP:
		err = skb_maybe_pull_tail(skb,
					  off + sizeof(struct tcphdr),
					  MAX_IP_HDR_LEN);
		if (err < 0)
			goto out;

		if (!skb_partial_csum_set(skb, off,
					  offsetof(struct tcphdr, check))) {
			err = -EPROTO;
			goto out;
		}

		if (recalculate)
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr,
						   skb->len - off,
						   IPPROTO_TCP, 0);
		break;
	case IPPROTO_UDP:
		err = skb_maybe_pull_tail(skb,
					  off + sizeof(struct udphdr),
					  MAX_IP_HDR_LEN);
		if (err < 0)
			goto out;

		if (!skb_partial_csum_set(skb, off,
					  offsetof(struct udphdr, check))) {
			err = -EPROTO;
			goto out;
		}

		if (recalculate)
			udp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr,
						   skb->len - off,
						   IPPROTO_UDP, 0);
		break;
	default:
		goto out;
	}

	err = 0;

out:
	return err;
}

/* This value should be large enough to cover a tagged ethernet header plus
 * an IPv6 header, all options, and a maximal TCP or UDP header.
 */
#define MAX_IPV6_HDR_LEN 256

#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))

static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
{
	int err;
	u8 nexthdr;
	unsigned int off;
	unsigned int len;
	bool fragment;
	bool done;

	fragment = false;
	done = false;

	off = sizeof(struct ipv6hdr);

	err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
	if (err < 0)
		goto out;

	nexthdr = ipv6_hdr(skb)->nexthdr;

	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
	while (off <= len && !done) {
		switch (nexthdr) {
		case IPPROTO_DSTOPTS:
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING: {
			struct ipv6_opt_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct ipv6_opt_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			nexthdr = hp->nexthdr;
			off += ipv6_optlen(hp);
			break;
		}
		case IPPROTO_AH: {
			struct ip_auth_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct ip_auth_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
			nexthdr = hp->nexthdr;
			off += ipv6_authlen(hp);
			break;
		}
		case IPPROTO_FRAGMENT: {
			struct frag_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct frag_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct frag_hdr, skb, off);

			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
				fragment = true;

			nexthdr = hp->nexthdr;
			off += sizeof(struct frag_hdr);
			break;
		}
		default:
			done = true;
			break;
		}
	}

	err = -EPROTO;

	if (!done || fragment)
		goto out;

	switch (nexthdr) {
	case IPPROTO_TCP:
		err = skb_maybe_pull_tail(skb,
					  off + sizeof(struct tcphdr),
					  MAX_IPV6_HDR_LEN);
		if (err < 0)
			goto out;

		if (!skb_partial_csum_set(skb, off,
					  offsetof(struct tcphdr, check))) {
			err = -EPROTO;
			goto out;
		}

		if (recalculate)
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 skb->len - off,
						 IPPROTO_TCP, 0);
		break;
	case IPPROTO_UDP:
		err = skb_maybe_pull_tail(skb,
					  off + sizeof(struct udphdr),
					  MAX_IPV6_HDR_LEN);
		if (err < 0)
			goto out;

		if (!skb_partial_csum_set(skb, off,
					  offsetof(struct udphdr, check))) {
			err = -EPROTO;
			goto out;
		}

		if (recalculate)
			udp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 skb->len - off,
						 IPPROTO_UDP, 0);
		break;
	default:
		goto out;
	}

	err = 0;

out:
	return err;
}
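/*
 * Editor's note: the '~' on csum_tcpudp_magic()/csum_ipv6_magic() above
 * deserves a word. Those helpers return the final (folded, inverted)
 * checksum of the pseudo-header alone; storing the bitwise complement
 * seeds the check field with the un-inverted pseudo-header sum. The
 * device (or skb_checksum_help()) then sums everything from csum_start
 * onwards -- seed included -- and stores the folded complement, which
 * yields the correct TCP/UDP checksum. A hypothetical software
 * equivalent of that last step, assuming a writable linear header:
 */
static void example_finish_partial_csum(struct sk_buff *skb)
{
	int start = skb_checksum_start_offset(skb);

	/* sum the seeded check field and all payload after csum_start */
	__wsum csum = skb_checksum(skb, start, skb->len - start, 0);

	/* fold and invert into the checksum field itself */
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) =
		csum_fold(csum);
}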
/**
 * skb_checksum_setup - set up partial checksum offset
 * @skb: the skb to set up
 * @recalculate: if true the pseudo-header checksum will be recalculated
 */
int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
{
	int err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		err = skb_checksum_setup_ip(skb, recalculate);
		break;

	case htons(ETH_P_IPV6):
		err = skb_checksum_setup_ipv6(skb, recalculate);
		break;

	default:
		err = -EPROTO;
		break;
	}

	return err;
}
EXPORT_SYMBOL(skb_checksum_setup);
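/*
 * Editor's note: hypothetical sketch of the intended caller -- a backend
 * (a xen-netback/virtio-style receive path, say) that has rebuilt an skb
 * from an untrusted guest and must sanity-check the guest's checksum
 * claims before injecting the packet into the stack. On error the caller
 * is expected to drop the packet.
 */
static int example_validate_guest_csum(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;	/* nothing claimed, nothing to verify */

	/* Recalculate the pseudo-header sum as well, since the guest's
	 * view of the packet length cannot be trusted either.
	 */
	return skb_checksum_setup(skb, true);
}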
void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
			     skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
	if (head_stolen) {
		skb_release_head_state(skb);
		kmem_cache_free(skbuff_head_cache, skb);
	} else {
		__kfree_skb(skb);
	}
}
EXPORT_SYMBOL(kfree_skb_partial);

/**
 * skb_try_coalesce - try to merge skb into prior one
 * @to: prior buffer
 * @from: buffer to add
 * @fragstolen: set to true if @from's linear data was stolen as a page fragment
 * @delta_truesize: how much more was allocated than was requested
 */
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize)
{
	int i, delta, len = from->len;

	*fragstolen = false;

	if (skb_cloned(to))
		return false;

	if (len <= skb_tailroom(to)) {
		BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
		*delta_truesize = 0;
		return true;
	}

	if (skb_has_frag_list(to) || skb_has_frag_list(from))
		return false;

	if (skb_headlen(from) != 0) {
		struct page *page;
		unsigned int offset;

		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
			return false;

		if (skb_head_is_locked(from))
			return false;

		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));

		page = virt_to_head_page(from->head);
		offset = from->data - (unsigned char *)page_address(page);

		skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
				   page, offset, skb_headlen(from));
		*fragstolen = true;
	} else {
		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
			return false;

		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
	}

	WARN_ON_ONCE(delta < len);

	memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
	       skb_shinfo(from)->frags,
	       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
	skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;

	if (!skb_cloned(from))
		skb_shinfo(from)->nr_frags = 0;

	/* if the skb is not cloned this does nothing
	 * since we set nr_frags to 0.
	 */
	for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
		skb_frag_ref(from, i);

	to->truesize += delta;
	to->len += len;
	to->data_len += len;

	*delta_truesize = delta;
	return true;
}
EXPORT_SYMBOL(skb_try_coalesce);
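/*
 * Editor's note: hypothetical sketch of the pattern used by receive-queue
 * consumers (TCP's coalescing paths are the model): try to merge into the
 * queue tail and, on success, free what is left of the source skb with
 * kfree_skb_partial(). example_queue_coalesce() is illustrative; a real
 * caller would also charge the returned delta to the owning socket's
 * receive memory accounting.
 */
static bool example_queue_coalesce(struct sk_buff_head *queue,
				   struct sk_buff *skb)
{
	struct sk_buff *tail = skb_peek_tail(queue);
	bool fragstolen;
	int delta;

	if (!tail || !skb_try_coalesce(tail, skb, &fragstolen, &delta))
		return false;

	/* The payload now lives in 'tail'; if only the head was stolen,
	 * kfree_skb_partial() releases just the sk_buff shell.
	 */
	kfree_skb_partial(skb, fragstolen);
	return true;
}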
/**
 * skb_scrub_packet - scrub an skb
 *
 * @skb: buffer to clean
 * @xnet: packet is crossing netns
 *
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information has to be cleared during these
 * operations.
 * skb_scrub_packet can also be used to clean an skb before injecting it into
 * another namespace (@xnet == true). We have to clear all information in the
 * skb that could impact namespace isolation.
 */
void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
	if (xnet)
		skb_orphan(skb);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb->local_df = 0;
	skb_dst_drop(skb);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	nf_reset_trace(skb);
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);
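/*
 * Editor's note: hypothetical sketch of a tunnel receive path using
 * skb_scrub_packet(); deciding @xnet from a net_eq() comparison mirrors
 * how the IP tunnel code calls it. The names are illustrative.
 */
static void example_tunnel_rx_scrub(struct sk_buff *skb,
				    struct net_device *tunnel_dev)
{
	bool xnet = !net_eq(dev_net(skb->dev), dev_net(tunnel_dev));

	/* Drop tunnel-scope state (dst, mark, timestamp, conntrack)
	 * before handing the inner packet to the stack.
	 */
	skb_scrub_packet(skb, xnet);
	skb->dev = tunnel_dev;
}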
/**
 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_transport_seglen is used to determine the real size of the
 * individual segments, including Layer4 headers (TCP/UDP).
 *
 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
 */
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int hdr_len;

	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
		hdr_len = tcp_hdrlen(skb);
	else
		hdr_len = sizeof(struct udphdr);
	return hdr_len + shinfo->gso_size;
}
EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
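/*
 * Editor's note: hypothetical sketch of the typical consumer -- a
 * forwarding or queueing path that must decide whether each segment a
 * GSO skb would produce fits a byte limit, without segmenting it first.
 */
static bool example_gso_segs_fit(const struct sk_buff *skb,
				 unsigned int max_seg_len)
{
	if (!skb_is_gso(skb))
		return skb->len <= max_seg_len;

	/* L4 header plus gso_size per segment; the caller must add L2/L3
	 * header lengths if its limit includes them.
	 */
	return skb_gso_transport_seglen(skb) <= max_seg_len;
}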