/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}
skb->dev->name : "<NULL>"); 961da177e4SLinus Torvalds BUG(); 971da177e4SLinus Torvalds } 981da177e4SLinus Torvalds 99f05de73bSJean Sacren static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) 1001da177e4SLinus Torvalds { 101f05de73bSJean Sacren skb_panic(skb, sz, addr, __func__); 1021da177e4SLinus Torvalds } 1031da177e4SLinus Torvalds 104f05de73bSJean Sacren static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) 105f05de73bSJean Sacren { 106f05de73bSJean Sacren skb_panic(skb, sz, addr, __func__); 107f05de73bSJean Sacren } 108c93bdd0eSMel Gorman 109c93bdd0eSMel Gorman /* 110c93bdd0eSMel Gorman * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells 111c93bdd0eSMel Gorman * the caller if emergency pfmemalloc reserves are being used. If it is and 112c93bdd0eSMel Gorman * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves 113c93bdd0eSMel Gorman * may be used. Otherwise, the packet data may be discarded until enough 114c93bdd0eSMel Gorman * memory is free 115c93bdd0eSMel Gorman */ 116c93bdd0eSMel Gorman #define kmalloc_reserve(size, gfp, node, pfmemalloc) \ 117c93bdd0eSMel Gorman __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc) 11861c5e88aSstephen hemminger 11961c5e88aSstephen hemminger static void *__kmalloc_reserve(size_t size, gfp_t flags, int node, 12061c5e88aSstephen hemminger unsigned long ip, bool *pfmemalloc) 121c93bdd0eSMel Gorman { 122c93bdd0eSMel Gorman void *obj; 123c93bdd0eSMel Gorman bool ret_pfmemalloc = false; 124c93bdd0eSMel Gorman 125c93bdd0eSMel Gorman /* 126c93bdd0eSMel Gorman * Try a regular allocation, when that fails and we're not entitled 127c93bdd0eSMel Gorman * to the reserves, fail. 128c93bdd0eSMel Gorman */ 129c93bdd0eSMel Gorman obj = kmalloc_node_track_caller(size, 130c93bdd0eSMel Gorman flags | __GFP_NOMEMALLOC | __GFP_NOWARN, 131c93bdd0eSMel Gorman node); 132c93bdd0eSMel Gorman if (obj || !(gfp_pfmemalloc_allowed(flags))) 133c93bdd0eSMel Gorman goto out; 134c93bdd0eSMel Gorman 135c93bdd0eSMel Gorman /* Try again but now we are using pfmemalloc reserves */ 136c93bdd0eSMel Gorman ret_pfmemalloc = true; 137c93bdd0eSMel Gorman obj = kmalloc_node_track_caller(size, flags, node); 138c93bdd0eSMel Gorman 139c93bdd0eSMel Gorman out: 140c93bdd0eSMel Gorman if (pfmemalloc) 141c93bdd0eSMel Gorman *pfmemalloc = ret_pfmemalloc; 142c93bdd0eSMel Gorman 143c93bdd0eSMel Gorman return obj; 144c93bdd0eSMel Gorman } 145c93bdd0eSMel Gorman 1461da177e4SLinus Torvalds /* Allocate a new skbuff. We do this ourselves so we can fill in a few 1471da177e4SLinus Torvalds * 'private' fields and also do memory statistics to find all the 1481da177e4SLinus Torvalds * [BEEP] leaks. 1491da177e4SLinus Torvalds * 1501da177e4SLinus Torvalds */ 1511da177e4SLinus Torvalds 1520ebd0ac5SPatrick McHardy struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node) 1530ebd0ac5SPatrick McHardy { 1540ebd0ac5SPatrick McHardy struct sk_buff *skb; 1550ebd0ac5SPatrick McHardy 1560ebd0ac5SPatrick McHardy /* Get the HEAD */ 1570ebd0ac5SPatrick McHardy skb = kmem_cache_alloc_node(skbuff_head_cache, 1580ebd0ac5SPatrick McHardy gfp_mask & ~__GFP_DMA, node); 1590ebd0ac5SPatrick McHardy if (!skb) 1600ebd0ac5SPatrick McHardy goto out; 1610ebd0ac5SPatrick McHardy 1620ebd0ac5SPatrick McHardy /* 1630ebd0ac5SPatrick McHardy * Only clear those fields we need to clear, not those that we will 1640ebd0ac5SPatrick McHardy * actually initialise below. 

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */

struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
{
	struct sk_buff *skb;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(skbuff_head_cache,
				    gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->head = NULL;
	skb->truesize = sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);

	skb->mac_header = (typeof(skb->mac_header))~0U;
out:
	return skb;
}

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
		child->pfmemalloc = pfmemalloc;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
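
/*
 * Illustrative sketch, not part of the original file: typical use of the
 * alloc_skb() wrapper around __alloc_skb(). The 64-byte headroom and the
 * function name are hypothetical.
 */
static struct sk_buff *example_build_packet(const void *payload,
					    unsigned int len)
{
	struct sk_buff *skb = alloc_skb(64 + len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, 64);			/* headroom for later skb_push() */
	memcpy(skb_put(skb, len), payload, len);	/* extend tail, then fill it */
	return skb;
}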

/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of fragment, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator.
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	skb->head_frag = frag_size != 0;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}
EXPORT_SYMBOL(build_skb);
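
/*
 * Illustrative sketch, not part of the original file: an RX path turning a
 * raw page fragment into an skb with build_skb(), following the layout the
 * Notes above require. It assumes @frag was carved with exactly this
 * fragsz (e.g. by netdev_alloc_frag()); the function name is hypothetical.
 */
static struct sk_buff *example_rx_to_skb(void *frag, unsigned int pkt_len)
{
	struct sk_buff *skb;

	skb = build_skb(frag, SKB_DATA_ALIGN(NET_SKB_PAD + pkt_len) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (!skb)
		return NULL;
	skb_reserve(skb, NET_SKB_PAD);	/* skip the headroom the driver left */
	skb_put(skb, pkt_len);		/* mark the received bytes as data */
	return skb;
}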

struct netdev_alloc_cache {
	struct page_frag	frag;
	/* we maintain a pagecount bias, so that we don't dirty cache line
	 * containing page->_count every time we allocate a fragment.
	 */
	unsigned int		pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct netdev_alloc_cache *nc;
	void *data = NULL;
	int order;
	unsigned long flags;

	local_irq_save(flags);
	nc = &__get_cpu_var(netdev_alloc_cache);
	if (unlikely(!nc->frag.page)) {
refill:
		for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
			gfp_t gfp = gfp_mask;

			if (order)
				gfp |= __GFP_COMP | __GFP_NOWARN;
			nc->frag.page = alloc_pages(gfp, order);
			if (likely(nc->frag.page))
				break;
			if (--order < 0)
				goto end;
		}
		nc->frag.size = PAGE_SIZE << order;
recycle:
		atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
		nc->frag.offset = 0;
	}

	if (nc->frag.offset + fragsz > nc->frag.size) {
		/* avoid unnecessary locked operations if possible */
		if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
		    atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
			goto recycle;
		goto refill;
	}

	data = page_address(nc->frag.page) + nc->frag.offset;
	nc->frag.offset += fragsz;
	nc->pagecnt_bias--;
end:
	local_irq_restore(flags);
	return data;
}

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(netdev_alloc_frag);

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
		void *data;

		if (sk_memalloc_socks())
			gfp_mask |= __GFP_MEMALLOC;

		data = __netdev_alloc_frag(fragsz, gfp_mask);

		if (likely(data)) {
			skb = build_skb(data, fragsz);
			if (unlikely(!skb))
				put_page(virt_to_head_page(data));
		}
	} else {
		skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
				  SKB_ALLOC_RX, NUMA_NO_NODE);
	}
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
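
/*
 * Illustrative sketch, not part of the original file: a copying RX handler
 * built on the netdev_alloc_skb() wrapper. "ring_buf" and the function
 * name are hypothetical, and eth_type_trans() assumes
 * <linux/etherdevice.h> is available.
 */
static int example_rx_one(struct net_device *dev, void *ring_buf,
			  unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (!skb)
		return -ENOMEM;	/* leave the frame in the ring, retry later */
	memcpy(skb_put(skb, len), ring_buf, len);	/* copy the frame in */
	skb->protocol = eth_type_trans(skb, dev);	/* pulls the MAC header */
	return netif_rx(skb);
}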

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);
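
/*
 * Illustrative sketch, not part of the original file: attaching a received
 * page to a paged skb, as a scatter-gather capable driver might. Assumes
 * nr_frags < MAX_SKB_FRAGS and that the caller already holds a reference
 * on @page; PAGE_SIZE as truesize assumes the fragment owns a whole page.
 */
static void example_add_page(struct sk_buff *skb, struct page *page,
			     int offset, int len)
{
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
			len, PAGE_SIZE);
}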

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	if (skb->head_frag)
		put_page(virt_to_head_page(skb->head));
	else
		kfree(skb->head);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				skb_frag_unref(skb, i);
		}

		/*
		 * If skb buf is from userspace, we need to notify the caller
		 * the lower device DMA has done;
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			struct ubuf_info *uarg;

			uarg = skb_shinfo(skb)->destructor_arg;
			if (uarg->callback)
				uarg->callback(uarg, true);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		skb_free_head(skb);
	}
}

/*
 * Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);

/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = skb_shinfo(skb)->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, false);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	}
}
EXPORT_SYMBOL(skb_tx_error);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb, but kfree_skb assumes that the
 *	frame is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
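
/*
 * Illustrative sketch, not part of the original file: choosing between
 * kfree_skb() and consume_skb() in a hypothetical tx-completion path, so
 * that drop-tracing tools only see real drops.
 */
static void example_tx_done(struct sk_buff *skb, bool sent_ok)
{
	if (sent_ok)
		consume_skb(skb);	/* normal end of life, not a drop */
	else
		kfree_skb(skb);		/* counted/traced as a dropped frame */
}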

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp		= old->tstamp;
	new->dev		= old->dev;
	new->transport_header	= old->transport_header;
	new->network_header	= old->network_header;
	new->mac_header		= old->mac_header;
	new->inner_protocol	= old->inner_protocol;
	new->inner_transport_header = old->inner_transport_header;
	new->inner_network_header = old->inner_network_header;
	new->inner_mac_header	= old->inner_mac_header;
	skb_dst_copy(new, old);
	new->rxhash		= old->rxhash;
	new->ooo_okay		= old->ooo_okay;
	new->l4_rxhash		= old->l4_rxhash;
	new->no_fcs		= old->no_fcs;
	new->encapsulation	= old->encapsulation;
#ifdef CONFIG_XFRM
	new->sp			= secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum		= old->csum;
	new->local_df		= old->local_df;
	new->pkt_type		= old->pkt_type;
	new->ip_summed		= old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority		= old->priority;
#if IS_ENABLED(CONFIG_IP_VS)
	new->ipvs_property	= old->ipvs_property;
#endif
	new->pfmemalloc		= old->pfmemalloc;
	new->protocol		= old->protocol;
	new->mark		= old->mark;
	new->skb_iif		= old->skb_iif;
	__nf_copy(new, old);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	new->nf_trace		= old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index		= old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd		= old->tc_verd;
#endif
#endif
	new->vlan_proto		= old->vlan_proto;
	new->vlan_tci		= old->vlan_tci;

	skb_copy_secmark(new, old);

#ifdef CONFIG_NET_RX_BUSY_POLL
	new->napi_id		= old->napi_id;
#endif
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph - morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
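
/*
 * Illustrative sketch, not part of the original file: skb_morph() lets a
 * caller reuse an skb it already owns as the clone target, e.g. making a
 * queued placeholder skb take over a newly arrived packet's data. Both
 * names here are hypothetical.
 */
static void example_replace_contents(struct sk_buff *placeholder,
				     struct sk_buff *pkt)
{
	/* placeholder's old state is released; it now shares pkt's data */
	skb_morph(placeholder, pkt);
	consume_skb(pkt);	/* drop our reference to pkt's shell only */
}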

/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_atomic(skb_frag_page(f));
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_atomic(vaddr);
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg, false);

	/* skb frags point to kernel buffers */
	for (i = num_frags - 1; i >= 0; i--) {
		__skb_fill_page_desc(skb, i, head, 0,
				     skb_shinfo(skb)->frags[i].size);
		head = (struct page *)page_private(head);
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
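
/*
 * Illustrative sketch, not part of the original file: cloning lets one
 * payload be queued in two places at once; writers must still check
 * skb_cloned() before touching the shared data. "mirror" is hypothetical.
 */
static int example_mirror_xmit(struct sk_buff *skb, struct net_device *mirror)
{
	struct sk_buff *copy = skb_clone(skb, GFP_ATOMIC);

	if (!copy)
		return -ENOMEM;
	copy->dev = mirror;		/* private shell, shared data */
	return dev_queue_xmit(copy);	/* original skb continues unchanged */
}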

static void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header   += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As by-product this function converts non-linear &sk_buff to linear
 *	one, so that &sk_buff becomes completely private and caller is allowed
 *	to modify all the data of returned buffer. This means that this
 *	function is not recommended for use in circumstances when only
 *	header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
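
/*
 * Illustrative sketch, not part of the original file: taking a private,
 * linear copy before rewriting payload bytes. For header-only edits,
 * __pskb_copy() below avoids copying the paged data. The edit itself is
 * a placeholder and assumes skb->len >= 1.
 */
static struct sk_buff *example_private_edit(struct sk_buff *skb)
{
	struct sk_buff *priv = skb_copy(skb, GFP_ATOMIC);

	if (!priv)
		return NULL;
	priv->data[0] ^= 0x1;	/* safe: data is linear and unshared now */
	consume_skb(skb);	/* done with the original reference */
	return priv;
}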

/**
 *	__pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
{
	unsigned int size = skb_headlen(skb) + headroom;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len  = skb->data_len;
	n->len	     = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy);

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success or error,
 *	if expansion failed. In the last case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + skb_end_offset(skb) + ntail;
	long off;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		/* copy this zero copy skb frags */
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head     = data;
	skb->head_frag = 0;
	skb->data    += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end      = size;
	off           = nhead;
#else
	skb->end      = skb->head + size;
#endif
	skb->tail    += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned   = 0;
	skb->hdr_len  = 0;
	skb->nohdr    = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
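
/*
 * Illustrative sketch, not part of the original file: growing headroom in
 * place before pushing a hypothetical 8-byte tunnel header. Assumes the
 * caller holds the only reference (pskb_expand_head() BUGs on shared
 * skbs); stale pointers into the header must be re-derived afterwards,
 * per the kernel-doc above.
 */
static int example_push_tunnel_hdr(struct sk_buff *skb)
{
	if (skb_headroom(skb) < 8 &&
	    pskb_expand_head(skb, SKB_DATA_ALIGN(8), 0, GFP_ATOMIC))
		return -ENOMEM;
	memset(skb_push(skb, 8), 0, 8);	/* skb->data is valid again here */
	return 0;
}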
11421da177e4SLinus Torvalds * 11431da177e4SLinus Torvalds * You must pass %GFP_ATOMIC as the allocation priority if this function 11441da177e4SLinus Torvalds * is called from an interrupt. 11451da177e4SLinus Torvalds */ 11461da177e4SLinus Torvalds struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 114786a76cafSVictor Fusco int newheadroom, int newtailroom, 1148dd0fc66fSAl Viro gfp_t gfp_mask) 11491da177e4SLinus Torvalds { 11501da177e4SLinus Torvalds /* 11511da177e4SLinus Torvalds * Allocate the copy buffer 11521da177e4SLinus Torvalds */ 1153c93bdd0eSMel Gorman struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, 1154c93bdd0eSMel Gorman gfp_mask, skb_alloc_rx_flag(skb), 1155c93bdd0eSMel Gorman NUMA_NO_NODE); 1156efd1e8d5SPatrick McHardy int oldheadroom = skb_headroom(skb); 11571da177e4SLinus Torvalds int head_copy_len, head_copy_off; 11581da177e4SLinus Torvalds 11591da177e4SLinus Torvalds if (!n) 11601da177e4SLinus Torvalds return NULL; 11611da177e4SLinus Torvalds 11621da177e4SLinus Torvalds skb_reserve(n, newheadroom); 11631da177e4SLinus Torvalds 11641da177e4SLinus Torvalds /* Set the tail pointer and length */ 11651da177e4SLinus Torvalds skb_put(n, skb->len); 11661da177e4SLinus Torvalds 1167efd1e8d5SPatrick McHardy head_copy_len = oldheadroom; 11681da177e4SLinus Torvalds head_copy_off = 0; 11691da177e4SLinus Torvalds if (newheadroom <= head_copy_len) 11701da177e4SLinus Torvalds head_copy_len = newheadroom; 11711da177e4SLinus Torvalds else 11721da177e4SLinus Torvalds head_copy_off = newheadroom - head_copy_len; 11731da177e4SLinus Torvalds 11741da177e4SLinus Torvalds /* Copy the linear header and data. */ 11751da177e4SLinus Torvalds if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 11761da177e4SLinus Torvalds skb->len + head_copy_len)) 11771da177e4SLinus Torvalds BUG(); 11781da177e4SLinus Torvalds 11791da177e4SLinus Torvalds copy_skb_header(n, skb); 11801da177e4SLinus Torvalds 1181030737bcSEric Dumazet skb_headers_offset_update(n, newheadroom - oldheadroom); 1182efd1e8d5SPatrick McHardy 11831da177e4SLinus Torvalds return n; 11841da177e4SLinus Torvalds } 1185b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_expand); 11861da177e4SLinus Torvalds 11871da177e4SLinus Torvalds /** 11881da177e4SLinus Torvalds * skb_pad - zero pad the tail of an skb 11891da177e4SLinus Torvalds * @skb: buffer to pad 11901da177e4SLinus Torvalds * @pad: space to pad 11911da177e4SLinus Torvalds * 11921da177e4SLinus Torvalds * Ensure that a buffer is followed by a padding area that is zero 11931da177e4SLinus Torvalds * filled. Used by network drivers which may DMA or transfer data 11941da177e4SLinus Torvalds * beyond the buffer end onto the wire. 11951da177e4SLinus Torvalds * 11965b057c6bSHerbert Xu * May return an error in out-of-memory cases. The skb is freed on error. 11971da177e4SLinus Torvalds */ 11981da177e4SLinus Torvalds 11995b057c6bSHerbert Xu int skb_pad(struct sk_buff *skb, int pad) 12001da177e4SLinus Torvalds { 12015b057c6bSHerbert Xu int err; 12025b057c6bSHerbert Xu int ntail; 12031da177e4SLinus Torvalds 12041da177e4SLinus Torvalds /* If the skbuff is non-linear, tailroom is always zero..
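 * (all the data beyond the linear head lives in page frags or a frag
 * list, so a non-linear buffer always takes the expand-and-linearize
 * path below).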
*/ 12055b057c6bSHerbert Xu if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 12061da177e4SLinus Torvalds memset(skb->data+skb->len, 0, pad); 12075b057c6bSHerbert Xu return 0; 12081da177e4SLinus Torvalds } 12091da177e4SLinus Torvalds 12104305b541SArnaldo Carvalho de Melo ntail = skb->data_len + pad - (skb->end - skb->tail); 12115b057c6bSHerbert Xu if (likely(skb_cloned(skb) || ntail > 0)) { 12125b057c6bSHerbert Xu err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 12135b057c6bSHerbert Xu if (unlikely(err)) 12145b057c6bSHerbert Xu goto free_skb; 12155b057c6bSHerbert Xu } 12165b057c6bSHerbert Xu 12175b057c6bSHerbert Xu /* FIXME: The use of this function with non-linear skb's really needs 12185b057c6bSHerbert Xu * to be audited. 12195b057c6bSHerbert Xu */ 12205b057c6bSHerbert Xu err = skb_linearize(skb); 12215b057c6bSHerbert Xu if (unlikely(err)) 12225b057c6bSHerbert Xu goto free_skb; 12235b057c6bSHerbert Xu 12245b057c6bSHerbert Xu memset(skb->data + skb->len, 0, pad); 12255b057c6bSHerbert Xu return 0; 12265b057c6bSHerbert Xu 12275b057c6bSHerbert Xu free_skb: 12281da177e4SLinus Torvalds kfree_skb(skb); 12295b057c6bSHerbert Xu return err; 12301da177e4SLinus Torvalds } 1231b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_pad); 12321da177e4SLinus Torvalds 12330dde3e16SIlpo Järvinen /** 12340c7ddf36SMathias Krause * pskb_put - add data to the tail of a potentially fragmented buffer 12350c7ddf36SMathias Krause * @skb: start of the buffer to use 12360c7ddf36SMathias Krause * @tail: tail fragment of the buffer to use 12370c7ddf36SMathias Krause * @len: amount of data to add 12380c7ddf36SMathias Krause * 12390c7ddf36SMathias Krause * This function extends the used data area of the potentially 12400c7ddf36SMathias Krause * fragmented buffer. @tail must be the last fragment of @skb -- or 12410c7ddf36SMathias Krause * @skb itself. If this would exceed the total buffer size the kernel 12420c7ddf36SMathias Krause * will panic. A pointer to the first byte of the extra data is 12430c7ddf36SMathias Krause * returned. 12440c7ddf36SMathias Krause */ 12450c7ddf36SMathias Krause 12460c7ddf36SMathias Krause unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) 12470c7ddf36SMathias Krause { 12480c7ddf36SMathias Krause if (tail != skb) { 12490c7ddf36SMathias Krause skb->data_len += len; 12500c7ddf36SMathias Krause skb->len += len; 12510c7ddf36SMathias Krause } 12520c7ddf36SMathias Krause return skb_put(tail, len); 12530c7ddf36SMathias Krause } 12540c7ddf36SMathias Krause EXPORT_SYMBOL_GPL(pskb_put); 12550c7ddf36SMathias Krause 12560c7ddf36SMathias Krause /** 12570dde3e16SIlpo Järvinen * skb_put - add data to a buffer 12580dde3e16SIlpo Järvinen * @skb: buffer to use 12590dde3e16SIlpo Järvinen * @len: amount of data to add 12600dde3e16SIlpo Järvinen * 12610dde3e16SIlpo Järvinen * This function extends the used data area of the buffer. If this would 12620dde3e16SIlpo Järvinen * exceed the total buffer size the kernel will panic. A pointer to the 12630dde3e16SIlpo Järvinen * first byte of the extra data is returned. 
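 *
 * Editorial example (not part of the original source; 'payload' and
 * 'payload_len' are hypothetical):
 *
 *	struct sk_buff *skb = alloc_skb(64 + payload_len, GFP_ATOMIC);
 *
 *	if (skb) {
 *		skb_reserve(skb, 64);	(reserve headroom)
 *		memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	}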
12640dde3e16SIlpo Järvinen */ 12650dde3e16SIlpo Järvinen unsigned char *skb_put(struct sk_buff *skb, unsigned int len) 12660dde3e16SIlpo Järvinen { 12670dde3e16SIlpo Järvinen unsigned char *tmp = skb_tail_pointer(skb); 12680dde3e16SIlpo Järvinen SKB_LINEAR_ASSERT(skb); 12690dde3e16SIlpo Järvinen skb->tail += len; 12700dde3e16SIlpo Järvinen skb->len += len; 12710dde3e16SIlpo Järvinen if (unlikely(skb->tail > skb->end)) 12720dde3e16SIlpo Järvinen skb_over_panic(skb, len, __builtin_return_address(0)); 12730dde3e16SIlpo Järvinen return tmp; 12740dde3e16SIlpo Järvinen } 12750dde3e16SIlpo Järvinen EXPORT_SYMBOL(skb_put); 12760dde3e16SIlpo Järvinen 12776be8ac2fSIlpo Järvinen /** 1278c2aa270aSIlpo Järvinen * skb_push - add data to the start of a buffer 1279c2aa270aSIlpo Järvinen * @skb: buffer to use 1280c2aa270aSIlpo Järvinen * @len: amount of data to add 1281c2aa270aSIlpo Järvinen * 1282c2aa270aSIlpo Järvinen * This function extends the used data area of the buffer at the buffer 1283c2aa270aSIlpo Järvinen * start. If this would exceed the total buffer headroom the kernel will 1284c2aa270aSIlpo Järvinen * panic. A pointer to the first byte of the extra data is returned. 1285c2aa270aSIlpo Järvinen */ 1286c2aa270aSIlpo Järvinen unsigned char *skb_push(struct sk_buff *skb, unsigned int len) 1287c2aa270aSIlpo Järvinen { 1288c2aa270aSIlpo Järvinen skb->data -= len; 1289c2aa270aSIlpo Järvinen skb->len += len; 1290c2aa270aSIlpo Järvinen if (unlikely(skb->data<skb->head)) 1291c2aa270aSIlpo Järvinen skb_under_panic(skb, len, __builtin_return_address(0)); 1292c2aa270aSIlpo Järvinen return skb->data; 1293c2aa270aSIlpo Järvinen } 1294c2aa270aSIlpo Järvinen EXPORT_SYMBOL(skb_push); 1295c2aa270aSIlpo Järvinen 1296c2aa270aSIlpo Järvinen /** 12976be8ac2fSIlpo Järvinen * skb_pull - remove data from the start of a buffer 12986be8ac2fSIlpo Järvinen * @skb: buffer to use 12996be8ac2fSIlpo Järvinen * @len: amount of data to remove 13006be8ac2fSIlpo Järvinen * 13016be8ac2fSIlpo Järvinen * This function removes data from the start of a buffer, returning 13026be8ac2fSIlpo Järvinen * the memory to the headroom. A pointer to the next data in the buffer 13036be8ac2fSIlpo Järvinen * is returned. Once the data has been pulled future pushes will overwrite 13046be8ac2fSIlpo Järvinen * the old data. 13056be8ac2fSIlpo Järvinen */ 13066be8ac2fSIlpo Järvinen unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) 13076be8ac2fSIlpo Järvinen { 130847d29646SDavid S. Miller return skb_pull_inline(skb, len); 13096be8ac2fSIlpo Järvinen } 13106be8ac2fSIlpo Järvinen EXPORT_SYMBOL(skb_pull); 13116be8ac2fSIlpo Järvinen 1312419ae74eSIlpo Järvinen /** 1313419ae74eSIlpo Järvinen * skb_trim - remove end from a buffer 1314419ae74eSIlpo Järvinen * @skb: buffer to alter 1315419ae74eSIlpo Järvinen * @len: new length 1316419ae74eSIlpo Järvinen * 1317419ae74eSIlpo Järvinen * Cut the length of a buffer down by removing data from the tail. If 1318419ae74eSIlpo Järvinen * the buffer is already under the length specified it is not modified. 1319419ae74eSIlpo Järvinen * The skb must be linear. 1320419ae74eSIlpo Järvinen */ 1321419ae74eSIlpo Järvinen void skb_trim(struct sk_buff *skb, unsigned int len) 1322419ae74eSIlpo Järvinen { 1323419ae74eSIlpo Järvinen if (skb->len > len) 1324419ae74eSIlpo Järvinen __skb_trim(skb, len); 1325419ae74eSIlpo Järvinen } 1326419ae74eSIlpo Järvinen EXPORT_SYMBOL(skb_trim); 1327419ae74eSIlpo Järvinen 13283cc0e873SHerbert Xu /* Trims skb to length len. It can change skb pointers. 
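 * Editorial note (not part of the original source): callers normally
 * reach this through the pskb_trim() wrapper, e.g.
 *
 *	if (pskb_trim(skb, new_len))	('new_len' is hypothetical)
 *		goto drop;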
13291da177e4SLinus Torvalds */ 13301da177e4SLinus Torvalds 13313cc0e873SHerbert Xu int ___pskb_trim(struct sk_buff *skb, unsigned int len) 13321da177e4SLinus Torvalds { 133327b437c8SHerbert Xu struct sk_buff **fragp; 133427b437c8SHerbert Xu struct sk_buff *frag; 13351da177e4SLinus Torvalds int offset = skb_headlen(skb); 13361da177e4SLinus Torvalds int nfrags = skb_shinfo(skb)->nr_frags; 13371da177e4SLinus Torvalds int i; 133827b437c8SHerbert Xu int err; 133927b437c8SHerbert Xu 134027b437c8SHerbert Xu if (skb_cloned(skb) && 134127b437c8SHerbert Xu unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 134227b437c8SHerbert Xu return err; 13431da177e4SLinus Torvalds 1344f4d26fb3SHerbert Xu i = 0; 1345f4d26fb3SHerbert Xu if (offset >= len) 1346f4d26fb3SHerbert Xu goto drop_pages; 1347f4d26fb3SHerbert Xu 1348f4d26fb3SHerbert Xu for (; i < nfrags; i++) { 13499e903e08SEric Dumazet int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 135027b437c8SHerbert Xu 135127b437c8SHerbert Xu if (end < len) { 13521da177e4SLinus Torvalds offset = end; 135327b437c8SHerbert Xu continue; 13541da177e4SLinus Torvalds } 13551da177e4SLinus Torvalds 13569e903e08SEric Dumazet skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 135727b437c8SHerbert Xu 1358f4d26fb3SHerbert Xu drop_pages: 135927b437c8SHerbert Xu skb_shinfo(skb)->nr_frags = i; 136027b437c8SHerbert Xu 136127b437c8SHerbert Xu for (; i < nfrags; i++) 1362ea2ab693SIan Campbell skb_frag_unref(skb, i); 136327b437c8SHerbert Xu 136421dc3301SDavid S. Miller if (skb_has_frag_list(skb)) 136527b437c8SHerbert Xu skb_drop_fraglist(skb); 1366f4d26fb3SHerbert Xu goto done; 136727b437c8SHerbert Xu } 136827b437c8SHerbert Xu 136927b437c8SHerbert Xu for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 137027b437c8SHerbert Xu fragp = &frag->next) { 137127b437c8SHerbert Xu int end = offset + frag->len; 137227b437c8SHerbert Xu 137327b437c8SHerbert Xu if (skb_shared(frag)) { 137427b437c8SHerbert Xu struct sk_buff *nfrag; 137527b437c8SHerbert Xu 137627b437c8SHerbert Xu nfrag = skb_clone(frag, GFP_ATOMIC); 137727b437c8SHerbert Xu if (unlikely(!nfrag)) 137827b437c8SHerbert Xu return -ENOMEM; 137927b437c8SHerbert Xu 138027b437c8SHerbert Xu nfrag->next = frag->next; 138185bb2a60SEric Dumazet consume_skb(frag); 138227b437c8SHerbert Xu frag = nfrag; 138327b437c8SHerbert Xu *fragp = frag; 138427b437c8SHerbert Xu } 138527b437c8SHerbert Xu 138627b437c8SHerbert Xu if (end < len) { 138727b437c8SHerbert Xu offset = end; 138827b437c8SHerbert Xu continue; 138927b437c8SHerbert Xu } 139027b437c8SHerbert Xu 139127b437c8SHerbert Xu if (end > len && 139227b437c8SHerbert Xu unlikely((err = pskb_trim(frag, len - offset)))) 139327b437c8SHerbert Xu return err; 139427b437c8SHerbert Xu 139527b437c8SHerbert Xu if (frag->next) 139627b437c8SHerbert Xu skb_drop_list(&frag->next); 139727b437c8SHerbert Xu break; 139827b437c8SHerbert Xu } 139927b437c8SHerbert Xu 1400f4d26fb3SHerbert Xu done: 140127b437c8SHerbert Xu if (len > skb_headlen(skb)) { 14021da177e4SLinus Torvalds skb->data_len -= skb->len - len; 14031da177e4SLinus Torvalds skb->len = len; 14041da177e4SLinus Torvalds } else { 14051da177e4SLinus Torvalds skb->len = len; 14061da177e4SLinus Torvalds skb->data_len = 0; 140727a884dcSArnaldo Carvalho de Melo skb_set_tail_pointer(skb, len); 14081da177e4SLinus Torvalds } 14091da177e4SLinus Torvalds 14101da177e4SLinus Torvalds return 0; 14111da177e4SLinus Torvalds } 1412b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(___pskb_trim); 14131da177e4SLinus Torvalds 14141da177e4SLinus Torvalds /** 14151da177e4SLinus Torvalds * __pskb_pull_tail - advance tail of skb header 14161da177e4SLinus Torvalds * @skb: buffer to reallocate 14171da177e4SLinus Torvalds * @delta: number of bytes to advance tail 14181da177e4SLinus Torvalds * 14191da177e4SLinus Torvalds * This function makes sense only on a fragmented &sk_buff: 14201da177e4SLinus Torvalds * it expands the header, moving its tail forward and copying the 14211da177e4SLinus Torvalds * necessary data from the fragmented part. 14221da177e4SLinus Torvalds * 14231da177e4SLinus Torvalds * The &sk_buff MUST have a reference count of 1. 14241da177e4SLinus Torvalds * 14251da177e4SLinus Torvalds * Returns %NULL (and the &sk_buff is unchanged) if the pull failed, 14261da177e4SLinus Torvalds * or the value of the new tail of the skb on success. 14271da177e4SLinus Torvalds * 14281da177e4SLinus Torvalds * All the pointers pointing into the skb header may change and must be 14291da177e4SLinus Torvalds * reloaded after a call to this function. 14301da177e4SLinus Torvalds */ 14311da177e4SLinus Torvalds 14321da177e4SLinus Torvalds /* Moves the tail of the skb head forward, copying data from the 14331da177e4SLinus Torvalds * fragmented part when necessary. 14341da177e4SLinus Torvalds * 1. It may fail due to malloc failure. 14351da177e4SLinus Torvalds * 2. It may change skb pointers. 14361da177e4SLinus Torvalds * 14371da177e4SLinus Torvalds * It is pretty complicated. Luckily, it is called only in exceptional cases. 14381da177e4SLinus Torvalds */ 14391da177e4SLinus Torvalds unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) 14401da177e4SLinus Torvalds { 14411da177e4SLinus Torvalds /* If the skb does not have enough free space at the tail, get a new one 14421da177e4SLinus Torvalds * plus 128 bytes for future expansions. If we have enough 14431da177e4SLinus Torvalds * room at the tail, reallocate without expansion only if skb is cloned. 14441da177e4SLinus Torvalds */ 14454305b541SArnaldo Carvalho de Melo int i, k, eat = (skb->tail + delta) - skb->end; 14461da177e4SLinus Torvalds 14471da177e4SLinus Torvalds if (eat > 0 || skb_cloned(skb)) { 14481da177e4SLinus Torvalds if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 14491da177e4SLinus Torvalds GFP_ATOMIC)) 14501da177e4SLinus Torvalds return NULL; 14511da177e4SLinus Torvalds } 14521da177e4SLinus Torvalds 145327a884dcSArnaldo Carvalho de Melo if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)) 14541da177e4SLinus Torvalds BUG(); 14551da177e4SLinus Torvalds 14561da177e4SLinus Torvalds /* Optimization: no fragments, no reason to preestimate 14571da177e4SLinus Torvalds * the size of pulled pages. Superb. 14581da177e4SLinus Torvalds */ 145921dc3301SDavid S. Miller if (!skb_has_frag_list(skb)) 14601da177e4SLinus Torvalds goto pull_pages; 14611da177e4SLinus Torvalds 14621da177e4SLinus Torvalds /* Estimate the size of pulled pages. */ 14631da177e4SLinus Torvalds eat = delta; 14641da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 14659e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 14669e903e08SEric Dumazet 14679e903e08SEric Dumazet if (size >= eat) 14681da177e4SLinus Torvalds goto pull_pages; 14699e903e08SEric Dumazet eat -= size; 14701da177e4SLinus Torvalds } 14711da177e4SLinus Torvalds 14721da177e4SLinus Torvalds /* If we need to update the frag list, we are in trouble.
14731da177e4SLinus Torvalds * Certainly, it is possible to add an offset to the skb data, 14741da177e4SLinus Torvalds * but taking into account that pulling is expected to 14751da177e4SLinus Torvalds * be a very rare operation, it is worth fighting against 14761da177e4SLinus Torvalds * further bloating of the skb head and crucifying ourselves here instead. 14771da177e4SLinus Torvalds * Pure masochism, indeed. 8)8) 14781da177e4SLinus Torvalds */ 14791da177e4SLinus Torvalds if (eat) { 14801da177e4SLinus Torvalds struct sk_buff *list = skb_shinfo(skb)->frag_list; 14811da177e4SLinus Torvalds struct sk_buff *clone = NULL; 14821da177e4SLinus Torvalds struct sk_buff *insp = NULL; 14831da177e4SLinus Torvalds 14841da177e4SLinus Torvalds do { 148509a62660SKris Katterjohn BUG_ON(!list); 14861da177e4SLinus Torvalds 14871da177e4SLinus Torvalds if (list->len <= eat) { 14881da177e4SLinus Torvalds /* Eaten as a whole. */ 14891da177e4SLinus Torvalds eat -= list->len; 14901da177e4SLinus Torvalds list = list->next; 14911da177e4SLinus Torvalds insp = list; 14921da177e4SLinus Torvalds } else { 14931da177e4SLinus Torvalds /* Eaten partially. */ 14941da177e4SLinus Torvalds 14951da177e4SLinus Torvalds if (skb_shared(list)) { 14961da177e4SLinus Torvalds /* Sucks! We need to fork the list. :-( */ 14971da177e4SLinus Torvalds clone = skb_clone(list, GFP_ATOMIC); 14981da177e4SLinus Torvalds if (!clone) 14991da177e4SLinus Torvalds return NULL; 15001da177e4SLinus Torvalds insp = list->next; 15011da177e4SLinus Torvalds list = clone; 15021da177e4SLinus Torvalds } else { 15031da177e4SLinus Torvalds /* This may be pulled without 15041da177e4SLinus Torvalds * problems. */ 15051da177e4SLinus Torvalds insp = list; 15061da177e4SLinus Torvalds } 15071da177e4SLinus Torvalds if (!pskb_pull(list, eat)) { 15081da177e4SLinus Torvalds kfree_skb(clone); 15091da177e4SLinus Torvalds return NULL; 15101da177e4SLinus Torvalds } 15111da177e4SLinus Torvalds break; 15121da177e4SLinus Torvalds } 15131da177e4SLinus Torvalds } while (eat); 15141da177e4SLinus Torvalds 15151da177e4SLinus Torvalds /* Free the pulled-out fragments. */ 15161da177e4SLinus Torvalds while ((list = skb_shinfo(skb)->frag_list) != insp) { 15171da177e4SLinus Torvalds skb_shinfo(skb)->frag_list = list->next; 15181da177e4SLinus Torvalds kfree_skb(list); 15191da177e4SLinus Torvalds } 15201da177e4SLinus Torvalds /* And insert the new clone at the head. */ 15211da177e4SLinus Torvalds if (clone) { 15221da177e4SLinus Torvalds clone->next = list; 15231da177e4SLinus Torvalds skb_shinfo(skb)->frag_list = clone; 15241da177e4SLinus Torvalds } 15251da177e4SLinus Torvalds } 15261da177e4SLinus Torvalds /* Success! Now we may commit changes to the skb data.
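 * (At this point the frag list has been trimmed, and a forked clone
 * has been spliced in if a partially-eaten fragment was shared; all
 * that remains is to shrink the page-frag array and advance the tail.)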
*/ 15271da177e4SLinus Torvalds 15281da177e4SLinus Torvalds pull_pages: 15291da177e4SLinus Torvalds eat = delta; 15301da177e4SLinus Torvalds k = 0; 15311da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 15329e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 15339e903e08SEric Dumazet 15349e903e08SEric Dumazet if (size <= eat) { 1535ea2ab693SIan Campbell skb_frag_unref(skb, i); 15369e903e08SEric Dumazet eat -= size; 15371da177e4SLinus Torvalds } else { 15381da177e4SLinus Torvalds skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 15391da177e4SLinus Torvalds if (eat) { 15401da177e4SLinus Torvalds skb_shinfo(skb)->frags[k].page_offset += eat; 15419e903e08SEric Dumazet skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); 15421da177e4SLinus Torvalds eat = 0; 15431da177e4SLinus Torvalds } 15441da177e4SLinus Torvalds k++; 15451da177e4SLinus Torvalds } 15461da177e4SLinus Torvalds } 15471da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = k; 15481da177e4SLinus Torvalds 15491da177e4SLinus Torvalds skb->tail += delta; 15501da177e4SLinus Torvalds skb->data_len -= delta; 15511da177e4SLinus Torvalds 155227a884dcSArnaldo Carvalho de Melo return skb_tail_pointer(skb); 15531da177e4SLinus Torvalds } 1554b4ac530fSDavid S. Miller EXPORT_SYMBOL(__pskb_pull_tail); 15551da177e4SLinus Torvalds 155622019b17SEric Dumazet /** 155722019b17SEric Dumazet * skb_copy_bits - copy bits from skb to kernel buffer 155822019b17SEric Dumazet * @skb: source skb 155922019b17SEric Dumazet * @offset: offset in source 156022019b17SEric Dumazet * @to: destination buffer 156122019b17SEric Dumazet * @len: number of bytes to copy 156222019b17SEric Dumazet * 156322019b17SEric Dumazet * Copy the specified number of bytes from the source skb to the 156422019b17SEric Dumazet * destination buffer. 156522019b17SEric Dumazet * 156622019b17SEric Dumazet * CAUTION ! : 156722019b17SEric Dumazet * If its prototype is ever changed, 156822019b17SEric Dumazet * check arch/{*}/net/{*}.S files, 156922019b17SEric Dumazet * since it is called from BPF assembly code. 157022019b17SEric Dumazet */ 15711da177e4SLinus Torvalds int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 15721da177e4SLinus Torvalds { 15731a028e50SDavid S. Miller int start = skb_headlen(skb); 1574fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 1575fbb398a8SDavid S. Miller int i, copy; 15761da177e4SLinus Torvalds 15771da177e4SLinus Torvalds if (offset > (int)skb->len - len) 15781da177e4SLinus Torvalds goto fault; 15791da177e4SLinus Torvalds 15801da177e4SLinus Torvalds /* Copy header. */ 15811a028e50SDavid S. Miller if ((copy = start - offset) > 0) { 15821da177e4SLinus Torvalds if (copy > len) 15831da177e4SLinus Torvalds copy = len; 1584d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, offset, to, copy); 15851da177e4SLinus Torvalds if ((len -= copy) == 0) 15861da177e4SLinus Torvalds return 0; 15871da177e4SLinus Torvalds offset += copy; 15881da177e4SLinus Torvalds to += copy; 15891da177e4SLinus Torvalds } 15901da177e4SLinus Torvalds 15911da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 15921a028e50SDavid S. Miller int end; 159351c56b00SEric Dumazet skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 15941da177e4SLinus Torvalds 1595547b792cSIlpo Järvinen WARN_ON(start > offset + len); 15961a028e50SDavid S. 
Miller 159751c56b00SEric Dumazet end = start + skb_frag_size(f); 15981da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 15991da177e4SLinus Torvalds u8 *vaddr; 16001da177e4SLinus Torvalds 16011da177e4SLinus Torvalds if (copy > len) 16021da177e4SLinus Torvalds copy = len; 16031da177e4SLinus Torvalds 160451c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(f)); 16051da177e4SLinus Torvalds memcpy(to, 160651c56b00SEric Dumazet vaddr + f->page_offset + offset - start, 160751c56b00SEric Dumazet copy); 160851c56b00SEric Dumazet kunmap_atomic(vaddr); 16091da177e4SLinus Torvalds 16101da177e4SLinus Torvalds if ((len -= copy) == 0) 16111da177e4SLinus Torvalds return 0; 16121da177e4SLinus Torvalds offset += copy; 16131da177e4SLinus Torvalds to += copy; 16141da177e4SLinus Torvalds } 16151a028e50SDavid S. Miller start = end; 16161da177e4SLinus Torvalds } 16171da177e4SLinus Torvalds 1618fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 16191a028e50SDavid S. Miller int end; 16201da177e4SLinus Torvalds 1621547b792cSIlpo Järvinen WARN_ON(start > offset + len); 16221a028e50SDavid S. Miller 1623fbb398a8SDavid S. Miller end = start + frag_iter->len; 16241da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 16251da177e4SLinus Torvalds if (copy > len) 16261da177e4SLinus Torvalds copy = len; 1627fbb398a8SDavid S. Miller if (skb_copy_bits(frag_iter, offset - start, to, copy)) 16281da177e4SLinus Torvalds goto fault; 16291da177e4SLinus Torvalds if ((len -= copy) == 0) 16301da177e4SLinus Torvalds return 0; 16311da177e4SLinus Torvalds offset += copy; 16321da177e4SLinus Torvalds to += copy; 16331da177e4SLinus Torvalds } 16341a028e50SDavid S. Miller start = end; 16351da177e4SLinus Torvalds } 1636a6686f2fSShirley Ma 16371da177e4SLinus Torvalds if (!len) 16381da177e4SLinus Torvalds return 0; 16391da177e4SLinus Torvalds 16401da177e4SLinus Torvalds fault: 16411da177e4SLinus Torvalds return -EFAULT; 16421da177e4SLinus Torvalds } 1643b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_bits); 16441da177e4SLinus Torvalds 16459c55e01cSJens Axboe /* 16469c55e01cSJens Axboe * Callback from splice_to_pipe(), if we need to release some pages 16479c55e01cSJens Axboe * at the end of the spd in case we error'ed out in filling the pipe. 16489c55e01cSJens Axboe */ 16499c55e01cSJens Axboe static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 16509c55e01cSJens Axboe { 16518b9d3728SJarek Poplawski put_page(spd->pages[i]); 16528b9d3728SJarek Poplawski } 16539c55e01cSJens Axboe 1654a108d5f3SDavid S. 
Miller static struct page *linear_to_page(struct page *page, unsigned int *len, 16554fb66994SJarek Poplawski unsigned int *offset, 165618aafc62SEric Dumazet struct sock *sk) 16578b9d3728SJarek Poplawski { 16585640f768SEric Dumazet struct page_frag *pfrag = sk_page_frag(sk); 16598b9d3728SJarek Poplawski 16605640f768SEric Dumazet if (!sk_page_frag_refill(sk, pfrag)) 16618b9d3728SJarek Poplawski return NULL; 16624fb66994SJarek Poplawski 16635640f768SEric Dumazet *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 16644fb66994SJarek Poplawski 16655640f768SEric Dumazet memcpy(page_address(pfrag->page) + pfrag->offset, 16665640f768SEric Dumazet page_address(page) + *offset, *len); 16675640f768SEric Dumazet *offset = pfrag->offset; 16685640f768SEric Dumazet pfrag->offset += *len; 16694fb66994SJarek Poplawski 16705640f768SEric Dumazet return pfrag->page; 16719c55e01cSJens Axboe } 16729c55e01cSJens Axboe 167341c73a0dSEric Dumazet static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 167441c73a0dSEric Dumazet struct page *page, 167541c73a0dSEric Dumazet unsigned int offset) 167641c73a0dSEric Dumazet { 167741c73a0dSEric Dumazet return spd->nr_pages && 167841c73a0dSEric Dumazet spd->pages[spd->nr_pages - 1] == page && 167941c73a0dSEric Dumazet (spd->partial[spd->nr_pages - 1].offset + 168041c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len == offset); 168141c73a0dSEric Dumazet } 168241c73a0dSEric Dumazet 16839c55e01cSJens Axboe /* 16849c55e01cSJens Axboe * Fill page/offset/length into spd, if it can hold more pages. 16859c55e01cSJens Axboe */ 1686a108d5f3SDavid S. Miller static bool spd_fill_page(struct splice_pipe_desc *spd, 168735f3d14dSJens Axboe struct pipe_inode_info *pipe, struct page *page, 16884fb66994SJarek Poplawski unsigned int *len, unsigned int offset, 168918aafc62SEric Dumazet bool linear, 16907a67e56fSJarek Poplawski struct sock *sk) 16919c55e01cSJens Axboe { 169241c73a0dSEric Dumazet if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 1693a108d5f3SDavid S. Miller return true; 16949c55e01cSJens Axboe 16958b9d3728SJarek Poplawski if (linear) { 169618aafc62SEric Dumazet page = linear_to_page(page, len, &offset, sk); 16978b9d3728SJarek Poplawski if (!page) 1698a108d5f3SDavid S. Miller return true; 169941c73a0dSEric Dumazet } 170041c73a0dSEric Dumazet if (spd_can_coalesce(spd, page, offset)) { 170141c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len += *len; 1702a108d5f3SDavid S. Miller return false; 170341c73a0dSEric Dumazet } 17048b9d3728SJarek Poplawski get_page(page); 17059c55e01cSJens Axboe spd->pages[spd->nr_pages] = page; 17064fb66994SJarek Poplawski spd->partial[spd->nr_pages].len = *len; 17079c55e01cSJens Axboe spd->partial[spd->nr_pages].offset = offset; 17089c55e01cSJens Axboe spd->nr_pages++; 17098b9d3728SJarek Poplawski 1710a108d5f3SDavid S. Miller return false; 17119c55e01cSJens Axboe } 17129c55e01cSJens Axboe 1713a108d5f3SDavid S. Miller static bool __splice_segment(struct page *page, unsigned int poff, 17142870c43dSOctavian Purdila unsigned int plen, unsigned int *off, 171518aafc62SEric Dumazet unsigned int *len, 1716d7ccf7c0SEric Dumazet struct splice_pipe_desc *spd, bool linear, 171735f3d14dSJens Axboe struct sock *sk, 171835f3d14dSJens Axboe struct pipe_inode_info *pipe) 17199c55e01cSJens Axboe { 17202870c43dSOctavian Purdila if (!*len) 1721a108d5f3SDavid S. 
Miller return true; 17229c55e01cSJens Axboe 17232870c43dSOctavian Purdila /* skip this segment if already processed */ 17242870c43dSOctavian Purdila if (*off >= plen) { 17252870c43dSOctavian Purdila *off -= plen; 1726a108d5f3SDavid S. Miller return false; 17272870c43dSOctavian Purdila } 17282870c43dSOctavian Purdila 17292870c43dSOctavian Purdila /* ignore any bits we already processed */ 17309ca1b22dSEric Dumazet poff += *off; 17319ca1b22dSEric Dumazet plen -= *off; 17322870c43dSOctavian Purdila *off = 0; 17332870c43dSOctavian Purdila 173418aafc62SEric Dumazet do { 173518aafc62SEric Dumazet unsigned int flen = min(*len, plen); 17362870c43dSOctavian Purdila 173718aafc62SEric Dumazet if (spd_fill_page(spd, pipe, page, &flen, poff, 173818aafc62SEric Dumazet linear, sk)) 1739a108d5f3SDavid S. Miller return true; 174018aafc62SEric Dumazet poff += flen; 174118aafc62SEric Dumazet plen -= flen; 17422870c43dSOctavian Purdila *len -= flen; 174318aafc62SEric Dumazet } while (*len && plen); 17442870c43dSOctavian Purdila 1745a108d5f3SDavid S. Miller return false; 1746db43a282SOctavian Purdila } 17479c55e01cSJens Axboe 17489c55e01cSJens Axboe /* 1749a108d5f3SDavid S. Miller * Map linear and fragment data from the skb to spd. It reports true if the 17502870c43dSOctavian Purdila * pipe is full or if we already spliced the requested length. 17519c55e01cSJens Axboe */ 1752a108d5f3SDavid S. Miller static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 175335f3d14dSJens Axboe unsigned int *offset, unsigned int *len, 175435f3d14dSJens Axboe struct splice_pipe_desc *spd, struct sock *sk) 17552870c43dSOctavian Purdila { 17562870c43dSOctavian Purdila int seg; 17579c55e01cSJens Axboe 17581d0c0b32SEric Dumazet /* map the linear part : 17592996d31fSAlexander Duyck * If skb->head_frag is set, this 'linear' part is backed by a 17602996d31fSAlexander Duyck * fragment, and if the head is not shared with any clones then 17612996d31fSAlexander Duyck * we can avoid a copy since we own the head portion of this page. 17629c55e01cSJens Axboe */ 17632870c43dSOctavian Purdila if (__splice_segment(virt_to_page(skb->data), 17642870c43dSOctavian Purdila (unsigned long) skb->data & (PAGE_SIZE - 1), 17652870c43dSOctavian Purdila skb_headlen(skb), 176618aafc62SEric Dumazet offset, len, spd, 17673a7c1ee4SAlexander Duyck skb_head_is_locked(skb), 17681d0c0b32SEric Dumazet sk, pipe)) 1769a108d5f3SDavid S. Miller return true; 17709c55e01cSJens Axboe 17719c55e01cSJens Axboe /* 17729c55e01cSJens Axboe * then map the fragments 17739c55e01cSJens Axboe */ 17749c55e01cSJens Axboe for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 17759c55e01cSJens Axboe const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 17769c55e01cSJens Axboe 1777ea2ab693SIan Campbell if (__splice_segment(skb_frag_page(f), 17789e903e08SEric Dumazet f->page_offset, skb_frag_size(f), 177918aafc62SEric Dumazet offset, len, spd, false, sk, pipe)) 1780a108d5f3SDavid S. Miller return true; 17819c55e01cSJens Axboe } 17829c55e01cSJens Axboe 1783a108d5f3SDavid S. Miller return false; 17849c55e01cSJens Axboe } 17859c55e01cSJens Axboe 17869c55e01cSJens Axboe /* 17879c55e01cSJens Axboe * Map data from the skb to a pipe. Should handle both the linear part, 17889c55e01cSJens Axboe * the fragments, and the frag list. It does NOT handle frag lists within 17899c55e01cSJens Axboe * the frag list, if such a thing exists. We'd probably need to recurse to 17909c55e01cSJens Axboe * handle that cleanly. 
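 *
 * Editorial sketch (not part of the original source): a protocol's
 * splice_read() path typically calls this with the socket locked,
 * roughly as
 *
 *	ret = skb_splice_bits(skb, offset, pipe, len, flags);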
17919c55e01cSJens Axboe */ 17928b9d3728SJarek Poplawski int skb_splice_bits(struct sk_buff *skb, unsigned int offset, 17939c55e01cSJens Axboe struct pipe_inode_info *pipe, unsigned int tlen, 17949c55e01cSJens Axboe unsigned int flags) 17959c55e01cSJens Axboe { 179641c73a0dSEric Dumazet struct partial_page partial[MAX_SKB_FRAGS]; 179741c73a0dSEric Dumazet struct page *pages[MAX_SKB_FRAGS]; 17989c55e01cSJens Axboe struct splice_pipe_desc spd = { 17999c55e01cSJens Axboe .pages = pages, 18009c55e01cSJens Axboe .partial = partial, 1801047fe360SEric Dumazet .nr_pages_max = MAX_SKB_FRAGS, 18029c55e01cSJens Axboe .flags = flags, 180328a625cbSMiklos Szeredi .ops = &nosteal_pipe_buf_ops, 18049c55e01cSJens Axboe .spd_release = sock_spd_release, 18059c55e01cSJens Axboe }; 1806fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 18077a67e56fSJarek Poplawski struct sock *sk = skb->sk; 180835f3d14dSJens Axboe int ret = 0; 180935f3d14dSJens Axboe 18109c55e01cSJens Axboe /* 18119c55e01cSJens Axboe * __skb_splice_bits() only fails if the output has no room left, 18129c55e01cSJens Axboe * so no point in going over the frag_list for the error case. 18139c55e01cSJens Axboe */ 181435f3d14dSJens Axboe if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) 18159c55e01cSJens Axboe goto done; 18169c55e01cSJens Axboe else if (!tlen) 18179c55e01cSJens Axboe goto done; 18189c55e01cSJens Axboe 18199c55e01cSJens Axboe /* 18209c55e01cSJens Axboe * now see if we have a frag_list to map 18219c55e01cSJens Axboe */ 1822fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 1823fbb398a8SDavid S. Miller if (!tlen) 18249c55e01cSJens Axboe break; 182535f3d14dSJens Axboe if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk)) 1826fbb398a8SDavid S. Miller break; 18279c55e01cSJens Axboe } 18289c55e01cSJens Axboe 18299c55e01cSJens Axboe done: 18309c55e01cSJens Axboe if (spd.nr_pages) { 18319c55e01cSJens Axboe /* 18329c55e01cSJens Axboe * Drop the socket lock, otherwise we have reverse 18339c55e01cSJens Axboe * locking dependencies between sk_lock and i_mutex 18349c55e01cSJens Axboe * here as compared to sendfile(). We enter here 18359c55e01cSJens Axboe * with the socket lock held, and splice_to_pipe() will 18369c55e01cSJens Axboe * grab the pipe inode lock. For sendfile() emulation, 18379c55e01cSJens Axboe * we call into ->sendpage() with the i_mutex lock held 18389c55e01cSJens Axboe * and networking will grab the socket lock. 18399c55e01cSJens Axboe */ 1840293ad604SOctavian Purdila release_sock(sk); 18419c55e01cSJens Axboe ret = splice_to_pipe(pipe, &spd); 1842293ad604SOctavian Purdila lock_sock(sk); 18439c55e01cSJens Axboe } 18449c55e01cSJens Axboe 184535f3d14dSJens Axboe return ret; 18469c55e01cSJens Axboe } 18479c55e01cSJens Axboe 1848357b40a1SHerbert Xu /** 1849357b40a1SHerbert Xu * skb_store_bits - store bits from kernel buffer to skb 1850357b40a1SHerbert Xu * @skb: destination buffer 1851357b40a1SHerbert Xu * @offset: offset in destination 1852357b40a1SHerbert Xu * @from: source buffer 1853357b40a1SHerbert Xu * @len: number of bytes to copy 1854357b40a1SHerbert Xu * 1855357b40a1SHerbert Xu * Copy the specified number of bytes from the source buffer to the 1856357b40a1SHerbert Xu * destination skb. This function handles all the messy bits of 1857357b40a1SHerbert Xu * traversing fragment lists and such. 1858357b40a1SHerbert Xu */ 1859357b40a1SHerbert Xu 18600c6fcc8aSStephen Hemminger int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 1861357b40a1SHerbert Xu { 18621a028e50SDavid S. 
Miller int start = skb_headlen(skb); 1863fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 1864fbb398a8SDavid S. Miller int i, copy; 1865357b40a1SHerbert Xu 1866357b40a1SHerbert Xu if (offset > (int)skb->len - len) 1867357b40a1SHerbert Xu goto fault; 1868357b40a1SHerbert Xu 18691a028e50SDavid S. Miller if ((copy = start - offset) > 0) { 1870357b40a1SHerbert Xu if (copy > len) 1871357b40a1SHerbert Xu copy = len; 187227d7ff46SArnaldo Carvalho de Melo skb_copy_to_linear_data_offset(skb, offset, from, copy); 1873357b40a1SHerbert Xu if ((len -= copy) == 0) 1874357b40a1SHerbert Xu return 0; 1875357b40a1SHerbert Xu offset += copy; 1876357b40a1SHerbert Xu from += copy; 1877357b40a1SHerbert Xu } 1878357b40a1SHerbert Xu 1879357b40a1SHerbert Xu for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1880357b40a1SHerbert Xu skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 18811a028e50SDavid S. Miller int end; 1882357b40a1SHerbert Xu 1883547b792cSIlpo Järvinen WARN_ON(start > offset + len); 18841a028e50SDavid S. Miller 18859e903e08SEric Dumazet end = start + skb_frag_size(frag); 1886357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 1887357b40a1SHerbert Xu u8 *vaddr; 1888357b40a1SHerbert Xu 1889357b40a1SHerbert Xu if (copy > len) 1890357b40a1SHerbert Xu copy = len; 1891357b40a1SHerbert Xu 189251c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(frag)); 18931a028e50SDavid S. Miller memcpy(vaddr + frag->page_offset + offset - start, 18941a028e50SDavid S. Miller from, copy); 189551c56b00SEric Dumazet kunmap_atomic(vaddr); 1896357b40a1SHerbert Xu 1897357b40a1SHerbert Xu if ((len -= copy) == 0) 1898357b40a1SHerbert Xu return 0; 1899357b40a1SHerbert Xu offset += copy; 1900357b40a1SHerbert Xu from += copy; 1901357b40a1SHerbert Xu } 19021a028e50SDavid S. Miller start = end; 1903357b40a1SHerbert Xu } 1904357b40a1SHerbert Xu 1905fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 19061a028e50SDavid S. Miller int end; 1907357b40a1SHerbert Xu 1908547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19091a028e50SDavid S. Miller 1910fbb398a8SDavid S. Miller end = start + frag_iter->len; 1911357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 1912357b40a1SHerbert Xu if (copy > len) 1913357b40a1SHerbert Xu copy = len; 1914fbb398a8SDavid S. Miller if (skb_store_bits(frag_iter, offset - start, 19151a028e50SDavid S. Miller from, copy)) 1916357b40a1SHerbert Xu goto fault; 1917357b40a1SHerbert Xu if ((len -= copy) == 0) 1918357b40a1SHerbert Xu return 0; 1919357b40a1SHerbert Xu offset += copy; 1920357b40a1SHerbert Xu from += copy; 1921357b40a1SHerbert Xu } 19221a028e50SDavid S. Miller start = end; 1923357b40a1SHerbert Xu } 1924357b40a1SHerbert Xu if (!len) 1925357b40a1SHerbert Xu return 0; 1926357b40a1SHerbert Xu 1927357b40a1SHerbert Xu fault: 1928357b40a1SHerbert Xu return -EFAULT; 1929357b40a1SHerbert Xu } 1930357b40a1SHerbert Xu EXPORT_SYMBOL(skb_store_bits); 1931357b40a1SHerbert Xu 19321da177e4SLinus Torvalds /* Checksum skb data. */ 19332817a336SDaniel Borkmann __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 19342817a336SDaniel Borkmann __wsum csum, const struct skb_checksum_ops *ops) 19351da177e4SLinus Torvalds { 19361a028e50SDavid S. Miller int start = skb_headlen(skb); 19371a028e50SDavid S. Miller int i, copy = start - offset; 1938fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 19391da177e4SLinus Torvalds int pos = 0; 19401da177e4SLinus Torvalds 19411da177e4SLinus Torvalds /* Checksum header. 
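 * (i.e. the portion of the linear data, if any, that overlaps the
 * requested offset/len range)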
*/ 19421da177e4SLinus Torvalds if (copy > 0) { 19431da177e4SLinus Torvalds if (copy > len) 19441da177e4SLinus Torvalds copy = len; 19452817a336SDaniel Borkmann csum = ops->update(skb->data + offset, copy, csum); 19461da177e4SLinus Torvalds if ((len -= copy) == 0) 19471da177e4SLinus Torvalds return csum; 19481da177e4SLinus Torvalds offset += copy; 19491da177e4SLinus Torvalds pos = copy; 19501da177e4SLinus Torvalds } 19511da177e4SLinus Torvalds 19521da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 19531a028e50SDavid S. Miller int end; 195451c56b00SEric Dumazet skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 19551da177e4SLinus Torvalds 1956547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19571a028e50SDavid S. Miller 195851c56b00SEric Dumazet end = start + skb_frag_size(frag); 19591da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 196044bb9363SAl Viro __wsum csum2; 19611da177e4SLinus Torvalds u8 *vaddr; 19621da177e4SLinus Torvalds 19631da177e4SLinus Torvalds if (copy > len) 19641da177e4SLinus Torvalds copy = len; 196551c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(frag)); 19662817a336SDaniel Borkmann csum2 = ops->update(vaddr + frag->page_offset + 19671a028e50SDavid S. Miller offset - start, copy, 0); 196851c56b00SEric Dumazet kunmap_atomic(vaddr); 19692817a336SDaniel Borkmann csum = ops->combine(csum, csum2, pos, copy); 19701da177e4SLinus Torvalds if (!(len -= copy)) 19711da177e4SLinus Torvalds return csum; 19721da177e4SLinus Torvalds offset += copy; 19731da177e4SLinus Torvalds pos += copy; 19741da177e4SLinus Torvalds } 19751a028e50SDavid S. Miller start = end; 19761da177e4SLinus Torvalds } 19771da177e4SLinus Torvalds 1978fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 19791a028e50SDavid S. Miller int end; 19801da177e4SLinus Torvalds 1981547b792cSIlpo Järvinen WARN_ON(start > offset + len); 19821a028e50SDavid S. Miller 1983fbb398a8SDavid S. Miller end = start + frag_iter->len; 19841da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 19855f92a738SAl Viro __wsum csum2; 19861da177e4SLinus Torvalds if (copy > len) 19871da177e4SLinus Torvalds copy = len; 19882817a336SDaniel Borkmann csum2 = __skb_checksum(frag_iter, offset - start, 19892817a336SDaniel Borkmann copy, 0, ops); 19902817a336SDaniel Borkmann csum = ops->combine(csum, csum2, pos, copy); 19911da177e4SLinus Torvalds if ((len -= copy) == 0) 19921da177e4SLinus Torvalds return csum; 19931da177e4SLinus Torvalds offset += copy; 19941da177e4SLinus Torvalds pos += copy; 19951da177e4SLinus Torvalds } 19961a028e50SDavid S. Miller start = end; 19971da177e4SLinus Torvalds } 199809a62660SKris Katterjohn BUG_ON(len); 19991da177e4SLinus Torvalds 20001da177e4SLinus Torvalds return csum; 20011da177e4SLinus Torvalds } 20022817a336SDaniel Borkmann EXPORT_SYMBOL(__skb_checksum); 20032817a336SDaniel Borkmann 20042817a336SDaniel Borkmann __wsum skb_checksum(const struct sk_buff *skb, int offset, 20052817a336SDaniel Borkmann int len, __wsum csum) 20062817a336SDaniel Borkmann { 20072817a336SDaniel Borkmann const struct skb_checksum_ops ops = { 2008cea80ea8SDaniel Borkmann .update = csum_partial_ext, 20092817a336SDaniel Borkmann .combine = csum_block_add_ext, 20102817a336SDaniel Borkmann }; 20112817a336SDaniel Borkmann 20122817a336SDaniel Borkmann return __skb_checksum(skb, offset, len, csum, &ops); 20132817a336SDaniel Borkmann } 2014b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_checksum); 20151da177e4SLinus Torvalds 20161da177e4SLinus Torvalds /* Both of above in one bottle. 
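 * (i.e. skb_copy_bits() and skb_checksum() combined into a single pass
 * over the buffer)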
*/ 20171da177e4SLinus Torvalds 201881d77662SAl Viro __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 201981d77662SAl Viro u8 *to, int len, __wsum csum) 20201da177e4SLinus Torvalds { 20211a028e50SDavid S. Miller int start = skb_headlen(skb); 20221a028e50SDavid S. Miller int i, copy = start - offset; 2023fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 20241da177e4SLinus Torvalds int pos = 0; 20251da177e4SLinus Torvalds 20261da177e4SLinus Torvalds /* Copy header. */ 20271da177e4SLinus Torvalds if (copy > 0) { 20281da177e4SLinus Torvalds if (copy > len) 20291da177e4SLinus Torvalds copy = len; 20301da177e4SLinus Torvalds csum = csum_partial_copy_nocheck(skb->data + offset, to, 20311da177e4SLinus Torvalds copy, csum); 20321da177e4SLinus Torvalds if ((len -= copy) == 0) 20331da177e4SLinus Torvalds return csum; 20341da177e4SLinus Torvalds offset += copy; 20351da177e4SLinus Torvalds to += copy; 20361da177e4SLinus Torvalds pos = copy; 20371da177e4SLinus Torvalds } 20381da177e4SLinus Torvalds 20391da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 20401a028e50SDavid S. Miller int end; 20411da177e4SLinus Torvalds 2042547b792cSIlpo Järvinen WARN_ON(start > offset + len); 20431a028e50SDavid S. Miller 20449e903e08SEric Dumazet end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 20451da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 20465084205fSAl Viro __wsum csum2; 20471da177e4SLinus Torvalds u8 *vaddr; 20481da177e4SLinus Torvalds skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 20491da177e4SLinus Torvalds 20501da177e4SLinus Torvalds if (copy > len) 20511da177e4SLinus Torvalds copy = len; 205251c56b00SEric Dumazet vaddr = kmap_atomic(skb_frag_page(frag)); 20531da177e4SLinus Torvalds csum2 = csum_partial_copy_nocheck(vaddr + 20541a028e50SDavid S. Miller frag->page_offset + 20551a028e50SDavid S. Miller offset - start, to, 20561a028e50SDavid S. Miller copy, 0); 205751c56b00SEric Dumazet kunmap_atomic(vaddr); 20581da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 20591da177e4SLinus Torvalds if (!(len -= copy)) 20601da177e4SLinus Torvalds return csum; 20611da177e4SLinus Torvalds offset += copy; 20621da177e4SLinus Torvalds to += copy; 20631da177e4SLinus Torvalds pos += copy; 20641da177e4SLinus Torvalds } 20651a028e50SDavid S. Miller start = end; 20661da177e4SLinus Torvalds } 20671da177e4SLinus Torvalds 2068fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 206981d77662SAl Viro __wsum csum2; 20701a028e50SDavid S. Miller int end; 20711da177e4SLinus Torvalds 2072547b792cSIlpo Järvinen WARN_ON(start > offset + len); 20731a028e50SDavid S. Miller 2074fbb398a8SDavid S. Miller end = start + frag_iter->len; 20751da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 20761da177e4SLinus Torvalds if (copy > len) 20771da177e4SLinus Torvalds copy = len; 2078fbb398a8SDavid S. Miller csum2 = skb_copy_and_csum_bits(frag_iter, 20791a028e50SDavid S. Miller offset - start, 20801da177e4SLinus Torvalds to, copy, 0); 20811da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 20821da177e4SLinus Torvalds if ((len -= copy) == 0) 20831da177e4SLinus Torvalds return csum; 20841da177e4SLinus Torvalds offset += copy; 20851da177e4SLinus Torvalds to += copy; 20861da177e4SLinus Torvalds pos += copy; 20871da177e4SLinus Torvalds } 20881a028e50SDavid S. Miller start = end; 20891da177e4SLinus Torvalds } 209009a62660SKris Katterjohn BUG_ON(len); 20911da177e4SLinus Torvalds return csum; 20921da177e4SLinus Torvalds } 2093b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_copy_and_csum_bits); 20941da177e4SLinus Torvalds 20951da177e4SLinus Torvalds void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 20961da177e4SLinus Torvalds { 2097d3bc23e7SAl Viro __wsum csum; 20981da177e4SLinus Torvalds long csstart; 20991da177e4SLinus Torvalds 210084fa7933SPatrick McHardy if (skb->ip_summed == CHECKSUM_PARTIAL) 210155508d60SMichał Mirosław csstart = skb_checksum_start_offset(skb); 21021da177e4SLinus Torvalds else 21031da177e4SLinus Torvalds csstart = skb_headlen(skb); 21041da177e4SLinus Torvalds 210509a62660SKris Katterjohn BUG_ON(csstart > skb_headlen(skb)); 21061da177e4SLinus Torvalds 2107d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data(skb, to, csstart); 21081da177e4SLinus Torvalds 21091da177e4SLinus Torvalds csum = 0; 21101da177e4SLinus Torvalds if (csstart != skb->len) 21111da177e4SLinus Torvalds csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 21121da177e4SLinus Torvalds skb->len - csstart, 0); 21131da177e4SLinus Torvalds 211484fa7933SPatrick McHardy if (skb->ip_summed == CHECKSUM_PARTIAL) { 2115ff1dcadbSAl Viro long csstuff = csstart + skb->csum_offset; 21161da177e4SLinus Torvalds 2117d3bc23e7SAl Viro *((__sum16 *)(to + csstuff)) = csum_fold(csum); 21181da177e4SLinus Torvalds } 21191da177e4SLinus Torvalds } 2120b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_dev); 21211da177e4SLinus Torvalds 21221da177e4SLinus Torvalds /** 21231da177e4SLinus Torvalds * skb_dequeue - remove from the head of the queue 21241da177e4SLinus Torvalds * @list: list to dequeue from 21251da177e4SLinus Torvalds * 21261da177e4SLinus Torvalds * Remove the head of the list. The list lock is taken so the function 21271da177e4SLinus Torvalds * may be used safely with other locking list functions. The head item is 21281da177e4SLinus Torvalds * returned or %NULL if the list is empty. 21291da177e4SLinus Torvalds */ 21301da177e4SLinus Torvalds 21311da177e4SLinus Torvalds struct sk_buff *skb_dequeue(struct sk_buff_head *list) 21321da177e4SLinus Torvalds { 21331da177e4SLinus Torvalds unsigned long flags; 21341da177e4SLinus Torvalds struct sk_buff *result; 21351da177e4SLinus Torvalds 21361da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 21371da177e4SLinus Torvalds result = __skb_dequeue(list); 21381da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 21391da177e4SLinus Torvalds return result; 21401da177e4SLinus Torvalds } 2141b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue); 21421da177e4SLinus Torvalds 21431da177e4SLinus Torvalds /** 21441da177e4SLinus Torvalds * skb_dequeue_tail - remove from the tail of the queue 21451da177e4SLinus Torvalds * @list: list to dequeue from 21461da177e4SLinus Torvalds * 21471da177e4SLinus Torvalds * Remove the tail of the list. The list lock is taken so the function 21481da177e4SLinus Torvalds * may be used safely with other locking list functions. The tail item is 21491da177e4SLinus Torvalds * returned or %NULL if the list is empty. 
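 *
 * Editorial example (not part of the original source; 'queue' is a
 * hypothetical, already-initialized struct sk_buff_head):
 *
 *	struct sk_buff *skb;
 *
 *	while ((skb = skb_dequeue_tail(&queue)) != NULL)
 *		kfree_skb(skb);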
21501da177e4SLinus Torvalds */ 21511da177e4SLinus Torvalds struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 21521da177e4SLinus Torvalds { 21531da177e4SLinus Torvalds unsigned long flags; 21541da177e4SLinus Torvalds struct sk_buff *result; 21551da177e4SLinus Torvalds 21561da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 21571da177e4SLinus Torvalds result = __skb_dequeue_tail(list); 21581da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 21591da177e4SLinus Torvalds return result; 21601da177e4SLinus Torvalds } 2161b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue_tail); 21621da177e4SLinus Torvalds 21631da177e4SLinus Torvalds /** 21641da177e4SLinus Torvalds * skb_queue_purge - empty a list 21651da177e4SLinus Torvalds * @list: list to empty 21661da177e4SLinus Torvalds * 21671da177e4SLinus Torvalds * Delete all buffers on an &sk_buff list. Each buffer is removed from 21681da177e4SLinus Torvalds * the list and one reference dropped. This function takes the list 21691da177e4SLinus Torvalds * lock and is atomic with respect to other list locking functions. 21701da177e4SLinus Torvalds */ 21711da177e4SLinus Torvalds void skb_queue_purge(struct sk_buff_head *list) 21721da177e4SLinus Torvalds { 21731da177e4SLinus Torvalds struct sk_buff *skb; 21741da177e4SLinus Torvalds while ((skb = skb_dequeue(list)) != NULL) 21751da177e4SLinus Torvalds kfree_skb(skb); 21761da177e4SLinus Torvalds } 2177b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_purge); 21781da177e4SLinus Torvalds 21791da177e4SLinus Torvalds /** 21801da177e4SLinus Torvalds * skb_queue_head - queue a buffer at the list head 21811da177e4SLinus Torvalds * @list: list to use 21821da177e4SLinus Torvalds * @newsk: buffer to queue 21831da177e4SLinus Torvalds * 21841da177e4SLinus Torvalds * Queue a buffer at the start of the list. This function takes the 21851da177e4SLinus Torvalds * list lock and can be used safely with other locking &sk_buff 21861da177e4SLinus Torvalds * functions. 21871da177e4SLinus Torvalds * 21881da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 21891da177e4SLinus Torvalds */ 21901da177e4SLinus Torvalds void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 21911da177e4SLinus Torvalds { 21921da177e4SLinus Torvalds unsigned long flags; 21931da177e4SLinus Torvalds 21941da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 21951da177e4SLinus Torvalds __skb_queue_head(list, newsk); 21961da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 21971da177e4SLinus Torvalds } 2198b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_head); 21991da177e4SLinus Torvalds 22001da177e4SLinus Torvalds /** 22011da177e4SLinus Torvalds * skb_queue_tail - queue a buffer at the list tail 22021da177e4SLinus Torvalds * @list: list to use 22031da177e4SLinus Torvalds * @newsk: buffer to queue 22041da177e4SLinus Torvalds * 22051da177e4SLinus Torvalds * Queue a buffer at the tail of the list. This function takes the 22061da177e4SLinus Torvalds * list lock and can be used safely with other locking &sk_buff 22071da177e4SLinus Torvalds * functions. 22081da177e4SLinus Torvalds * 22091da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time.
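 *
 * Editorial example (not part of the original source; 'rxq' is a
 * hypothetical queue set up with skb_queue_head_init()):
 *
 *	skb_queue_tail(&rxq, skb);		(producer side)
 *	skb = skb_dequeue(&rxq);		(consumer side)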
22101da177e4SLinus Torvalds */ 22111da177e4SLinus Torvalds void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 22121da177e4SLinus Torvalds { 22131da177e4SLinus Torvalds unsigned long flags; 22141da177e4SLinus Torvalds 22151da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 22161da177e4SLinus Torvalds __skb_queue_tail(list, newsk); 22171da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 22181da177e4SLinus Torvalds } 2219b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_tail); 22208728b834SDavid S. Miller 22211da177e4SLinus Torvalds /** 22221da177e4SLinus Torvalds * skb_unlink - remove a buffer from a list 22231da177e4SLinus Torvalds * @skb: buffer to remove 22248728b834SDavid S. Miller * @list: list to use 22251da177e4SLinus Torvalds * 22268728b834SDavid S. Miller * Remove a packet from a list. The list locks are taken and this 22278728b834SDavid S. Miller * function is atomic with respect to other list locked calls 22281da177e4SLinus Torvalds * 22298728b834SDavid S. Miller * You must know what list the SKB is on. 22301da177e4SLinus Torvalds */ 22318728b834SDavid S. Miller void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 22321da177e4SLinus Torvalds { 22331da177e4SLinus Torvalds unsigned long flags; 22341da177e4SLinus Torvalds 22351da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 22368728b834SDavid S. Miller __skb_unlink(skb, list); 22371da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 22381da177e4SLinus Torvalds } 2239b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_unlink); 22401da177e4SLinus Torvalds 22411da177e4SLinus Torvalds /** 22421da177e4SLinus Torvalds * skb_append - append a buffer 22431da177e4SLinus Torvalds * @old: buffer to insert after 22441da177e4SLinus Torvalds * @newsk: buffer to insert 22458728b834SDavid S. Miller * @list: list to use 22461da177e4SLinus Torvalds * 22471da177e4SLinus Torvalds * Place a packet after a given packet in a list. The list locks are taken 22481da177e4SLinus Torvalds * and this function is atomic with respect to other list locked calls. 22491da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 22501da177e4SLinus Torvalds */ 22518728b834SDavid S. Miller void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 22521da177e4SLinus Torvalds { 22531da177e4SLinus Torvalds unsigned long flags; 22541da177e4SLinus Torvalds 22558728b834SDavid S. Miller spin_lock_irqsave(&list->lock, flags); 22567de6c033SGerrit Renker __skb_queue_after(list, old, newsk); 22578728b834SDavid S. Miller spin_unlock_irqrestore(&list->lock, flags); 22581da177e4SLinus Torvalds } 2259b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append); 22601da177e4SLinus Torvalds 22611da177e4SLinus Torvalds /** 22621da177e4SLinus Torvalds * skb_insert - insert a buffer 22631da177e4SLinus Torvalds * @old: buffer to insert before 22641da177e4SLinus Torvalds * @newsk: buffer to insert 22658728b834SDavid S. Miller * @list: list to use 22661da177e4SLinus Torvalds * 22678728b834SDavid S. Miller * Place a packet before a given packet in a list. The list locks are 22688728b834SDavid S. Miller * taken and this function is atomic with respect to other list locked 22698728b834SDavid S. Miller * calls. 22708728b834SDavid S. Miller * 22711da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 22721da177e4SLinus Torvalds */ 22738728b834SDavid S. 
Miller void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 22741da177e4SLinus Torvalds { 22751da177e4SLinus Torvalds unsigned long flags; 22761da177e4SLinus Torvalds 22778728b834SDavid S. Miller spin_lock_irqsave(&list->lock, flags); 22788728b834SDavid S. Miller __skb_insert(newsk, old->prev, old, list); 22798728b834SDavid S. Miller spin_unlock_irqrestore(&list->lock, flags); 22801da177e4SLinus Torvalds } 2281b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_insert); 22821da177e4SLinus Torvalds 22831da177e4SLinus Torvalds static inline void skb_split_inside_header(struct sk_buff *skb, 22841da177e4SLinus Torvalds struct sk_buff* skb1, 22851da177e4SLinus Torvalds const u32 len, const int pos) 22861da177e4SLinus Torvalds { 22871da177e4SLinus Torvalds int i; 22881da177e4SLinus Torvalds 2289d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 2290d626f62bSArnaldo Carvalho de Melo pos - len); 22911da177e4SLinus Torvalds /* And move the data appendix as is. */ 22921da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 22931da177e4SLinus Torvalds skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 22941da177e4SLinus Torvalds 22951da177e4SLinus Torvalds skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 22961da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = 0; 22971da177e4SLinus Torvalds skb1->data_len = skb->data_len; 22981da177e4SLinus Torvalds skb1->len += skb1->data_len; 22991da177e4SLinus Torvalds skb->data_len = 0; 23001da177e4SLinus Torvalds skb->len = len; 230127a884dcSArnaldo Carvalho de Melo skb_set_tail_pointer(skb, len); 23021da177e4SLinus Torvalds } 23031da177e4SLinus Torvalds 23041da177e4SLinus Torvalds static inline void skb_split_no_header(struct sk_buff *skb, 23051da177e4SLinus Torvalds struct sk_buff* skb1, 23061da177e4SLinus Torvalds const u32 len, int pos) 23071da177e4SLinus Torvalds { 23081da177e4SLinus Torvalds int i, k = 0; 23091da177e4SLinus Torvalds const int nfrags = skb_shinfo(skb)->nr_frags; 23101da177e4SLinus Torvalds 23111da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = 0; 23121da177e4SLinus Torvalds skb1->len = skb1->data_len = skb->len - len; 23131da177e4SLinus Torvalds skb->len = len; 23141da177e4SLinus Torvalds skb->data_len = len - pos; 23151da177e4SLinus Torvalds 23161da177e4SLinus Torvalds for (i = 0; i < nfrags; i++) { 23179e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 23181da177e4SLinus Torvalds 23191da177e4SLinus Torvalds if (pos + size > len) { 23201da177e4SLinus Torvalds skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 23211da177e4SLinus Torvalds 23221da177e4SLinus Torvalds if (pos < len) { 23231da177e4SLinus Torvalds /* Split frag. 23241da177e4SLinus Torvalds * We have two variants in this case: 23251da177e4SLinus Torvalds * 1. Move the whole frag to the second 23261da177e4SLinus Torvalds * part, if it is possible. E.g. 23271da177e4SLinus Torvalds * this approach is mandatory for TUX, 23281da177e4SLinus Torvalds * where splitting is expensive. 23291da177e4SLinus Torvalds * 2. Split accurately. This is what we do.
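 * (Variant 2 is what the code below implements: the boundary frag is
 * referenced once more, and its bytes are divided between skb and skb1
 * by adjusting page_offset and the two frag sizes.)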
23301da177e4SLinus Torvalds */
2331ea2ab693SIan Campbell skb_frag_ref(skb, i);
23321da177e4SLinus Torvalds skb_shinfo(skb1)->frags[0].page_offset += len - pos;
23339e903e08SEric Dumazet skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
23349e903e08SEric Dumazet skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
23351da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags++;
23361da177e4SLinus Torvalds }
23371da177e4SLinus Torvalds k++;
23381da177e4SLinus Torvalds } else
23391da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags++;
23401da177e4SLinus Torvalds pos += size;
23411da177e4SLinus Torvalds }
23421da177e4SLinus Torvalds skb_shinfo(skb1)->nr_frags = k;
23431da177e4SLinus Torvalds }
23441da177e4SLinus Torvalds
23451da177e4SLinus Torvalds /**
23461da177e4SLinus Torvalds * skb_split - Split fragmented skb to two parts at length len.
23471da177e4SLinus Torvalds * @skb: the buffer to split
23481da177e4SLinus Torvalds * @skb1: the buffer to receive the second part
23491da177e4SLinus Torvalds * @len: new length for skb
23501da177e4SLinus Torvalds */
23511da177e4SLinus Torvalds void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
23521da177e4SLinus Torvalds {
23531da177e4SLinus Torvalds int pos = skb_headlen(skb);
23541da177e4SLinus Torvalds
235568534c68SAmerigo Wang skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
23561da177e4SLinus Torvalds if (len < pos) /* Split line is inside header. */
23571da177e4SLinus Torvalds skb_split_inside_header(skb, skb1, len, pos);
23581da177e4SLinus Torvalds else /* Second chunk has no header, nothing to copy. */
23591da177e4SLinus Torvalds skb_split_no_header(skb, skb1, len, pos);
23601da177e4SLinus Torvalds }
2361b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_split);
23621da177e4SLinus Torvalds
23639f782db3SIlpo Järvinen /* Shifting from/to a cloned skb is a no-go.
23649f782db3SIlpo Järvinen *
23659f782db3SIlpo Järvinen * Caller cannot keep skb_shinfo related pointers past calling here!
23669f782db3SIlpo Järvinen */
2367832d11c5SIlpo Järvinen static int skb_prepare_for_shift(struct sk_buff *skb)
2368832d11c5SIlpo Järvinen {
23690ace2856SIlpo Järvinen return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2370832d11c5SIlpo Järvinen }
2371832d11c5SIlpo Järvinen
2372832d11c5SIlpo Järvinen /**
2373832d11c5SIlpo Järvinen * skb_shift - Shifts paged data partially from skb to another
2374832d11c5SIlpo Järvinen * @tgt: buffer into which tail data gets added
2375832d11c5SIlpo Järvinen * @skb: buffer the paged data comes from
2376832d11c5SIlpo Järvinen * @shiftlen: shift up to this many bytes
2377832d11c5SIlpo Järvinen *
2378832d11c5SIlpo Järvinen * Attempts to shift up to shiftlen worth of bytes, which may be less than
237920e994a0SFeng King * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
2380832d11c5SIlpo Järvinen * It's up to the caller to free skb if everything was shifted.
2381832d11c5SIlpo Järvinen *
2382832d11c5SIlpo Järvinen * If @tgt runs out of frags, the whole operation is aborted.
2383832d11c5SIlpo Järvinen *
2384832d11c5SIlpo Järvinen * Skb can contain nothing but paged data, while tgt is allowed
2385832d11c5SIlpo Järvinen * to have non-paged data as well.
2386832d11c5SIlpo Järvinen *
2387832d11c5SIlpo Järvinen * TODO: full sized shift could be optimized but that would need
2388832d11c5SIlpo Järvinen * specialized skb freer to handle frags without up-to-date nr_frags.
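 *
 * Minimal usage sketch (illustrative only; the in-tree caller, the TCP
 * SACK shifting code, does considerably more bookkeeping). Per the
 * rules above, @skb here must contain only paged data:
 *
 *	shifted = skb_shift(tgt, skb, skb->len);
 *	if (shifted == skb->len)
 *		kfree_skb(skb);
 *
 * If less than skb->len was shifted, @skb still holds the remaining
 * bytes and must not be freed.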
2389832d11c5SIlpo Järvinen */ 2390832d11c5SIlpo Järvinen int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 2391832d11c5SIlpo Järvinen { 2392832d11c5SIlpo Järvinen int from, to, merge, todo; 2393832d11c5SIlpo Järvinen struct skb_frag_struct *fragfrom, *fragto; 2394832d11c5SIlpo Järvinen 2395832d11c5SIlpo Järvinen BUG_ON(shiftlen > skb->len); 2396832d11c5SIlpo Järvinen BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ 2397832d11c5SIlpo Järvinen 2398832d11c5SIlpo Järvinen todo = shiftlen; 2399832d11c5SIlpo Järvinen from = 0; 2400832d11c5SIlpo Järvinen to = skb_shinfo(tgt)->nr_frags; 2401832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2402832d11c5SIlpo Järvinen 2403832d11c5SIlpo Järvinen /* Actual merge is delayed until the point when we know we can 2404832d11c5SIlpo Järvinen * commit all, so that we don't have to undo partial changes 2405832d11c5SIlpo Järvinen */ 2406832d11c5SIlpo Järvinen if (!to || 2407ea2ab693SIan Campbell !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 2408ea2ab693SIan Campbell fragfrom->page_offset)) { 2409832d11c5SIlpo Järvinen merge = -1; 2410832d11c5SIlpo Järvinen } else { 2411832d11c5SIlpo Järvinen merge = to - 1; 2412832d11c5SIlpo Järvinen 24139e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 2414832d11c5SIlpo Järvinen if (todo < 0) { 2415832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || 2416832d11c5SIlpo Järvinen skb_prepare_for_shift(tgt)) 2417832d11c5SIlpo Järvinen return 0; 2418832d11c5SIlpo Järvinen 24199f782db3SIlpo Järvinen /* All previous frag pointers might be stale! */ 24209f782db3SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2421832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 2422832d11c5SIlpo Järvinen 24239e903e08SEric Dumazet skb_frag_size_add(fragto, shiftlen); 24249e903e08SEric Dumazet skb_frag_size_sub(fragfrom, shiftlen); 2425832d11c5SIlpo Järvinen fragfrom->page_offset += shiftlen; 2426832d11c5SIlpo Järvinen 2427832d11c5SIlpo Järvinen goto onlymerged; 2428832d11c5SIlpo Järvinen } 2429832d11c5SIlpo Järvinen 2430832d11c5SIlpo Järvinen from++; 2431832d11c5SIlpo Järvinen } 2432832d11c5SIlpo Järvinen 2433832d11c5SIlpo Järvinen /* Skip full, not-fitting skb to avoid expensive operations */ 2434832d11c5SIlpo Järvinen if ((shiftlen == skb->len) && 2435832d11c5SIlpo Järvinen (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 2436832d11c5SIlpo Järvinen return 0; 2437832d11c5SIlpo Järvinen 2438832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 2439832d11c5SIlpo Järvinen return 0; 2440832d11c5SIlpo Järvinen 2441832d11c5SIlpo Järvinen while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 2442832d11c5SIlpo Järvinen if (to == MAX_SKB_FRAGS) 2443832d11c5SIlpo Järvinen return 0; 2444832d11c5SIlpo Järvinen 2445832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 2446832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[to]; 2447832d11c5SIlpo Järvinen 24489e903e08SEric Dumazet if (todo >= skb_frag_size(fragfrom)) { 2449832d11c5SIlpo Järvinen *fragto = *fragfrom; 24509e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 2451832d11c5SIlpo Järvinen from++; 2452832d11c5SIlpo Järvinen to++; 2453832d11c5SIlpo Järvinen 2454832d11c5SIlpo Järvinen } else { 2455ea2ab693SIan Campbell __skb_frag_ref(fragfrom); 2456832d11c5SIlpo Järvinen fragto->page = fragfrom->page; 2457832d11c5SIlpo Järvinen fragto->page_offset = fragfrom->page_offset; 24589e903e08SEric Dumazet skb_frag_size_set(fragto, todo); 2459832d11c5SIlpo Järvinen 
2460832d11c5SIlpo Järvinen fragfrom->page_offset += todo; 24619e903e08SEric Dumazet skb_frag_size_sub(fragfrom, todo); 2462832d11c5SIlpo Järvinen todo = 0; 2463832d11c5SIlpo Järvinen 2464832d11c5SIlpo Järvinen to++; 2465832d11c5SIlpo Järvinen break; 2466832d11c5SIlpo Järvinen } 2467832d11c5SIlpo Järvinen } 2468832d11c5SIlpo Järvinen 2469832d11c5SIlpo Järvinen /* Ready to "commit" this state change to tgt */ 2470832d11c5SIlpo Järvinen skb_shinfo(tgt)->nr_frags = to; 2471832d11c5SIlpo Järvinen 2472832d11c5SIlpo Järvinen if (merge >= 0) { 2473832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[0]; 2474832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 2475832d11c5SIlpo Järvinen 24769e903e08SEric Dumazet skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 2477ea2ab693SIan Campbell __skb_frag_unref(fragfrom); 2478832d11c5SIlpo Järvinen } 2479832d11c5SIlpo Järvinen 2480832d11c5SIlpo Järvinen /* Reposition in the original skb */ 2481832d11c5SIlpo Järvinen to = 0; 2482832d11c5SIlpo Järvinen while (from < skb_shinfo(skb)->nr_frags) 2483832d11c5SIlpo Järvinen skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 2484832d11c5SIlpo Järvinen skb_shinfo(skb)->nr_frags = to; 2485832d11c5SIlpo Järvinen 2486832d11c5SIlpo Järvinen BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 2487832d11c5SIlpo Järvinen 2488832d11c5SIlpo Järvinen onlymerged: 2489832d11c5SIlpo Järvinen /* Most likely the tgt won't ever need its checksum anymore, skb on 2490832d11c5SIlpo Järvinen * the other hand might need it if it needs to be resent 2491832d11c5SIlpo Järvinen */ 2492832d11c5SIlpo Järvinen tgt->ip_summed = CHECKSUM_PARTIAL; 2493832d11c5SIlpo Järvinen skb->ip_summed = CHECKSUM_PARTIAL; 2494832d11c5SIlpo Järvinen 2495832d11c5SIlpo Järvinen /* Yak, is it really working this way? Some helper please? */ 2496832d11c5SIlpo Järvinen skb->len -= shiftlen; 2497832d11c5SIlpo Järvinen skb->data_len -= shiftlen; 2498832d11c5SIlpo Järvinen skb->truesize -= shiftlen; 2499832d11c5SIlpo Järvinen tgt->len += shiftlen; 2500832d11c5SIlpo Järvinen tgt->data_len += shiftlen; 2501832d11c5SIlpo Järvinen tgt->truesize += shiftlen; 2502832d11c5SIlpo Järvinen 2503832d11c5SIlpo Järvinen return shiftlen; 2504832d11c5SIlpo Järvinen } 2505832d11c5SIlpo Järvinen 2506677e90edSThomas Graf /** 2507677e90edSThomas Graf * skb_prepare_seq_read - Prepare a sequential read of skb data 2508677e90edSThomas Graf * @skb: the buffer to read 2509677e90edSThomas Graf * @from: lower offset of data to be read 2510677e90edSThomas Graf * @to: upper offset of data to be read 2511677e90edSThomas Graf * @st: state variable 2512677e90edSThomas Graf * 2513677e90edSThomas Graf * Initializes the specified state variable. Must be called before 2514677e90edSThomas Graf * invoking skb_seq_read() for the first time. 2515677e90edSThomas Graf */ 2516677e90edSThomas Graf void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 2517677e90edSThomas Graf unsigned int to, struct skb_seq_state *st) 2518677e90edSThomas Graf { 2519677e90edSThomas Graf st->lower_offset = from; 2520677e90edSThomas Graf st->upper_offset = to; 2521677e90edSThomas Graf st->root_skb = st->cur_skb = skb; 2522677e90edSThomas Graf st->frag_idx = st->stepped_offset = 0; 2523677e90edSThomas Graf st->frag_data = NULL; 2524677e90edSThomas Graf } 2525b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_prepare_seq_read);
2526677e90edSThomas Graf
2527677e90edSThomas Graf /**
2528677e90edSThomas Graf * skb_seq_read - Sequentially read skb data
2529677e90edSThomas Graf * @consumed: number of bytes consumed by the caller so far
2530677e90edSThomas Graf * @data: destination pointer for data to be returned
2531677e90edSThomas Graf * @st: state variable
2532677e90edSThomas Graf *
2533bc32383cSMathias Krause * Reads a block of skb data at @consumed relative to the
2534677e90edSThomas Graf * lower offset specified to skb_prepare_seq_read(). Assigns
2535bc32383cSMathias Krause * the head of the data block to @data and returns the length
2536677e90edSThomas Graf * of the block or 0 if the end of the skb data or the upper
2537677e90edSThomas Graf * offset has been reached.
2538677e90edSThomas Graf *
2539677e90edSThomas Graf * The caller is not required to consume all of the data
2540bc32383cSMathias Krause * returned, i.e. @consumed is typically set to the number
2541677e90edSThomas Graf * of bytes already consumed and the next call to
2542677e90edSThomas Graf * skb_seq_read() will return the remaining part of the block.
2543677e90edSThomas Graf *
254425985edcSLucas De Marchi * Note 1: The size of each block of data returned can be arbitrary;
2545677e90edSThomas Graf * this limitation is the cost of zerocopy sequential
2546677e90edSThomas Graf * reads of potentially non-linear data.
2547677e90edSThomas Graf *
2548bc2cda1eSRandy Dunlap * Note 2: Fragment lists within fragments are not implemented
2549677e90edSThomas Graf * at the moment, state->root_skb could be replaced with
2550677e90edSThomas Graf * a stack for this purpose.
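 *
 * Typical calling pattern (an illustrative sketch; process_block() is
 * a hypothetical consumer, not a kernel helper):
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int consumed = 0, len;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		process_block(data, len);
 *		consumed += len;
 *	}
 *
 * A caller that stops iterating before skb_seq_read() returns 0 must
 * call skb_abort_seq_read(&st) to release any mapped fragment.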
2551677e90edSThomas Graf */
2552677e90edSThomas Graf unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2553677e90edSThomas Graf struct skb_seq_state *st)
2554677e90edSThomas Graf {
2555677e90edSThomas Graf unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2556677e90edSThomas Graf skb_frag_t *frag;
2557677e90edSThomas Graf
2558aeb193eaSWedson Almeida Filho if (unlikely(abs_offset >= st->upper_offset)) {
2559aeb193eaSWedson Almeida Filho if (st->frag_data) {
2560aeb193eaSWedson Almeida Filho kunmap_atomic(st->frag_data);
2561aeb193eaSWedson Almeida Filho st->frag_data = NULL;
2562aeb193eaSWedson Almeida Filho }
2563677e90edSThomas Graf return 0;
2564aeb193eaSWedson Almeida Filho }
2565677e90edSThomas Graf
2566677e90edSThomas Graf next_skb:
256795e3b24cSHerbert Xu block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2568677e90edSThomas Graf
2569995b3379SThomas Chenault if (abs_offset < block_limit && !st->frag_data) {
257095e3b24cSHerbert Xu *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2571677e90edSThomas Graf return block_limit - abs_offset;
2572677e90edSThomas Graf }
2573677e90edSThomas Graf
2574677e90edSThomas Graf if (st->frag_idx == 0 && !st->frag_data)
2575677e90edSThomas Graf st->stepped_offset += skb_headlen(st->cur_skb);
2576677e90edSThomas Graf
2577677e90edSThomas Graf while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2578677e90edSThomas Graf frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
25799e903e08SEric Dumazet block_limit = skb_frag_size(frag) + st->stepped_offset;
2580677e90edSThomas Graf
2581677e90edSThomas Graf if (abs_offset < block_limit) {
2582677e90edSThomas Graf if (!st->frag_data)
258351c56b00SEric Dumazet st->frag_data = kmap_atomic(skb_frag_page(frag));
2584677e90edSThomas Graf
2585677e90edSThomas Graf *data = (u8 *) st->frag_data + frag->page_offset +
2586677e90edSThomas Graf (abs_offset - st->stepped_offset);
2587677e90edSThomas Graf
2588677e90edSThomas Graf return block_limit - abs_offset;
2589677e90edSThomas Graf }
2590677e90edSThomas Graf
2591677e90edSThomas Graf if (st->frag_data) {
259251c56b00SEric Dumazet kunmap_atomic(st->frag_data);
2593677e90edSThomas Graf st->frag_data = NULL;
2594677e90edSThomas Graf }
2595677e90edSThomas Graf
2596677e90edSThomas Graf st->frag_idx++;
25979e903e08SEric Dumazet st->stepped_offset += skb_frag_size(frag);
2598677e90edSThomas Graf }
2599677e90edSThomas Graf
26005b5a60daSOlaf Kirch if (st->frag_data) {
260151c56b00SEric Dumazet kunmap_atomic(st->frag_data);
26025b5a60daSOlaf Kirch st->frag_data = NULL;
26035b5a60daSOlaf Kirch }
26045b5a60daSOlaf Kirch
260521dc3301SDavid S. Miller if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2606677e90edSThomas Graf st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
260795e3b24cSHerbert Xu st->frag_idx = 0;
2608677e90edSThomas Graf goto next_skb;
260971b3346dSShyam Iyer } else if (st->cur_skb->next) {
261071b3346dSShyam Iyer st->cur_skb = st->cur_skb->next;
261171b3346dSShyam Iyer st->frag_idx = 0;
2612677e90edSThomas Graf goto next_skb;
2613677e90edSThomas Graf }
2614677e90edSThomas Graf
2615677e90edSThomas Graf return 0;
2616677e90edSThomas Graf }
2617b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_seq_read);
2618677e90edSThomas Graf
2619677e90edSThomas Graf /**
2620677e90edSThomas Graf * skb_abort_seq_read - Abort a sequential read of skb data
2621677e90edSThomas Graf * @st: state variable
2622677e90edSThomas Graf *
2623677e90edSThomas Graf * Must be called if the sequential read is aborted, i.e. if
2624677e90edSThomas Graf * skb_seq_read() has not been called until it returned 0.
2625677e90edSThomas Graf */
2626677e90edSThomas Graf void skb_abort_seq_read(struct skb_seq_state *st)
2627677e90edSThomas Graf {
2628677e90edSThomas Graf if (st->frag_data)
262951c56b00SEric Dumazet kunmap_atomic(st->frag_data);
2630677e90edSThomas Graf }
2631b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_abort_seq_read);
2632677e90edSThomas Graf
26333fc7e8a6SThomas Graf #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
26343fc7e8a6SThomas Graf
26353fc7e8a6SThomas Graf static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
26363fc7e8a6SThomas Graf struct ts_config *conf,
26373fc7e8a6SThomas Graf struct ts_state *state)
26383fc7e8a6SThomas Graf {
26393fc7e8a6SThomas Graf return skb_seq_read(offset, text, TS_SKB_CB(state));
26403fc7e8a6SThomas Graf }
26413fc7e8a6SThomas Graf
26423fc7e8a6SThomas Graf static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
26433fc7e8a6SThomas Graf {
26443fc7e8a6SThomas Graf skb_abort_seq_read(TS_SKB_CB(state));
26453fc7e8a6SThomas Graf }
26463fc7e8a6SThomas Graf
26473fc7e8a6SThomas Graf /**
26483fc7e8a6SThomas Graf * skb_find_text - Find a text pattern in skb data
26493fc7e8a6SThomas Graf * @skb: the buffer to look in
26503fc7e8a6SThomas Graf * @from: search offset
26513fc7e8a6SThomas Graf * @to: search limit
26523fc7e8a6SThomas Graf * @config: textsearch configuration
26533fc7e8a6SThomas Graf * @state: uninitialized textsearch state variable
26543fc7e8a6SThomas Graf *
26553fc7e8a6SThomas Graf * Finds a pattern in the skb data according to the specified
26563fc7e8a6SThomas Graf * textsearch configuration. Use textsearch_next() to retrieve
26573fc7e8a6SThomas Graf * subsequent occurrences of the pattern. Returns the offset
26583fc7e8a6SThomas Graf * to the first occurrence or UINT_MAX if no match was found.
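 *
 * Illustrative usage sketch (error handling omitted; the "kmp"
 * algorithm and the pattern are arbitrary example choices):
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "GET", 3, GFP_KERNEL, TS_AUTOLOAD);
 *	pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *	textsearch_destroy(conf);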
26593fc7e8a6SThomas Graf */
26603fc7e8a6SThomas Graf unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
26613fc7e8a6SThomas Graf unsigned int to, struct ts_config *config,
26623fc7e8a6SThomas Graf struct ts_state *state)
26633fc7e8a6SThomas Graf {
2664f72b948dSPhil Oester unsigned int ret;
2665f72b948dSPhil Oester
26663fc7e8a6SThomas Graf config->get_next_block = skb_ts_get_next_block;
26673fc7e8a6SThomas Graf config->finish = skb_ts_finish;
26683fc7e8a6SThomas Graf
26693fc7e8a6SThomas Graf skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
26703fc7e8a6SThomas Graf
2671f72b948dSPhil Oester ret = textsearch_find(config, state);
2672f72b948dSPhil Oester return (ret <= to - from ? ret : UINT_MAX);
26733fc7e8a6SThomas Graf }
2674b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_find_text);
26753fc7e8a6SThomas Graf
2676e89e9cf5SAnanda Raju /**
26772c53040fSBen Hutchings * skb_append_datato_frags - append the user data to a skb
2678e89e9cf5SAnanda Raju * @sk: sock structure
2679e89e9cf5SAnanda Raju * @skb: skb structure to which the user data is appended
2680e89e9cf5SAnanda Raju * @getfrag: callback function to be used for getting the user data
2681e89e9cf5SAnanda Raju * @from: pointer to user message iov
2682e89e9cf5SAnanda Raju * @length: length of the iov message
2683e89e9cf5SAnanda Raju *
2684e89e9cf5SAnanda Raju * Description: This procedure appends the user data to the fragment part
2685e89e9cf5SAnanda Raju * of the skb. If any page allocation fails, this procedure returns -ENOMEM.
2686e89e9cf5SAnanda Raju */
2687e89e9cf5SAnanda Raju int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2688dab9630fSMartin Waitz int (*getfrag)(void *from, char *to, int offset,
2689e89e9cf5SAnanda Raju int len, int odd, struct sk_buff *skb),
2690e89e9cf5SAnanda Raju void *from, int length)
2691e89e9cf5SAnanda Raju {
2692b2111724SEric Dumazet int frg_cnt = skb_shinfo(skb)->nr_frags;
2693b2111724SEric Dumazet int copy;
2694e89e9cf5SAnanda Raju int offset = 0;
2695e89e9cf5SAnanda Raju int ret;
2696b2111724SEric Dumazet struct page_frag *pfrag = &current->task_frag;
2697e89e9cf5SAnanda Raju
2698e89e9cf5SAnanda Raju do {
2699e89e9cf5SAnanda Raju /* Return error if we don't have space for new frag */
2700e89e9cf5SAnanda Raju if (frg_cnt >= MAX_SKB_FRAGS)
2701b2111724SEric Dumazet return -EMSGSIZE;
2702e89e9cf5SAnanda Raju
2703b2111724SEric Dumazet if (!sk_page_frag_refill(sk, pfrag))
2704e89e9cf5SAnanda Raju return -ENOMEM;
2705e89e9cf5SAnanda Raju
2706e89e9cf5SAnanda Raju /* copy the user data to page */
2707b2111724SEric Dumazet copy = min_t(int, length, pfrag->size - pfrag->offset);
2708e89e9cf5SAnanda Raju
2709b2111724SEric Dumazet ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
2710e89e9cf5SAnanda Raju offset, copy, 0, skb);
2711e89e9cf5SAnanda Raju if (ret < 0)
2712e89e9cf5SAnanda Raju return -EFAULT;
2713e89e9cf5SAnanda Raju
2714e89e9cf5SAnanda Raju /* copy was successful so update the size parameters */
2715b2111724SEric Dumazet skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
2716b2111724SEric Dumazet copy);
2717b2111724SEric Dumazet frg_cnt++;
2718b2111724SEric Dumazet pfrag->offset += copy;
2719b2111724SEric Dumazet get_page(pfrag->page);
2720b2111724SEric Dumazet
2721b2111724SEric Dumazet skb->truesize += copy;
2722b2111724SEric Dumazet atomic_add(copy, &sk->sk_wmem_alloc);
2723e89e9cf5SAnanda Raju skb->len += copy;
2724e89e9cf5SAnanda Raju skb->data_len += copy;
2725e89e9cf5SAnanda Raju offset += copy;
2726e89e9cf5SAnanda Raju length -= copy;
2727e89e9cf5SAnanda
Raju 2728e89e9cf5SAnanda Raju } while (length > 0); 2729e89e9cf5SAnanda Raju 2730e89e9cf5SAnanda Raju return 0; 2731e89e9cf5SAnanda Raju } 2732b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append_datato_frags); 2733e89e9cf5SAnanda Raju 2734cbb042f9SHerbert Xu /** 2735cbb042f9SHerbert Xu * skb_pull_rcsum - pull skb and update receive checksum 2736cbb042f9SHerbert Xu * @skb: buffer to update 2737cbb042f9SHerbert Xu * @len: length of data pulled 2738cbb042f9SHerbert Xu * 2739cbb042f9SHerbert Xu * This function performs an skb_pull on the packet and updates 2740fee54fa5SUrs Thuermann * the CHECKSUM_COMPLETE checksum. It should be used on 274184fa7933SPatrick McHardy * receive path processing instead of skb_pull unless you know 274284fa7933SPatrick McHardy * that the checksum difference is zero (e.g., a valid IP header) 274384fa7933SPatrick McHardy * or you are setting ip_summed to CHECKSUM_NONE. 2744cbb042f9SHerbert Xu */ 2745cbb042f9SHerbert Xu unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 2746cbb042f9SHerbert Xu { 2747cbb042f9SHerbert Xu BUG_ON(len > skb->len); 2748cbb042f9SHerbert Xu skb->len -= len; 2749cbb042f9SHerbert Xu BUG_ON(skb->len < skb->data_len); 2750cbb042f9SHerbert Xu skb_postpull_rcsum(skb, skb->data, len); 2751cbb042f9SHerbert Xu return skb->data += len; 2752cbb042f9SHerbert Xu } 2753f94691acSArnaldo Carvalho de Melo EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2754f94691acSArnaldo Carvalho de Melo 2755f4c50d99SHerbert Xu /** 2756f4c50d99SHerbert Xu * skb_segment - Perform protocol segmentation on skb. 2757f4c50d99SHerbert Xu * @skb: buffer to segment 2758576a30ebSHerbert Xu * @features: features for the output path (see dev->features) 2759f4c50d99SHerbert Xu * 2760f4c50d99SHerbert Xu * This function performs segmentation on the given skb. It returns 27614c821d75SBen Hutchings * a pointer to the first in a list of new skbs for the segments. 27624c821d75SBen Hutchings * In case of error it returns ERR_PTR(err). 
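 *
 * Illustrative sketch of a caller (most code reaches this through
 * skb_gso_segment() rather than calling skb_segment() directly):
 *
 *	struct sk_buff *segs;
 *
 *	segs = skb_segment(skb, netif_skb_features(skb));
 *	if (IS_ERR(segs))
 *		goto drop;	(hypothetical error label)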
2763f4c50d99SHerbert Xu */ 2764c8f44affSMichał Mirosław struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) 2765f4c50d99SHerbert Xu { 2766f4c50d99SHerbert Xu struct sk_buff *segs = NULL; 2767f4c50d99SHerbert Xu struct sk_buff *tail = NULL; 276889319d38SHerbert Xu struct sk_buff *fskb = skb_shinfo(skb)->frag_list; 27699d8506ccSHerbert Xu skb_frag_t *skb_frag = skb_shinfo(skb)->frags; 2770f4c50d99SHerbert Xu unsigned int mss = skb_shinfo(skb)->gso_size; 277198e399f8SArnaldo Carvalho de Melo unsigned int doffset = skb->data - skb_mac_header(skb); 2772f4c50d99SHerbert Xu unsigned int offset = doffset; 277368c33163SPravin B Shelar unsigned int tnl_hlen = skb_tnl_header_len(skb); 2774f4c50d99SHerbert Xu unsigned int headroom; 2775f4c50d99SHerbert Xu unsigned int len; 2776ec5f0615SPravin B Shelar __be16 proto; 2777ec5f0615SPravin B Shelar bool csum; 277804ed3e74SMichał Mirosław int sg = !!(features & NETIF_F_SG); 2779f4c50d99SHerbert Xu int nfrags = skb_shinfo(skb)->nr_frags; 2780f4c50d99SHerbert Xu int err = -ENOMEM; 2781f4c50d99SHerbert Xu int i = 0; 2782f4c50d99SHerbert Xu int pos; 2783f4c50d99SHerbert Xu 2784ec5f0615SPravin B Shelar proto = skb_network_protocol(skb); 2785ec5f0615SPravin B Shelar if (unlikely(!proto)) 2786ec5f0615SPravin B Shelar return ERR_PTR(-EINVAL); 2787ec5f0615SPravin B Shelar 2788ec5f0615SPravin B Shelar csum = !!can_checksum_protocol(features, proto); 2789f4c50d99SHerbert Xu __skb_push(skb, doffset); 2790f4c50d99SHerbert Xu headroom = skb_headroom(skb); 2791f4c50d99SHerbert Xu pos = skb_headlen(skb); 2792f4c50d99SHerbert Xu 2793f4c50d99SHerbert Xu do { 2794f4c50d99SHerbert Xu struct sk_buff *nskb; 2795f4c50d99SHerbert Xu skb_frag_t *frag; 2796c8884eddSHerbert Xu int hsize; 2797f4c50d99SHerbert Xu int size; 2798f4c50d99SHerbert Xu 2799f4c50d99SHerbert Xu len = skb->len - offset; 2800f4c50d99SHerbert Xu if (len > mss) 2801f4c50d99SHerbert Xu len = mss; 2802f4c50d99SHerbert Xu 2803f4c50d99SHerbert Xu hsize = skb_headlen(skb) - offset; 2804f4c50d99SHerbert Xu if (hsize < 0) 2805f4c50d99SHerbert Xu hsize = 0; 2806c8884eddSHerbert Xu if (hsize > len || !sg) 2807c8884eddSHerbert Xu hsize = len; 2808f4c50d99SHerbert Xu 28099d8506ccSHerbert Xu if (!hsize && i >= nfrags && skb_headlen(fskb) && 28109d8506ccSHerbert Xu (skb_headlen(fskb) == len || sg)) { 28119d8506ccSHerbert Xu BUG_ON(skb_headlen(fskb) > len); 281289319d38SHerbert Xu 28139d8506ccSHerbert Xu i = 0; 28149d8506ccSHerbert Xu nfrags = skb_shinfo(fskb)->nr_frags; 28159d8506ccSHerbert Xu skb_frag = skb_shinfo(fskb)->frags; 28169d8506ccSHerbert Xu pos += skb_headlen(fskb); 28179d8506ccSHerbert Xu 28189d8506ccSHerbert Xu while (pos < offset + len) { 28199d8506ccSHerbert Xu BUG_ON(i >= nfrags); 28209d8506ccSHerbert Xu 28219d8506ccSHerbert Xu size = skb_frag_size(skb_frag); 28229d8506ccSHerbert Xu if (pos + size > offset + len) 28239d8506ccSHerbert Xu break; 28249d8506ccSHerbert Xu 28259d8506ccSHerbert Xu i++; 28269d8506ccSHerbert Xu pos += size; 28279d8506ccSHerbert Xu skb_frag++; 28289d8506ccSHerbert Xu } 28299d8506ccSHerbert Xu 283089319d38SHerbert Xu nskb = skb_clone(fskb, GFP_ATOMIC); 283189319d38SHerbert Xu fskb = fskb->next; 283289319d38SHerbert Xu 2833f4c50d99SHerbert Xu if (unlikely(!nskb)) 2834f4c50d99SHerbert Xu goto err; 2835f4c50d99SHerbert Xu 28369d8506ccSHerbert Xu if (unlikely(pskb_trim(nskb, len))) { 28379d8506ccSHerbert Xu kfree_skb(nskb); 28389d8506ccSHerbert Xu goto err; 28399d8506ccSHerbert Xu } 28409d8506ccSHerbert Xu 2841ec47ea82SAlexander Duyck hsize = skb_end_offset(nskb); 
284289319d38SHerbert Xu if (skb_cow_head(nskb, doffset + headroom)) { 284389319d38SHerbert Xu kfree_skb(nskb); 284489319d38SHerbert Xu goto err; 284589319d38SHerbert Xu } 284689319d38SHerbert Xu 2847ec47ea82SAlexander Duyck nskb->truesize += skb_end_offset(nskb) - hsize; 284889319d38SHerbert Xu skb_release_head_state(nskb); 284989319d38SHerbert Xu __skb_push(nskb, doffset); 285089319d38SHerbert Xu } else { 2851c93bdd0eSMel Gorman nskb = __alloc_skb(hsize + doffset + headroom, 2852c93bdd0eSMel Gorman GFP_ATOMIC, skb_alloc_rx_flag(skb), 2853c93bdd0eSMel Gorman NUMA_NO_NODE); 285489319d38SHerbert Xu 285589319d38SHerbert Xu if (unlikely(!nskb)) 285689319d38SHerbert Xu goto err; 285789319d38SHerbert Xu 285889319d38SHerbert Xu skb_reserve(nskb, headroom); 285989319d38SHerbert Xu __skb_put(nskb, doffset); 286089319d38SHerbert Xu } 286189319d38SHerbert Xu 2862f4c50d99SHerbert Xu if (segs) 2863f4c50d99SHerbert Xu tail->next = nskb; 2864f4c50d99SHerbert Xu else 2865f4c50d99SHerbert Xu segs = nskb; 2866f4c50d99SHerbert Xu tail = nskb; 2867f4c50d99SHerbert Xu 28686f85a124SHerbert Xu __copy_skb_header(nskb, skb); 2869f4c50d99SHerbert Xu nskb->mac_len = skb->mac_len; 2870f4c50d99SHerbert Xu 2871030737bcSEric Dumazet skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 287268c33163SPravin B Shelar 287368c33163SPravin B Shelar skb_copy_from_linear_data_offset(skb, -tnl_hlen, 287468c33163SPravin B Shelar nskb->data - tnl_hlen, 287568c33163SPravin B Shelar doffset + tnl_hlen); 287689319d38SHerbert Xu 28779d8506ccSHerbert Xu if (nskb->len == len + doffset) 28781cdbcb79SSimon Horman goto perform_csum_check; 287989319d38SHerbert Xu 2880f4c50d99SHerbert Xu if (!sg) { 28816f85a124SHerbert Xu nskb->ip_summed = CHECKSUM_NONE; 2882f4c50d99SHerbert Xu nskb->csum = skb_copy_and_csum_bits(skb, offset, 2883f4c50d99SHerbert Xu skb_put(nskb, len), 2884f4c50d99SHerbert Xu len, 0); 2885f4c50d99SHerbert Xu continue; 2886f4c50d99SHerbert Xu } 2887f4c50d99SHerbert Xu 2888f4c50d99SHerbert Xu frag = skb_shinfo(nskb)->frags; 2889f4c50d99SHerbert Xu 2890d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, offset, 2891d626f62bSArnaldo Carvalho de Melo skb_put(nskb, hsize), hsize); 2892f4c50d99SHerbert Xu 2893c9af6db4SPravin B Shelar skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; 2894cef401deSEric Dumazet 28959d8506ccSHerbert Xu while (pos < offset + len) { 28969d8506ccSHerbert Xu if (i >= nfrags) { 28979d8506ccSHerbert Xu BUG_ON(skb_headlen(fskb)); 28989d8506ccSHerbert Xu 28999d8506ccSHerbert Xu i = 0; 29009d8506ccSHerbert Xu nfrags = skb_shinfo(fskb)->nr_frags; 29019d8506ccSHerbert Xu skb_frag = skb_shinfo(fskb)->frags; 29029d8506ccSHerbert Xu 29039d8506ccSHerbert Xu BUG_ON(!nfrags); 29049d8506ccSHerbert Xu 29059d8506ccSHerbert Xu fskb = fskb->next; 29069d8506ccSHerbert Xu } 29079d8506ccSHerbert Xu 29089d8506ccSHerbert Xu if (unlikely(skb_shinfo(nskb)->nr_frags >= 29099d8506ccSHerbert Xu MAX_SKB_FRAGS)) { 29109d8506ccSHerbert Xu net_warn_ratelimited( 29119d8506ccSHerbert Xu "skb_segment: too many frags: %u %u\n", 29129d8506ccSHerbert Xu pos, mss); 29139d8506ccSHerbert Xu goto err; 29149d8506ccSHerbert Xu } 29159d8506ccSHerbert Xu 29169d8506ccSHerbert Xu *frag = *skb_frag; 2917ea2ab693SIan Campbell __skb_frag_ref(frag); 29189e903e08SEric Dumazet size = skb_frag_size(frag); 2919f4c50d99SHerbert Xu 2920f4c50d99SHerbert Xu if (pos < offset) { 2921f4c50d99SHerbert Xu frag->page_offset += offset - pos; 29229e903e08SEric Dumazet skb_frag_size_sub(frag, offset - pos); 
2923f4c50d99SHerbert Xu } 2924f4c50d99SHerbert Xu 292589319d38SHerbert Xu skb_shinfo(nskb)->nr_frags++; 2926f4c50d99SHerbert Xu 2927f4c50d99SHerbert Xu if (pos + size <= offset + len) { 2928f4c50d99SHerbert Xu i++; 29299d8506ccSHerbert Xu skb_frag++; 2930f4c50d99SHerbert Xu pos += size; 2931f4c50d99SHerbert Xu } else { 29329e903e08SEric Dumazet skb_frag_size_sub(frag, pos + size - (offset + len)); 293389319d38SHerbert Xu goto skip_fraglist; 2934f4c50d99SHerbert Xu } 2935f4c50d99SHerbert Xu 2936f4c50d99SHerbert Xu frag++; 2937f4c50d99SHerbert Xu } 2938f4c50d99SHerbert Xu 293989319d38SHerbert Xu skip_fraglist: 2940f4c50d99SHerbert Xu nskb->data_len = len - hsize; 2941f4c50d99SHerbert Xu nskb->len += nskb->data_len; 2942f4c50d99SHerbert Xu nskb->truesize += nskb->data_len; 2943ec5f0615SPravin B Shelar 29441cdbcb79SSimon Horman perform_csum_check: 2945ec5f0615SPravin B Shelar if (!csum) { 2946ec5f0615SPravin B Shelar nskb->csum = skb_checksum(nskb, doffset, 2947ec5f0615SPravin B Shelar nskb->len - doffset, 0); 2948ec5f0615SPravin B Shelar nskb->ip_summed = CHECKSUM_NONE; 2949ec5f0615SPravin B Shelar } 2950f4c50d99SHerbert Xu } while ((offset += len) < skb->len); 2951f4c50d99SHerbert Xu 2952f4c50d99SHerbert Xu return segs; 2953f4c50d99SHerbert Xu 2954f4c50d99SHerbert Xu err: 2955f4c50d99SHerbert Xu while ((skb = segs)) { 2956f4c50d99SHerbert Xu segs = skb->next; 2957b08d5840SPatrick McHardy kfree_skb(skb); 2958f4c50d99SHerbert Xu } 2959f4c50d99SHerbert Xu return ERR_PTR(err); 2960f4c50d99SHerbert Xu } 2961f4c50d99SHerbert Xu EXPORT_SYMBOL_GPL(skb_segment); 2962f4c50d99SHerbert Xu 296371d93b39SHerbert Xu int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) 296471d93b39SHerbert Xu { 29658a29111cSEric Dumazet struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); 296667147ba9SHerbert Xu unsigned int offset = skb_gro_offset(skb); 296767147ba9SHerbert Xu unsigned int headlen = skb_headlen(skb); 29688a29111cSEric Dumazet struct sk_buff *nskb, *lp, *p = *head; 29698a29111cSEric Dumazet unsigned int len = skb_gro_len(skb); 2970715dc1f3SEric Dumazet unsigned int delta_truesize; 29718a29111cSEric Dumazet unsigned int headroom; 297271d93b39SHerbert Xu 29738a29111cSEric Dumazet if (unlikely(p->len + len >= 65536)) 297471d93b39SHerbert Xu return -E2BIG; 297571d93b39SHerbert Xu 29768a29111cSEric Dumazet lp = NAPI_GRO_CB(p)->last ?: p; 29778a29111cSEric Dumazet pinfo = skb_shinfo(lp); 29788a29111cSEric Dumazet 29798a29111cSEric Dumazet if (headlen <= offset) { 298042da6994SHerbert Xu skb_frag_t *frag; 298166e92fcfSHerbert Xu skb_frag_t *frag2; 29829aaa156cSHerbert Xu int i = skbinfo->nr_frags; 29839aaa156cSHerbert Xu int nr_frags = pinfo->nr_frags + i; 298442da6994SHerbert Xu 298566e92fcfSHerbert Xu if (nr_frags > MAX_SKB_FRAGS) 29868a29111cSEric Dumazet goto merge; 298781705ad1SHerbert Xu 29888a29111cSEric Dumazet offset -= headlen; 29899aaa156cSHerbert Xu pinfo->nr_frags = nr_frags; 29909aaa156cSHerbert Xu skbinfo->nr_frags = 0; 2991f5572068SHerbert Xu 29929aaa156cSHerbert Xu frag = pinfo->frags + nr_frags; 29939aaa156cSHerbert Xu frag2 = skbinfo->frags + i; 299466e92fcfSHerbert Xu do { 299566e92fcfSHerbert Xu *--frag = *--frag2; 299666e92fcfSHerbert Xu } while (--i); 299766e92fcfSHerbert Xu 299866e92fcfSHerbert Xu frag->page_offset += offset; 29999e903e08SEric Dumazet skb_frag_size_sub(frag, offset); 300066e92fcfSHerbert Xu 3001715dc1f3SEric Dumazet /* all fragments truesize : remove (head size + sk_buff) */ 3002ec47ea82SAlexander Duyck delta_truesize = skb->truesize - 3003ec47ea82SAlexander 
Duyck SKB_TRUESIZE(skb_end_offset(skb)); 3004715dc1f3SEric Dumazet 3005f5572068SHerbert Xu skb->truesize -= skb->data_len; 3006f5572068SHerbert Xu skb->len -= skb->data_len; 3007f5572068SHerbert Xu skb->data_len = 0; 3008f5572068SHerbert Xu 3009715dc1f3SEric Dumazet NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 30105d38a079SHerbert Xu goto done; 3011d7e8883cSEric Dumazet } else if (skb->head_frag) { 3012d7e8883cSEric Dumazet int nr_frags = pinfo->nr_frags; 3013d7e8883cSEric Dumazet skb_frag_t *frag = pinfo->frags + nr_frags; 3014d7e8883cSEric Dumazet struct page *page = virt_to_head_page(skb->head); 3015d7e8883cSEric Dumazet unsigned int first_size = headlen - offset; 3016d7e8883cSEric Dumazet unsigned int first_offset; 3017d7e8883cSEric Dumazet 3018d7e8883cSEric Dumazet if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 30198a29111cSEric Dumazet goto merge; 3020d7e8883cSEric Dumazet 3021d7e8883cSEric Dumazet first_offset = skb->data - 3022d7e8883cSEric Dumazet (unsigned char *)page_address(page) + 3023d7e8883cSEric Dumazet offset; 3024d7e8883cSEric Dumazet 3025d7e8883cSEric Dumazet pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 3026d7e8883cSEric Dumazet 3027d7e8883cSEric Dumazet frag->page.p = page; 3028d7e8883cSEric Dumazet frag->page_offset = first_offset; 3029d7e8883cSEric Dumazet skb_frag_size_set(frag, first_size); 3030d7e8883cSEric Dumazet 3031d7e8883cSEric Dumazet memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 3032d7e8883cSEric Dumazet /* We dont need to clear skbinfo->nr_frags here */ 3033d7e8883cSEric Dumazet 3034715dc1f3SEric Dumazet delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 3035d7e8883cSEric Dumazet NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 3036d7e8883cSEric Dumazet goto done; 30378a29111cSEric Dumazet } 30388a29111cSEric Dumazet if (pinfo->frag_list) 30398a29111cSEric Dumazet goto merge; 30408a29111cSEric Dumazet if (skb_gro_len(p) != pinfo->gso_size) 304169c0cab1SHerbert Xu return -E2BIG; 304271d93b39SHerbert Xu 304371d93b39SHerbert Xu headroom = skb_headroom(p); 30443d3be433SEric Dumazet nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); 304571d93b39SHerbert Xu if (unlikely(!nskb)) 304671d93b39SHerbert Xu return -ENOMEM; 304771d93b39SHerbert Xu 304871d93b39SHerbert Xu __copy_skb_header(nskb, p); 304971d93b39SHerbert Xu nskb->mac_len = p->mac_len; 305071d93b39SHerbert Xu 305171d93b39SHerbert Xu skb_reserve(nskb, headroom); 305286911732SHerbert Xu __skb_put(nskb, skb_gro_offset(p)); 305371d93b39SHerbert Xu 305486911732SHerbert Xu skb_set_mac_header(nskb, skb_mac_header(p) - p->data); 305571d93b39SHerbert Xu skb_set_network_header(nskb, skb_network_offset(p)); 305671d93b39SHerbert Xu skb_set_transport_header(nskb, skb_transport_offset(p)); 305771d93b39SHerbert Xu 305886911732SHerbert Xu __skb_pull(p, skb_gro_offset(p)); 305986911732SHerbert Xu memcpy(skb_mac_header(nskb), skb_mac_header(p), 306086911732SHerbert Xu p->data - skb_mac_header(p)); 306171d93b39SHerbert Xu 306271d93b39SHerbert Xu skb_shinfo(nskb)->frag_list = p; 30639aaa156cSHerbert Xu skb_shinfo(nskb)->gso_size = pinfo->gso_size; 3064622e0ca1SHerbert Xu pinfo->gso_size = 0; 306571d93b39SHerbert Xu skb_header_release(p); 3066c3c7c254SEric Dumazet NAPI_GRO_CB(nskb)->last = p; 306771d93b39SHerbert Xu 306871d93b39SHerbert Xu nskb->data_len += p->len; 3069de8261c2SEric Dumazet nskb->truesize += p->truesize; 307071d93b39SHerbert Xu nskb->len += p->len; 307171d93b39SHerbert Xu 307271d93b39SHerbert Xu *head = nskb; 307371d93b39SHerbert Xu nskb->next = 
p->next; 307471d93b39SHerbert Xu p->next = NULL; 307571d93b39SHerbert Xu 307671d93b39SHerbert Xu p = nskb; 307771d93b39SHerbert Xu 307871d93b39SHerbert Xu merge: 3079715dc1f3SEric Dumazet delta_truesize = skb->truesize; 308067147ba9SHerbert Xu if (offset > headlen) { 3081d1dc7abfSMichal Schmidt unsigned int eat = offset - headlen; 3082d1dc7abfSMichal Schmidt 3083d1dc7abfSMichal Schmidt skbinfo->frags[0].page_offset += eat; 30849e903e08SEric Dumazet skb_frag_size_sub(&skbinfo->frags[0], eat); 3085d1dc7abfSMichal Schmidt skb->data_len -= eat; 3086d1dc7abfSMichal Schmidt skb->len -= eat; 308767147ba9SHerbert Xu offset = headlen; 308856035022SHerbert Xu } 308956035022SHerbert Xu 309067147ba9SHerbert Xu __skb_pull(skb, offset); 309156035022SHerbert Xu 30928a29111cSEric Dumazet if (!NAPI_GRO_CB(p)->last) 30938a29111cSEric Dumazet skb_shinfo(p)->frag_list = skb; 30948a29111cSEric Dumazet else 3095c3c7c254SEric Dumazet NAPI_GRO_CB(p)->last->next = skb; 3096c3c7c254SEric Dumazet NAPI_GRO_CB(p)->last = skb; 309771d93b39SHerbert Xu skb_header_release(skb); 30988a29111cSEric Dumazet lp = p; 309971d93b39SHerbert Xu 31005d38a079SHerbert Xu done: 31015d38a079SHerbert Xu NAPI_GRO_CB(p)->count++; 310237fe4732SHerbert Xu p->data_len += len; 3103715dc1f3SEric Dumazet p->truesize += delta_truesize; 310437fe4732SHerbert Xu p->len += len; 31058a29111cSEric Dumazet if (lp != p) { 31068a29111cSEric Dumazet lp->data_len += len; 31078a29111cSEric Dumazet lp->truesize += delta_truesize; 31088a29111cSEric Dumazet lp->len += len; 31098a29111cSEric Dumazet } 311071d93b39SHerbert Xu NAPI_GRO_CB(skb)->same_flow = 1; 311171d93b39SHerbert Xu return 0; 311271d93b39SHerbert Xu } 311371d93b39SHerbert Xu EXPORT_SYMBOL_GPL(skb_gro_receive); 311471d93b39SHerbert Xu 31151da177e4SLinus Torvalds void __init skb_init(void) 31161da177e4SLinus Torvalds { 31171da177e4SLinus Torvalds skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 31181da177e4SLinus Torvalds sizeof(struct sk_buff), 31191da177e4SLinus Torvalds 0, 3120e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 312120c2df83SPaul Mundt NULL); 3122d179cd12SDavid S. Miller skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 3123d179cd12SDavid S. Miller (2*sizeof(struct sk_buff)) + 3124d179cd12SDavid S. Miller sizeof(atomic_t), 3125d179cd12SDavid S. Miller 0, 3126e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 312720c2df83SPaul Mundt NULL); 31281da177e4SLinus Torvalds } 31291da177e4SLinus Torvalds 3130716ea3a7SDavid Howells /** 3131716ea3a7SDavid Howells * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 3132716ea3a7SDavid Howells * @skb: Socket buffer containing the buffers to be mapped 3133716ea3a7SDavid Howells * @sg: The scatter-gather list to map into 3134716ea3a7SDavid Howells * @offset: The offset into the buffer's contents to start mapping 3135716ea3a7SDavid Howells * @len: Length of buffer space to be mapped 3136716ea3a7SDavid Howells * 3137716ea3a7SDavid Howells * Fill the specified scatter-gather list with mappings/pointers into a 3138716ea3a7SDavid Howells * region of the buffer space attached to a socket buffer. 3139716ea3a7SDavid Howells */ 314051c739d1SDavid S. Miller static int 314151c739d1SDavid S. Miller __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 3142716ea3a7SDavid Howells { 31431a028e50SDavid S. Miller int start = skb_headlen(skb); 31441a028e50SDavid S. Miller int i, copy = start - offset; 3145fbb398a8SDavid S. 
Miller struct sk_buff *frag_iter; 3146716ea3a7SDavid Howells int elt = 0; 3147716ea3a7SDavid Howells 3148716ea3a7SDavid Howells if (copy > 0) { 3149716ea3a7SDavid Howells if (copy > len) 3150716ea3a7SDavid Howells copy = len; 3151642f1490SJens Axboe sg_set_buf(sg, skb->data + offset, copy); 3152716ea3a7SDavid Howells elt++; 3153716ea3a7SDavid Howells if ((len -= copy) == 0) 3154716ea3a7SDavid Howells return elt; 3155716ea3a7SDavid Howells offset += copy; 3156716ea3a7SDavid Howells } 3157716ea3a7SDavid Howells 3158716ea3a7SDavid Howells for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 31591a028e50SDavid S. Miller int end; 3160716ea3a7SDavid Howells 3161547b792cSIlpo Järvinen WARN_ON(start > offset + len); 31621a028e50SDavid S. Miller 31639e903e08SEric Dumazet end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 3164716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 3165716ea3a7SDavid Howells skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3166716ea3a7SDavid Howells 3167716ea3a7SDavid Howells if (copy > len) 3168716ea3a7SDavid Howells copy = len; 3169ea2ab693SIan Campbell sg_set_page(&sg[elt], skb_frag_page(frag), copy, 3170642f1490SJens Axboe frag->page_offset+offset-start); 3171716ea3a7SDavid Howells elt++; 3172716ea3a7SDavid Howells if (!(len -= copy)) 3173716ea3a7SDavid Howells return elt; 3174716ea3a7SDavid Howells offset += copy; 3175716ea3a7SDavid Howells } 31761a028e50SDavid S. Miller start = end; 3177716ea3a7SDavid Howells } 3178716ea3a7SDavid Howells 3179fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 31801a028e50SDavid S. Miller int end; 3181716ea3a7SDavid Howells 3182547b792cSIlpo Järvinen WARN_ON(start > offset + len); 31831a028e50SDavid S. Miller 3184fbb398a8SDavid S. Miller end = start + frag_iter->len; 3185716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 3186716ea3a7SDavid Howells if (copy > len) 3187716ea3a7SDavid Howells copy = len; 3188fbb398a8SDavid S. Miller elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, 318951c739d1SDavid S. Miller copy); 3190716ea3a7SDavid Howells if ((len -= copy) == 0) 3191716ea3a7SDavid Howells return elt; 3192716ea3a7SDavid Howells offset += copy; 3193716ea3a7SDavid Howells } 31941a028e50SDavid S. Miller start = end; 3195716ea3a7SDavid Howells } 3196716ea3a7SDavid Howells BUG_ON(len); 3197716ea3a7SDavid Howells return elt; 3198716ea3a7SDavid Howells } 3199716ea3a7SDavid Howells 320051c739d1SDavid S. Miller int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 320151c739d1SDavid S. Miller { 320251c739d1SDavid S. Miller int nsg = __skb_to_sgvec(skb, sg, offset, len); 320351c739d1SDavid S. Miller 3204c46f2334SJens Axboe sg_mark_end(&sg[nsg - 1]); 320551c739d1SDavid S. Miller 320651c739d1SDavid S. Miller return nsg; 320751c739d1SDavid S. Miller } 3208b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_to_sgvec); 320951c739d1SDavid S. Miller 3210716ea3a7SDavid Howells /** 3211716ea3a7SDavid Howells * skb_cow_data - Check that a socket buffer's data buffers are writable 3212716ea3a7SDavid Howells * @skb: The socket buffer to check. 3213716ea3a7SDavid Howells * @tailbits: Amount of trailing space to be added 3214716ea3a7SDavid Howells * @trailer: Returned pointer to the skb where the @tailbits space begins 3215716ea3a7SDavid Howells * 3216716ea3a7SDavid Howells * Make sure that the data buffers attached to a socket buffer are 3217716ea3a7SDavid Howells * writable. 
If they are not, private copies are made of the data buffers 3218716ea3a7SDavid Howells * and the socket buffer is set to use these instead. 3219716ea3a7SDavid Howells * 3220716ea3a7SDavid Howells * If @tailbits is given, make sure that there is space to write @tailbits 3221716ea3a7SDavid Howells * bytes of data beyond current end of socket buffer. @trailer will be 3222716ea3a7SDavid Howells * set to point to the skb in which this space begins. 3223716ea3a7SDavid Howells * 3224716ea3a7SDavid Howells * The number of scatterlist elements required to completely map the 3225716ea3a7SDavid Howells * COW'd and extended socket buffer will be returned. 3226716ea3a7SDavid Howells */ 3227716ea3a7SDavid Howells int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 3228716ea3a7SDavid Howells { 3229716ea3a7SDavid Howells int copyflag; 3230716ea3a7SDavid Howells int elt; 3231716ea3a7SDavid Howells struct sk_buff *skb1, **skb_p; 3232716ea3a7SDavid Howells 3233716ea3a7SDavid Howells /* If skb is cloned or its head is paged, reallocate 3234716ea3a7SDavid Howells * head pulling out all the pages (pages are considered not writable 3235716ea3a7SDavid Howells * at the moment even if they are anonymous). 3236716ea3a7SDavid Howells */ 3237716ea3a7SDavid Howells if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 3238716ea3a7SDavid Howells __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) 3239716ea3a7SDavid Howells return -ENOMEM; 3240716ea3a7SDavid Howells 3241716ea3a7SDavid Howells /* Easy case. Most of packets will go this way. */ 324221dc3301SDavid S. Miller if (!skb_has_frag_list(skb)) { 3243716ea3a7SDavid Howells /* A little of trouble, not enough of space for trailer. 3244716ea3a7SDavid Howells * This should not happen, when stack is tuned to generate 3245716ea3a7SDavid Howells * good frames. OK, on miss we reallocate and reserve even more 3246716ea3a7SDavid Howells * space, 128 bytes is fair. */ 3247716ea3a7SDavid Howells 3248716ea3a7SDavid Howells if (skb_tailroom(skb) < tailbits && 3249716ea3a7SDavid Howells pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 3250716ea3a7SDavid Howells return -ENOMEM; 3251716ea3a7SDavid Howells 3252716ea3a7SDavid Howells /* Voila! */ 3253716ea3a7SDavid Howells *trailer = skb; 3254716ea3a7SDavid Howells return 1; 3255716ea3a7SDavid Howells } 3256716ea3a7SDavid Howells 3257716ea3a7SDavid Howells /* Misery. We are in troubles, going to mincer fragments... */ 3258716ea3a7SDavid Howells 3259716ea3a7SDavid Howells elt = 1; 3260716ea3a7SDavid Howells skb_p = &skb_shinfo(skb)->frag_list; 3261716ea3a7SDavid Howells copyflag = 0; 3262716ea3a7SDavid Howells 3263716ea3a7SDavid Howells while ((skb1 = *skb_p) != NULL) { 3264716ea3a7SDavid Howells int ntail = 0; 3265716ea3a7SDavid Howells 3266716ea3a7SDavid Howells /* The fragment is partially pulled by someone, 3267716ea3a7SDavid Howells * this can happen on input. Copy it and everything 3268716ea3a7SDavid Howells * after it. */ 3269716ea3a7SDavid Howells 3270716ea3a7SDavid Howells if (skb_shared(skb1)) 3271716ea3a7SDavid Howells copyflag = 1; 3272716ea3a7SDavid Howells 3273716ea3a7SDavid Howells /* If the skb is the last, worry about trailer. */ 3274716ea3a7SDavid Howells 3275716ea3a7SDavid Howells if (skb1->next == NULL && tailbits) { 3276716ea3a7SDavid Howells if (skb_shinfo(skb1)->nr_frags || 327721dc3301SDavid S. 
Miller skb_has_frag_list(skb1) ||
3278716ea3a7SDavid Howells skb_tailroom(skb1) < tailbits)
3279716ea3a7SDavid Howells ntail = tailbits + 128;
3280716ea3a7SDavid Howells }
3281716ea3a7SDavid Howells
3282716ea3a7SDavid Howells if (copyflag ||
3283716ea3a7SDavid Howells skb_cloned(skb1) ||
3284716ea3a7SDavid Howells ntail ||
3285716ea3a7SDavid Howells skb_shinfo(skb1)->nr_frags ||
328621dc3301SDavid S. Miller skb_has_frag_list(skb1)) {
3287716ea3a7SDavid Howells struct sk_buff *skb2;
3288716ea3a7SDavid Howells
3289716ea3a7SDavid Howells /* No way out: we have to make a private copy. */
3290716ea3a7SDavid Howells if (ntail == 0)
3291716ea3a7SDavid Howells skb2 = skb_copy(skb1, GFP_ATOMIC);
3292716ea3a7SDavid Howells else
3293716ea3a7SDavid Howells skb2 = skb_copy_expand(skb1,
3294716ea3a7SDavid Howells skb_headroom(skb1),
3295716ea3a7SDavid Howells ntail,
3296716ea3a7SDavid Howells GFP_ATOMIC);
3297716ea3a7SDavid Howells if (unlikely(skb2 == NULL))
3298716ea3a7SDavid Howells return -ENOMEM;
3299716ea3a7SDavid Howells
3300716ea3a7SDavid Howells if (skb1->sk)
3301716ea3a7SDavid Howells skb_set_owner_w(skb2, skb1->sk);
3302716ea3a7SDavid Howells
3303716ea3a7SDavid Howells /* Looking around. Are we still alive?
3304716ea3a7SDavid Howells * OK, link the new skb and drop the old one. */
3305716ea3a7SDavid Howells
3306716ea3a7SDavid Howells skb2->next = skb1->next;
3307716ea3a7SDavid Howells *skb_p = skb2;
3308716ea3a7SDavid Howells kfree_skb(skb1);
3309716ea3a7SDavid Howells skb1 = skb2;
3310716ea3a7SDavid Howells }
3311716ea3a7SDavid Howells elt++;
3312716ea3a7SDavid Howells *trailer = skb1;
3313716ea3a7SDavid Howells skb_p = &skb1->next;
3314716ea3a7SDavid Howells }
3315716ea3a7SDavid Howells
3316716ea3a7SDavid Howells return elt;
3317716ea3a7SDavid Howells }
3318b4ac530fSDavid S.
Miller EXPORT_SYMBOL_GPL(skb_cow_data); 3319716ea3a7SDavid Howells 3320b1faf566SEric Dumazet static void sock_rmem_free(struct sk_buff *skb) 3321b1faf566SEric Dumazet { 3322b1faf566SEric Dumazet struct sock *sk = skb->sk; 3323b1faf566SEric Dumazet 3324b1faf566SEric Dumazet atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 3325b1faf566SEric Dumazet } 3326b1faf566SEric Dumazet 3327b1faf566SEric Dumazet /* 3328b1faf566SEric Dumazet * Note: We dont mem charge error packets (no sk_forward_alloc changes) 3329b1faf566SEric Dumazet */ 3330b1faf566SEric Dumazet int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 3331b1faf566SEric Dumazet { 3332110c4330SEric Dumazet int len = skb->len; 3333110c4330SEric Dumazet 3334b1faf566SEric Dumazet if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 333595c96174SEric Dumazet (unsigned int)sk->sk_rcvbuf) 3336b1faf566SEric Dumazet return -ENOMEM; 3337b1faf566SEric Dumazet 3338b1faf566SEric Dumazet skb_orphan(skb); 3339b1faf566SEric Dumazet skb->sk = sk; 3340b1faf566SEric Dumazet skb->destructor = sock_rmem_free; 3341b1faf566SEric Dumazet atomic_add(skb->truesize, &sk->sk_rmem_alloc); 3342b1faf566SEric Dumazet 3343abb57ea4SEric Dumazet /* before exiting rcu section, make sure dst is refcounted */ 3344abb57ea4SEric Dumazet skb_dst_force(skb); 3345abb57ea4SEric Dumazet 3346b1faf566SEric Dumazet skb_queue_tail(&sk->sk_error_queue, skb); 3347b1faf566SEric Dumazet if (!sock_flag(sk, SOCK_DEAD)) 3348110c4330SEric Dumazet sk->sk_data_ready(sk, len); 3349b1faf566SEric Dumazet return 0; 3350b1faf566SEric Dumazet } 3351b1faf566SEric Dumazet EXPORT_SYMBOL(sock_queue_err_skb); 3352b1faf566SEric Dumazet 3353ac45f602SPatrick Ohly void skb_tstamp_tx(struct sk_buff *orig_skb, 3354ac45f602SPatrick Ohly struct skb_shared_hwtstamps *hwtstamps) 3355ac45f602SPatrick Ohly { 3356ac45f602SPatrick Ohly struct sock *sk = orig_skb->sk; 3357ac45f602SPatrick Ohly struct sock_exterr_skb *serr; 3358ac45f602SPatrick Ohly struct sk_buff *skb; 3359ac45f602SPatrick Ohly int err; 3360ac45f602SPatrick Ohly 3361ac45f602SPatrick Ohly if (!sk) 3362ac45f602SPatrick Ohly return; 3363ac45f602SPatrick Ohly 3364ac45f602SPatrick Ohly if (hwtstamps) { 33652e31396fSWillem de Bruijn *skb_hwtstamps(orig_skb) = 3366ac45f602SPatrick Ohly *hwtstamps; 3367ac45f602SPatrick Ohly } else { 3368ac45f602SPatrick Ohly /* 3369ac45f602SPatrick Ohly * no hardware time stamps available, 33702244d07bSOliver Hartkopp * so keep the shared tx_flags and only 3371ac45f602SPatrick Ohly * store software time stamp 3372ac45f602SPatrick Ohly */ 33732e31396fSWillem de Bruijn orig_skb->tstamp = ktime_get_real(); 3374ac45f602SPatrick Ohly } 3375ac45f602SPatrick Ohly 33762e31396fSWillem de Bruijn skb = skb_clone(orig_skb, GFP_ATOMIC); 33772e31396fSWillem de Bruijn if (!skb) 33782e31396fSWillem de Bruijn return; 33792e31396fSWillem de Bruijn 3380ac45f602SPatrick Ohly serr = SKB_EXT_ERR(skb); 3381ac45f602SPatrick Ohly memset(serr, 0, sizeof(*serr)); 3382ac45f602SPatrick Ohly serr->ee.ee_errno = ENOMSG; 3383ac45f602SPatrick Ohly serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 338429030374SEric Dumazet 3385ac45f602SPatrick Ohly err = sock_queue_err_skb(sk, skb); 338629030374SEric Dumazet 3387ac45f602SPatrick Ohly if (err) 3388ac45f602SPatrick Ohly kfree_skb(skb); 3389ac45f602SPatrick Ohly } 3390ac45f602SPatrick Ohly EXPORT_SYMBOL_GPL(skb_tstamp_tx); 3391ac45f602SPatrick Ohly 33926e3e939fSJohannes Berg void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 33936e3e939fSJohannes Berg { 33946e3e939fSJohannes Berg struct sock *sk = 
skb->sk; 33956e3e939fSJohannes Berg struct sock_exterr_skb *serr; 33966e3e939fSJohannes Berg int err; 33976e3e939fSJohannes Berg 33986e3e939fSJohannes Berg skb->wifi_acked_valid = 1; 33996e3e939fSJohannes Berg skb->wifi_acked = acked; 34006e3e939fSJohannes Berg 34016e3e939fSJohannes Berg serr = SKB_EXT_ERR(skb); 34026e3e939fSJohannes Berg memset(serr, 0, sizeof(*serr)); 34036e3e939fSJohannes Berg serr->ee.ee_errno = ENOMSG; 34046e3e939fSJohannes Berg serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 34056e3e939fSJohannes Berg 34066e3e939fSJohannes Berg err = sock_queue_err_skb(sk, skb); 34076e3e939fSJohannes Berg if (err) 34086e3e939fSJohannes Berg kfree_skb(skb); 34096e3e939fSJohannes Berg } 34106e3e939fSJohannes Berg EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 34116e3e939fSJohannes Berg 3412ac45f602SPatrick Ohly 3413f35d9d8aSRusty Russell /** 3414f35d9d8aSRusty Russell * skb_partial_csum_set - set up and verify partial csum values for packet 3415f35d9d8aSRusty Russell * @skb: the skb to set 3416f35d9d8aSRusty Russell * @start: the number of bytes after skb->data to start checksumming. 3417f35d9d8aSRusty Russell * @off: the offset from start to place the checksum. 3418f35d9d8aSRusty Russell * 3419f35d9d8aSRusty Russell * For untrusted partially-checksummed packets, we need to make sure the values 3420f35d9d8aSRusty Russell * for skb->csum_start and skb->csum_offset are valid so we don't oops. 3421f35d9d8aSRusty Russell * 3422f35d9d8aSRusty Russell * This function checks and sets those values and skb->ip_summed: if this 3423f35d9d8aSRusty Russell * returns false you should drop the packet. 3424f35d9d8aSRusty Russell */ 3425f35d9d8aSRusty Russell bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 3426f35d9d8aSRusty Russell { 34275ff8dda3SHerbert Xu if (unlikely(start > skb_headlen(skb)) || 34285ff8dda3SHerbert Xu unlikely((int)start + off > skb_headlen(skb) - 2)) { 3429e87cc472SJoe Perches net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", 34305ff8dda3SHerbert Xu start, off, skb_headlen(skb)); 3431f35d9d8aSRusty Russell return false; 3432f35d9d8aSRusty Russell } 3433f35d9d8aSRusty Russell skb->ip_summed = CHECKSUM_PARTIAL; 3434f35d9d8aSRusty Russell skb->csum_start = skb_headroom(skb) + start; 3435f35d9d8aSRusty Russell skb->csum_offset = off; 3436e5d5decaSJason Wang skb_set_transport_header(skb, start); 3437f35d9d8aSRusty Russell return true; 3438f35d9d8aSRusty Russell } 3439b4ac530fSDavid S. 
Miller EXPORT_SYMBOL_GPL(skb_partial_csum_set); 3440f35d9d8aSRusty Russell 34414497b076SBen Hutchings void __skb_warn_lro_forwarding(const struct sk_buff *skb) 34424497b076SBen Hutchings { 3443e87cc472SJoe Perches net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 3444e87cc472SJoe Perches skb->dev->name); 34454497b076SBen Hutchings } 34464497b076SBen Hutchings EXPORT_SYMBOL(__skb_warn_lro_forwarding); 3447bad43ca8SEric Dumazet 3448bad43ca8SEric Dumazet void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 3449bad43ca8SEric Dumazet { 34503d861f66SEric Dumazet if (head_stolen) { 34513d861f66SEric Dumazet skb_release_head_state(skb); 3452bad43ca8SEric Dumazet kmem_cache_free(skbuff_head_cache, skb); 34533d861f66SEric Dumazet } else { 3454bad43ca8SEric Dumazet __kfree_skb(skb); 3455bad43ca8SEric Dumazet } 34563d861f66SEric Dumazet } 3457bad43ca8SEric Dumazet EXPORT_SYMBOL(kfree_skb_partial); 3458bad43ca8SEric Dumazet 3459bad43ca8SEric Dumazet /** 3460bad43ca8SEric Dumazet * skb_try_coalesce - try to merge skb to prior one 3461bad43ca8SEric Dumazet * @to: prior buffer 3462bad43ca8SEric Dumazet * @from: buffer to add 3463bad43ca8SEric Dumazet * @fragstolen: pointer to boolean 3464c6c4b97cSRandy Dunlap * @delta_truesize: how much more was allocated than was requested 3465bad43ca8SEric Dumazet */ 3466bad43ca8SEric Dumazet bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 3467bad43ca8SEric Dumazet bool *fragstolen, int *delta_truesize) 3468bad43ca8SEric Dumazet { 3469bad43ca8SEric Dumazet int i, delta, len = from->len; 3470bad43ca8SEric Dumazet 3471bad43ca8SEric Dumazet *fragstolen = false; 3472bad43ca8SEric Dumazet 3473bad43ca8SEric Dumazet if (skb_cloned(to)) 3474bad43ca8SEric Dumazet return false; 3475bad43ca8SEric Dumazet 3476bad43ca8SEric Dumazet if (len <= skb_tailroom(to)) { 3477bad43ca8SEric Dumazet BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 3478bad43ca8SEric Dumazet *delta_truesize = 0; 3479bad43ca8SEric Dumazet return true; 3480bad43ca8SEric Dumazet } 3481bad43ca8SEric Dumazet 3482bad43ca8SEric Dumazet if (skb_has_frag_list(to) || skb_has_frag_list(from)) 3483bad43ca8SEric Dumazet return false; 3484bad43ca8SEric Dumazet 3485bad43ca8SEric Dumazet if (skb_headlen(from) != 0) { 3486bad43ca8SEric Dumazet struct page *page; 3487bad43ca8SEric Dumazet unsigned int offset; 3488bad43ca8SEric Dumazet 3489bad43ca8SEric Dumazet if (skb_shinfo(to)->nr_frags + 3490bad43ca8SEric Dumazet skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 3491bad43ca8SEric Dumazet return false; 3492bad43ca8SEric Dumazet 3493bad43ca8SEric Dumazet if (skb_head_is_locked(from)) 3494bad43ca8SEric Dumazet return false; 3495bad43ca8SEric Dumazet 3496bad43ca8SEric Dumazet delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 3497bad43ca8SEric Dumazet 3498bad43ca8SEric Dumazet page = virt_to_head_page(from->head); 3499bad43ca8SEric Dumazet offset = from->data - (unsigned char *)page_address(page); 3500bad43ca8SEric Dumazet 3501bad43ca8SEric Dumazet skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, 3502bad43ca8SEric Dumazet page, offset, skb_headlen(from)); 3503bad43ca8SEric Dumazet *fragstolen = true; 3504bad43ca8SEric Dumazet } else { 3505bad43ca8SEric Dumazet if (skb_shinfo(to)->nr_frags + 3506bad43ca8SEric Dumazet skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) 3507bad43ca8SEric Dumazet return false; 3508bad43ca8SEric Dumazet 3509f4b549a5SWeiping Pan delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); 3510bad43ca8SEric Dumazet } 3511bad43ca8SEric 
Dumazet
3512bad43ca8SEric Dumazet WARN_ON_ONCE(delta < len);
3513bad43ca8SEric Dumazet
3514bad43ca8SEric Dumazet memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
3515bad43ca8SEric Dumazet skb_shinfo(from)->frags,
3516bad43ca8SEric Dumazet skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
3517bad43ca8SEric Dumazet skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
3518bad43ca8SEric Dumazet
3519bad43ca8SEric Dumazet if (!skb_cloned(from))
3520bad43ca8SEric Dumazet skb_shinfo(from)->nr_frags = 0;
3521bad43ca8SEric Dumazet
35228ea853fdSLi RongQing /* if the skb is not cloned this does nothing
35238ea853fdSLi RongQing * since we set nr_frags to 0.
35248ea853fdSLi RongQing */
3525bad43ca8SEric Dumazet for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
3526bad43ca8SEric Dumazet skb_frag_ref(from, i);
3527bad43ca8SEric Dumazet
3528bad43ca8SEric Dumazet to->truesize += delta;
3529bad43ca8SEric Dumazet to->len += len;
3530bad43ca8SEric Dumazet to->data_len += len;
3531bad43ca8SEric Dumazet
3532bad43ca8SEric Dumazet *delta_truesize = delta;
3533bad43ca8SEric Dumazet return true;
3534bad43ca8SEric Dumazet }
3535bad43ca8SEric Dumazet EXPORT_SYMBOL(skb_try_coalesce);
3536621e84d6SNicolas Dichtel
3537621e84d6SNicolas Dichtel /**
35388b27f277SNicolas Dichtel * skb_scrub_packet - scrub an skb
3539621e84d6SNicolas Dichtel *
3540621e84d6SNicolas Dichtel * @skb: buffer to clean
35418b27f277SNicolas Dichtel * @xnet: packet is crossing netns
3542621e84d6SNicolas Dichtel *
35438b27f277SNicolas Dichtel * skb_scrub_packet can be used after encapsulating or decapsulating a packet
35448b27f277SNicolas Dichtel * into/from a tunnel. Some information has to be cleared during these
35458b27f277SNicolas Dichtel * operations.
35468b27f277SNicolas Dichtel * skb_scrub_packet can also be used to clean a skb before injecting it into
35478b27f277SNicolas Dichtel * another namespace (@xnet == true). We have to clear all information in the
35488b27f277SNicolas Dichtel * skb that could impact namespace isolation.
3549621e84d6SNicolas Dichtel */
35508b27f277SNicolas Dichtel void skb_scrub_packet(struct sk_buff *skb, bool xnet)
3551621e84d6SNicolas Dichtel {
35528b27f277SNicolas Dichtel if (xnet)
3553621e84d6SNicolas Dichtel skb_orphan(skb);
3554621e84d6SNicolas Dichtel skb->tstamp.tv64 = 0;
3555621e84d6SNicolas Dichtel skb->pkt_type = PACKET_HOST;
3556621e84d6SNicolas Dichtel skb->skb_iif = 0;
3557239c78dbSHannes Frederic Sowa skb->local_df = 0;
3558621e84d6SNicolas Dichtel skb_dst_drop(skb);
3559621e84d6SNicolas Dichtel skb->mark = 0;
3560621e84d6SNicolas Dichtel secpath_reset(skb);
3561621e84d6SNicolas Dichtel nf_reset(skb);
3562621e84d6SNicolas Dichtel nf_reset_trace(skb);
3563621e84d6SNicolas Dichtel }
3564621e84d6SNicolas Dichtel EXPORT_SYMBOL_GPL(skb_scrub_packet);
3565
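/*
 * Usage sketch (illustrative only, not from the tree): a tunnel receive
 * path that may cross network namespaces scrubs the skb before handing
 * it on. example_tunnel_rcv() and 'out_dev' are hypothetical names.
 */
static inline void example_tunnel_rcv(struct sk_buff *skb,
				      struct net_device *out_dev)
{
	/* scrub namespace-sensitive state only when actually crossing */
	bool xnet = !net_eq(dev_net(skb->dev), dev_net(out_dev));

	skb_scrub_packet(skb, xnet);
	skb->dev = out_dev;
	netif_rx(skb);
}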