// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>

#include "datagram.h"

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}
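/* Illustrative sketch only, not code from this file: how these panics are
 * typically reached. A caller that writes past the tailroom it allocated
 * trips skb_over_panic() through skb_put(); the sizes are arbitrary
 * example values.
 *
 *	struct sk_buff *skb = alloc_skb(100, GFP_ATOMIC);
 *
 *	if (skb)
 *		skb_put(skb, 200);	// exceeds tailroom -> skb_over_panic()
 *
 * Similarly, skb_push() beyond the headroom ends in skb_under_panic().
 */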
/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}
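/* A minimal sketch of the intended calling pattern (this is what
 * __alloc_skb() below does): the pfmemalloc verdict is captured and
 * propagated into the skb, so the stack can later drop the packet if the
 * receiving socket turns out not to be SOCK_MEMALLOC.
 *
 *	bool pfmemalloc;
 *	void *data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE,
 *				     &pfmemalloc);
 *
 *	if (data)
 *		skb->pfmemalloc = pfmemalloc;
 */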
/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}

	skb_set_kcov_handle(skb, kcov_common_handle());

out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
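/* A minimal usage sketch, not taken from this file: a typical caller goes
 * through the alloc_skb() wrapper, reserves headroom for the headers it
 * will push later, then appends payload. header_len, data_len and payload
 * are assumptions for the example.
 *
 *	struct sk_buff *skb = alloc_skb(header_len + data_len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, header_len);		// headroom for protocol headers
 *	skb_put_data(skb, payload, data_len);	// tail room was sized above
 */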
/* Caller must provide SKB that is memset cleared */
static struct sk_buff *__build_skb_around(struct sk_buff *skb,
					  void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	unsigned int size = frag_size ? : ksize(data);

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	skb_set_kcov_handle(skb, kcov_common_handle());

	return skb;
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 * Before IO, driver allocates only data buffer where NIC put incoming frame
 * Driver should add room at head (NET_SKB_PAD) and
 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 * After IO, driver calls build_skb(), to allocate sk_buff and populate it
 * before giving packet to stack.
 * RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __build_skb_around(skb, data, frag_size);
}

/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head_frag and skb->pfmemalloc
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc()
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);

/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	skb = __build_skb_around(skb, data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

#define NAPI_SKB_CACHE_SIZE	64

struct napi_alloc_cache {
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
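/* A minimal sketch of the driver-side contract described in the
 * __build_skb() notes above; rx_buf and pkt_len are assumptions for the
 * example, not code from this file. The receive buffer is sized with
 * NET_SKB_PAD headroom plus tail room for struct skb_shared_info, and the
 * skb is built around it after DMA completes.
 *
 *	unsigned int buf_size = SKB_DATA_ALIGN(NET_SKB_PAD + pkt_len) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	struct sk_buff *skb = build_skb(rx_buf, buf_size);
 *
 *	if (likely(skb)) {
 *		skb_reserve(skb, NET_SKB_PAD);	// skip the reserved headroom
 *		skb_put(skb, pkt_len);		// frame written by the NIC
 *	}
 */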
static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
}

void *napi_alloc_frag(unsigned int fragsz)
{
	fragsz = SKB_DATA_ALIGN(fragsz);

	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(napi_alloc_frag);

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	struct page_frag_cache *nc;
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);
	if (in_irq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);
	} else {
		local_bh_disable();
		data = __napi_alloc_frag(fragsz, GFP_ATOMIC);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(netdev_alloc_frag);
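/* Illustrative pairing, assuming a driver refilling its RX ring: a frag
 * obtained here is later either handed to the stack via build_skb() or,
 * on error paths, returned with skb_free_frag(). The failure condition
 * below is hypothetical.
 *
 *	void *data = netdev_alloc_frag(fragsz);
 *
 *	if (!data)
 *		return -ENOMEM;
 *	if (dma_map_failed)		// hypothetical error path
 *		skb_free_frag(data);	// releases the page fragment
 */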
/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_irq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
	} else {
		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
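/* A short usage sketch, assuming a non-NAPI RX path in some driver: the
 * netdev_alloc_skb() wrapper supplies GFP_ATOMIC, and the NET_SKB_PAD
 * headroom is already reserved on return. rx_buf and pkt_len are
 * assumptions for the example.
 *
 *	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len);
 *
 *	if (unlikely(!skb))
 *		return NULL;			// caller drops the frame
 *	skb_put_data(skb, rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 */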
/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation.  By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	nc = this_cpu_ptr(&napi_alloc_cache);
	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
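/* Sketch of the intended caller, a NAPI poll handler (the driver-private
 * names are assumptions): napi_alloc_skb() wraps this with GFP_ATOMIC and
 * is only safe from softirq/NAPI context, which is what lets it use the
 * per-CPU cache without disabling IRQs.
 *
 *	struct sk_buff *skb = napi_alloc_skb(&priv->napi, pkt_len);
 *
 *	if (likely(skb)) {
 *		skb_put_data(skb, rx_buf, pkt_len);
 *		napi_gro_receive(&priv->napi, skb);
 *	}
 */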
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag)
		skb_free_frag(head);
	else
		kfree(head);
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	skb_zcopy_clear(skb, true);

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_free_head(skb);
}
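/* Worked example of the dataref accounting that makes the early return in
 * skb_release_data() correct (a sketch, not code from this file): cloning
 * bumps shinfo->dataref, so the shared data outlives whichever of the
 * pair is freed first.
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); // dataref: 1 -> 2
 *
 *	kfree_skb(skb);		// dataref 2 -> 1, head/frags stay alive
 *	kfree_skb(clone);	// dataref 1 -> 0, skb_release_data() frees all
 */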
/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);
/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);
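/* Hedged usage sketch: kfree_skb_list() frees a chain linked through
 * skb->next, e.g. a segment list that could not be transmitted. The
 * surrounding error handling is illustrative only.
 *
 *	struct sk_buff *segs = skb_gso_segment(skb, features);
 *
 *	if (IS_ERR_OR_NULL(segs))
 *		return;			// nothing extra to free
 *	if (transmit_failed)		// hypothetical condition
 *		kfree_skb_list(segs);	// drops every segment in the chain
 */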
/* Dump skb information and contents.
 *
 * Must only be called from net_ratelimit()-ed paths.
 *
 * Dumps whole packets if full_pkt, only headers otherwise.
 */
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct net_device *dev = skb->dev;
	struct sock *sk = skb->sk;
	struct sk_buff *list_skb;
	bool has_mac, has_trans;
	int headroom, tailroom;
	int i, len, seg_len;

	if (full_pkt)
		len = skb->len;
	else
		len = min_t(int, skb->len, MAX_HEADER + 128);

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	has_mac = skb_mac_header_was_set(skb);
	has_trans = skb_transport_header_was_set(skb);

	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
	       "mac=(%d,%d) net=(%d,%d) trans=%d\n"
	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
	       "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
	       level, skb->len, headroom, skb_headlen(skb), tailroom,
	       has_mac ? skb->mac_header : -1,
	       has_mac ? skb_mac_header_len(skb) : -1,
	       skb->network_header,
	       has_trans ? skb_network_header_len(skb) : -1,
	       has_trans ? skb->transport_header : -1,
	       sh->tx_flags, sh->nr_frags,
	       sh->gso_size, sh->gso_type, sh->gso_segs,
	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
	       skb->csum_valid, skb->csum_level,
	       skb->hash, skb->sw_hash, skb->l4_hash,
	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);

	if (dev)
		printk("%sdev name=%s feat=0x%pNF\n",
		       level, dev->name, &dev->features);
	if (sk)
		printk("%ssk family=%hu type=%u proto=%u\n",
		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);

	if (full_pkt && headroom)
		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->head, headroom, false);

	seg_len = min_t(int, skb_headlen(skb), len);
	if (seg_len)
		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->data, seg_len, false);
	len -= seg_len;

	if (full_pkt && tailroom)
		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb_tail_pointer(skb), tailroom, false);

	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(frag, skb_frag_off(frag),
				      skb_frag_size(frag), p, p_off, p_len,
				      copied) {
			seg_len = min_t(int, p_len, len);
			vaddr = kmap_atomic(p);
			print_hex_dump(level, "skb frag:     ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, vaddr + p_off, seg_len, false);
			kunmap_atomic(vaddr);
			len -= seg_len;
			if (!len)
				break;
		}
	}

	if (full_pkt && skb_has_frag_list(skb)) {
		printk("skb fraglist:\n");
		skb_walk_frags(skb, list_skb)
			skb_dump(level, list_skb, true);
	}
}
EXPORT_SYMBOL(skb_dump);
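/* Intended call pattern, per the "net_ratelimit()-ed paths" rule above
 * (sketch only; the triggering condition is a stand-in):
 *
 *	if (unlikely(bad_csum) && net_ratelimit())
 *		skb_dump(KERN_ERR, skb, false);	// headers only
 */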
/**
 * skb_tx_error - report an sk_buff xmit error
 * @skb: buffer that triggered an error
 *
 * Report xmit error if a device callback is tracking this skb.
 * skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	skb_zcopy_clear(skb, true);
}
EXPORT_SYMBOL(skb_tx_error);

#ifdef CONFIG_TRACEPOINTS
/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb(), but kfree_skb() assumes the frame
 *	is being dropped after a failure and notes that via a tracepoint.
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
#endif

/**
 *	__consume_stateless_skb - free an skbuff, assuming it is stateless
 *	@skb: buffer to free
 *
 *	Alike consume_skb(), but this variant assumes that this is the last
 *	skb reference and all the head states have been already dropped
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb);
	skb_release_data(skb);
	kfree_skbmem(skb);
}

void __kfree_skb_flush(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* flush skb_cache if containing objects */
	if (nc->skb_count) {
		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}
static inline void _kfree_skb_defer(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* drop skb->head and call any destructors for packet */
	skb_release_all(skb);

	/* record skb to CPU local list */
	nc->skb_cache[nc->skb_count++] = skb;

#ifdef CONFIG_SLUB
	/* SLUB writes into objects when freeing */
	prefetchw(skb);
#endif

	/* flush skb_cache if it is filled */
	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}
void __kfree_skb_defer(struct sk_buff *skb)
{
	_kfree_skb_defer(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	/* Zero budget indicates non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	lockdep_assert_in_softirq();

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb);

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	_kfree_skb_defer(skb);
}
EXPORT_SYMBOL(napi_consume_skb);
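/* Sketch of the intended caller, a driver's TX completion loop inside a
 * NAPI poll handler (ring, tx_buf and the helper are assumptions):
 * passing the real budget lets frees be deferred to the per-CPU cache,
 * while budget == 0 (netpoll) falls back to dev_consume_skb_any().
 *
 *	while (tx_work_pending(ring)) {		// hypothetical helper
 *		struct sk_buff *skb = ring->tx_buf[i].skb;
 *
 *		napi_consume_skb(skb, budget);
 *	}
 */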
/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	/* We do not copy old->sk */
	new->dev = old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
	__skb_ext_copy(new, old);
	__nf_copy(new, old, false);

	/* Note : this field could be in headers_start/headers_end section
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif

}
/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->peeked = 0;
	C(pfmemalloc);
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	refcount_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
 * @first: first sk_buff of the msg
 */
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
{
	struct sk_buff *n;

	n = alloc_skb(0, GFP_ATOMIC);
	if (!n)
		return NULL;

	n->len = first->len;
	n->data_len = first->len;
	n->truesize = first->truesize;

	skb_shinfo(n)->frag_list = first;

	__copy_skb_header(n, first);
	n->destructor = NULL;

	return n;
}
EXPORT_SYMBOL_GPL(alloc_skb_for_msg);

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
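/* Hedged sketch of what skb_morph() buys a caller: @dst gives up its own
 * state and becomes a clone of @src, so @src can then be freed while the
 * shared data lives on through @dst (__skb_clone() bumped the dataref).
 *
 *	skb_morph(dst, src);	// dst's old contents are released
 *	kfree_skb(src);		// data survives via dst's reference
 */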
int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
{
	unsigned long max_pg, num_pg, new_pg, old_pg;
	struct user_struct *user;

	if (capable(CAP_IPC_LOCK) || !size)
		return 0;

	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
	max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	user = mmp->user ? : current_user();

	do {
		old_pg = atomic_long_read(&user->locked_vm);
		new_pg = old_pg + num_pg;
		if (new_pg > max_pg)
			return -ENOBUFS;
	} while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
		 old_pg);

	if (!mmp->user) {
		mmp->user = get_uid(user);
		mmp->num_pg = num_pg;
	} else {
		mmp->num_pg += num_pg;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mm_account_pinned_pages);

void mm_unaccount_pinned_pages(struct mmpin *mmp)
{
	if (mmp->user) {
		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
		free_uid(mmp->user);
	}
}
EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);

struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size)
{
	struct ubuf_info *uarg;
	struct sk_buff *skb;

	WARN_ON_ONCE(!in_task());

	skb = sock_omalloc(sk, 0, GFP_KERNEL);
	if (!skb)
		return NULL;

	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
	uarg = (void *)skb->cb;
	uarg->mmp.user = NULL;

	if (mm_account_pinned_pages(&uarg->mmp, size)) {
		kfree_skb(skb);
		return NULL;
	}

	uarg->callback = msg_zerocopy_callback;
	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
	uarg->len = 1;
	uarg->bytelen = size;
	uarg->zerocopy = 1;
	uarg->flags = SKBFL_ZEROCOPY_FRAG;
	refcount_set(&uarg->refcnt, 1);
	sock_hold(sk);

	return uarg;
}
EXPORT_SYMBOL_GPL(msg_zerocopy_alloc);

static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
{
	return container_of((void *)uarg, struct sk_buff, cb);
}
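/* For context, a hedged sketch of the userspace side these helpers serve
 * (see Documentation/networking/msg_zerocopy.rst): the ubuf_info allocated
 * above tracks one MSG_ZEROCOPY send, and completion is reported on the
 * socket's error queue as an [ee_info, ee_data] id range.
 *
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
 *	send(fd, buf, len, MSG_ZEROCOPY);
 *	// later, once the NIC is done with the pinned pages:
 *	recvmsg(fd, &msg, MSG_ERRQUEUE);	// completion notification
 */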
struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
				       struct ubuf_info *uarg)
{
	if (uarg) {
		const u32 byte_limit = 1 << 19;		/* limit to a few TSO */
		u32 bytelen, next;

		/* realloc only when socket is locked (TCP, UDP cork),
		 * so uarg->len and sk_zckey access is serialized
		 */
		if (!sock_owned_by_user(sk)) {
			WARN_ON_ONCE(1);
			return NULL;
		}

		bytelen = uarg->bytelen + size;
		if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
			/* TCP can create new skb to attach new uarg */
			if (sk->sk_type == SOCK_STREAM)
				goto new_alloc;
			return NULL;
		}

		next = (u32)atomic_read(&sk->sk_zckey);
		if ((u32)(uarg->id + uarg->len) == next) {
			if (mm_account_pinned_pages(&uarg->mmp, size))
				return NULL;
			uarg->len++;
			uarg->bytelen = bytelen;
			atomic_set(&sk->sk_zckey, ++next);

			/* no extra ref when appending to datagram (MSG_MORE) */
			if (sk->sk_type == SOCK_STREAM)
				net_zcopy_get(uarg);

			return uarg;
		}
	}

new_alloc:
	return msg_zerocopy_alloc(sk, size);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_realloc);

static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	u32 old_lo, old_hi;
	u64 sum_len;

	old_lo = serr->ee.ee_info;
	old_hi = serr->ee.ee_data;
	sum_len = old_hi - old_lo + 1ULL + len;

	if (sum_len >= (1ULL << 32))
		return false;

	if (lo != old_hi + 1)
		return false;

	serr->ee.ee_data += len;
	return true;
}

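/*
 * Example (editor's addition, not kernel code): a standalone demo of the
 * id-range coalescing arithmetic above. A pending notification covers
 * ids [lo, hi]; a new batch of @len ids starting at @lo2 can be folded
 * in only when it begins exactly at hi + 1 and the merged span still
 * fits in 32 bits.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct notify_range { uint32_t lo, hi; };

static bool range_extend(struct notify_range *r, uint32_t lo2, uint16_t len)
{
	uint64_t sum_len = (uint64_t)r->hi - r->lo + 1 + len;

	if (sum_len >= (1ULL << 32) || lo2 != r->hi + 1)
		return false;
	r->hi += len;
	return true;
}

int main(void)
{
	struct notify_range r = { .lo = 5, .hi = 7 };	/* ids 5..7 pending */

	printf("%d\n", range_extend(&r, 8, 2));	/* 1: now covers 5..9 */
	printf("%d\n", range_extend(&r, 12, 1));	/* 0: gap, needs a new event */
	return 0;
}
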
static void __msg_zerocopy_callback(struct ubuf_info *uarg)
{
	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
	struct sock_exterr_skb *serr;
	struct sock *sk = skb->sk;
	struct sk_buff_head *q;
	unsigned long flags;
	u32 lo, hi;
	u16 len;

	mm_unaccount_pinned_pages(&uarg->mmp);

	/* if !len, there was only 1 call, and it was aborted
	 * so do not queue a completion notification
	 */
	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
		goto release;

	len = uarg->len;
	lo = uarg->id;
	hi = uarg->id + len - 1;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = 0;
	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
	serr->ee.ee_data = hi;
	serr->ee.ee_info = lo;
	if (!uarg->zerocopy)
		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;

	q = &sk->sk_error_queue;
	spin_lock_irqsave(&q->lock, flags);
	tail = skb_peek_tail(q);
	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
	    !skb_zerocopy_notify_extend(tail, lo, len)) {
		__skb_queue_tail(q, skb);
		skb = NULL;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	sk->sk_error_report(sk);

release:
	consume_skb(skb);
	sock_put(sk);
}

void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
			   bool success)
{
	uarg->zerocopy = uarg->zerocopy & success;

	if (refcount_dec_and_test(&uarg->refcnt))
		__msg_zerocopy_callback(uarg);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_callback);

void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
	struct sock *sk = skb_from_uarg(uarg)->sk;

	atomic_dec(&sk->sk_zckey);
	uarg->len--;

	if (have_uref)
		msg_zerocopy_callback(NULL, uarg, true);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);

int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
{
	return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);

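/*
 * Example (editor's addition, not kernel code): how a userspace sender
 * drains the notifications queued above. Completions arrive on the
 * socket error queue as SO_EE_ORIGIN_ZEROCOPY events whose ee_info and
 * ee_data give the inclusive [lo, hi] range of send ids; the
 * SO_EE_CODE_ZEROCOPY_COPIED bit in ee_code reports the copied
 * fallback. Based on the msg_zerocopy documentation; error handling
 * trimmed for brevity.
 */
#include <linux/errqueue.h>
#include <string.h>
#include <sys/socket.h>

static void read_zerocopy_completions(int fd)
{
	char control[128];
	struct msghdr msg = { .msg_control = control,
			      .msg_controllen = sizeof(control) };
	struct sock_extended_err *serr;
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) == -1)
		return;				/* nothing pending */

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		serr = (struct sock_extended_err *)CMSG_DATA(cm);
		if (serr->ee_errno != 0 ||
		    serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY)
			continue;
		/* sends serr->ee_info .. serr->ee_data are complete;
		 * their buffers may now be reused by the application
		 */
	}
}
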
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg)
{
	struct ubuf_info *orig_uarg = skb_zcopy(skb);
	struct iov_iter orig_iter = msg->msg_iter;
	int err, orig_len = skb->len;

	/* An skb can only point to one uarg. This edge case happens when
	 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
	 */
	if (orig_uarg && uarg != orig_uarg)
		return -EEXIST;

	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
		struct sock *save_sk = skb->sk;

		/* Streams do not free skb on error. Reset to prev state. */
		msg->msg_iter = orig_iter;
		skb->sk = sk;
		___pskb_trim(skb, orig_len);
		skb->sk = save_sk;
		return err;
	}

	skb_zcopy_set(skb, uarg, NULL);
	return skb->len - orig_len;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);

static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
			      gfp_t gfp_mask)
{
	if (skb_zcopy(orig)) {
		if (skb_zcopy(nskb)) {
			/* !gfp_mask callers are verified to !skb_zcopy(nskb) */
			if (!gfp_mask) {
				WARN_ON_ONCE(1);
				return -ENOMEM;
			}
			if (skb_uarg(nskb) == skb_uarg(orig))
				return 0;
			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
				return -EIO;
		}
		skb_zcopy_set(nskb, skb_uarg(orig), NULL);
	}
	return 0;
}

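/*
 * Example (editor's addition, not kernel code): the userspace send side
 * that feeds the zerocopy path above. SO_ZEROCOPY must be enabled once,
 * after which MSG_ZEROCOPY sends pin the payload instead of copying it;
 * the buffer must stay untouched until its completion (previous example)
 * arrives. Based on the msg_zerocopy documentation; error handling
 * trimmed.
 */
#include <sys/socket.h>
#include <sys/types.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY 60			/* older libc headers may lack it */
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000
#endif

static ssize_t send_zerocopy(int fd, const void *buf, size_t len)
{
	int one = 1;

	if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
		return -1;
	/* the kernel may still fall back to copying small sends */
	return send(fd, buf, len, MSG_ZEROCOPY);
}
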
/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on skb with SKBFL_ZEROCOPY_ENABLE.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	int i, new_frags;
	u32 d_off;

	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
		return -EINVAL;

	if (!num_frags)
		goto release;

	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < new_frags; i++) {
		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	page = head;
	d_off = 0;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
				      p, p_off, p_len, copied) {
			u32 copy, done = 0;
			vaddr = kmap_atomic(p);

			while (done < p_len) {
				if (d_off == PAGE_SIZE) {
					d_off = 0;
					page = (struct page *)page_private(page);
				}
				copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
				memcpy(page_address(page) + d_off,
				       vaddr + p_off + done, copy);
				done += copy;
				d_off += copy;
			}
			kunmap_atomic(vaddr);
		}
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	/* skb frags point to kernel buffers */
	for (i = 0; i < new_frags - 1; i++) {
		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
		head = (struct page *)page_private(head);
	}
	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
	skb_shinfo(skb)->nr_frags = new_frags;

release:
	skb_zcopy_clear(skb, false);
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

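/*
 * Example (editor's addition, not kernel code): a standalone model of
 * the gather walk above. Variable-length source fragments are packed
 * back-to-back into fixed-size destination pages, carrying the running
 * destination offset (d_off) across fragment boundaries.
 */
#include <stdio.h>
#include <string.h>

#define DEMO_PAGE 8

static void gather(const char **frags, const size_t *lens, int nfrags,
		   char pages[][DEMO_PAGE])
{
	size_t d_off = 0;
	int page = 0, i;

	for (i = 0; i < nfrags; i++) {
		size_t done = 0;

		while (done < lens[i]) {
			size_t copy = lens[i] - done;

			if (d_off == DEMO_PAGE) {	/* page full: advance */
				d_off = 0;
				page++;
			}
			if (copy > DEMO_PAGE - d_off)
				copy = DEMO_PAGE - d_off;
			memcpy(pages[page] + d_off, frags[i] + done, copy);
			done += copy;
			d_off += copy;
		}
	}
}

int main(void)
{
	const char *frags[] = { "abcde", "fgh", "ijklmn" };
	const size_t lens[] = { 5, 3, 6 };
	char pages[2][DEMO_PAGE];

	gather(frags, lens, 3, pages);
	printf("%.8s|%.6s\n", pages[0], pages[1]);	/* abcdefgh|ijklmn */
	return 0;
}
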
/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt @gfp_mask must be
 *	%GFP_ATOMIC.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff_fclones *fclones = container_of(skb,
						       struct sk_buff_fclones,
						       skb1);
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	if (skb->fclone == SKB_FCLONE_ORIG &&
	    refcount_read(&fclones->fclone_ref) == 1) {
		n = &fclones->skb2;
		refcount_set(&fclones->fclone_ref, 2);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

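/*
 * Example (editor's addition, not from this file): the typical caller
 * pattern. A clone is the cheap way to hand one packet to a second
 * consumer; only the sk_buff header is duplicated, so neither owner may
 * write to the shared payload without unsharing it first.
 */
static int demo_tee_packet(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return -ENOMEM;
	/* clone->data == skb->data here: payload is shared, not copied.
	 * A consumer that wants to modify it would use skb_unshare() or
	 * pskb_copy() instead of writing through the clone.
	 */
	kfree_skb(clone);	/* stand-in for handing it to a consumer */
	return 0;
}
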
void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}
EXPORT_SYMBOL(skb_headers_offset_update);

void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}
EXPORT_SYMBOL(skb_copy_header);

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));

	skb_copy_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *	@fclone: if true allocate the copy of the skb from the fclone
 *	cache instead of the head cache; it is recommended to set this
 *	to true for the cases where the copy will likely be cloned
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone)
{
	unsigned int size = skb_headlen(skb) + headroom;
	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask) ||
		    skb_zerocopy_clone(n, skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	skb_copy_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy_fclone);

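/*
 * Example (editor's addition, not from this file): choosing between the
 * three duplication primitives. pskb_copy() (a wrapper around the
 * function above) privatizes only the linear header, which is the cheap
 * middle ground when just protocol headers will be rewritten.
 */
static struct sk_buff *demo_private_headers(struct sk_buff *skb)
{
	/* skb_clone():  shared header data and frags - read-only use
	 * skb_copy():   private header and payload   - full rewrite
	 * pskb_copy():  private header, shared frags - header rewrite
	 */
	return pskb_copy(skb, GFP_ATOMIC);
}
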
/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero on success or a negative error
 *	code if expansion failed; in the latter case, the &sk_buff is not
 *	changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i, osize = skb_end_offset(skb);
	int size = osize + nhead + ntail;
	long off;
	u8 *data;

	BUG_ON(nhead < 0);

	BUG_ON(skb_shared(skb));

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		if (skb_zcopy(skb))
			refcount_inc(&skb_uarg(skb)->refcnt);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head     = data;
	skb->head_frag = 0;
	skb->data    += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end      = size;
	off           = nhead;
#else
	skb->end      = skb->head + size;
#endif
	skb->tail    += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned   = 0;
	skb->hdr_len  = 0;
	skb->nohdr    = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	skb_metadata_clear(skb);

	/* It is not generally safe to change skb->truesize.
	 * For the moment, we really care of rx path, or
	 * when skb is orphaned (not attached to a socket).
	 */
	if (!skb->sk || skb->destructor == sock_edemux)
		skb->truesize += size - osize;

	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

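/*
 * Example (editor's addition, not from this file): the common caller
 * pattern around pskb_expand_head(). Before pushing an encapsulation
 * header, make sure there is enough room and a private head; in real
 * code this is usually spelled with the skb_cow_head() helper, which
 * wraps the same logic.
 */
static int demo_push_encap(struct sk_buff *skb, unsigned int hdr_len)
{
	unsigned int needed = hdr_len > skb_headroom(skb) ?
			      hdr_len - skb_headroom(skb) : 0;

	if (needed || skb_header_cloned(skb)) {
		int err = pskb_expand_head(skb, needed, 0, GFP_ATOMIC);

		if (err)
			return err;
	}
	/* any cached pointers into the old head are stale past this point */
	skb_push(skb, hdr_len);
	return 0;
}
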
/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
					gfp_mask, skb_alloc_rx_flag(skb),
					NUMA_NO_NODE);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			     skb->len + head_copy_len));

	skb_copy_header(n, skb);

	skb_headers_offset_update(n, newheadroom - oldheadroom);

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	__skb_pad		-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *	@free_on_error: free buffer on error
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error
 *	if @free_on_error is true.
 */
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	if (free_on_error)
		kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(__skb_pad);

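/*
 * Example (editor's addition, not from this file): the usual driver-side
 * use of the padding helpers. Ethernet frames must be at least ETH_ZLEN
 * (60) bytes on the wire; skb_put_padto() grows and zero-fills the skb
 * via the helper above and frees it on failure.
 */
static int demo_xmit_prep(struct sk_buff *skb)
{
	if (skb_put_padto(skb, ETH_ZLEN))
		return -ENOMEM;	/* skb has already been freed on error */
	/* safe to DMA skb->data now: skb->len >= ETH_ZLEN, tail zeroed */
	return 0;
}
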
/**
 *	pskb_put - add data to the tail of a potentially fragmented buffer
 *	@skb: start of the buffer to use
 *	@tail: tail fragment of the buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the potentially
 *	fragmented buffer. @tail must be the last fragment of @skb -- or
 *	@skb itself. If this would exceed the total buffer size the kernel
 *	will panic. A pointer to the first byte of the extra data is
 *	returned.
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
void *skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
void *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

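/*
 * Example (editor's addition, not from this file): the canonical way the
 * accessors cooperate when building a packet from scratch: reserve
 * headroom up front, skb_put() the payload, then skb_push() each header
 * from the innermost out. The 40-byte header budget is an arbitrary
 * placeholder.
 */
static struct sk_buff *demo_build_frame(const void *payload, unsigned int plen)
{
	unsigned int hlen = ETH_HLEN + 40;	/* worst-case header space */
	struct sk_buff *skb = alloc_skb(hlen + plen, GFP_ATOMIC);
	struct ethhdr *eth;

	if (!skb)
		return NULL;
	skb_reserve(skb, hlen);			/* leave room for headers */
	memcpy(skb_put(skb, plen), payload, plen);
	eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
	eth->h_proto = htons(ETH_P_IP);		/* fill the rest likewise */
	return skb;
}
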
/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
void *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */
int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			consume_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	if (!skb->sk || skb->destructor == sock_edemux)
		skb_condense(skb);
	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/* Note : use pskb_trim_rcsum() instead of calling this directly
 */
int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		int delta = skb->len - len;

		skb->csum = csum_block_sub(skb->csum,
					   skb_checksum(skb, len, delta, 0),
					   len);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
		int offset = skb_checksum_start_offset(skb) + skb->csum_offset;

		if (offset + sizeof(__sum16) > hdlen)
			return -EINVAL;
	}
	return __pskb_trim(skb, len);
}
EXPORT_SYMBOL(pskb_trim_rcsum_slow);

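/*
 * Example (editor's addition, not kernel code): a standalone demo of the
 * checksum arithmetic behind the CHECKSUM_COMPLETE branch above. In the
 * ones'-complement Internet checksum, the sum of a trimmed-off tail can
 * be removed from the running sum by adding its bitwise complement, so
 * trimming never forces a full re-checksum. Both printed values match.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t csum_add32(uint32_t a, uint32_t b)
{
	uint64_t s = (uint64_t)a + b;

	return (uint32_t)(s + (s >> 32));	/* end-around carry */
}

static uint32_t csum_partial16(const uint16_t *w, int n, uint32_t sum)
{
	while (n--)
		sum = csum_add32(sum, *w++);
	return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint16_t data[6] = { 0x1234, 0xaaaa, 0x0f0f, 0x4242, 0xbeef, 0x7777 };
	uint32_t full = csum_partial16(data, 6, 0);
	uint32_t tail = csum_partial16(data + 4, 2, 0);	/* trimmed bytes */
	uint32_t head = csum_partial16(data, 4, 0);

	/* subtracting == adding the complement in ones'-complement math */
	uint32_t trimmed = csum_add32(full, ~tail);

	printf("%04x %04x\n", csum_fold(head), csum_fold(trimmed));
	return 0;
}
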
/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff:
 *	it expands header moving its tail forward and copying necessary
 *	data from fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
void *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
			     skb_tail_pointer(skb), delta));

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size >= eat)
			goto pull_pages;
		eat -= size;
	}

	/* If we need update frag list, we are in troubles.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be very rare operation, it is worth to fight against
	 * further bloating skb head and crucify ourselves here instead.
	 * Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[k];

			*frag = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_frag_off_add(frag, eat);
				skb_frag_size_sub(frag, eat);
				if (!i)
					goto end;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

end:
	skb->tail     += delta;
	skb->data_len -= delta;

	if (!skb->data_len)
		skb_zcopy_clear(skb, false);

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

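/*
 * Example (editor's addition, not from this file): callers almost never
 * use __pskb_pull_tail() directly; the usual idiom is pskb_may_pull(),
 * which invokes it only when the bytes to be parsed are not yet in the
 * linear area. The function name is a generic protocol-parsing sketch.
 */
static int demo_parse_l4(struct sk_buff *skb)
{
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return -EINVAL;		/* packet too short or alloc failed */
	/* re-read header pointers only after the pull succeeded */
	th = (struct tcphdr *)skb_transport_header(skb);
	return th->syn;
}
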
/**
 *	skb_copy_bits - copy bits from skb to kernel buffer
 *	@skb: source skb
 *	@offset: offset in source
 *	@to: destination buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source skb to the
 *	destination buffer.
 *
 *	CAUTION ! :
 *		If its prototype is ever changed,
 *		check arch/{*}/net/{*}.S files,
 *		since it is called from BPF assembly code.
 */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to     += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(f);
		if ((copy = end - offset) > 0) {
			u32 p_off, p_len, copied;
			struct page *p;
			u8 *vaddr;

			if (copy > len)
				copy = len;

			skb_frag_foreach_page(f,
					      skb_frag_off(f) + offset - start,
					      copy, p, p_off, p_len, copied) {
				vaddr = kmap_atomic(p);
				memcpy(to + copied, vaddr + p_off, p_len);
				kunmap_atomic(vaddr);
			}

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);

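/*
 * Example (editor's addition, not from this file): skb_copy_bits() is
 * the safe way to peek at bytes that may live in frags or a frag list.
 * A typical use is pulling a fixed-size header into a stack buffer
 * without linearizing the skb.
 */
static int demo_peek_header(const struct sk_buff *skb, unsigned int off)
{
	u8 hdr[16];

	if (skb_copy_bits(skb, off, hdr, sizeof(hdr)))
		return -EINVAL;		/* fewer than 16 bytes past @off */
	/* parse hdr[] here; the skb itself is left untouched */
	return 0;
}
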
/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

Miller static struct page *linear_to_page(struct page *page, unsigned int *len, 22904fb66994SJarek Poplawski unsigned int *offset, 229118aafc62SEric Dumazet struct sock *sk) 22928b9d3728SJarek Poplawski { 22935640f768SEric Dumazet struct page_frag *pfrag = sk_page_frag(sk); 22948b9d3728SJarek Poplawski 22955640f768SEric Dumazet if (!sk_page_frag_refill(sk, pfrag)) 22968b9d3728SJarek Poplawski return NULL; 22974fb66994SJarek Poplawski 22985640f768SEric Dumazet *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 22994fb66994SJarek Poplawski 23005640f768SEric Dumazet memcpy(page_address(pfrag->page) + pfrag->offset, 23015640f768SEric Dumazet page_address(page) + *offset, *len); 23025640f768SEric Dumazet *offset = pfrag->offset; 23035640f768SEric Dumazet pfrag->offset += *len; 23044fb66994SJarek Poplawski 23055640f768SEric Dumazet return pfrag->page; 23069c55e01cSJens Axboe } 23079c55e01cSJens Axboe 230841c73a0dSEric Dumazet static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 230941c73a0dSEric Dumazet struct page *page, 231041c73a0dSEric Dumazet unsigned int offset) 231141c73a0dSEric Dumazet { 231241c73a0dSEric Dumazet return spd->nr_pages && 231341c73a0dSEric Dumazet spd->pages[spd->nr_pages - 1] == page && 231441c73a0dSEric Dumazet (spd->partial[spd->nr_pages - 1].offset + 231541c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len == offset); 231641c73a0dSEric Dumazet } 231741c73a0dSEric Dumazet 23189c55e01cSJens Axboe /* 23199c55e01cSJens Axboe * Fill page/offset/length into spd, if it can hold more pages. 23209c55e01cSJens Axboe */ 2321a108d5f3SDavid S. Miller static bool spd_fill_page(struct splice_pipe_desc *spd, 232235f3d14dSJens Axboe struct pipe_inode_info *pipe, struct page *page, 23234fb66994SJarek Poplawski unsigned int *len, unsigned int offset, 232418aafc62SEric Dumazet bool linear, 23257a67e56fSJarek Poplawski struct sock *sk) 23269c55e01cSJens Axboe { 232741c73a0dSEric Dumazet if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 2328a108d5f3SDavid S. Miller return true; 23299c55e01cSJens Axboe 23308b9d3728SJarek Poplawski if (linear) { 233118aafc62SEric Dumazet page = linear_to_page(page, len, &offset, sk); 23328b9d3728SJarek Poplawski if (!page) 2333a108d5f3SDavid S. Miller return true; 233441c73a0dSEric Dumazet } 233541c73a0dSEric Dumazet if (spd_can_coalesce(spd, page, offset)) { 233641c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len += *len; 2337a108d5f3SDavid S. Miller return false; 233841c73a0dSEric Dumazet } 23398b9d3728SJarek Poplawski get_page(page); 23409c55e01cSJens Axboe spd->pages[spd->nr_pages] = page; 23414fb66994SJarek Poplawski spd->partial[spd->nr_pages].len = *len; 23429c55e01cSJens Axboe spd->partial[spd->nr_pages].offset = offset; 23439c55e01cSJens Axboe spd->nr_pages++; 23448b9d3728SJarek Poplawski 2345a108d5f3SDavid S. Miller return false; 23469c55e01cSJens Axboe } 23479c55e01cSJens Axboe 2348a108d5f3SDavid S. Miller static bool __splice_segment(struct page *page, unsigned int poff, 23492870c43dSOctavian Purdila unsigned int plen, unsigned int *off, 235018aafc62SEric Dumazet unsigned int *len, 2351d7ccf7c0SEric Dumazet struct splice_pipe_desc *spd, bool linear, 235235f3d14dSJens Axboe struct sock *sk, 235335f3d14dSJens Axboe struct pipe_inode_info *pipe) 23549c55e01cSJens Axboe { 23552870c43dSOctavian Purdila if (!*len) 2356a108d5f3SDavid S. 
Miller return true; 23579c55e01cSJens Axboe 23582870c43dSOctavian Purdila /* skip this segment if already processed */ 23592870c43dSOctavian Purdila if (*off >= plen) { 23602870c43dSOctavian Purdila *off -= plen; 2361a108d5f3SDavid S. Miller return false; 23622870c43dSOctavian Purdila } 23632870c43dSOctavian Purdila 23642870c43dSOctavian Purdila /* ignore any bits we already processed */ 23659ca1b22dSEric Dumazet poff += *off; 23669ca1b22dSEric Dumazet plen -= *off; 23672870c43dSOctavian Purdila *off = 0; 23682870c43dSOctavian Purdila 236918aafc62SEric Dumazet do { 237018aafc62SEric Dumazet unsigned int flen = min(*len, plen); 23712870c43dSOctavian Purdila 237218aafc62SEric Dumazet if (spd_fill_page(spd, pipe, page, &flen, poff, 237318aafc62SEric Dumazet linear, sk)) 2374a108d5f3SDavid S. Miller return true; 237518aafc62SEric Dumazet poff += flen; 237618aafc62SEric Dumazet plen -= flen; 23772870c43dSOctavian Purdila *len -= flen; 237818aafc62SEric Dumazet } while (*len && plen); 23792870c43dSOctavian Purdila 2380a108d5f3SDavid S. Miller return false; 2381db43a282SOctavian Purdila } 23829c55e01cSJens Axboe 23839c55e01cSJens Axboe /* 2384a108d5f3SDavid S. Miller * Map linear and fragment data from the skb to spd. It reports true if the 23852870c43dSOctavian Purdila * pipe is full or if we already spliced the requested length. 23869c55e01cSJens Axboe */ 2387a108d5f3SDavid S. Miller static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 238835f3d14dSJens Axboe unsigned int *offset, unsigned int *len, 238935f3d14dSJens Axboe struct splice_pipe_desc *spd, struct sock *sk) 23902870c43dSOctavian Purdila { 23912870c43dSOctavian Purdila int seg; 2392fa9835e5STom Herbert struct sk_buff *iter; 23939c55e01cSJens Axboe 23941d0c0b32SEric Dumazet /* map the linear part : 23952996d31fSAlexander Duyck * If skb->head_frag is set, this 'linear' part is backed by a 23962996d31fSAlexander Duyck * fragment, and if the head is not shared with any clones then 23972996d31fSAlexander Duyck * we can avoid a copy since we own the head portion of this page. 23989c55e01cSJens Axboe */ 23992870c43dSOctavian Purdila if (__splice_segment(virt_to_page(skb->data), 24002870c43dSOctavian Purdila (unsigned long) skb->data & (PAGE_SIZE - 1), 24012870c43dSOctavian Purdila skb_headlen(skb), 240218aafc62SEric Dumazet offset, len, spd, 24033a7c1ee4SAlexander Duyck skb_head_is_locked(skb), 24041d0c0b32SEric Dumazet sk, pipe)) 2405a108d5f3SDavid S. Miller return true; 24069c55e01cSJens Axboe 24079c55e01cSJens Axboe /* 24089c55e01cSJens Axboe * then map the fragments 24099c55e01cSJens Axboe */ 24109c55e01cSJens Axboe for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 24119c55e01cSJens Axboe const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 24129c55e01cSJens Axboe 2413ea2ab693SIan Campbell if (__splice_segment(skb_frag_page(f), 2414b54c9d5bSJonathan Lemon skb_frag_off(f), skb_frag_size(f), 241518aafc62SEric Dumazet offset, len, spd, false, sk, pipe)) 2416a108d5f3SDavid S. Miller return true; 24179c55e01cSJens Axboe } 24189c55e01cSJens Axboe 2419fa9835e5STom Herbert skb_walk_frags(skb, iter) { 2420fa9835e5STom Herbert if (*offset >= iter->len) { 2421fa9835e5STom Herbert *offset -= iter->len; 2422fa9835e5STom Herbert continue; 2423fa9835e5STom Herbert } 2424fa9835e5STom Herbert /* __skb_splice_bits() only fails if the output has no room 2425fa9835e5STom Herbert * left, so no point in going over the frag_list for the error 2426fa9835e5STom Herbert * case. 
2427fa9835e5STom Herbert */ 2428fa9835e5STom Herbert if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) 2429fa9835e5STom Herbert return true; 2430fa9835e5STom Herbert } 2431fa9835e5STom Herbert 2432a108d5f3SDavid S. Miller return false; 24339c55e01cSJens Axboe } 24349c55e01cSJens Axboe 24359c55e01cSJens Axboe /* 24369c55e01cSJens Axboe * Map data from the skb to a pipe. Should handle both the linear part, 2437fa9835e5STom Herbert * the fragments, and the frag list. 24389c55e01cSJens Axboe */ 2439a60e3cc7SHannes Frederic Sowa int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 24409c55e01cSJens Axboe struct pipe_inode_info *pipe, unsigned int tlen, 244125869262SAl Viro unsigned int flags) 24429c55e01cSJens Axboe { 244341c73a0dSEric Dumazet struct partial_page partial[MAX_SKB_FRAGS]; 244441c73a0dSEric Dumazet struct page *pages[MAX_SKB_FRAGS]; 24459c55e01cSJens Axboe struct splice_pipe_desc spd = { 24469c55e01cSJens Axboe .pages = pages, 24479c55e01cSJens Axboe .partial = partial, 2448047fe360SEric Dumazet .nr_pages_max = MAX_SKB_FRAGS, 244928a625cbSMiklos Szeredi .ops = &nosteal_pipe_buf_ops, 24509c55e01cSJens Axboe .spd_release = sock_spd_release, 24519c55e01cSJens Axboe }; 245235f3d14dSJens Axboe int ret = 0; 245335f3d14dSJens Axboe 2454fa9835e5STom Herbert __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); 24559c55e01cSJens Axboe 2456a60e3cc7SHannes Frederic Sowa if (spd.nr_pages) 245725869262SAl Viro ret = splice_to_pipe(pipe, &spd); 24589c55e01cSJens Axboe 245935f3d14dSJens Axboe return ret; 24609c55e01cSJens Axboe } 24612b514574SHannes Frederic Sowa EXPORT_SYMBOL_GPL(skb_splice_bits); 24629c55e01cSJens Axboe 246320bf50deSTom Herbert /* Send skb data on a socket. Socket must be locked. */ 246420bf50deSTom Herbert int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, 246520bf50deSTom Herbert int len) 246620bf50deSTom Herbert { 246720bf50deSTom Herbert unsigned int orig_len = len; 246820bf50deSTom Herbert struct sk_buff *head = skb; 246920bf50deSTom Herbert unsigned short fragidx; 247020bf50deSTom Herbert int slen, ret; 247120bf50deSTom Herbert 247220bf50deSTom Herbert do_frag_list: 247320bf50deSTom Herbert 247420bf50deSTom Herbert /* Deal with head data */ 247520bf50deSTom Herbert while (offset < skb_headlen(skb) && len) { 247620bf50deSTom Herbert struct kvec kv; 247720bf50deSTom Herbert struct msghdr msg; 247820bf50deSTom Herbert 247920bf50deSTom Herbert slen = min_t(int, len, skb_headlen(skb) - offset); 248020bf50deSTom Herbert kv.iov_base = skb->data + offset; 2481db5980d8SJohn Fastabend kv.iov_len = slen; 248220bf50deSTom Herbert memset(&msg, 0, sizeof(msg)); 2483bd95e678SJohn Fastabend msg.msg_flags = MSG_DONTWAIT; 248420bf50deSTom Herbert 248520bf50deSTom Herbert ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen); 248620bf50deSTom Herbert if (ret <= 0) 248720bf50deSTom Herbert goto error; 248820bf50deSTom Herbert 248920bf50deSTom Herbert offset += ret; 249020bf50deSTom Herbert len -= ret; 249120bf50deSTom Herbert } 249220bf50deSTom Herbert 249320bf50deSTom Herbert /* All the data was skb head? 
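
/*
 * Editor's sketch (not part of the original file): how a splice_read
 * path might feed an skb into skb_splice_bits(), loosely in the style
 * of tcp_splice_read(). example_splice_actor() is a hypothetical name,
 * and passing 0 for @flags is purely illustrative.
 */
static int __maybe_unused
example_splice_actor(struct sk_buff *skb, struct sock *sk, unsigned int off,
		     struct pipe_inode_info *pipe, unsigned int len)
{
	/* Never ask for more than the skb still holds past @off. */
	return skb_splice_bits(skb, sk, off, pipe,
			       min(len, skb->len - off), 0);
}
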
*/ 249420bf50deSTom Herbert if (!len) 249520bf50deSTom Herbert goto out; 249620bf50deSTom Herbert 249720bf50deSTom Herbert /* Make offset relative to start of frags */ 249820bf50deSTom Herbert offset -= skb_headlen(skb); 249920bf50deSTom Herbert 250020bf50deSTom Herbert /* Find where we are in frag list */ 250120bf50deSTom Herbert for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 250220bf50deSTom Herbert skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 250320bf50deSTom Herbert 2504d8e18a51SMatthew Wilcox (Oracle) if (offset < skb_frag_size(frag)) 250520bf50deSTom Herbert break; 250620bf50deSTom Herbert 2507d8e18a51SMatthew Wilcox (Oracle) offset -= skb_frag_size(frag); 250820bf50deSTom Herbert } 250920bf50deSTom Herbert 251020bf50deSTom Herbert for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 251120bf50deSTom Herbert skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 251220bf50deSTom Herbert 2513d8e18a51SMatthew Wilcox (Oracle) slen = min_t(size_t, len, skb_frag_size(frag) - offset); 251420bf50deSTom Herbert 251520bf50deSTom Herbert while (slen) { 2516d8e18a51SMatthew Wilcox (Oracle) ret = kernel_sendpage_locked(sk, skb_frag_page(frag), 2517b54c9d5bSJonathan Lemon skb_frag_off(frag) + offset, 251820bf50deSTom Herbert slen, MSG_DONTWAIT); 251920bf50deSTom Herbert if (ret <= 0) 252020bf50deSTom Herbert goto error; 252120bf50deSTom Herbert 252220bf50deSTom Herbert len -= ret; 252320bf50deSTom Herbert offset += ret; 252420bf50deSTom Herbert slen -= ret; 252520bf50deSTom Herbert } 252620bf50deSTom Herbert 252720bf50deSTom Herbert offset = 0; 252820bf50deSTom Herbert } 252920bf50deSTom Herbert 253020bf50deSTom Herbert if (len) { 253120bf50deSTom Herbert /* Process any frag lists */ 253220bf50deSTom Herbert 253320bf50deSTom Herbert if (skb == head) { 253420bf50deSTom Herbert if (skb_has_frag_list(skb)) { 253520bf50deSTom Herbert skb = skb_shinfo(skb)->frag_list; 253620bf50deSTom Herbert goto do_frag_list; 253720bf50deSTom Herbert } 253820bf50deSTom Herbert } else if (skb->next) { 253920bf50deSTom Herbert skb = skb->next; 254020bf50deSTom Herbert goto do_frag_list; 254120bf50deSTom Herbert } 254220bf50deSTom Herbert } 254320bf50deSTom Herbert 254420bf50deSTom Herbert out: 254520bf50deSTom Herbert return orig_len - len; 254620bf50deSTom Herbert 254720bf50deSTom Herbert error: 254820bf50deSTom Herbert return orig_len == len ? ret : orig_len - len; 254920bf50deSTom Herbert } 255020bf50deSTom Herbert EXPORT_SYMBOL_GPL(skb_send_sock_locked); 255120bf50deSTom Herbert 2552357b40a1SHerbert Xu /** 2553357b40a1SHerbert Xu * skb_store_bits - store bits from kernel buffer to skb 2554357b40a1SHerbert Xu * @skb: destination buffer 2555357b40a1SHerbert Xu * @offset: offset in destination 2556357b40a1SHerbert Xu * @from: source buffer 2557357b40a1SHerbert Xu * @len: number of bytes to copy 2558357b40a1SHerbert Xu * 2559357b40a1SHerbert Xu * Copy the specified number of bytes from the source buffer to the 2560357b40a1SHerbert Xu * destination skb. This function handles all the messy bits of 2561357b40a1SHerbert Xu * traversing fragment lists and such. 2562357b40a1SHerbert Xu */ 2563357b40a1SHerbert Xu 25640c6fcc8aSStephen Hemminger int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 2565357b40a1SHerbert Xu { 25661a028e50SDavid S. Miller int start = skb_headlen(skb); 2567fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 2568fbb398a8SDavid S. 
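
/*
 * Editor's sketch (not part of the original file): skb_send_sock_locked()
 * must run under the socket lock; the return value reports partial
 * progress. example_send_skb() is a hypothetical helper.
 */
static int __maybe_unused example_send_skb(struct sock *sk,
					   struct sk_buff *skb)
{
	int sent;

	lock_sock(sk);
	sent = skb_send_sock_locked(sk, skb, 0, skb->len);
	release_sock(sk);

	/* sent: bytes handed to the socket, or a negative errno. */
	return sent;
}
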
Miller int i, copy; 2569357b40a1SHerbert Xu 2570357b40a1SHerbert Xu if (offset > (int)skb->len - len) 2571357b40a1SHerbert Xu goto fault; 2572357b40a1SHerbert Xu 25731a028e50SDavid S. Miller if ((copy = start - offset) > 0) { 2574357b40a1SHerbert Xu if (copy > len) 2575357b40a1SHerbert Xu copy = len; 257627d7ff46SArnaldo Carvalho de Melo skb_copy_to_linear_data_offset(skb, offset, from, copy); 2577357b40a1SHerbert Xu if ((len -= copy) == 0) 2578357b40a1SHerbert Xu return 0; 2579357b40a1SHerbert Xu offset += copy; 2580357b40a1SHerbert Xu from += copy; 2581357b40a1SHerbert Xu } 2582357b40a1SHerbert Xu 2583357b40a1SHerbert Xu for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2584357b40a1SHerbert Xu skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 25851a028e50SDavid S. Miller int end; 2586357b40a1SHerbert Xu 2587547b792cSIlpo Järvinen WARN_ON(start > offset + len); 25881a028e50SDavid S. Miller 25899e903e08SEric Dumazet end = start + skb_frag_size(frag); 2590357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 2591c613c209SWillem de Bruijn u32 p_off, p_len, copied; 2592c613c209SWillem de Bruijn struct page *p; 2593357b40a1SHerbert Xu u8 *vaddr; 2594357b40a1SHerbert Xu 2595357b40a1SHerbert Xu if (copy > len) 2596357b40a1SHerbert Xu copy = len; 2597357b40a1SHerbert Xu 2598c613c209SWillem de Bruijn skb_frag_foreach_page(frag, 2599b54c9d5bSJonathan Lemon skb_frag_off(frag) + offset - start, 2600c613c209SWillem de Bruijn copy, p, p_off, p_len, copied) { 2601c613c209SWillem de Bruijn vaddr = kmap_atomic(p); 2602c613c209SWillem de Bruijn memcpy(vaddr + p_off, from + copied, p_len); 260351c56b00SEric Dumazet kunmap_atomic(vaddr); 2604c613c209SWillem de Bruijn } 2605357b40a1SHerbert Xu 2606357b40a1SHerbert Xu if ((len -= copy) == 0) 2607357b40a1SHerbert Xu return 0; 2608357b40a1SHerbert Xu offset += copy; 2609357b40a1SHerbert Xu from += copy; 2610357b40a1SHerbert Xu } 26111a028e50SDavid S. Miller start = end; 2612357b40a1SHerbert Xu } 2613357b40a1SHerbert Xu 2614fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 26151a028e50SDavid S. Miller int end; 2616357b40a1SHerbert Xu 2617547b792cSIlpo Järvinen WARN_ON(start > offset + len); 26181a028e50SDavid S. Miller 2619fbb398a8SDavid S. Miller end = start + frag_iter->len; 2620357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 2621357b40a1SHerbert Xu if (copy > len) 2622357b40a1SHerbert Xu copy = len; 2623fbb398a8SDavid S. Miller if (skb_store_bits(frag_iter, offset - start, 26241a028e50SDavid S. Miller from, copy)) 2625357b40a1SHerbert Xu goto fault; 2626357b40a1SHerbert Xu if ((len -= copy) == 0) 2627357b40a1SHerbert Xu return 0; 2628357b40a1SHerbert Xu offset += copy; 2629357b40a1SHerbert Xu from += copy; 2630357b40a1SHerbert Xu } 26311a028e50SDavid S. Miller start = end; 2632357b40a1SHerbert Xu } 2633357b40a1SHerbert Xu if (!len) 2634357b40a1SHerbert Xu return 0; 2635357b40a1SHerbert Xu 2636357b40a1SHerbert Xu fault: 2637357b40a1SHerbert Xu return -EFAULT; 2638357b40a1SHerbert Xu } 2639357b40a1SHerbert Xu EXPORT_SYMBOL(skb_store_bits); 2640357b40a1SHerbert Xu 26411da177e4SLinus Torvalds /* Checksum skb data. */ 26422817a336SDaniel Borkmann __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 26432817a336SDaniel Borkmann __wsum csum, const struct skb_checksum_ops *ops) 26441da177e4SLinus Torvalds { 26451a028e50SDavid S. Miller int start = skb_headlen(skb); 26461a028e50SDavid S. Miller int i, copy = start - offset; 2647fbb398a8SDavid S. 
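
/*
 * Editor's sketch (not part of the original file): the write-side mirror
 * of skb_copy_bits(), patching bytes into a possibly fragmented skb.
 * The 4-byte marker value is purely illustrative.
 */
static int __maybe_unused example_overwrite(struct sk_buff *skb, int offset)
{
	static const u8 marker[4] = { 0xde, 0xad, 0xbe, 0xef };

	/* Returns -EFAULT if offset + 4 would run past skb->len. */
	return skb_store_bits(skb, offset, marker, sizeof(marker));
}
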
Miller struct sk_buff *frag_iter; 26481da177e4SLinus Torvalds int pos = 0; 26491da177e4SLinus Torvalds 26501da177e4SLinus Torvalds /* Checksum header. */ 26511da177e4SLinus Torvalds if (copy > 0) { 26521da177e4SLinus Torvalds if (copy > len) 26531da177e4SLinus Torvalds copy = len; 26542544af03SMatteo Croce csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, 26552544af03SMatteo Croce skb->data + offset, copy, csum); 26561da177e4SLinus Torvalds if ((len -= copy) == 0) 26571da177e4SLinus Torvalds return csum; 26581da177e4SLinus Torvalds offset += copy; 26591da177e4SLinus Torvalds pos = copy; 26601da177e4SLinus Torvalds } 26611da177e4SLinus Torvalds 26621da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 26631a028e50SDavid S. Miller int end; 266451c56b00SEric Dumazet skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 26651da177e4SLinus Torvalds 2666547b792cSIlpo Järvinen WARN_ON(start > offset + len); 26671a028e50SDavid S. Miller 266851c56b00SEric Dumazet end = start + skb_frag_size(frag); 26691da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 2670c613c209SWillem de Bruijn u32 p_off, p_len, copied; 2671c613c209SWillem de Bruijn struct page *p; 267244bb9363SAl Viro __wsum csum2; 26731da177e4SLinus Torvalds u8 *vaddr; 26741da177e4SLinus Torvalds 26751da177e4SLinus Torvalds if (copy > len) 26761da177e4SLinus Torvalds copy = len; 2677c613c209SWillem de Bruijn 2678c613c209SWillem de Bruijn skb_frag_foreach_page(frag, 2679b54c9d5bSJonathan Lemon skb_frag_off(frag) + offset - start, 2680c613c209SWillem de Bruijn copy, p, p_off, p_len, copied) { 2681c613c209SWillem de Bruijn vaddr = kmap_atomic(p); 26822544af03SMatteo Croce csum2 = INDIRECT_CALL_1(ops->update, 26832544af03SMatteo Croce csum_partial_ext, 26842544af03SMatteo Croce vaddr + p_off, p_len, 0); 268551c56b00SEric Dumazet kunmap_atomic(vaddr); 26862544af03SMatteo Croce csum = INDIRECT_CALL_1(ops->combine, 26872544af03SMatteo Croce csum_block_add_ext, csum, 26882544af03SMatteo Croce csum2, pos, p_len); 2689c613c209SWillem de Bruijn pos += p_len; 2690c613c209SWillem de Bruijn } 2691c613c209SWillem de Bruijn 26921da177e4SLinus Torvalds if (!(len -= copy)) 26931da177e4SLinus Torvalds return csum; 26941da177e4SLinus Torvalds offset += copy; 26951da177e4SLinus Torvalds } 26961a028e50SDavid S. Miller start = end; 26971da177e4SLinus Torvalds } 26981da177e4SLinus Torvalds 2699fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 27001a028e50SDavid S. Miller int end; 27011da177e4SLinus Torvalds 2702547b792cSIlpo Järvinen WARN_ON(start > offset + len); 27031a028e50SDavid S. Miller 2704fbb398a8SDavid S. Miller end = start + frag_iter->len; 27051da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 27065f92a738SAl Viro __wsum csum2; 27071da177e4SLinus Torvalds if (copy > len) 27081da177e4SLinus Torvalds copy = len; 27092817a336SDaniel Borkmann csum2 = __skb_checksum(frag_iter, offset - start, 27102817a336SDaniel Borkmann copy, 0, ops); 27112544af03SMatteo Croce csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, 27122544af03SMatteo Croce csum, csum2, pos, copy); 27131da177e4SLinus Torvalds if ((len -= copy) == 0) 27141da177e4SLinus Torvalds return csum; 27151da177e4SLinus Torvalds offset += copy; 27161da177e4SLinus Torvalds pos += copy; 27171da177e4SLinus Torvalds } 27181a028e50SDavid S. 
Miller start = end; 27191da177e4SLinus Torvalds } 272009a62660SKris Katterjohn BUG_ON(len); 27211da177e4SLinus Torvalds 27221da177e4SLinus Torvalds return csum; 27231da177e4SLinus Torvalds } 27242817a336SDaniel Borkmann EXPORT_SYMBOL(__skb_checksum); 27252817a336SDaniel Borkmann 27262817a336SDaniel Borkmann __wsum skb_checksum(const struct sk_buff *skb, int offset, 27272817a336SDaniel Borkmann int len, __wsum csum) 27282817a336SDaniel Borkmann { 27292817a336SDaniel Borkmann const struct skb_checksum_ops ops = { 2730cea80ea8SDaniel Borkmann .update = csum_partial_ext, 27312817a336SDaniel Borkmann .combine = csum_block_add_ext, 27322817a336SDaniel Borkmann }; 27332817a336SDaniel Borkmann 27342817a336SDaniel Borkmann return __skb_checksum(skb, offset, len, csum, &ops); 27352817a336SDaniel Borkmann } 2736b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_checksum); 27371da177e4SLinus Torvalds 27381da177e4SLinus Torvalds /* Both of above in one bottle. */ 27391da177e4SLinus Torvalds 274081d77662SAl Viro __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 27418d5930dfSAl Viro u8 *to, int len) 27421da177e4SLinus Torvalds { 27431a028e50SDavid S. Miller int start = skb_headlen(skb); 27441a028e50SDavid S. Miller int i, copy = start - offset; 2745fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 27461da177e4SLinus Torvalds int pos = 0; 27478d5930dfSAl Viro __wsum csum = 0; 27481da177e4SLinus Torvalds 27491da177e4SLinus Torvalds /* Copy header. */ 27501da177e4SLinus Torvalds if (copy > 0) { 27511da177e4SLinus Torvalds if (copy > len) 27521da177e4SLinus Torvalds copy = len; 27531da177e4SLinus Torvalds csum = csum_partial_copy_nocheck(skb->data + offset, to, 2754cc44c17bSAl Viro copy); 27551da177e4SLinus Torvalds if ((len -= copy) == 0) 27561da177e4SLinus Torvalds return csum; 27571da177e4SLinus Torvalds offset += copy; 27581da177e4SLinus Torvalds to += copy; 27591da177e4SLinus Torvalds pos = copy; 27601da177e4SLinus Torvalds } 27611da177e4SLinus Torvalds 27621da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 27631a028e50SDavid S. Miller int end; 27641da177e4SLinus Torvalds 2765547b792cSIlpo Järvinen WARN_ON(start > offset + len); 27661a028e50SDavid S. 
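
/*
 * Editor's sketch (not part of the original file): software validation of
 * a packet that arrived without a usable hardware checksum. skb_checksum()
 * walks the head, the frags and the frag_list; csum_fold() reduces the
 * 32-bit partial sum to the final 16-bit Internet checksum.
 */
static __sum16 __maybe_unused example_full_checksum(const struct sk_buff *skb)
{
	/* Zero means the data (checksum field included) sums correctly. */
	return csum_fold(skb_checksum(skb, 0, skb->len, 0));
}
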
Miller 27679e903e08SEric Dumazet end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 27681da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 2769c613c209SWillem de Bruijn skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2770c613c209SWillem de Bruijn u32 p_off, p_len, copied; 2771c613c209SWillem de Bruijn struct page *p; 27725084205fSAl Viro __wsum csum2; 27731da177e4SLinus Torvalds u8 *vaddr; 27741da177e4SLinus Torvalds 27751da177e4SLinus Torvalds if (copy > len) 27761da177e4SLinus Torvalds copy = len; 2777c613c209SWillem de Bruijn 2778c613c209SWillem de Bruijn skb_frag_foreach_page(frag, 2779b54c9d5bSJonathan Lemon skb_frag_off(frag) + offset - start, 2780c613c209SWillem de Bruijn copy, p, p_off, p_len, copied) { 2781c613c209SWillem de Bruijn vaddr = kmap_atomic(p); 2782c613c209SWillem de Bruijn csum2 = csum_partial_copy_nocheck(vaddr + p_off, 2783c613c209SWillem de Bruijn to + copied, 2784cc44c17bSAl Viro p_len); 278551c56b00SEric Dumazet kunmap_atomic(vaddr); 27861da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 2787c613c209SWillem de Bruijn pos += p_len; 2788c613c209SWillem de Bruijn } 2789c613c209SWillem de Bruijn 27901da177e4SLinus Torvalds if (!(len -= copy)) 27911da177e4SLinus Torvalds return csum; 27921da177e4SLinus Torvalds offset += copy; 27931da177e4SLinus Torvalds to += copy; 27941da177e4SLinus Torvalds } 27951a028e50SDavid S. Miller start = end; 27961da177e4SLinus Torvalds } 27971da177e4SLinus Torvalds 2798fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 279981d77662SAl Viro __wsum csum2; 28001a028e50SDavid S. Miller int end; 28011da177e4SLinus Torvalds 2802547b792cSIlpo Järvinen WARN_ON(start > offset + len); 28031a028e50SDavid S. Miller 2804fbb398a8SDavid S. Miller end = start + frag_iter->len; 28051da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 28061da177e4SLinus Torvalds if (copy > len) 28071da177e4SLinus Torvalds copy = len; 2808fbb398a8SDavid S. Miller csum2 = skb_copy_and_csum_bits(frag_iter, 28091a028e50SDavid S. Miller offset - start, 28108d5930dfSAl Viro to, copy); 28111da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 28121da177e4SLinus Torvalds if ((len -= copy) == 0) 28131da177e4SLinus Torvalds return csum; 28141da177e4SLinus Torvalds offset += copy; 28151da177e4SLinus Torvalds to += copy; 28161da177e4SLinus Torvalds pos += copy; 28171da177e4SLinus Torvalds } 28181a028e50SDavid S. Miller start = end; 28191da177e4SLinus Torvalds } 282009a62660SKris Katterjohn BUG_ON(len); 28211da177e4SLinus Torvalds return csum; 28221da177e4SLinus Torvalds } 2823b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_bits); 28241da177e4SLinus Torvalds 282549f8e832SCong Wang __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) 282649f8e832SCong Wang { 282749f8e832SCong Wang __sum16 sum; 282849f8e832SCong Wang 282949f8e832SCong Wang sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); 283014641931SCong Wang /* See comments in __skb_checksum_complete(). 
*/ 283149f8e832SCong Wang if (likely(!sum)) { 283249f8e832SCong Wang if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 283349f8e832SCong Wang !skb->csum_complete_sw) 28347fe50ac8SCong Wang netdev_rx_csum_fault(skb->dev, skb); 283549f8e832SCong Wang } 283649f8e832SCong Wang if (!skb_shared(skb)) 283749f8e832SCong Wang skb->csum_valid = !sum; 283849f8e832SCong Wang return sum; 283949f8e832SCong Wang } 284049f8e832SCong Wang EXPORT_SYMBOL(__skb_checksum_complete_head); 284149f8e832SCong Wang 284214641931SCong Wang /* This function assumes skb->csum already holds pseudo header's checksum, 284314641931SCong Wang * which has been changed from the hardware checksum, for example, by 284414641931SCong Wang * __skb_checksum_validate_complete(). And, the original skb->csum must 284514641931SCong Wang * have been validated unsuccessfully for CHECKSUM_COMPLETE case. 284614641931SCong Wang * 284714641931SCong Wang * It returns non-zero if the recomputed checksum is still invalid, otherwise 284814641931SCong Wang * zero. The new checksum is stored back into skb->csum unless the skb is 284914641931SCong Wang * shared. 285014641931SCong Wang */ 285149f8e832SCong Wang __sum16 __skb_checksum_complete(struct sk_buff *skb) 285249f8e832SCong Wang { 285349f8e832SCong Wang __wsum csum; 285449f8e832SCong Wang __sum16 sum; 285549f8e832SCong Wang 285649f8e832SCong Wang csum = skb_checksum(skb, 0, skb->len, 0); 285749f8e832SCong Wang 285849f8e832SCong Wang sum = csum_fold(csum_add(skb->csum, csum)); 285914641931SCong Wang /* This check is inverted, because we already knew the hardware 286014641931SCong Wang * checksum is invalid before calling this function. So, if the 286114641931SCong Wang * re-computed checksum is valid instead, then we have a mismatch 286214641931SCong Wang * between the original skb->csum and skb_checksum(). This means either 286314641931SCong Wang * the original hardware checksum is incorrect or we screw up skb->csum 286414641931SCong Wang * when moving skb->data around. 
286514641931SCong Wang */ 286649f8e832SCong Wang if (likely(!sum)) { 286749f8e832SCong Wang if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 286849f8e832SCong Wang !skb->csum_complete_sw) 28697fe50ac8SCong Wang netdev_rx_csum_fault(skb->dev, skb); 287049f8e832SCong Wang } 287149f8e832SCong Wang 287249f8e832SCong Wang if (!skb_shared(skb)) { 287349f8e832SCong Wang /* Save full packet checksum */ 287449f8e832SCong Wang skb->csum = csum; 287549f8e832SCong Wang skb->ip_summed = CHECKSUM_COMPLETE; 287649f8e832SCong Wang skb->csum_complete_sw = 1; 287749f8e832SCong Wang skb->csum_valid = !sum; 287849f8e832SCong Wang } 287949f8e832SCong Wang 288049f8e832SCong Wang return sum; 288149f8e832SCong Wang } 288249f8e832SCong Wang EXPORT_SYMBOL(__skb_checksum_complete); 288349f8e832SCong Wang 28849617813dSDavide Caratti static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) 28859617813dSDavide Caratti { 28869617813dSDavide Caratti net_warn_ratelimited( 28879617813dSDavide Caratti "%s: attempt to compute crc32c without libcrc32c.ko\n", 28889617813dSDavide Caratti __func__); 28899617813dSDavide Caratti return 0; 28909617813dSDavide Caratti } 28919617813dSDavide Caratti 28929617813dSDavide Caratti static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, 28939617813dSDavide Caratti int offset, int len) 28949617813dSDavide Caratti { 28959617813dSDavide Caratti net_warn_ratelimited( 28969617813dSDavide Caratti "%s: attempt to compute crc32c without libcrc32c.ko\n", 28979617813dSDavide Caratti __func__); 28989617813dSDavide Caratti return 0; 28999617813dSDavide Caratti } 29009617813dSDavide Caratti 29019617813dSDavide Caratti static const struct skb_checksum_ops default_crc32c_ops = { 29029617813dSDavide Caratti .update = warn_crc32c_csum_update, 29039617813dSDavide Caratti .combine = warn_crc32c_csum_combine, 29049617813dSDavide Caratti }; 29059617813dSDavide Caratti 29069617813dSDavide Caratti const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = 29079617813dSDavide Caratti &default_crc32c_ops; 29089617813dSDavide Caratti EXPORT_SYMBOL(crc32c_csum_stub); 29099617813dSDavide Caratti 2910af2806f8SThomas Graf /** 2911af2806f8SThomas Graf * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 2912af2806f8SThomas Graf * @from: source buffer 2913af2806f8SThomas Graf * 2914af2806f8SThomas Graf * Calculates the amount of linear headroom needed in the 'to' skb passed 2915af2806f8SThomas Graf * into skb_zerocopy(). 
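
/*
 * Editor's sketch (not part of the original file): the stub above exists
 * so CRC32c users can go through __skb_checksum() without a hard
 * dependency on libcrc32c. The zero seed below is illustrative; the
 * in-tree SCTP user seeds differently and post-processes the result.
 */
static __wsum __maybe_unused example_skb_crc32c(const struct sk_buff *skb,
						int len)
{
	/* Warns and returns 0 unless a real provider replaced the stub. */
	return __skb_checksum(skb, 0, len, 0, crc32c_csum_stub);
}
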
2916af2806f8SThomas Graf */ 2917af2806f8SThomas Graf unsigned int 2918af2806f8SThomas Graf skb_zerocopy_headlen(const struct sk_buff *from) 2919af2806f8SThomas Graf { 2920af2806f8SThomas Graf unsigned int hlen = 0; 2921af2806f8SThomas Graf 2922af2806f8SThomas Graf if (!from->head_frag || 2923af2806f8SThomas Graf skb_headlen(from) < L1_CACHE_BYTES || 2924af2806f8SThomas Graf skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 2925af2806f8SThomas Graf hlen = skb_headlen(from); 2926af2806f8SThomas Graf 2927af2806f8SThomas Graf if (skb_has_frag_list(from)) 2928af2806f8SThomas Graf hlen = from->len; 2929af2806f8SThomas Graf 2930af2806f8SThomas Graf return hlen; 2931af2806f8SThomas Graf } 2932af2806f8SThomas Graf EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 2933af2806f8SThomas Graf 2934af2806f8SThomas Graf /** 2935af2806f8SThomas Graf * skb_zerocopy - Zero copy skb to skb 2936af2806f8SThomas Graf * @to: destination buffer 29377fceb4deSMasanari Iida * @from: source buffer 2938af2806f8SThomas Graf * @len: number of bytes to copy from source buffer 2939af2806f8SThomas Graf * @hlen: size of linear headroom in destination buffer 2940af2806f8SThomas Graf * 2941af2806f8SThomas Graf * Copies up to `len` bytes from `from` to `to` by creating references 2942af2806f8SThomas Graf * to the frags in the source buffer. 2943af2806f8SThomas Graf * 2944af2806f8SThomas Graf * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 2945af2806f8SThomas Graf * headroom in the `to` buffer. 294636d5fe6aSZoltan Kiss * 294736d5fe6aSZoltan Kiss * Return value: 294836d5fe6aSZoltan Kiss * 0: everything is OK 294936d5fe6aSZoltan Kiss * -ENOMEM: couldn't orphan frags of @from due to lack of memory 295036d5fe6aSZoltan Kiss * -EFAULT: skb_copy_bits() found some problem with skb geometry 2951af2806f8SThomas Graf */ 295236d5fe6aSZoltan Kiss int 295336d5fe6aSZoltan Kiss skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 2954af2806f8SThomas Graf { 2955af2806f8SThomas Graf int i, j = 0; 2956af2806f8SThomas Graf int plen = 0; /* length of skb->head fragment */ 295736d5fe6aSZoltan Kiss int ret; 2958af2806f8SThomas Graf struct page *page; 2959af2806f8SThomas Graf unsigned int offset; 2960af2806f8SThomas Graf 2961af2806f8SThomas Graf BUG_ON(!from->head_frag && !hlen); 2962af2806f8SThomas Graf 2963af2806f8SThomas Graf /* dont bother with small payloads */ 296436d5fe6aSZoltan Kiss if (len <= skb_tailroom(to)) 296536d5fe6aSZoltan Kiss return skb_copy_bits(from, 0, skb_put(to, len), len); 2966af2806f8SThomas Graf 2967af2806f8SThomas Graf if (hlen) { 296836d5fe6aSZoltan Kiss ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 296936d5fe6aSZoltan Kiss if (unlikely(ret)) 297036d5fe6aSZoltan Kiss return ret; 2971af2806f8SThomas Graf len -= hlen; 2972af2806f8SThomas Graf } else { 2973af2806f8SThomas Graf plen = min_t(int, skb_headlen(from), len); 2974af2806f8SThomas Graf if (plen) { 2975af2806f8SThomas Graf page = virt_to_head_page(from->head); 2976af2806f8SThomas Graf offset = from->data - (unsigned char *)page_address(page); 2977af2806f8SThomas Graf __skb_fill_page_desc(to, 0, page, offset, plen); 2978af2806f8SThomas Graf get_page(page); 2979af2806f8SThomas Graf j = 1; 2980af2806f8SThomas Graf len -= plen; 2981af2806f8SThomas Graf } 2982af2806f8SThomas Graf } 2983af2806f8SThomas Graf 2984af2806f8SThomas Graf to->truesize += len + plen; 2985af2806f8SThomas Graf to->len += len + plen; 2986af2806f8SThomas Graf to->data_len += len + plen; 2987af2806f8SThomas Graf 298836d5fe6aSZoltan Kiss if (unlikely(skb_orphan_frags(from, 
GFP_ATOMIC))) { 298936d5fe6aSZoltan Kiss skb_tx_error(from); 299036d5fe6aSZoltan Kiss return -ENOMEM; 299136d5fe6aSZoltan Kiss } 29921f8b977aSWillem de Bruijn skb_zerocopy_clone(to, from, GFP_ATOMIC); 299336d5fe6aSZoltan Kiss 2994af2806f8SThomas Graf for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 2995d8e18a51SMatthew Wilcox (Oracle) int size; 2996d8e18a51SMatthew Wilcox (Oracle) 2997af2806f8SThomas Graf if (!len) 2998af2806f8SThomas Graf break; 2999af2806f8SThomas Graf skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 3000d8e18a51SMatthew Wilcox (Oracle) size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), 3001d8e18a51SMatthew Wilcox (Oracle) len); 3002d8e18a51SMatthew Wilcox (Oracle) skb_frag_size_set(&skb_shinfo(to)->frags[j], size); 3003d8e18a51SMatthew Wilcox (Oracle) len -= size; 3004af2806f8SThomas Graf skb_frag_ref(to, j); 3005af2806f8SThomas Graf j++; 3006af2806f8SThomas Graf } 3007af2806f8SThomas Graf skb_shinfo(to)->nr_frags = j; 300836d5fe6aSZoltan Kiss 300936d5fe6aSZoltan Kiss return 0; 3010af2806f8SThomas Graf } 3011af2806f8SThomas Graf EXPORT_SYMBOL_GPL(skb_zerocopy); 3012af2806f8SThomas Graf 30131da177e4SLinus Torvalds void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 30141da177e4SLinus Torvalds { 3015d3bc23e7SAl Viro __wsum csum; 30161da177e4SLinus Torvalds long csstart; 30171da177e4SLinus Torvalds 301884fa7933SPatrick McHardy if (skb->ip_summed == CHECKSUM_PARTIAL) 301955508d60SMichał Mirosław csstart = skb_checksum_start_offset(skb); 30201da177e4SLinus Torvalds else 30211da177e4SLinus Torvalds csstart = skb_headlen(skb); 30221da177e4SLinus Torvalds 302309a62660SKris Katterjohn BUG_ON(csstart > skb_headlen(skb)); 30241da177e4SLinus Torvalds 3025d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data(skb, to, csstart); 30261da177e4SLinus Torvalds 30271da177e4SLinus Torvalds csum = 0; 30281da177e4SLinus Torvalds if (csstart != skb->len) 30291da177e4SLinus Torvalds csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 30308d5930dfSAl Viro skb->len - csstart); 30311da177e4SLinus Torvalds 303284fa7933SPatrick McHardy if (skb->ip_summed == CHECKSUM_PARTIAL) { 3033ff1dcadbSAl Viro long csstuff = csstart + skb->csum_offset; 30341da177e4SLinus Torvalds 3035d3bc23e7SAl Viro *((__sum16 *)(to + csstuff)) = csum_fold(csum); 30361da177e4SLinus Torvalds } 30371da177e4SLinus Torvalds } 3038b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_dev); 30391da177e4SLinus Torvalds 30401da177e4SLinus Torvalds /** 30411da177e4SLinus Torvalds * skb_dequeue - remove from the head of the queue 30421da177e4SLinus Torvalds * @list: list to dequeue from 30431da177e4SLinus Torvalds * 30441da177e4SLinus Torvalds * Remove the head of the list. The list lock is taken so the function 30451da177e4SLinus Torvalds * may be used safely with other locking list functions. The head item is 30461da177e4SLinus Torvalds * returned or %NULL if the list is empty. 30471da177e4SLinus Torvalds */ 30481da177e4SLinus Torvalds 30491da177e4SLinus Torvalds struct sk_buff *skb_dequeue(struct sk_buff_head *list) 30501da177e4SLinus Torvalds { 30511da177e4SLinus Torvalds unsigned long flags; 30521da177e4SLinus Torvalds struct sk_buff *result; 30531da177e4SLinus Torvalds 30541da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 30551da177e4SLinus Torvalds result = __skb_dequeue(list); 30561da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 30571da177e4SLinus Torvalds return result; 30581da177e4SLinus Torvalds } 3059b4ac530fSDavid S. 
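
/*
 * Editor's sketch (not part of the original file): the intended pairing of
 * skb_zerocopy_headlen() with skb_zerocopy(), similar in spirit to the
 * nfnetlink_queue user. example_zerocopy_clone() is a hypothetical name.
 */
static __maybe_unused struct sk_buff *example_zerocopy_clone(struct sk_buff *from)
{
	unsigned int hlen = skb_zerocopy_headlen(from);
	struct sk_buff *to = alloc_skb(hlen, GFP_ATOMIC);

	if (!to)
		return NULL;

	/* Frags are referenced, not copied; only head data is memcpy'd. */
	if (skb_zerocopy(to, from, from->len, hlen)) {
		kfree_skb(to);
		return NULL;
	}
	return to;
}
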
Miller EXPORT_SYMBOL(skb_dequeue); 30601da177e4SLinus Torvalds 30611da177e4SLinus Torvalds /** 30621da177e4SLinus Torvalds * skb_dequeue_tail - remove from the tail of the queue 30631da177e4SLinus Torvalds * @list: list to dequeue from 30641da177e4SLinus Torvalds * 30651da177e4SLinus Torvalds * Remove the tail of the list. The list lock is taken so the function 30661da177e4SLinus Torvalds * may be used safely with other locking list functions. The tail item is 30671da177e4SLinus Torvalds * returned or %NULL if the list is empty. 30681da177e4SLinus Torvalds */ 30691da177e4SLinus Torvalds struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 30701da177e4SLinus Torvalds { 30711da177e4SLinus Torvalds unsigned long flags; 30721da177e4SLinus Torvalds struct sk_buff *result; 30731da177e4SLinus Torvalds 30741da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 30751da177e4SLinus Torvalds result = __skb_dequeue_tail(list); 30761da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 30771da177e4SLinus Torvalds return result; 30781da177e4SLinus Torvalds } 3079b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue_tail); 30801da177e4SLinus Torvalds 30811da177e4SLinus Torvalds /** 30821da177e4SLinus Torvalds * skb_queue_purge - empty a list 30831da177e4SLinus Torvalds * @list: list to empty 30841da177e4SLinus Torvalds * 30851da177e4SLinus Torvalds * Delete all buffers on an &sk_buff list. Each buffer is removed from 30861da177e4SLinus Torvalds * the list and one reference dropped. This function takes the list 30871da177e4SLinus Torvalds * lock and is atomic with respect to other list locking functions. 30881da177e4SLinus Torvalds */ 30891da177e4SLinus Torvalds void skb_queue_purge(struct sk_buff_head *list) 30901da177e4SLinus Torvalds { 30911da177e4SLinus Torvalds struct sk_buff *skb; 30921da177e4SLinus Torvalds while ((skb = skb_dequeue(list)) != NULL) 30931da177e4SLinus Torvalds kfree_skb(skb); 30941da177e4SLinus Torvalds } 3095b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_purge); 30961da177e4SLinus Torvalds 30971da177e4SLinus Torvalds /** 30989f5afeaeSYaogong Wang * skb_rbtree_purge - empty a skb rbtree 30999f5afeaeSYaogong Wang * @root: root of the rbtree to empty 3100385114deSPeter Oskolkov * Return value: the sum of truesizes of all purged skbs. 31019f5afeaeSYaogong Wang * 31029f5afeaeSYaogong Wang * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from 31039f5afeaeSYaogong Wang * the list and one reference dropped. This function does not take 31049f5afeaeSYaogong Wang * any lock. Synchronization should be handled by the caller (e.g., TCP 31059f5afeaeSYaogong Wang * out-of-order queue is protected by the socket lock). 
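
/*
 * Editor's sketch (not part of the original file): the locked queue
 * helpers compose into a trivial producer/consumer; the head must have
 * been set up once with skb_queue_head_init().
 */
static void __maybe_unused example_queue_usage(struct sk_buff_head *q,
					       struct sk_buff *skb)
{
	struct sk_buff *next;

	skb_queue_tail(q, skb);		/* producer side */

	next = skb_dequeue(q);		/* consumer side; NULL when empty */
	if (next)
		kfree_skb(next);

	skb_queue_purge(q);		/* drop anything still queued */
}
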
31069f5afeaeSYaogong Wang */
3107385114deSPeter Oskolkov unsigned int skb_rbtree_purge(struct rb_root *root)
31089f5afeaeSYaogong Wang {
31097c90584cSEric Dumazet struct rb_node *p = rb_first(root);
3110385114deSPeter Oskolkov unsigned int sum = 0;
31119f5afeaeSYaogong Wang
31127c90584cSEric Dumazet while (p) {
31137c90584cSEric Dumazet struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
31147c90584cSEric Dumazet
31157c90584cSEric Dumazet p = rb_next(p);
31167c90584cSEric Dumazet rb_erase(&skb->rbnode, root);
3117385114deSPeter Oskolkov sum += skb->truesize;
31189f5afeaeSYaogong Wang kfree_skb(skb);
31197c90584cSEric Dumazet }
3120385114deSPeter Oskolkov return sum;
31219f5afeaeSYaogong Wang }
31229f5afeaeSYaogong Wang
31239f5afeaeSYaogong Wang /**
31241da177e4SLinus Torvalds * skb_queue_head - queue a buffer at the list head
31251da177e4SLinus Torvalds * @list: list to use
31261da177e4SLinus Torvalds * @newsk: buffer to queue
31271da177e4SLinus Torvalds *
31281da177e4SLinus Torvalds * Queue a buffer at the start of the list. This function takes the
31291da177e4SLinus Torvalds * list lock and can be used safely with other locking &sk_buff functions.
31311da177e4SLinus Torvalds *
31321da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time.
31331da177e4SLinus Torvalds */
31341da177e4SLinus Torvalds void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
31351da177e4SLinus Torvalds {
31361da177e4SLinus Torvalds unsigned long flags;
31371da177e4SLinus Torvalds
31381da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags);
31391da177e4SLinus Torvalds __skb_queue_head(list, newsk);
31401da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags);
31411da177e4SLinus Torvalds }
3142b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_head);
31431da177e4SLinus Torvalds
31441da177e4SLinus Torvalds /**
31451da177e4SLinus Torvalds * skb_queue_tail - queue a buffer at the list tail
31461da177e4SLinus Torvalds * @list: list to use
31471da177e4SLinus Torvalds * @newsk: buffer to queue
31481da177e4SLinus Torvalds *
31491da177e4SLinus Torvalds * Queue a buffer at the tail of the list. This function takes the
31501da177e4SLinus Torvalds * list lock and can be used safely with other locking &sk_buff functions.
31521da177e4SLinus Torvalds *
31531da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time.
31541da177e4SLinus Torvalds */
31551da177e4SLinus Torvalds void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
31561da177e4SLinus Torvalds {
31571da177e4SLinus Torvalds unsigned long flags;
31581da177e4SLinus Torvalds
31591da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags);
31601da177e4SLinus Torvalds __skb_queue_tail(list, newsk);
31611da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags);
31621da177e4SLinus Torvalds }
3163b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_tail);
31648728b834SDavid S. Miller
31651da177e4SLinus Torvalds /**
31661da177e4SLinus Torvalds * skb_unlink - remove a buffer from a list
31671da177e4SLinus Torvalds * @skb: buffer to remove
31688728b834SDavid S. Miller * @list: list to use
31691da177e4SLinus Torvalds *
31708728b834SDavid S. Miller * Remove a packet from a list. The list locks are taken and this
31718728b834SDavid S. Miller * function is atomic with respect to other locked list calls.
31721da177e4SLinus Torvalds *
31738728b834SDavid S. Miller * You must know what list the SKB is on.
31741da177e4SLinus Torvalds */ 31758728b834SDavid S. Miller void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 31761da177e4SLinus Torvalds { 31771da177e4SLinus Torvalds unsigned long flags; 31781da177e4SLinus Torvalds 31791da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 31808728b834SDavid S. Miller __skb_unlink(skb, list); 31811da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 31821da177e4SLinus Torvalds } 3183b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_unlink); 31841da177e4SLinus Torvalds 31851da177e4SLinus Torvalds /** 31861da177e4SLinus Torvalds * skb_append - append a buffer 31871da177e4SLinus Torvalds * @old: buffer to insert after 31881da177e4SLinus Torvalds * @newsk: buffer to insert 31898728b834SDavid S. Miller * @list: list to use 31901da177e4SLinus Torvalds * 31911da177e4SLinus Torvalds * Place a packet after a given packet in a list. The list locks are taken 31921da177e4SLinus Torvalds * and this function is atomic with respect to other list locked calls. 31931da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 31941da177e4SLinus Torvalds */ 31958728b834SDavid S. Miller void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 31961da177e4SLinus Torvalds { 31971da177e4SLinus Torvalds unsigned long flags; 31981da177e4SLinus Torvalds 31998728b834SDavid S. Miller spin_lock_irqsave(&list->lock, flags); 32007de6c033SGerrit Renker __skb_queue_after(list, old, newsk); 32018728b834SDavid S. Miller spin_unlock_irqrestore(&list->lock, flags); 32021da177e4SLinus Torvalds } 3203b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append); 32041da177e4SLinus Torvalds 32051da177e4SLinus Torvalds static inline void skb_split_inside_header(struct sk_buff *skb, 32061da177e4SLinus Torvalds struct sk_buff* skb1, 32071da177e4SLinus Torvalds const u32 len, const int pos) 32081da177e4SLinus Torvalds { 32091da177e4SLinus Torvalds int i; 32101da177e4SLinus Torvalds 3211d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 3212d626f62bSArnaldo Carvalho de Melo pos - len); 32131da177e4SLinus Torvalds /* And move data appendix as is. 
*/
32141da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
32151da177e4SLinus Torvalds skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
32161da177e4SLinus Torvalds
32171da177e4SLinus Torvalds skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
32181da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = 0;
32191da177e4SLinus Torvalds skb1->data_len = skb->data_len;
32201da177e4SLinus Torvalds skb1->len += skb1->data_len;
32211da177e4SLinus Torvalds skb->data_len = 0;
32221da177e4SLinus Torvalds skb->len = len;
322327a884dcSArnaldo Carvalho de Melo skb_set_tail_pointer(skb, len);
32241da177e4SLinus Torvalds }
32251da177e4SLinus Torvalds
32261da177e4SLinus Torvalds static inline void skb_split_no_header(struct sk_buff *skb,
32271da177e4SLinus Torvalds struct sk_buff* skb1,
32281da177e4SLinus Torvalds const u32 len, int pos)
32291da177e4SLinus Torvalds {
32301da177e4SLinus Torvalds int i, k = 0;
32311da177e4SLinus Torvalds const int nfrags = skb_shinfo(skb)->nr_frags;
32321da177e4SLinus Torvalds
32331da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = 0;
32341da177e4SLinus Torvalds skb1->len = skb1->data_len = skb->len - len;
32351da177e4SLinus Torvalds skb->len = len;
32361da177e4SLinus Torvalds skb->data_len = len - pos;
32371da177e4SLinus Torvalds
32381da177e4SLinus Torvalds for (i = 0; i < nfrags; i++) {
32399e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
32401da177e4SLinus Torvalds
32411da177e4SLinus Torvalds if (pos + size > len) {
32421da177e4SLinus Torvalds skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
32431da177e4SLinus Torvalds
32441da177e4SLinus Torvalds if (pos < len) {
32451da177e4SLinus Torvalds /* Split frag.
32461da177e4SLinus Torvalds * We have two variants in this case:
32471da177e4SLinus Torvalds * 1. Move all the frag to the second
32481da177e4SLinus Torvalds * part, if it is possible. F.e.
32491da177e4SLinus Torvalds * this approach is mandatory for TUX,
32501da177e4SLinus Torvalds * where splitting is expensive.
32511da177e4SLinus Torvalds * 2. Split the frag accurately at the boundary; this is what we do here.
32521da177e4SLinus Torvalds */
3253ea2ab693SIan Campbell skb_frag_ref(skb, i);
3254b54c9d5bSJonathan Lemon skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
32559e903e08SEric Dumazet skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
32569e903e08SEric Dumazet skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
32571da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags++;
32581da177e4SLinus Torvalds }
32591da177e4SLinus Torvalds k++;
32601da177e4SLinus Torvalds } else
32611da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags++;
32621da177e4SLinus Torvalds pos += size;
32631da177e4SLinus Torvalds }
32641da177e4SLinus Torvalds skb_shinfo(skb1)->nr_frags = k;
32651da177e4SLinus Torvalds }
32661da177e4SLinus Torvalds
32671da177e4SLinus Torvalds /**
32681da177e4SLinus Torvalds * skb_split - Split fragmented skb into two parts at length len.
32691da177e4SLinus Torvalds * @skb: the buffer to split
32701da177e4SLinus Torvalds * @skb1: the buffer to receive the second part
32711da177e4SLinus Torvalds * @len: new length for skb
32721da177e4SLinus Torvalds */
32731da177e4SLinus Torvalds void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
32741da177e4SLinus Torvalds {
32751da177e4SLinus Torvalds int pos = skb_headlen(skb);
32761da177e4SLinus Torvalds
327706b4feb3SJonathan Lemon skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
32781f8b977aSWillem de Bruijn skb_zerocopy_clone(skb1, skb, 0);
32791da177e4SLinus Torvalds if (len < pos) /* Split line is inside header. */
32801da177e4SLinus Torvalds skb_split_inside_header(skb, skb1, len, pos);
32811da177e4SLinus Torvalds else /* Second chunk has no header, nothing to copy. */
32821da177e4SLinus Torvalds skb_split_no_header(skb, skb1, len, pos);
32831da177e4SLinus Torvalds }
3284b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_split);
32851da177e4SLinus Torvalds
32869f782db3SIlpo Järvinen /* Shifting from/to a cloned skb is a no-go.
32879f782db3SIlpo Järvinen *
32889f782db3SIlpo Järvinen * Caller cannot keep skb_shinfo related pointers past calling here!
32899f782db3SIlpo Järvinen */
3290832d11c5SIlpo Järvinen static int skb_prepare_for_shift(struct sk_buff *skb)
3291832d11c5SIlpo Järvinen {
3292*097b9146SMarco Elver int ret = 0;
3293*097b9146SMarco Elver
3294*097b9146SMarco Elver if (skb_cloned(skb)) {
3295*097b9146SMarco Elver /* Save and restore truesize: pskb_expand_head() may reallocate
3296*097b9146SMarco Elver * memory, and ksize() of the new allocation can differ from that
3297*097b9146SMarco Elver * of the old one, but we cannot change truesize at this point.
3298*097b9146SMarco Elver */
3299*097b9146SMarco Elver unsigned int save_truesize = skb->truesize;
3300*097b9146SMarco Elver
3301*097b9146SMarco Elver ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3302*097b9146SMarco Elver skb->truesize = save_truesize;
3303*097b9146SMarco Elver }
3304*097b9146SMarco Elver return ret;
3305832d11c5SIlpo Järvinen }
3306832d11c5SIlpo Järvinen
3307832d11c5SIlpo Järvinen /**
3308832d11c5SIlpo Järvinen * skb_shift - Shifts paged data partially from one skb to another
3309832d11c5SIlpo Järvinen * @tgt: buffer into which tail data gets added
3310832d11c5SIlpo Järvinen * @skb: buffer from which the paged data comes from
3311832d11c5SIlpo Järvinen * @shiftlen: shift up to this many bytes
3312832d11c5SIlpo Järvinen *
3313832d11c5SIlpo Järvinen * Attempts to shift up to shiftlen worth of bytes, which may be less than
331420e994a0SFeng King * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
3315832d11c5SIlpo Järvinen * It's up to the caller to free skb if everything was shifted.
3316832d11c5SIlpo Järvinen *
3317832d11c5SIlpo Järvinen * If @tgt runs out of frags, the whole operation is aborted.
3318832d11c5SIlpo Järvinen *
3319832d11c5SIlpo Järvinen * @skb cannot include anything but paged data, while @tgt is allowed
3320832d11c5SIlpo Järvinen * to have non-paged data as well.
3321832d11c5SIlpo Järvinen *
3322832d11c5SIlpo Järvinen * TODO: full sized shift could be optimized but that would need
3323832d11c5SIlpo Järvinen * specialized skb free'er to handle frags without up-to-date nr_frags.
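
/*
 * Editor's sketch (not part of the original file, placed between the two
 * functions): carving the tail of an skb off into a fresh buffer, in the
 * spirit of the TCP segmentation path. The second buffer must be empty;
 * linear room is reserved in case the split line falls inside the header.
 */
static __maybe_unused struct sk_buff *example_split_tail(struct sk_buff *skb,
							 u32 len)
{
	struct sk_buff *tail = alloc_skb(skb_headlen(skb), GFP_ATOMIC);

	if (!tail)
		return NULL;

	skb_split(skb, tail, len);	/* skb keeps [0, len), tail the rest */
	return tail;
}
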
3324832d11c5SIlpo Järvinen */ 3325832d11c5SIlpo Järvinen int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 3326832d11c5SIlpo Järvinen { 3327832d11c5SIlpo Järvinen int from, to, merge, todo; 3328d8e18a51SMatthew Wilcox (Oracle) skb_frag_t *fragfrom, *fragto; 3329832d11c5SIlpo Järvinen 3330832d11c5SIlpo Järvinen BUG_ON(shiftlen > skb->len); 3331f8071cdeSEric Dumazet 3332f8071cdeSEric Dumazet if (skb_headlen(skb)) 3333f8071cdeSEric Dumazet return 0; 33341f8b977aSWillem de Bruijn if (skb_zcopy(tgt) || skb_zcopy(skb)) 33351f8b977aSWillem de Bruijn return 0; 3336832d11c5SIlpo Järvinen 3337832d11c5SIlpo Järvinen todo = shiftlen; 3338832d11c5SIlpo Järvinen from = 0; 3339832d11c5SIlpo Järvinen to = skb_shinfo(tgt)->nr_frags; 3340832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 3341832d11c5SIlpo Järvinen 3342832d11c5SIlpo Järvinen /* Actual merge is delayed until the point when we know we can 3343832d11c5SIlpo Järvinen * commit all, so that we don't have to undo partial changes 3344832d11c5SIlpo Järvinen */ 3345832d11c5SIlpo Järvinen if (!to || 3346ea2ab693SIan Campbell !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 3347b54c9d5bSJonathan Lemon skb_frag_off(fragfrom))) { 3348832d11c5SIlpo Järvinen merge = -1; 3349832d11c5SIlpo Järvinen } else { 3350832d11c5SIlpo Järvinen merge = to - 1; 3351832d11c5SIlpo Järvinen 33529e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 3353832d11c5SIlpo Järvinen if (todo < 0) { 3354832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || 3355832d11c5SIlpo Järvinen skb_prepare_for_shift(tgt)) 3356832d11c5SIlpo Järvinen return 0; 3357832d11c5SIlpo Järvinen 33589f782db3SIlpo Järvinen /* All previous frag pointers might be stale! */ 33599f782db3SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 3360832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 3361832d11c5SIlpo Järvinen 33629e903e08SEric Dumazet skb_frag_size_add(fragto, shiftlen); 33639e903e08SEric Dumazet skb_frag_size_sub(fragfrom, shiftlen); 3364b54c9d5bSJonathan Lemon skb_frag_off_add(fragfrom, shiftlen); 3365832d11c5SIlpo Järvinen 3366832d11c5SIlpo Järvinen goto onlymerged; 3367832d11c5SIlpo Järvinen } 3368832d11c5SIlpo Järvinen 3369832d11c5SIlpo Järvinen from++; 3370832d11c5SIlpo Järvinen } 3371832d11c5SIlpo Järvinen 3372832d11c5SIlpo Järvinen /* Skip full, not-fitting skb to avoid expensive operations */ 3373832d11c5SIlpo Järvinen if ((shiftlen == skb->len) && 3374832d11c5SIlpo Järvinen (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 3375832d11c5SIlpo Järvinen return 0; 3376832d11c5SIlpo Järvinen 3377832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 3378832d11c5SIlpo Järvinen return 0; 3379832d11c5SIlpo Järvinen 3380832d11c5SIlpo Järvinen while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 3381832d11c5SIlpo Järvinen if (to == MAX_SKB_FRAGS) 3382832d11c5SIlpo Järvinen return 0; 3383832d11c5SIlpo Järvinen 3384832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 3385832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[to]; 3386832d11c5SIlpo Järvinen 33879e903e08SEric Dumazet if (todo >= skb_frag_size(fragfrom)) { 3388832d11c5SIlpo Järvinen *fragto = *fragfrom; 33899e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 3390832d11c5SIlpo Järvinen from++; 3391832d11c5SIlpo Järvinen to++; 3392832d11c5SIlpo Järvinen 3393832d11c5SIlpo Järvinen } else { 3394ea2ab693SIan Campbell __skb_frag_ref(fragfrom); 3395b54c9d5bSJonathan Lemon skb_frag_page_copy(fragto, fragfrom); 3396b54c9d5bSJonathan 
Lemon skb_frag_off_copy(fragto, fragfrom); 33979e903e08SEric Dumazet skb_frag_size_set(fragto, todo); 3398832d11c5SIlpo Järvinen 3399b54c9d5bSJonathan Lemon skb_frag_off_add(fragfrom, todo); 34009e903e08SEric Dumazet skb_frag_size_sub(fragfrom, todo); 3401832d11c5SIlpo Järvinen todo = 0; 3402832d11c5SIlpo Järvinen 3403832d11c5SIlpo Järvinen to++; 3404832d11c5SIlpo Järvinen break; 3405832d11c5SIlpo Järvinen } 3406832d11c5SIlpo Järvinen } 3407832d11c5SIlpo Järvinen 3408832d11c5SIlpo Järvinen /* Ready to "commit" this state change to tgt */ 3409832d11c5SIlpo Järvinen skb_shinfo(tgt)->nr_frags = to; 3410832d11c5SIlpo Järvinen 3411832d11c5SIlpo Järvinen if (merge >= 0) { 3412832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[0]; 3413832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 3414832d11c5SIlpo Järvinen 34159e903e08SEric Dumazet skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 3416ea2ab693SIan Campbell __skb_frag_unref(fragfrom); 3417832d11c5SIlpo Järvinen } 3418832d11c5SIlpo Järvinen 3419832d11c5SIlpo Järvinen /* Reposition in the original skb */ 3420832d11c5SIlpo Järvinen to = 0; 3421832d11c5SIlpo Järvinen while (from < skb_shinfo(skb)->nr_frags) 3422832d11c5SIlpo Järvinen skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 3423832d11c5SIlpo Järvinen skb_shinfo(skb)->nr_frags = to; 3424832d11c5SIlpo Järvinen 3425832d11c5SIlpo Järvinen BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 3426832d11c5SIlpo Järvinen 3427832d11c5SIlpo Järvinen onlymerged: 3428832d11c5SIlpo Järvinen /* Most likely the tgt won't ever need its checksum anymore, skb on 3429832d11c5SIlpo Järvinen * the other hand might need it if it needs to be resent 3430832d11c5SIlpo Järvinen */ 3431832d11c5SIlpo Järvinen tgt->ip_summed = CHECKSUM_PARTIAL; 3432832d11c5SIlpo Järvinen skb->ip_summed = CHECKSUM_PARTIAL; 3433832d11c5SIlpo Järvinen 3434832d11c5SIlpo Järvinen /* Yak, is it really working this way? Some helper please? */ 3435832d11c5SIlpo Järvinen skb->len -= shiftlen; 3436832d11c5SIlpo Järvinen skb->data_len -= shiftlen; 3437832d11c5SIlpo Järvinen skb->truesize -= shiftlen; 3438832d11c5SIlpo Järvinen tgt->len += shiftlen; 3439832d11c5SIlpo Järvinen tgt->data_len += shiftlen; 3440832d11c5SIlpo Järvinen tgt->truesize += shiftlen; 3441832d11c5SIlpo Järvinen 3442832d11c5SIlpo Järvinen return shiftlen; 3443832d11c5SIlpo Järvinen } 3444832d11c5SIlpo Järvinen 3445677e90edSThomas Graf /** 3446677e90edSThomas Graf * skb_prepare_seq_read - Prepare a sequential read of skb data 3447677e90edSThomas Graf * @skb: the buffer to read 3448677e90edSThomas Graf * @from: lower offset of data to be read 3449677e90edSThomas Graf * @to: upper offset of data to be read 3450677e90edSThomas Graf * @st: state variable 3451677e90edSThomas Graf * 3452677e90edSThomas Graf * Initializes the specified state variable. Must be called before 3453677e90edSThomas Graf * invoking skb_seq_read() for the first time. 3454677e90edSThomas Graf */ 3455677e90edSThomas Graf void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 3456677e90edSThomas Graf unsigned int to, struct skb_seq_state *st) 3457677e90edSThomas Graf { 3458677e90edSThomas Graf st->lower_offset = from; 3459677e90edSThomas Graf st->upper_offset = to; 3460677e90edSThomas Graf st->root_skb = st->cur_skb = skb; 3461677e90edSThomas Graf st->frag_idx = st->stepped_offset = 0; 3462677e90edSThomas Graf st->frag_data = NULL; 346397550f6fSWillem de Bruijn st->frag_off = 0; 3464677e90edSThomas Graf } 3465b4ac530fSDavid S. 
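
/*
 * Editor's sketch (not part of the original file), modelled loosely on
 * TCP retransmit collapsing: drain @skb into the preceding buffer.
 * Assumes @skb holds only paged data (otherwise skb_shift() returns 0)
 * and is not linked on any list when freed.
 */
static void __maybe_unused example_shift_all(struct sk_buff *prev,
					     struct sk_buff *skb)
{
	/* Ask for everything; skb_shift() may move less, or nothing. */
	int shifted = skb_shift(prev, skb, skb->len);

	if (shifted && !skb->len)
		kfree_skb(skb);		/* fully drained into prev */
}
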
Miller EXPORT_SYMBOL(skb_prepare_seq_read); 3466677e90edSThomas Graf 3467677e90edSThomas Graf /** 3468677e90edSThomas Graf * skb_seq_read - Sequentially read skb data 3469677e90edSThomas Graf * @consumed: number of bytes consumed by the caller so far 3470677e90edSThomas Graf * @data: destination pointer for data to be returned 3471677e90edSThomas Graf * @st: state variable 3472677e90edSThomas Graf * 3473bc32383cSMathias Krause * Reads a block of skb data at @consumed relative to the 3474677e90edSThomas Graf * lower offset specified to skb_prepare_seq_read(). Assigns 3475bc32383cSMathias Krause * the head of the data block to @data and returns the length 3476677e90edSThomas Graf * of the block or 0 if the end of the skb data or the upper 3477677e90edSThomas Graf * offset has been reached. 3478677e90edSThomas Graf * 3479677e90edSThomas Graf * The caller is not required to consume all of the data 3480bc32383cSMathias Krause * returned, i.e. @consumed is typically set to the number 3481677e90edSThomas Graf * of bytes already consumed and the next call to 3482677e90edSThomas Graf * skb_seq_read() will return the remaining part of the block. 3483677e90edSThomas Graf * 348425985edcSLucas De Marchi * Note 1: The size of each block of data returned can be arbitrary, 3485e793c0f7SMasanari Iida * this limitation is the cost for zerocopy sequential 3486677e90edSThomas Graf * reads of potentially non linear data. 3487677e90edSThomas Graf * 3488bc2cda1eSRandy Dunlap * Note 2: Fragment lists within fragments are not implemented 3489677e90edSThomas Graf * at the moment, state->root_skb could be replaced with 3490677e90edSThomas Graf * a stack for this purpose. 3491677e90edSThomas Graf */ 3492677e90edSThomas Graf unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 3493677e90edSThomas Graf struct skb_seq_state *st) 3494677e90edSThomas Graf { 3495677e90edSThomas Graf unsigned int block_limit, abs_offset = consumed + st->lower_offset; 3496677e90edSThomas Graf skb_frag_t *frag; 3497677e90edSThomas Graf 3498aeb193eaSWedson Almeida Filho if (unlikely(abs_offset >= st->upper_offset)) { 3499aeb193eaSWedson Almeida Filho if (st->frag_data) { 3500aeb193eaSWedson Almeida Filho kunmap_atomic(st->frag_data); 3501aeb193eaSWedson Almeida Filho st->frag_data = NULL; 3502aeb193eaSWedson Almeida Filho } 3503677e90edSThomas Graf return 0; 3504aeb193eaSWedson Almeida Filho } 3505677e90edSThomas Graf 3506677e90edSThomas Graf next_skb: 350795e3b24cSHerbert Xu block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 3508677e90edSThomas Graf 3509995b3379SThomas Chenault if (abs_offset < block_limit && !st->frag_data) { 351095e3b24cSHerbert Xu *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 3511677e90edSThomas Graf return block_limit - abs_offset; 3512677e90edSThomas Graf } 3513677e90edSThomas Graf 3514677e90edSThomas Graf if (st->frag_idx == 0 && !st->frag_data) 3515677e90edSThomas Graf st->stepped_offset += skb_headlen(st->cur_skb); 3516677e90edSThomas Graf 3517677e90edSThomas Graf while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 351897550f6fSWillem de Bruijn unsigned int pg_idx, pg_off, pg_sz; 3519677e90edSThomas Graf 352097550f6fSWillem de Bruijn frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 352197550f6fSWillem de Bruijn 352297550f6fSWillem de Bruijn pg_idx = 0; 352397550f6fSWillem de Bruijn pg_off = skb_frag_off(frag); 352497550f6fSWillem de Bruijn pg_sz = skb_frag_size(frag); 352597550f6fSWillem de Bruijn 352697550f6fSWillem de Bruijn if 
(skb_frag_must_loop(skb_frag_page(frag))) { 352797550f6fSWillem de Bruijn pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; 352897550f6fSWillem de Bruijn pg_off = offset_in_page(pg_off + st->frag_off); 352997550f6fSWillem de Bruijn pg_sz = min_t(unsigned int, pg_sz - st->frag_off, 353097550f6fSWillem de Bruijn PAGE_SIZE - pg_off); 353197550f6fSWillem de Bruijn } 353297550f6fSWillem de Bruijn 353397550f6fSWillem de Bruijn block_limit = pg_sz + st->stepped_offset; 3534677e90edSThomas Graf if (abs_offset < block_limit) { 3535677e90edSThomas Graf if (!st->frag_data) 353697550f6fSWillem de Bruijn st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); 3537677e90edSThomas Graf 353897550f6fSWillem de Bruijn *data = (u8 *)st->frag_data + pg_off + 3539677e90edSThomas Graf (abs_offset - st->stepped_offset); 3540677e90edSThomas Graf 3541677e90edSThomas Graf return block_limit - abs_offset; 3542677e90edSThomas Graf } 3543677e90edSThomas Graf 3544677e90edSThomas Graf if (st->frag_data) { 354551c56b00SEric Dumazet kunmap_atomic(st->frag_data); 3546677e90edSThomas Graf st->frag_data = NULL; 3547677e90edSThomas Graf } 3548677e90edSThomas Graf 354997550f6fSWillem de Bruijn st->stepped_offset += pg_sz; 355097550f6fSWillem de Bruijn st->frag_off += pg_sz; 355197550f6fSWillem de Bruijn if (st->frag_off == skb_frag_size(frag)) { 355297550f6fSWillem de Bruijn st->frag_off = 0; 3553677e90edSThomas Graf st->frag_idx++; 355497550f6fSWillem de Bruijn } 3555677e90edSThomas Graf } 3556677e90edSThomas Graf 35575b5a60daSOlaf Kirch if (st->frag_data) { 355851c56b00SEric Dumazet kunmap_atomic(st->frag_data); 35595b5a60daSOlaf Kirch st->frag_data = NULL; 35605b5a60daSOlaf Kirch } 35615b5a60daSOlaf Kirch 356221dc3301SDavid S. Miller if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 3563677e90edSThomas Graf st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 356495e3b24cSHerbert Xu st->frag_idx = 0; 3565677e90edSThomas Graf goto next_skb; 356671b3346dSShyam Iyer } else if (st->cur_skb->next) { 356771b3346dSShyam Iyer st->cur_skb = st->cur_skb->next; 356871b3346dSShyam Iyer st->frag_idx = 0; 3569677e90edSThomas Graf goto next_skb; 3570677e90edSThomas Graf } 3571677e90edSThomas Graf 3572677e90edSThomas Graf return 0; 3573677e90edSThomas Graf } 3574b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_seq_read); 3575677e90edSThomas Graf 3576677e90edSThomas Graf /** 3577677e90edSThomas Graf * skb_abort_seq_read - Abort a sequential read of skb data 3578677e90edSThomas Graf * @st: state variable 3579677e90edSThomas Graf * 3580677e90edSThomas Graf * Must be called if skb_seq_read() was not called until it 3581677e90edSThomas Graf * returned 0. 3582677e90edSThomas Graf */ 3583677e90edSThomas Graf void skb_abort_seq_read(struct skb_seq_state *st) 3584677e90edSThomas Graf { 3585677e90edSThomas Graf if (st->frag_data) 358651c56b00SEric Dumazet kunmap_atomic(st->frag_data); 3587677e90edSThomas Graf } 3588b4ac530fSDavid S. 
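/*
 * Usage sketch, not part of the original file: the intended calling
 * pattern for the sequential reader above. The @consume callback is
 * hypothetical.
 */
static void example_walk_skb(struct sk_buff *skb, unsigned int from,
			     unsigned int to,
			     void (*consume)(const u8 *data, unsigned int len))
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		consume(data, len);
		consumed += len;
	}
	/* No skb_abort_seq_read() is needed here because the loop ran
	 * until skb_seq_read() returned 0, which releases any held
	 * fragment mapping. Call skb_abort_seq_read(&st) instead when
	 * bailing out early.
	 */
}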
Miller EXPORT_SYMBOL(skb_abort_seq_read); 3589677e90edSThomas Graf 35903fc7e8a6SThomas Graf #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 35913fc7e8a6SThomas Graf 35923fc7e8a6SThomas Graf static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 35933fc7e8a6SThomas Graf struct ts_config *conf, 35943fc7e8a6SThomas Graf struct ts_state *state) 35953fc7e8a6SThomas Graf { 35963fc7e8a6SThomas Graf return skb_seq_read(offset, text, TS_SKB_CB(state)); 35973fc7e8a6SThomas Graf } 35983fc7e8a6SThomas Graf 35993fc7e8a6SThomas Graf static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 36003fc7e8a6SThomas Graf { 36013fc7e8a6SThomas Graf skb_abort_seq_read(TS_SKB_CB(state)); 36023fc7e8a6SThomas Graf } 36033fc7e8a6SThomas Graf 36043fc7e8a6SThomas Graf /** 36053fc7e8a6SThomas Graf * skb_find_text - Find a text pattern in skb data 36063fc7e8a6SThomas Graf * @skb: the buffer to look in 36073fc7e8a6SThomas Graf * @from: search offset 36083fc7e8a6SThomas Graf * @to: search limit 36093fc7e8a6SThomas Graf * @config: textsearch configuration 36103fc7e8a6SThomas Graf * 36113fc7e8a6SThomas Graf * Finds a pattern in the skb data according to the specified 36123fc7e8a6SThomas Graf * textsearch configuration. Use textsearch_next() to retrieve 36133fc7e8a6SThomas Graf * subsequent occurrences of the pattern. Returns the offset 36143fc7e8a6SThomas Graf * to the first occurrence or UINT_MAX if no match was found. 36153fc7e8a6SThomas Graf */ 36163fc7e8a6SThomas Graf unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 3617059a2440SBojan Prtvar unsigned int to, struct ts_config *config) 36183fc7e8a6SThomas Graf { 3619059a2440SBojan Prtvar struct ts_state state; 3620f72b948dSPhil Oester unsigned int ret; 3621f72b948dSPhil Oester 36223fc7e8a6SThomas Graf config->get_next_block = skb_ts_get_next_block; 36233fc7e8a6SThomas Graf config->finish = skb_ts_finish; 36243fc7e8a6SThomas Graf 3625059a2440SBojan Prtvar skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 36263fc7e8a6SThomas Graf 3627059a2440SBojan Prtvar ret = textsearch_find(config, &state); 3628f72b948dSPhil Oester return (ret <= to - from ? ret : UINT_MAX); 36293fc7e8a6SThomas Graf } 3630b4ac530fSDavid S. 
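/*
 * Usage sketch, not part of the original file: locating a literal pattern
 * in a possibly non-linear skb. The "kmp" algorithm and the pattern are
 * arbitrary choices for illustration.
 */
static unsigned int example_find_pattern(struct sk_buff *skb)
{
	struct ts_config *ts;
	unsigned int pos;

	ts = textsearch_prepare("kmp", "HTTP", 4, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(ts))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, ts);
	textsearch_destroy(ts);

	return pos;	/* offset of the first match, or UINT_MAX */
}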
Miller EXPORT_SYMBOL(skb_find_text); 36313fc7e8a6SThomas Graf 3632be12a1feSHannes Frederic Sowa int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 3633be12a1feSHannes Frederic Sowa int offset, size_t size) 3634be12a1feSHannes Frederic Sowa { 3635be12a1feSHannes Frederic Sowa int i = skb_shinfo(skb)->nr_frags; 3636be12a1feSHannes Frederic Sowa 3637be12a1feSHannes Frederic Sowa if (skb_can_coalesce(skb, i, page, offset)) { 3638be12a1feSHannes Frederic Sowa skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); 3639be12a1feSHannes Frederic Sowa } else if (i < MAX_SKB_FRAGS) { 3640be12a1feSHannes Frederic Sowa get_page(page); 3641be12a1feSHannes Frederic Sowa skb_fill_page_desc(skb, i, page, offset, size); 3642be12a1feSHannes Frederic Sowa } else { 3643be12a1feSHannes Frederic Sowa return -EMSGSIZE; 3644be12a1feSHannes Frederic Sowa } 3645be12a1feSHannes Frederic Sowa 3646be12a1feSHannes Frederic Sowa return 0; 3647be12a1feSHannes Frederic Sowa } 3648be12a1feSHannes Frederic Sowa EXPORT_SYMBOL_GPL(skb_append_pagefrags); 3649be12a1feSHannes Frederic Sowa 3650cbb042f9SHerbert Xu /** 3651cbb042f9SHerbert Xu * skb_pull_rcsum - pull skb and update receive checksum 3652cbb042f9SHerbert Xu * @skb: buffer to update 3653cbb042f9SHerbert Xu * @len: length of data pulled 3654cbb042f9SHerbert Xu * 3655cbb042f9SHerbert Xu * This function performs an skb_pull on the packet and updates 3656fee54fa5SUrs Thuermann * the CHECKSUM_COMPLETE checksum. It should be used on 365784fa7933SPatrick McHardy * receive path processing instead of skb_pull unless you know 365884fa7933SPatrick McHardy * that the checksum difference is zero (e.g., a valid IP header) 365984fa7933SPatrick McHardy * or you are setting ip_summed to CHECKSUM_NONE. 3660cbb042f9SHerbert Xu */ 3661af72868bSJohannes Berg void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 3662cbb042f9SHerbert Xu { 366331b33dfbSPravin B Shelar unsigned char *data = skb->data; 366431b33dfbSPravin B Shelar 3665cbb042f9SHerbert Xu BUG_ON(len > skb->len); 366631b33dfbSPravin B Shelar __skb_pull(skb, len); 366731b33dfbSPravin B Shelar skb_postpull_rcsum(skb, data, len); 366831b33dfbSPravin B Shelar return skb->data; 3669cbb042f9SHerbert Xu } 3670f94691acSArnaldo Carvalho de Melo EXPORT_SYMBOL_GPL(skb_pull_rcsum); 3671f94691acSArnaldo Carvalho de Melo 367213acc94eSYonghong Song static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) 367313acc94eSYonghong Song { 367413acc94eSYonghong Song skb_frag_t head_frag; 367513acc94eSYonghong Song struct page *page; 367613acc94eSYonghong Song 367713acc94eSYonghong Song page = virt_to_head_page(frag_skb->head); 3678d8e18a51SMatthew Wilcox (Oracle) __skb_frag_set_page(&head_frag, page); 3679b54c9d5bSJonathan Lemon skb_frag_off_set(&head_frag, frag_skb->data - 3680b54c9d5bSJonathan Lemon (unsigned char *)page_address(page)); 3681d8e18a51SMatthew Wilcox (Oracle) skb_frag_size_set(&head_frag, skb_headlen(frag_skb)); 368213acc94eSYonghong Song return head_frag; 368313acc94eSYonghong Song } 368413acc94eSYonghong Song 36853a1296a3SSteffen Klassert struct sk_buff *skb_segment_list(struct sk_buff *skb, 36863a1296a3SSteffen Klassert netdev_features_t features, 36873a1296a3SSteffen Klassert unsigned int offset) 36883a1296a3SSteffen Klassert { 36893a1296a3SSteffen Klassert struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; 36903a1296a3SSteffen Klassert unsigned int tnl_hlen = skb_tnl_header_len(skb); 36913a1296a3SSteffen Klassert unsigned int delta_truesize = 0; 36923a1296a3SSteffen Klassert unsigned 
int delta_len = 0; 36933a1296a3SSteffen Klassert struct sk_buff *tail = NULL; 369453475c5dSDongseok Yi struct sk_buff *nskb, *tmp; 369553475c5dSDongseok Yi int err; 36963a1296a3SSteffen Klassert 36973a1296a3SSteffen Klassert skb_push(skb, -skb_network_offset(skb) + offset); 36983a1296a3SSteffen Klassert 36993a1296a3SSteffen Klassert skb_shinfo(skb)->frag_list = NULL; 37003a1296a3SSteffen Klassert 37013a1296a3SSteffen Klassert do { 37023a1296a3SSteffen Klassert nskb = list_skb; 37033a1296a3SSteffen Klassert list_skb = list_skb->next; 37043a1296a3SSteffen Klassert 370553475c5dSDongseok Yi err = 0; 370653475c5dSDongseok Yi if (skb_shared(nskb)) { 370753475c5dSDongseok Yi tmp = skb_clone(nskb, GFP_ATOMIC); 370853475c5dSDongseok Yi if (tmp) { 370953475c5dSDongseok Yi consume_skb(nskb); 371053475c5dSDongseok Yi nskb = tmp; 371153475c5dSDongseok Yi err = skb_unclone(nskb, GFP_ATOMIC); 371253475c5dSDongseok Yi } else { 371353475c5dSDongseok Yi err = -ENOMEM; 371453475c5dSDongseok Yi } 371553475c5dSDongseok Yi } 371653475c5dSDongseok Yi 37173a1296a3SSteffen Klassert if (!tail) 37183a1296a3SSteffen Klassert skb->next = nskb; 37193a1296a3SSteffen Klassert else 37203a1296a3SSteffen Klassert tail->next = nskb; 37213a1296a3SSteffen Klassert 372253475c5dSDongseok Yi if (unlikely(err)) { 372353475c5dSDongseok Yi nskb->next = list_skb; 372453475c5dSDongseok Yi goto err_linearize; 372553475c5dSDongseok Yi } 372653475c5dSDongseok Yi 37273a1296a3SSteffen Klassert tail = nskb; 37283a1296a3SSteffen Klassert 37293a1296a3SSteffen Klassert delta_len += nskb->len; 37303a1296a3SSteffen Klassert delta_truesize += nskb->truesize; 37313a1296a3SSteffen Klassert 37323a1296a3SSteffen Klassert skb_push(nskb, -skb_network_offset(nskb) + offset); 37333a1296a3SSteffen Klassert 3734cf673ed0SFlorian Westphal skb_release_head_state(nskb); 37353a1296a3SSteffen Klassert __copy_skb_header(nskb, skb); 37363a1296a3SSteffen Klassert 37373a1296a3SSteffen Klassert skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); 37383a1296a3SSteffen Klassert skb_copy_from_linear_data_offset(skb, -tnl_hlen, 37393a1296a3SSteffen Klassert nskb->data - tnl_hlen, 37403a1296a3SSteffen Klassert offset + tnl_hlen); 37413a1296a3SSteffen Klassert 37423a1296a3SSteffen Klassert if (skb_needs_linearize(nskb, features) && 37433a1296a3SSteffen Klassert __skb_linearize(nskb)) 37443a1296a3SSteffen Klassert goto err_linearize; 37453a1296a3SSteffen Klassert 37463a1296a3SSteffen Klassert } while (list_skb); 37473a1296a3SSteffen Klassert 37483a1296a3SSteffen Klassert skb->truesize = skb->truesize - delta_truesize; 37493a1296a3SSteffen Klassert skb->data_len = skb->data_len - delta_len; 37503a1296a3SSteffen Klassert skb->len = skb->len - delta_len; 37513a1296a3SSteffen Klassert 37523a1296a3SSteffen Klassert skb_gso_reset(skb); 37533a1296a3SSteffen Klassert 37543a1296a3SSteffen Klassert skb->prev = tail; 37553a1296a3SSteffen Klassert 37563a1296a3SSteffen Klassert if (skb_needs_linearize(skb, features) && 37573a1296a3SSteffen Klassert __skb_linearize(skb)) 37583a1296a3SSteffen Klassert goto err_linearize; 37593a1296a3SSteffen Klassert 37603a1296a3SSteffen Klassert skb_get(skb); 37613a1296a3SSteffen Klassert 37623a1296a3SSteffen Klassert return skb; 37633a1296a3SSteffen Klassert 37643a1296a3SSteffen Klassert err_linearize: 37653a1296a3SSteffen Klassert kfree_skb_list(skb->next); 37663a1296a3SSteffen Klassert skb->next = NULL; 37673a1296a3SSteffen Klassert return ERR_PTR(-ENOMEM); 37683a1296a3SSteffen Klassert } 37693a1296a3SSteffen Klassert 
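/*
 * Usage sketch, not part of the original file: a protocol handler in the
 * spirit of the UDP GRO receive path undoing frag_list coalescing.
 * @offset is relative to the network header (see the skb_push() above);
 * the helper name is hypothetical.
 */
static struct sk_buff *example_undo_gro_list(struct sk_buff *skb,
					     netdev_features_t features,
					     unsigned int offset)
{
	struct sk_buff *segs = skb_segment_list(skb, features, offset);

	if (IS_ERR(segs))
		return NULL;	/* the frag_list skbs were freed on error */

	/* On success the original skb heads the returned list and an
	 * extra reference was taken on it via skb_get().
	 */
	return segs;
}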
EXPORT_SYMBOL_GPL(skb_segment_list); 37703a1296a3SSteffen Klassert 37713a1296a3SSteffen Klassert int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb) 37723a1296a3SSteffen Klassert { 37733a1296a3SSteffen Klassert if (unlikely(p->len + skb->len >= 65536)) 37743a1296a3SSteffen Klassert return -E2BIG; 37753a1296a3SSteffen Klassert 37763a1296a3SSteffen Klassert if (NAPI_GRO_CB(p)->last == p) 37773a1296a3SSteffen Klassert skb_shinfo(p)->frag_list = skb; 37783a1296a3SSteffen Klassert else 37793a1296a3SSteffen Klassert NAPI_GRO_CB(p)->last->next = skb; 37803a1296a3SSteffen Klassert 37813a1296a3SSteffen Klassert skb_pull(skb, skb_gro_offset(skb)); 37823a1296a3SSteffen Klassert 37833a1296a3SSteffen Klassert NAPI_GRO_CB(p)->last = skb; 37843a1296a3SSteffen Klassert NAPI_GRO_CB(p)->count++; 37853a1296a3SSteffen Klassert p->data_len += skb->len; 37863a1296a3SSteffen Klassert p->truesize += skb->truesize; 37873a1296a3SSteffen Klassert p->len += skb->len; 37883a1296a3SSteffen Klassert 37893a1296a3SSteffen Klassert NAPI_GRO_CB(skb)->same_flow = 1; 37903a1296a3SSteffen Klassert 37913a1296a3SSteffen Klassert return 0; 37923a1296a3SSteffen Klassert } 37933a1296a3SSteffen Klassert 3794f4c50d99SHerbert Xu /** 3795f4c50d99SHerbert Xu * skb_segment - Perform protocol segmentation on skb. 3796df5771ffSMichael S. Tsirkin * @head_skb: buffer to segment 3797576a30ebSHerbert Xu * @features: features for the output path (see dev->features) 3798f4c50d99SHerbert Xu * 3799f4c50d99SHerbert Xu * This function performs segmentation on the given skb. It returns 38004c821d75SBen Hutchings * a pointer to the first in a list of new skbs for the segments. 38014c821d75SBen Hutchings * In case of error it returns ERR_PTR(err). 3802f4c50d99SHerbert Xu */ 3803df5771ffSMichael S. Tsirkin struct sk_buff *skb_segment(struct sk_buff *head_skb, 3804df5771ffSMichael S. Tsirkin netdev_features_t features) 3805f4c50d99SHerbert Xu { 3806f4c50d99SHerbert Xu struct sk_buff *segs = NULL; 3807f4c50d99SHerbert Xu struct sk_buff *tail = NULL; 38081a4cedafSMichael S. Tsirkin struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 3809df5771ffSMichael S. Tsirkin skb_frag_t *frag = skb_shinfo(head_skb)->frags; 3810df5771ffSMichael S. Tsirkin unsigned int mss = skb_shinfo(head_skb)->gso_size; 3811df5771ffSMichael S. Tsirkin unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 38121fd819ecSMichael S. Tsirkin struct sk_buff *frag_skb = head_skb; 3813f4c50d99SHerbert Xu unsigned int offset = doffset; 3814df5771ffSMichael S. Tsirkin unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 3815802ab55aSAlexander Duyck unsigned int partial_segs = 0; 3816f4c50d99SHerbert Xu unsigned int headroom; 3817802ab55aSAlexander Duyck unsigned int len = head_skb->len; 3818ec5f0615SPravin B Shelar __be16 proto; 381936c98382SAlexander Duyck bool csum, sg; 3820df5771ffSMichael S. Tsirkin int nfrags = skb_shinfo(head_skb)->nr_frags; 3821f4c50d99SHerbert Xu int err = -ENOMEM; 3822f4c50d99SHerbert Xu int i = 0; 3823f4c50d99SHerbert Xu int pos; 3824f4c50d99SHerbert Xu 38253dcbdb13SShmulik Ladkani if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) && 38263dcbdb13SShmulik Ladkani (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) { 38273dcbdb13SShmulik Ladkani /* gso_size is untrusted, and we have a frag_list with a linear 38283dcbdb13SShmulik Ladkani * non head_frag head. 
38293dcbdb13SShmulik Ladkani * 38303dcbdb13SShmulik Ladkani * (we assume checking the first list_skb member suffices; 38313dcbdb13SShmulik Ladkani * i.e. if any of the list_skb members has a non-head_frag 38323dcbdb13SShmulik Ladkani * head, then the first one does too). 38333dcbdb13SShmulik Ladkani * 38343dcbdb13SShmulik Ladkani * If head_skb's headlen does not fit the requested gso_size, it 38353dcbdb13SShmulik Ladkani * means that the frag_list members do NOT terminate on exact 38363dcbdb13SShmulik Ladkani * gso_size boundaries. Hence we cannot perform skb_frag_t page 38373dcbdb13SShmulik Ladkani * sharing. Therefore we must fall back to copying the frag_list 38383dcbdb13SShmulik Ladkani * skbs; we do so by disabling SG. 38393dcbdb13SShmulik Ladkani */ 38403dcbdb13SShmulik Ladkani if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) 38413dcbdb13SShmulik Ladkani features &= ~NETIF_F_SG; 38423dcbdb13SShmulik Ladkani } 38433dcbdb13SShmulik Ladkani 38445882a07cSWei-Chun Chao __skb_push(head_skb, doffset); 38452f631133SMiaohe Lin proto = skb_network_protocol(head_skb, NULL); 3846ec5f0615SPravin B Shelar if (unlikely(!proto)) 3847ec5f0615SPravin B Shelar return ERR_PTR(-EINVAL); 3848ec5f0615SPravin B Shelar 384936c98382SAlexander Duyck sg = !!(features & NETIF_F_SG); 3850f245d079SAlexander Duyck csum = !!can_checksum_protocol(features, proto); 38517e2b10c1STom Herbert 385207b26c94SSteffen Klassert if (sg && csum && (mss != GSO_BY_FRAGS)) { 385307b26c94SSteffen Klassert if (!(features & NETIF_F_GSO_PARTIAL)) { 385407b26c94SSteffen Klassert struct sk_buff *iter; 385543170c4eSIlan Tayari unsigned int frag_len; 385607b26c94SSteffen Klassert 385707b26c94SSteffen Klassert if (!list_skb || 385807b26c94SSteffen Klassert !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 385907b26c94SSteffen Klassert goto normal; 386007b26c94SSteffen Klassert 386143170c4eSIlan Tayari /* If we get here then all the required 386243170c4eSIlan Tayari * GSO features except frag_list are supported. 386343170c4eSIlan Tayari * Try to split the SKB into multiple GSO SKBs 386443170c4eSIlan Tayari * with no frag_list. 386543170c4eSIlan Tayari * Currently we can do that only when the buffers don't 386643170c4eSIlan Tayari * have a linear part and all the buffers except 386743170c4eSIlan Tayari * the last are of the same length. 386807b26c94SSteffen Klassert */ 386943170c4eSIlan Tayari frag_len = list_skb->len; 387007b26c94SSteffen Klassert skb_walk_frags(head_skb, iter) { 387143170c4eSIlan Tayari if (frag_len != iter->len && iter->next) 387243170c4eSIlan Tayari goto normal; 3873eaffadbbSIlan Tayari if (skb_headlen(iter) && !iter->head_frag) 387407b26c94SSteffen Klassert goto normal; 387507b26c94SSteffen Klassert 387607b26c94SSteffen Klassert len -= iter->len; 387707b26c94SSteffen Klassert } 387843170c4eSIlan Tayari 387943170c4eSIlan Tayari if (len != frag_len) 388043170c4eSIlan Tayari goto normal; 388107b26c94SSteffen Klassert } 388207b26c94SSteffen Klassert 3883802ab55aSAlexander Duyck /* GSO partial only requires that we trim off any excess that 3884802ab55aSAlexander Duyck * doesn't fit into an MSS-sized block, so take care of that 3885802ab55aSAlexander Duyck * now.
3886802ab55aSAlexander Duyck */ 3887802ab55aSAlexander Duyck partial_segs = len / mss; 3888d7fb5a80SAlexander Duyck if (partial_segs > 1) 3889802ab55aSAlexander Duyck mss *= partial_segs; 3890d7fb5a80SAlexander Duyck else 3891d7fb5a80SAlexander Duyck partial_segs = 0; 3892802ab55aSAlexander Duyck } 3893802ab55aSAlexander Duyck 389407b26c94SSteffen Klassert normal: 3895df5771ffSMichael S. Tsirkin headroom = skb_headroom(head_skb); 3896df5771ffSMichael S. Tsirkin pos = skb_headlen(head_skb); 3897f4c50d99SHerbert Xu 3898f4c50d99SHerbert Xu do { 3899f4c50d99SHerbert Xu struct sk_buff *nskb; 39008cb19905SMichael S. Tsirkin skb_frag_t *nskb_frag; 3901c8884eddSHerbert Xu int hsize; 3902f4c50d99SHerbert Xu int size; 3903f4c50d99SHerbert Xu 39043953c46cSMarcelo Ricardo Leitner if (unlikely(mss == GSO_BY_FRAGS)) { 39053953c46cSMarcelo Ricardo Leitner len = list_skb->len; 39063953c46cSMarcelo Ricardo Leitner } else { 3907df5771ffSMichael S. Tsirkin len = head_skb->len - offset; 3908f4c50d99SHerbert Xu if (len > mss) 3909f4c50d99SHerbert Xu len = mss; 39103953c46cSMarcelo Ricardo Leitner } 3911f4c50d99SHerbert Xu 3912df5771ffSMichael S. Tsirkin hsize = skb_headlen(head_skb) - offset; 3913f4c50d99SHerbert Xu 3914dbd50f23SXin Long if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) && 39151a4cedafSMichael S. Tsirkin (skb_headlen(list_skb) == len || sg)) { 39161a4cedafSMichael S. Tsirkin BUG_ON(skb_headlen(list_skb) > len); 391789319d38SHerbert Xu 39189d8506ccSHerbert Xu i = 0; 39191a4cedafSMichael S. Tsirkin nfrags = skb_shinfo(list_skb)->nr_frags; 39201a4cedafSMichael S. Tsirkin frag = skb_shinfo(list_skb)->frags; 39211fd819ecSMichael S. Tsirkin frag_skb = list_skb; 39221a4cedafSMichael S. Tsirkin pos += skb_headlen(list_skb); 39239d8506ccSHerbert Xu 39249d8506ccSHerbert Xu while (pos < offset + len) { 39259d8506ccSHerbert Xu BUG_ON(i >= nfrags); 39269d8506ccSHerbert Xu 39274e1beba1SMichael S. Tsirkin size = skb_frag_size(frag); 39289d8506ccSHerbert Xu if (pos + size > offset + len) 39299d8506ccSHerbert Xu break; 39309d8506ccSHerbert Xu 39319d8506ccSHerbert Xu i++; 39329d8506ccSHerbert Xu pos += size; 39334e1beba1SMichael S. Tsirkin frag++; 39349d8506ccSHerbert Xu } 39359d8506ccSHerbert Xu 39361a4cedafSMichael S. Tsirkin nskb = skb_clone(list_skb, GFP_ATOMIC); 39371a4cedafSMichael S. Tsirkin list_skb = list_skb->next; 393889319d38SHerbert Xu 3939f4c50d99SHerbert Xu if (unlikely(!nskb)) 3940f4c50d99SHerbert Xu goto err; 3941f4c50d99SHerbert Xu 39429d8506ccSHerbert Xu if (unlikely(pskb_trim(nskb, len))) { 39439d8506ccSHerbert Xu kfree_skb(nskb); 39449d8506ccSHerbert Xu goto err; 39459d8506ccSHerbert Xu } 39469d8506ccSHerbert Xu 3947ec47ea82SAlexander Duyck hsize = skb_end_offset(nskb); 394889319d38SHerbert Xu if (skb_cow_head(nskb, doffset + headroom)) { 394989319d38SHerbert Xu kfree_skb(nskb); 395089319d38SHerbert Xu goto err; 395189319d38SHerbert Xu } 395289319d38SHerbert Xu 3953ec47ea82SAlexander Duyck nskb->truesize += skb_end_offset(nskb) - hsize; 395489319d38SHerbert Xu skb_release_head_state(nskb); 395589319d38SHerbert Xu __skb_push(nskb, doffset); 395689319d38SHerbert Xu } else { 395700b229f7SPaolo Abeni if (hsize < 0) 395800b229f7SPaolo Abeni hsize = 0; 3959dbd50f23SXin Long if (hsize > len || !sg) 3960dbd50f23SXin Long hsize = len; 3961dbd50f23SXin Long 3962c93bdd0eSMel Gorman nskb = __alloc_skb(hsize + doffset + headroom, 3963df5771ffSMichael S. 
Tsirkin GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 3964c93bdd0eSMel Gorman NUMA_NO_NODE); 396589319d38SHerbert Xu 396689319d38SHerbert Xu if (unlikely(!nskb)) 396789319d38SHerbert Xu goto err; 396889319d38SHerbert Xu 396989319d38SHerbert Xu skb_reserve(nskb, headroom); 397089319d38SHerbert Xu __skb_put(nskb, doffset); 397189319d38SHerbert Xu } 397289319d38SHerbert Xu 3973f4c50d99SHerbert Xu if (segs) 3974f4c50d99SHerbert Xu tail->next = nskb; 3975f4c50d99SHerbert Xu else 3976f4c50d99SHerbert Xu segs = nskb; 3977f4c50d99SHerbert Xu tail = nskb; 3978f4c50d99SHerbert Xu 3979df5771ffSMichael S. Tsirkin __copy_skb_header(nskb, head_skb); 3980f4c50d99SHerbert Xu 3981030737bcSEric Dumazet skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 3982fcdfe3a7SVlad Yasevich skb_reset_mac_len(nskb); 398368c33163SPravin B Shelar 3984df5771ffSMichael S. Tsirkin skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 398568c33163SPravin B Shelar nskb->data - tnl_hlen, 398668c33163SPravin B Shelar doffset + tnl_hlen); 398789319d38SHerbert Xu 39889d8506ccSHerbert Xu if (nskb->len == len + doffset) 39891cdbcb79SSimon Horman goto perform_csum_check; 399089319d38SHerbert Xu 39917fbeffedSAlexander Duyck if (!sg) { 39921454c9faSYadu Kishore if (!csum) { 39937fbeffedSAlexander Duyck if (!nskb->remcsum_offload) 39946f85a124SHerbert Xu nskb->ip_summed = CHECKSUM_NONE; 399576443456SAlexander Duyck SKB_GSO_CB(nskb)->csum = 399676443456SAlexander Duyck skb_copy_and_csum_bits(head_skb, offset, 39971454c9faSYadu Kishore skb_put(nskb, 39981454c9faSYadu Kishore len), 39998d5930dfSAl Viro len); 40007e2b10c1STom Herbert SKB_GSO_CB(nskb)->csum_start = 4001de843723STom Herbert skb_headroom(nskb) + doffset; 40021454c9faSYadu Kishore } else { 40031454c9faSYadu Kishore skb_copy_bits(head_skb, offset, 40041454c9faSYadu Kishore skb_put(nskb, len), 40051454c9faSYadu Kishore len); 40061454c9faSYadu Kishore } 4007f4c50d99SHerbert Xu continue; 4008f4c50d99SHerbert Xu } 4009f4c50d99SHerbert Xu 40108cb19905SMichael S. Tsirkin nskb_frag = skb_shinfo(nskb)->frags; 4011f4c50d99SHerbert Xu 4012df5771ffSMichael S. Tsirkin skb_copy_from_linear_data_offset(head_skb, offset, 4013d626f62bSArnaldo Carvalho de Melo skb_put(nskb, hsize), hsize); 4014f4c50d99SHerbert Xu 401506b4feb3SJonathan Lemon skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & 401606b4feb3SJonathan Lemon SKBFL_SHARED_FRAG; 4017cef401deSEric Dumazet 4018bf5c25d6SWillem de Bruijn if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || 4019bf5c25d6SWillem de Bruijn skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) 4020bf5c25d6SWillem de Bruijn goto err; 4021bf5c25d6SWillem de Bruijn 40229d8506ccSHerbert Xu while (pos < offset + len) { 40239d8506ccSHerbert Xu if (i >= nfrags) { 40249d8506ccSHerbert Xu i = 0; 40251a4cedafSMichael S. Tsirkin nfrags = skb_shinfo(list_skb)->nr_frags; 40261a4cedafSMichael S. Tsirkin frag = skb_shinfo(list_skb)->frags; 40271fd819ecSMichael S. Tsirkin frag_skb = list_skb; 402813acc94eSYonghong Song if (!skb_headlen(list_skb)) { 40299d8506ccSHerbert Xu BUG_ON(!nfrags); 403013acc94eSYonghong Song } else { 403113acc94eSYonghong Song BUG_ON(!list_skb->head_frag); 40329d8506ccSHerbert Xu 403313acc94eSYonghong Song /* to make room for head_frag. 
*/ 403413acc94eSYonghong Song i--; 403513acc94eSYonghong Song frag--; 403613acc94eSYonghong Song } 4037bf5c25d6SWillem de Bruijn if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || 4038bf5c25d6SWillem de Bruijn skb_zerocopy_clone(nskb, frag_skb, 4039bf5c25d6SWillem de Bruijn GFP_ATOMIC)) 4040bf5c25d6SWillem de Bruijn goto err; 4041bf5c25d6SWillem de Bruijn 40421a4cedafSMichael S. Tsirkin list_skb = list_skb->next; 40439d8506ccSHerbert Xu } 40449d8506ccSHerbert Xu 40459d8506ccSHerbert Xu if (unlikely(skb_shinfo(nskb)->nr_frags >= 40469d8506ccSHerbert Xu MAX_SKB_FRAGS)) { 40479d8506ccSHerbert Xu net_warn_ratelimited( 40489d8506ccSHerbert Xu "skb_segment: too many frags: %u %u\n", 40499d8506ccSHerbert Xu pos, mss); 4050ff907a11SEric Dumazet err = -EINVAL; 40519d8506ccSHerbert Xu goto err; 40529d8506ccSHerbert Xu } 40539d8506ccSHerbert Xu 405413acc94eSYonghong Song *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; 40558cb19905SMichael S. Tsirkin __skb_frag_ref(nskb_frag); 40568cb19905SMichael S. Tsirkin size = skb_frag_size(nskb_frag); 4057f4c50d99SHerbert Xu 4058f4c50d99SHerbert Xu if (pos < offset) { 4059b54c9d5bSJonathan Lemon skb_frag_off_add(nskb_frag, offset - pos); 40608cb19905SMichael S. Tsirkin skb_frag_size_sub(nskb_frag, offset - pos); 4061f4c50d99SHerbert Xu } 4062f4c50d99SHerbert Xu 406389319d38SHerbert Xu skb_shinfo(nskb)->nr_frags++; 4064f4c50d99SHerbert Xu 4065f4c50d99SHerbert Xu if (pos + size <= offset + len) { 4066f4c50d99SHerbert Xu i++; 40674e1beba1SMichael S. Tsirkin frag++; 4068f4c50d99SHerbert Xu pos += size; 4069f4c50d99SHerbert Xu } else { 40708cb19905SMichael S. Tsirkin skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); 407189319d38SHerbert Xu goto skip_fraglist; 4072f4c50d99SHerbert Xu } 4073f4c50d99SHerbert Xu 40748cb19905SMichael S. Tsirkin nskb_frag++; 4075f4c50d99SHerbert Xu } 4076f4c50d99SHerbert Xu 407789319d38SHerbert Xu skip_fraglist: 4078f4c50d99SHerbert Xu nskb->data_len = len - hsize; 4079f4c50d99SHerbert Xu nskb->len += nskb->data_len; 4080f4c50d99SHerbert Xu nskb->truesize += nskb->data_len; 4081ec5f0615SPravin B Shelar 40821cdbcb79SSimon Horman perform_csum_check: 40837fbeffedSAlexander Duyck if (!csum) { 4084ff907a11SEric Dumazet if (skb_has_shared_frag(nskb) && 4085ff907a11SEric Dumazet __skb_linearize(nskb)) 4086ddff00d4SAlexander Duyck goto err; 4087ff907a11SEric Dumazet 40887fbeffedSAlexander Duyck if (!nskb->remcsum_offload) 4089ec5f0615SPravin B Shelar nskb->ip_summed = CHECKSUM_NONE; 409076443456SAlexander Duyck SKB_GSO_CB(nskb)->csum = 409176443456SAlexander Duyck skb_checksum(nskb, doffset, 409276443456SAlexander Duyck nskb->len - doffset, 0); 40937e2b10c1STom Herbert SKB_GSO_CB(nskb)->csum_start = 40947e2b10c1STom Herbert skb_headroom(nskb) + doffset; 4095ec5f0615SPravin B Shelar } 4096df5771ffSMichael S. Tsirkin } while ((offset += len) < head_skb->len); 4097f4c50d99SHerbert Xu 4098bec3cfdcSEric Dumazet /* Some callers want to get the end of the list. 4099bec3cfdcSEric Dumazet * Put it in segs->prev to avoid walking the list. 
4100bec3cfdcSEric Dumazet * (see validate_xmit_skb_list() for example) 4101bec3cfdcSEric Dumazet */ 4102bec3cfdcSEric Dumazet segs->prev = tail; 4103432c856fSToshiaki Makita 4104802ab55aSAlexander Duyck if (partial_segs) { 410507b26c94SSteffen Klassert struct sk_buff *iter; 4106802ab55aSAlexander Duyck int type = skb_shinfo(head_skb)->gso_type; 410707b26c94SSteffen Klassert unsigned short gso_size = skb_shinfo(head_skb)->gso_size; 4108802ab55aSAlexander Duyck 4109802ab55aSAlexander Duyck /* Update type to add partial and then remove dodgy if set */ 411007b26c94SSteffen Klassert type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; 4111802ab55aSAlexander Duyck type &= ~SKB_GSO_DODGY; 4112802ab55aSAlexander Duyck 4113802ab55aSAlexander Duyck /* Update GSO info and prepare to start updating headers on 4114802ab55aSAlexander Duyck * our way back down the stack of protocols. 4115802ab55aSAlexander Duyck */ 411607b26c94SSteffen Klassert for (iter = segs; iter; iter = iter->next) { 411707b26c94SSteffen Klassert skb_shinfo(iter)->gso_size = gso_size; 411807b26c94SSteffen Klassert skb_shinfo(iter)->gso_segs = partial_segs; 411907b26c94SSteffen Klassert skb_shinfo(iter)->gso_type = type; 412007b26c94SSteffen Klassert SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; 412107b26c94SSteffen Klassert } 412207b26c94SSteffen Klassert 412307b26c94SSteffen Klassert if (tail->len - doffset <= gso_size) 412407b26c94SSteffen Klassert skb_shinfo(tail)->gso_size = 0; 412507b26c94SSteffen Klassert else if (tail != segs) 412607b26c94SSteffen Klassert skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); 4127802ab55aSAlexander Duyck } 4128802ab55aSAlexander Duyck 4129432c856fSToshiaki Makita /* The following permits correct backpressure for protocols 4130432c856fSToshiaki Makita * using skb_set_owner_w(). 4131432c856fSToshiaki Makita * The idea is to transfer ownership from head_skb to the last segment.
4132432c856fSToshiaki Makita */ 4133432c856fSToshiaki Makita if (head_skb->destructor == sock_wfree) { 4134432c856fSToshiaki Makita swap(tail->truesize, head_skb->truesize); 4135432c856fSToshiaki Makita swap(tail->destructor, head_skb->destructor); 4136432c856fSToshiaki Makita swap(tail->sk, head_skb->sk); 4137432c856fSToshiaki Makita } 4138f4c50d99SHerbert Xu return segs; 4139f4c50d99SHerbert Xu 4140f4c50d99SHerbert Xu err: 4141289dccbeSEric Dumazet kfree_skb_list(segs); 4142f4c50d99SHerbert Xu return ERR_PTR(err); 4143f4c50d99SHerbert Xu } 4144f4c50d99SHerbert Xu EXPORT_SYMBOL_GPL(skb_segment); 4145f4c50d99SHerbert Xu 4146d4546c25SDavid Miller int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) 414771d93b39SHerbert Xu { 41488a29111cSEric Dumazet struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); 414967147ba9SHerbert Xu unsigned int offset = skb_gro_offset(skb); 415067147ba9SHerbert Xu unsigned int headlen = skb_headlen(skb); 41518a29111cSEric Dumazet unsigned int len = skb_gro_len(skb); 4152715dc1f3SEric Dumazet unsigned int delta_truesize; 4153d4546c25SDavid Miller struct sk_buff *lp; 415471d93b39SHerbert Xu 41550ab03f35SSteffen Klassert if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) 415671d93b39SHerbert Xu return -E2BIG; 415771d93b39SHerbert Xu 415829e98242SEric Dumazet lp = NAPI_GRO_CB(p)->last; 41598a29111cSEric Dumazet pinfo = skb_shinfo(lp); 41608a29111cSEric Dumazet 41618a29111cSEric Dumazet if (headlen <= offset) { 416242da6994SHerbert Xu skb_frag_t *frag; 416366e92fcfSHerbert Xu skb_frag_t *frag2; 41649aaa156cSHerbert Xu int i = skbinfo->nr_frags; 41659aaa156cSHerbert Xu int nr_frags = pinfo->nr_frags + i; 416642da6994SHerbert Xu 416766e92fcfSHerbert Xu if (nr_frags > MAX_SKB_FRAGS) 41688a29111cSEric Dumazet goto merge; 416981705ad1SHerbert Xu 41708a29111cSEric Dumazet offset -= headlen; 41719aaa156cSHerbert Xu pinfo->nr_frags = nr_frags; 41729aaa156cSHerbert Xu skbinfo->nr_frags = 0; 4173f5572068SHerbert Xu 41749aaa156cSHerbert Xu frag = pinfo->frags + nr_frags; 41759aaa156cSHerbert Xu frag2 = skbinfo->frags + i; 417666e92fcfSHerbert Xu do { 417766e92fcfSHerbert Xu *--frag = *--frag2; 417866e92fcfSHerbert Xu } while (--i); 417966e92fcfSHerbert Xu 4180b54c9d5bSJonathan Lemon skb_frag_off_add(frag, offset); 41819e903e08SEric Dumazet skb_frag_size_sub(frag, offset); 418266e92fcfSHerbert Xu 4183715dc1f3SEric Dumazet /* all fragments truesize : remove (head size + sk_buff) */ 4184ec47ea82SAlexander Duyck delta_truesize = skb->truesize - 4185ec47ea82SAlexander Duyck SKB_TRUESIZE(skb_end_offset(skb)); 4186715dc1f3SEric Dumazet 4187f5572068SHerbert Xu skb->truesize -= skb->data_len; 4188f5572068SHerbert Xu skb->len -= skb->data_len; 4189f5572068SHerbert Xu skb->data_len = 0; 4190f5572068SHerbert Xu 4191715dc1f3SEric Dumazet NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 41925d38a079SHerbert Xu goto done; 4193d7e8883cSEric Dumazet } else if (skb->head_frag) { 4194d7e8883cSEric Dumazet int nr_frags = pinfo->nr_frags; 4195d7e8883cSEric Dumazet skb_frag_t *frag = pinfo->frags + nr_frags; 4196d7e8883cSEric Dumazet struct page *page = virt_to_head_page(skb->head); 4197d7e8883cSEric Dumazet unsigned int first_size = headlen - offset; 4198d7e8883cSEric Dumazet unsigned int first_offset; 4199d7e8883cSEric Dumazet 4200d7e8883cSEric Dumazet if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 42018a29111cSEric Dumazet goto merge; 4202d7e8883cSEric Dumazet 4203d7e8883cSEric Dumazet first_offset = skb->data - 4204d7e8883cSEric Dumazet (unsigned char *)page_address(page) + 
4205d7e8883cSEric Dumazet offset; 4206d7e8883cSEric Dumazet 4207d7e8883cSEric Dumazet pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 4208d7e8883cSEric Dumazet 4209d8e18a51SMatthew Wilcox (Oracle) __skb_frag_set_page(frag, page); 4210b54c9d5bSJonathan Lemon skb_frag_off_set(frag, first_offset); 4211d7e8883cSEric Dumazet skb_frag_size_set(frag, first_size); 4212d7e8883cSEric Dumazet 4213d7e8883cSEric Dumazet memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 4214d7e8883cSEric Dumazet /* We dont need to clear skbinfo->nr_frags here */ 4215d7e8883cSEric Dumazet 4216715dc1f3SEric Dumazet delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 4217d7e8883cSEric Dumazet NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 4218d7e8883cSEric Dumazet goto done; 42198a29111cSEric Dumazet } 422071d93b39SHerbert Xu 422171d93b39SHerbert Xu merge: 4222715dc1f3SEric Dumazet delta_truesize = skb->truesize; 422367147ba9SHerbert Xu if (offset > headlen) { 4224d1dc7abfSMichal Schmidt unsigned int eat = offset - headlen; 4225d1dc7abfSMichal Schmidt 4226b54c9d5bSJonathan Lemon skb_frag_off_add(&skbinfo->frags[0], eat); 42279e903e08SEric Dumazet skb_frag_size_sub(&skbinfo->frags[0], eat); 4228d1dc7abfSMichal Schmidt skb->data_len -= eat; 4229d1dc7abfSMichal Schmidt skb->len -= eat; 423067147ba9SHerbert Xu offset = headlen; 423156035022SHerbert Xu } 423256035022SHerbert Xu 423367147ba9SHerbert Xu __skb_pull(skb, offset); 423456035022SHerbert Xu 423529e98242SEric Dumazet if (NAPI_GRO_CB(p)->last == p) 42368a29111cSEric Dumazet skb_shinfo(p)->frag_list = skb; 42378a29111cSEric Dumazet else 4238c3c7c254SEric Dumazet NAPI_GRO_CB(p)->last->next = skb; 4239c3c7c254SEric Dumazet NAPI_GRO_CB(p)->last = skb; 4240f4a775d1SEric Dumazet __skb_header_release(skb); 42418a29111cSEric Dumazet lp = p; 424271d93b39SHerbert Xu 42435d38a079SHerbert Xu done: 42445d38a079SHerbert Xu NAPI_GRO_CB(p)->count++; 424537fe4732SHerbert Xu p->data_len += len; 4246715dc1f3SEric Dumazet p->truesize += delta_truesize; 424737fe4732SHerbert Xu p->len += len; 42488a29111cSEric Dumazet if (lp != p) { 42498a29111cSEric Dumazet lp->data_len += len; 42508a29111cSEric Dumazet lp->truesize += delta_truesize; 42518a29111cSEric Dumazet lp->len += len; 42528a29111cSEric Dumazet } 425371d93b39SHerbert Xu NAPI_GRO_CB(skb)->same_flow = 1; 425471d93b39SHerbert Xu return 0; 425571d93b39SHerbert Xu } 425671d93b39SHerbert Xu 4257df5042f4SFlorian Westphal #ifdef CONFIG_SKB_EXTENSIONS 4258df5042f4SFlorian Westphal #define SKB_EXT_ALIGN_VALUE 8 4259df5042f4SFlorian Westphal #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) 4260df5042f4SFlorian Westphal 4261df5042f4SFlorian Westphal static const u8 skb_ext_type_len[] = { 4262df5042f4SFlorian Westphal #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4263df5042f4SFlorian Westphal [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), 4264df5042f4SFlorian Westphal #endif 42654165079bSFlorian Westphal #ifdef CONFIG_XFRM 42664165079bSFlorian Westphal [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), 42674165079bSFlorian Westphal #endif 426895a7233cSPaul Blakey #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 426995a7233cSPaul Blakey [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), 427095a7233cSPaul Blakey #endif 42713ee17bc7SMat Martineau #if IS_ENABLED(CONFIG_MPTCP) 42723ee17bc7SMat Martineau [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), 42733ee17bc7SMat Martineau #endif 4274df5042f4SFlorian Westphal }; 4275df5042f4SFlorian 
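/*
 * Usage sketch, not part of the original file: attaching one of the
 * extension types listed above. skb_ext_add() (declared in skbuff.h)
 * returns the per-type area whose size, in SKB_EXT_ALIGN_VALUE chunks,
 * comes from skb_ext_type_len[]. Initializing the area is assumed to be
 * the caller's job here, hence the memset.
 */
#if IS_ENABLED(CONFIG_MPTCP)
static struct mptcp_ext *example_attach_mptcp_ext(struct sk_buff *skb)
{
	struct mptcp_ext *ext = skb_ext_add(skb, SKB_EXT_MPTCP);

	if (!ext)
		return NULL;	/* allocation failure */

	memset(ext, 0, sizeof(*ext));
	return ext;
}
#endif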
Westphal 4276df5042f4SFlorian Westphal static __always_inline unsigned int skb_ext_total_length(void) 4277df5042f4SFlorian Westphal { 4278df5042f4SFlorian Westphal return SKB_EXT_CHUNKSIZEOF(struct skb_ext) + 4279df5042f4SFlorian Westphal #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4280df5042f4SFlorian Westphal skb_ext_type_len[SKB_EXT_BRIDGE_NF] + 4281df5042f4SFlorian Westphal #endif 42824165079bSFlorian Westphal #ifdef CONFIG_XFRM 42834165079bSFlorian Westphal skb_ext_type_len[SKB_EXT_SEC_PATH] + 42844165079bSFlorian Westphal #endif 428595a7233cSPaul Blakey #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 428695a7233cSPaul Blakey skb_ext_type_len[TC_SKB_EXT] + 428795a7233cSPaul Blakey #endif 42883ee17bc7SMat Martineau #if IS_ENABLED(CONFIG_MPTCP) 42893ee17bc7SMat Martineau skb_ext_type_len[SKB_EXT_MPTCP] + 42903ee17bc7SMat Martineau #endif 4291df5042f4SFlorian Westphal 0; 4292df5042f4SFlorian Westphal } 4293df5042f4SFlorian Westphal 4294df5042f4SFlorian Westphal static void skb_extensions_init(void) 4295df5042f4SFlorian Westphal { 4296df5042f4SFlorian Westphal BUILD_BUG_ON(SKB_EXT_NUM >= 8); 4297df5042f4SFlorian Westphal BUILD_BUG_ON(skb_ext_total_length() > 255); 4298df5042f4SFlorian Westphal 4299df5042f4SFlorian Westphal skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", 4300df5042f4SFlorian Westphal SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), 4301df5042f4SFlorian Westphal 0, 4302df5042f4SFlorian Westphal SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4303df5042f4SFlorian Westphal NULL); 4304df5042f4SFlorian Westphal } 4305df5042f4SFlorian Westphal #else 4306df5042f4SFlorian Westphal static void skb_extensions_init(void) {} 4307df5042f4SFlorian Westphal #endif 4308df5042f4SFlorian Westphal 43091da177e4SLinus Torvalds void __init skb_init(void) 43101da177e4SLinus Torvalds { 431179a8a642SKees Cook skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache", 43121da177e4SLinus Torvalds sizeof(struct sk_buff), 43131da177e4SLinus Torvalds 0, 4314e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 431579a8a642SKees Cook offsetof(struct sk_buff, cb), 431679a8a642SKees Cook sizeof_field(struct sk_buff, cb), 431720c2df83SPaul Mundt NULL); 4318d179cd12SDavid S. Miller skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 4319d0bf4a9eSEric Dumazet sizeof(struct sk_buff_fclones), 4320d179cd12SDavid S. Miller 0, 4321e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 432220c2df83SPaul Mundt NULL); 4323df5042f4SFlorian Westphal skb_extensions_init(); 43241da177e4SLinus Torvalds } 43251da177e4SLinus Torvalds 432651c739d1SDavid S. Miller static int 432748a1df65SJason A. Donenfeld __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, 432848a1df65SJason A. Donenfeld unsigned int recursion_level) 4329716ea3a7SDavid Howells { 43301a028e50SDavid S. Miller int start = skb_headlen(skb); 43311a028e50SDavid S. Miller int i, copy = start - offset; 4332fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 4333716ea3a7SDavid Howells int elt = 0; 4334716ea3a7SDavid Howells 433548a1df65SJason A. Donenfeld if (unlikely(recursion_level >= 24)) 433648a1df65SJason A. Donenfeld return -EMSGSIZE; 433748a1df65SJason A. 
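	/* The walk below mirrors the skb layout: the linear head first,
	 * then the page frags, then recursion into the frag_list
	 * (bounded by the @recursion_level check above).
	 */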
Donenfeld 4338716ea3a7SDavid Howells if (copy > 0) { 4339716ea3a7SDavid Howells if (copy > len) 4340716ea3a7SDavid Howells copy = len; 4341642f1490SJens Axboe sg_set_buf(sg, skb->data + offset, copy); 4342716ea3a7SDavid Howells elt++; 4343716ea3a7SDavid Howells if ((len -= copy) == 0) 4344716ea3a7SDavid Howells return elt; 4345716ea3a7SDavid Howells offset += copy; 4346716ea3a7SDavid Howells } 4347716ea3a7SDavid Howells 4348716ea3a7SDavid Howells for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 43491a028e50SDavid S. Miller int end; 4350716ea3a7SDavid Howells 4351547b792cSIlpo Järvinen WARN_ON(start > offset + len); 43521a028e50SDavid S. Miller 43539e903e08SEric Dumazet end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 4354716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 4355716ea3a7SDavid Howells skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 435648a1df65SJason A. Donenfeld if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 435748a1df65SJason A. Donenfeld return -EMSGSIZE; 4358716ea3a7SDavid Howells 4359716ea3a7SDavid Howells if (copy > len) 4360716ea3a7SDavid Howells copy = len; 4361ea2ab693SIan Campbell sg_set_page(&sg[elt], skb_frag_page(frag), copy, 4362b54c9d5bSJonathan Lemon skb_frag_off(frag) + offset - start); 4363716ea3a7SDavid Howells elt++; 4364716ea3a7SDavid Howells if (!(len -= copy)) 4365716ea3a7SDavid Howells return elt; 4366716ea3a7SDavid Howells offset += copy; 4367716ea3a7SDavid Howells } 43681a028e50SDavid S. Miller start = end; 4369716ea3a7SDavid Howells } 4370716ea3a7SDavid Howells 4371fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 437248a1df65SJason A. Donenfeld int end, ret; 4373716ea3a7SDavid Howells 4374547b792cSIlpo Järvinen WARN_ON(start > offset + len); 43751a028e50SDavid S. Miller 4376fbb398a8SDavid S. Miller end = start + frag_iter->len; 4377716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 437848a1df65SJason A. Donenfeld if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 437948a1df65SJason A. Donenfeld return -EMSGSIZE; 438048a1df65SJason A. Donenfeld 4381716ea3a7SDavid Howells if (copy > len) 4382716ea3a7SDavid Howells copy = len; 438348a1df65SJason A. Donenfeld ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, 438448a1df65SJason A. Donenfeld copy, recursion_level + 1); 438548a1df65SJason A. Donenfeld if (unlikely(ret < 0)) 438648a1df65SJason A. Donenfeld return ret; 438748a1df65SJason A. Donenfeld elt += ret; 4388716ea3a7SDavid Howells if ((len -= copy) == 0) 4389716ea3a7SDavid Howells return elt; 4390716ea3a7SDavid Howells offset += copy; 4391716ea3a7SDavid Howells } 43921a028e50SDavid S. Miller start = end; 4393716ea3a7SDavid Howells } 4394716ea3a7SDavid Howells BUG_ON(len); 4395716ea3a7SDavid Howells return elt; 4396716ea3a7SDavid Howells } 4397716ea3a7SDavid Howells 439848a1df65SJason A. Donenfeld /** 439948a1df65SJason A. Donenfeld * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 440048a1df65SJason A. Donenfeld * @skb: Socket buffer containing the buffers to be mapped 440148a1df65SJason A. Donenfeld * @sg: The scatter-gather list to map into 440248a1df65SJason A. Donenfeld * @offset: The offset into the buffer's contents to start mapping 440348a1df65SJason A. Donenfeld * @len: Length of buffer space to be mapped 440448a1df65SJason A. Donenfeld * 440548a1df65SJason A. Donenfeld * Fill the specified scatter-gather list with mappings/pointers into a 440648a1df65SJason A. Donenfeld * region of the buffer space attached to a socket buffer. Returns either 440748a1df65SJason A. 
Donenfeld * the number of scatterlist items used, or -EMSGSIZE if the contents 440848a1df65SJason A. Donenfeld * could not fit. 440948a1df65SJason A. Donenfeld */ 441048a1df65SJason A. Donenfeld int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 441148a1df65SJason A. Donenfeld { 441248a1df65SJason A. Donenfeld int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); 441348a1df65SJason A. Donenfeld 441448a1df65SJason A. Donenfeld if (nsg <= 0) 441548a1df65SJason A. Donenfeld return nsg; 441648a1df65SJason A. Donenfeld 441748a1df65SJason A. Donenfeld sg_mark_end(&sg[nsg - 1]); 441848a1df65SJason A. Donenfeld 441948a1df65SJason A. Donenfeld return nsg; 442048a1df65SJason A. Donenfeld } 442148a1df65SJason A. Donenfeld EXPORT_SYMBOL_GPL(skb_to_sgvec); 442248a1df65SJason A. Donenfeld 442325a91d8dSFan Du /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the 442425a91d8dSFan Du * given sglist without marking the sg which contains the last skb data as the 442525a91d8dSFan Du * end. So the caller can manipulate the sg list at will when padding new data 442625a91d8dSFan Du * after the first call, without calling sg_unmark_end to expand the sg list. 442725a91d8dSFan Du * 442825a91d8dSFan Du * Scenario to use skb_to_sgvec_nomark: 442925a91d8dSFan Du * 1. sg_init_table 443025a91d8dSFan Du * 2. skb_to_sgvec_nomark(payload1) 443125a91d8dSFan Du * 3. skb_to_sgvec_nomark(payload2) 443225a91d8dSFan Du * 443325a91d8dSFan Du * This is equivalent to: 443425a91d8dSFan Du * 1. sg_init_table 443525a91d8dSFan Du * 2. skb_to_sgvec(payload1) 443625a91d8dSFan Du * 3. sg_unmark_end 443725a91d8dSFan Du * 4. skb_to_sgvec(payload2) 443825a91d8dSFan Du * 443925a91d8dSFan Du * When conditionally mapping multiple payloads, skb_to_sgvec_nomark 444025a91d8dSFan Du * is preferable. 444125a91d8dSFan Du */ 444225a91d8dSFan Du int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, 444325a91d8dSFan Du int offset, int len) 444425a91d8dSFan Du { 444548a1df65SJason A. Donenfeld return __skb_to_sgvec(skb, sg, offset, len, 0); 444625a91d8dSFan Du } 444725a91d8dSFan Du EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); 444825a91d8dSFan Du 444951c739d1SDavid S. Miller 445051c739d1SDavid S. Miller
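/*
 * Usage sketch, not part of the original file: mapping a whole skb for
 * crypto or DMA. The caller-provided @sg table and the helper name are
 * hypothetical; the table must be large enough or -EMSGSIZE is returned.
 */
static int example_map_skb(struct sk_buff *skb, struct scatterlist *sg,
			   unsigned int nelems)
{
	int nsg;

	sg_init_table(sg, nelems);

	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
	if (nsg < 0)
		return nsg;	/* -EMSGSIZE: @sg is too small */

	/* sg[nsg - 1] already carries the end marking */
	return nsg;
}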
4451716ea3a7SDavid Howells /** 4452716ea3a7SDavid Howells * skb_cow_data - Check that a socket buffer's data buffers are writable 4453716ea3a7SDavid Howells * @skb: The socket buffer to check. 4454716ea3a7SDavid Howells * @tailbits: Amount of trailing space to be added 4455716ea3a7SDavid Howells * @trailer: Returned pointer to the skb where the @tailbits space begins 4456716ea3a7SDavid Howells * 4457716ea3a7SDavid Howells * Make sure that the data buffers attached to a socket buffer are 4458716ea3a7SDavid Howells * writable. If they are not, private copies are made of the data buffers 4459716ea3a7SDavid Howells * and the socket buffer is set to use these instead. 4460716ea3a7SDavid Howells * 4461716ea3a7SDavid Howells * If @tailbits is given, make sure that there is space to write @tailbits 4462716ea3a7SDavid Howells * bytes of data beyond the current end of the socket buffer. @trailer will 4463716ea3a7SDavid Howells * be set to point to the skb in which this space begins. 4464716ea3a7SDavid Howells * 4465716ea3a7SDavid Howells * The number of scatterlist elements required to completely map the 4466716ea3a7SDavid Howells * COW'd and extended socket buffer will be returned. 4467716ea3a7SDavid Howells */ 4468716ea3a7SDavid Howells int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 4469716ea3a7SDavid Howells { 4470716ea3a7SDavid Howells int copyflag; 4471716ea3a7SDavid Howells int elt; 4472716ea3a7SDavid Howells struct sk_buff *skb1, **skb_p; 4473716ea3a7SDavid Howells 4474716ea3a7SDavid Howells /* If skb is cloned or its head is paged, reallocate 4475716ea3a7SDavid Howells * head pulling out all the pages (pages are considered not writable 4476716ea3a7SDavid Howells * at the moment even if they are anonymous). 4477716ea3a7SDavid Howells */ 4478716ea3a7SDavid Howells if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 4479c15fc199SMiaohe Lin !__pskb_pull_tail(skb, __skb_pagelen(skb))) 4480716ea3a7SDavid Howells return -ENOMEM; 4481716ea3a7SDavid Howells 4482716ea3a7SDavid Howells /* Easy case. Most packets will go this way. */ 448321dc3301SDavid S. Miller if (!skb_has_frag_list(skb)) { 4484716ea3a7SDavid Howells /* A little trouble: not enough space for the trailer. 4485716ea3a7SDavid Howells * This should not happen when the stack is tuned to generate 4486716ea3a7SDavid Howells * good frames. OK, on a miss we reallocate and reserve even more 4487716ea3a7SDavid Howells * space; 128 bytes is fair. */ 4488716ea3a7SDavid Howells 4489716ea3a7SDavid Howells if (skb_tailroom(skb) < tailbits && 4490716ea3a7SDavid Howells pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 4491716ea3a7SDavid Howells return -ENOMEM; 4492716ea3a7SDavid Howells 4493716ea3a7SDavid Howells /* Voila! */ 4494716ea3a7SDavid Howells *trailer = skb; 4495716ea3a7SDavid Howells return 1; 4496716ea3a7SDavid Howells } 4497716ea3a7SDavid Howells 4498716ea3a7SDavid Howells /* Misery. We are in trouble, going to mince fragments... */ 4499716ea3a7SDavid Howells 4500716ea3a7SDavid Howells elt = 1; 4501716ea3a7SDavid Howells skb_p = &skb_shinfo(skb)->frag_list; 4502716ea3a7SDavid Howells copyflag = 0; 4503716ea3a7SDavid Howells 4504716ea3a7SDavid Howells while ((skb1 = *skb_p) != NULL) { 4505716ea3a7SDavid Howells int ntail = 0; 4506716ea3a7SDavid Howells 4507716ea3a7SDavid Howells /* The fragment is partially pulled by someone, 4508716ea3a7SDavid Howells * this can happen on input. Copy it and everything 4509716ea3a7SDavid Howells * after it. */ 4510716ea3a7SDavid Howells 4511716ea3a7SDavid Howells if (skb_shared(skb1)) 4512716ea3a7SDavid Howells copyflag = 1; 4513716ea3a7SDavid Howells 4514716ea3a7SDavid Howells /* If the skb is the last, worry about trailer. */ 4515716ea3a7SDavid Howells 4516716ea3a7SDavid Howells if (skb1->next == NULL && tailbits) { 4517716ea3a7SDavid Howells if (skb_shinfo(skb1)->nr_frags || 451821dc3301SDavid S. Miller skb_has_frag_list(skb1) || 4519716ea3a7SDavid Howells skb_tailroom(skb1) < tailbits) 4520716ea3a7SDavid Howells ntail = tailbits + 128; 4521716ea3a7SDavid Howells } 4522716ea3a7SDavid Howells 4523716ea3a7SDavid Howells if (copyflag || 4524716ea3a7SDavid Howells skb_cloned(skb1) || 4525716ea3a7SDavid Howells ntail || 4526716ea3a7SDavid Howells skb_shinfo(skb1)->nr_frags || 452721dc3301SDavid S. Miller skb_has_frag_list(skb1)) { 4528716ea3a7SDavid Howells struct sk_buff *skb2; 4529716ea3a7SDavid Howells 4530716ea3a7SDavid Howells /* Fuck, we are miserable poor guys...
*/ 4531716ea3a7SDavid Howells if (ntail == 0) 4532716ea3a7SDavid Howells skb2 = skb_copy(skb1, GFP_ATOMIC); 4533716ea3a7SDavid Howells else 4534716ea3a7SDavid Howells skb2 = skb_copy_expand(skb1, 4535716ea3a7SDavid Howells skb_headroom(skb1), 4536716ea3a7SDavid Howells ntail, 4537716ea3a7SDavid Howells GFP_ATOMIC); 4538716ea3a7SDavid Howells if (unlikely(skb2 == NULL)) 4539716ea3a7SDavid Howells return -ENOMEM; 4540716ea3a7SDavid Howells 4541716ea3a7SDavid Howells if (skb1->sk) 4542716ea3a7SDavid Howells skb_set_owner_w(skb2, skb1->sk); 4543716ea3a7SDavid Howells 4544716ea3a7SDavid Howells /* Looking around. Are we still alive? 4545716ea3a7SDavid Howells * OK, link new skb, drop old one */ 4546716ea3a7SDavid Howells 4547716ea3a7SDavid Howells skb2->next = skb1->next; 4548716ea3a7SDavid Howells *skb_p = skb2; 4549716ea3a7SDavid Howells kfree_skb(skb1); 4550716ea3a7SDavid Howells skb1 = skb2; 4551716ea3a7SDavid Howells } 4552716ea3a7SDavid Howells elt++; 4553716ea3a7SDavid Howells *trailer = skb1; 4554716ea3a7SDavid Howells skb_p = &skb1->next; 4555716ea3a7SDavid Howells } 4556716ea3a7SDavid Howells 4557716ea3a7SDavid Howells return elt; 4558716ea3a7SDavid Howells } 4559b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_cow_data); 4560716ea3a7SDavid Howells 4561b1faf566SEric Dumazet static void sock_rmem_free(struct sk_buff *skb) 4562b1faf566SEric Dumazet { 4563b1faf566SEric Dumazet struct sock *sk = skb->sk; 4564b1faf566SEric Dumazet 4565b1faf566SEric Dumazet atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 4566b1faf566SEric Dumazet } 4567b1faf566SEric Dumazet 45688605330aSSoheil Hassas Yeganeh static void skb_set_err_queue(struct sk_buff *skb) 45698605330aSSoheil Hassas Yeganeh { 45708605330aSSoheil Hassas Yeganeh /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. 45718605330aSSoheil Hassas Yeganeh * So, it is safe to (mis)use it to mark skbs on the error queue. 
45728605330aSSoheil Hassas Yeganeh */ 45738605330aSSoheil Hassas Yeganeh skb->pkt_type = PACKET_OUTGOING; 45748605330aSSoheil Hassas Yeganeh BUILD_BUG_ON(PACKET_OUTGOING == 0); 45758605330aSSoheil Hassas Yeganeh } 45768605330aSSoheil Hassas Yeganeh 4577b1faf566SEric Dumazet /* 4578b1faf566SEric Dumazet * Note: We dont mem charge error packets (no sk_forward_alloc changes) 4579b1faf566SEric Dumazet */ 4580b1faf566SEric Dumazet int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 4581b1faf566SEric Dumazet { 4582b1faf566SEric Dumazet if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 4583ebb3b78dSEric Dumazet (unsigned int)READ_ONCE(sk->sk_rcvbuf)) 4584b1faf566SEric Dumazet return -ENOMEM; 4585b1faf566SEric Dumazet 4586b1faf566SEric Dumazet skb_orphan(skb); 4587b1faf566SEric Dumazet skb->sk = sk; 4588b1faf566SEric Dumazet skb->destructor = sock_rmem_free; 4589b1faf566SEric Dumazet atomic_add(skb->truesize, &sk->sk_rmem_alloc); 45908605330aSSoheil Hassas Yeganeh skb_set_err_queue(skb); 4591b1faf566SEric Dumazet 4592abb57ea4SEric Dumazet /* before exiting rcu section, make sure dst is refcounted */ 4593abb57ea4SEric Dumazet skb_dst_force(skb); 4594abb57ea4SEric Dumazet 4595b1faf566SEric Dumazet skb_queue_tail(&sk->sk_error_queue, skb); 4596b1faf566SEric Dumazet if (!sock_flag(sk, SOCK_DEAD)) 45976e5d58fdSVinicius Costa Gomes sk->sk_error_report(sk); 4598b1faf566SEric Dumazet return 0; 4599b1faf566SEric Dumazet } 4600b1faf566SEric Dumazet EXPORT_SYMBOL(sock_queue_err_skb); 4601b1faf566SEric Dumazet 460283a1a1a7SSoheil Hassas Yeganeh static bool is_icmp_err_skb(const struct sk_buff *skb) 460383a1a1a7SSoheil Hassas Yeganeh { 460483a1a1a7SSoheil Hassas Yeganeh return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || 460583a1a1a7SSoheil Hassas Yeganeh SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); 460683a1a1a7SSoheil Hassas Yeganeh } 460783a1a1a7SSoheil Hassas Yeganeh 4608364a9e93SWillem de Bruijn struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 4609364a9e93SWillem de Bruijn { 4610364a9e93SWillem de Bruijn struct sk_buff_head *q = &sk->sk_error_queue; 461183a1a1a7SSoheil Hassas Yeganeh struct sk_buff *skb, *skb_next = NULL; 461283a1a1a7SSoheil Hassas Yeganeh bool icmp_next = false; 4613997d5c3fSEric Dumazet unsigned long flags; 4614364a9e93SWillem de Bruijn 4615997d5c3fSEric Dumazet spin_lock_irqsave(&q->lock, flags); 4616364a9e93SWillem de Bruijn skb = __skb_dequeue(q); 461738b25793SSoheil Hassas Yeganeh if (skb && (skb_next = skb_peek(q))) { 461883a1a1a7SSoheil Hassas Yeganeh icmp_next = is_icmp_err_skb(skb_next); 461938b25793SSoheil Hassas Yeganeh if (icmp_next) 4620985f7337SWillem de Bruijn sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; 462138b25793SSoheil Hassas Yeganeh } 4622997d5c3fSEric Dumazet spin_unlock_irqrestore(&q->lock, flags); 4623364a9e93SWillem de Bruijn 462483a1a1a7SSoheil Hassas Yeganeh if (is_icmp_err_skb(skb) && !icmp_next) 462583a1a1a7SSoheil Hassas Yeganeh sk->sk_err = 0; 462683a1a1a7SSoheil Hassas Yeganeh 462783a1a1a7SSoheil Hassas Yeganeh if (skb_next) 4628364a9e93SWillem de Bruijn sk->sk_error_report(sk); 4629364a9e93SWillem de Bruijn 4630364a9e93SWillem de Bruijn return skb; 4631364a9e93SWillem de Bruijn } 4632364a9e93SWillem de Bruijn EXPORT_SYMBOL(sock_dequeue_err_skb); 4633364a9e93SWillem de Bruijn 4634cab41c47SAlexander Duyck /** 4635cab41c47SAlexander Duyck * skb_clone_sk - create clone of skb, and take reference to socket 4636cab41c47SAlexander Duyck * @skb: the skb to clone 4637cab41c47SAlexander Duyck * 
4638cab41c47SAlexander Duyck * This function creates a clone of a buffer that holds a reference on
4639cab41c47SAlexander Duyck * sk_refcnt. Buffers created via this function are meant to be
4640cab41c47SAlexander Duyck * returned using sock_queue_err_skb, or freed via kfree_skb.
4641cab41c47SAlexander Duyck *
4642cab41c47SAlexander Duyck * When passing buffers allocated with this function to sock_queue_err_skb
4643cab41c47SAlexander Duyck * it is necessary to wrap the call with sock_hold/sock_put in order to
4644cab41c47SAlexander Duyck * prevent the socket from being released prior to being enqueued on
4645cab41c47SAlexander Duyck * the sk_error_queue.
4646cab41c47SAlexander Duyck */
464762bccb8cSAlexander Duyck struct sk_buff *skb_clone_sk(struct sk_buff *skb)
464862bccb8cSAlexander Duyck {
464962bccb8cSAlexander Duyck 	struct sock *sk = skb->sk;
465062bccb8cSAlexander Duyck 	struct sk_buff *clone;
465162bccb8cSAlexander Duyck 
465241c6d650SReshetova, Elena 	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
465362bccb8cSAlexander Duyck 		return NULL;
465462bccb8cSAlexander Duyck 
465562bccb8cSAlexander Duyck 	clone = skb_clone(skb, GFP_ATOMIC);
465662bccb8cSAlexander Duyck 	if (!clone) {
465762bccb8cSAlexander Duyck 		sock_put(sk);
465862bccb8cSAlexander Duyck 		return NULL;
465962bccb8cSAlexander Duyck 	}
466062bccb8cSAlexander Duyck 
466162bccb8cSAlexander Duyck 	clone->sk = sk;
466262bccb8cSAlexander Duyck 	clone->destructor = sock_efree;
466362bccb8cSAlexander Duyck 
466462bccb8cSAlexander Duyck 	return clone;
466562bccb8cSAlexander Duyck }
466662bccb8cSAlexander Duyck EXPORT_SYMBOL(skb_clone_sk);
466762bccb8cSAlexander Duyck 
466837846ef0SAlexander Duyck static void __skb_complete_tx_timestamp(struct sk_buff *skb,
466937846ef0SAlexander Duyck 					struct sock *sk,
46704ef1b286SSoheil Hassas Yeganeh 					int tstype,
46714ef1b286SSoheil Hassas Yeganeh 					bool opt_stats)
4672ac45f602SPatrick Ohly {
4673ac45f602SPatrick Ohly 	struct sock_exterr_skb *serr;
4674ac45f602SPatrick Ohly 	int err;
4675ac45f602SPatrick Ohly 
46764ef1b286SSoheil Hassas Yeganeh 	BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
46774ef1b286SSoheil Hassas Yeganeh 
4678ac45f602SPatrick Ohly 	serr = SKB_EXT_ERR(skb);
4679ac45f602SPatrick Ohly 	memset(serr, 0, sizeof(*serr));
4680ac45f602SPatrick Ohly 	serr->ee.ee_errno = ENOMSG;
4681ac45f602SPatrick Ohly 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
4682e7fd2885SWillem de Bruijn 	serr->ee.ee_info = tstype;
46834ef1b286SSoheil Hassas Yeganeh 	serr->opt_stats = opt_stats;
46841862d620SWillem de Bruijn 	serr->header.h4.iif = skb->dev ?
skb->dev->ifindex : 0; 46854ed2d765SWillem de Bruijn if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 468609c2d251SWillem de Bruijn serr->ee.ee_data = skb_shinfo(skb)->tskey; 4687ac5cc977SWANG Cong if (sk->sk_protocol == IPPROTO_TCP && 4688ac5cc977SWANG Cong sk->sk_type == SOCK_STREAM) 46894ed2d765SWillem de Bruijn serr->ee.ee_data -= sk->sk_tskey; 46904ed2d765SWillem de Bruijn } 469129030374SEric Dumazet 4692ac45f602SPatrick Ohly err = sock_queue_err_skb(sk, skb); 469329030374SEric Dumazet 4694ac45f602SPatrick Ohly if (err) 4695ac45f602SPatrick Ohly kfree_skb(skb); 4696ac45f602SPatrick Ohly } 469737846ef0SAlexander Duyck 4698b245be1fSWillem de Bruijn static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 4699b245be1fSWillem de Bruijn { 4700b245be1fSWillem de Bruijn bool ret; 4701b245be1fSWillem de Bruijn 4702b245be1fSWillem de Bruijn if (likely(sysctl_tstamp_allow_data || tsonly)) 4703b245be1fSWillem de Bruijn return true; 4704b245be1fSWillem de Bruijn 4705b245be1fSWillem de Bruijn read_lock_bh(&sk->sk_callback_lock); 4706b245be1fSWillem de Bruijn ret = sk->sk_socket && sk->sk_socket->file && 4707b245be1fSWillem de Bruijn file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 4708b245be1fSWillem de Bruijn read_unlock_bh(&sk->sk_callback_lock); 4709b245be1fSWillem de Bruijn return ret; 4710b245be1fSWillem de Bruijn } 4711b245be1fSWillem de Bruijn 471237846ef0SAlexander Duyck void skb_complete_tx_timestamp(struct sk_buff *skb, 471337846ef0SAlexander Duyck struct skb_shared_hwtstamps *hwtstamps) 471437846ef0SAlexander Duyck { 471537846ef0SAlexander Duyck struct sock *sk = skb->sk; 471637846ef0SAlexander Duyck 4717b245be1fSWillem de Bruijn if (!skb_may_tx_timestamp(sk, false)) 471835b99dffSWillem de Bruijn goto err; 4719b245be1fSWillem de Bruijn 47209ac25fc0SEric Dumazet /* Take a reference to prevent skb_orphan() from freeing the socket, 47219ac25fc0SEric Dumazet * but only if the socket refcount is not zero. 
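 *
 * Callers typically pair this as follows (a sketch only, names such as
 * hw_raw_ns are hypothetical): the clone is taken at transmit time,
 *
 *	clone = skb_clone_sk(skb);
 *
 * and completed from the TX completion handler once the hardware
 * timestamp has been converted to nanoseconds:
 *
 *	struct skb_shared_hwtstamps hwts = { };
 *
 *	hwts.hwtstamp = ns_to_ktime(hw_raw_ns);
 *	skb_complete_tx_timestamp(clone, &hwts);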
47229ac25fc0SEric Dumazet */ 472341c6d650SReshetova, Elena if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 472437846ef0SAlexander Duyck *skb_hwtstamps(skb) = *hwtstamps; 47254ef1b286SSoheil Hassas Yeganeh __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 472637846ef0SAlexander Duyck sock_put(sk); 472735b99dffSWillem de Bruijn return; 472837846ef0SAlexander Duyck } 472935b99dffSWillem de Bruijn 473035b99dffSWillem de Bruijn err: 473135b99dffSWillem de Bruijn kfree_skb(skb); 47329ac25fc0SEric Dumazet } 473337846ef0SAlexander Duyck EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 473437846ef0SAlexander Duyck 473537846ef0SAlexander Duyck void __skb_tstamp_tx(struct sk_buff *orig_skb, 4736e7ed11eeSYousuk Seung const struct sk_buff *ack_skb, 473737846ef0SAlexander Duyck struct skb_shared_hwtstamps *hwtstamps, 473837846ef0SAlexander Duyck struct sock *sk, int tstype) 473937846ef0SAlexander Duyck { 474037846ef0SAlexander Duyck struct sk_buff *skb; 47414ef1b286SSoheil Hassas Yeganeh bool tsonly, opt_stats = false; 474237846ef0SAlexander Duyck 47433a8dd971SWillem de Bruijn if (!sk) 47443a8dd971SWillem de Bruijn return; 47453a8dd971SWillem de Bruijn 4746b50a5c70SMiroslav Lichvar if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && 4747b50a5c70SMiroslav Lichvar skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) 4748b50a5c70SMiroslav Lichvar return; 4749b50a5c70SMiroslav Lichvar 47503a8dd971SWillem de Bruijn tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 47513a8dd971SWillem de Bruijn if (!skb_may_tx_timestamp(sk, tsonly)) 475237846ef0SAlexander Duyck return; 475337846ef0SAlexander Duyck 47541c885808SFrancis Yan if (tsonly) { 47551c885808SFrancis Yan #ifdef CONFIG_INET 47561c885808SFrancis Yan if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && 47571c885808SFrancis Yan sk->sk_protocol == IPPROTO_TCP && 47584ef1b286SSoheil Hassas Yeganeh sk->sk_type == SOCK_STREAM) { 4759e7ed11eeSYousuk Seung skb = tcp_get_timestamping_opt_stats(sk, orig_skb, 4760e7ed11eeSYousuk Seung ack_skb); 47614ef1b286SSoheil Hassas Yeganeh opt_stats = true; 47624ef1b286SSoheil Hassas Yeganeh } else 47631c885808SFrancis Yan #endif 47641c885808SFrancis Yan skb = alloc_skb(0, GFP_ATOMIC); 47651c885808SFrancis Yan } else { 476637846ef0SAlexander Duyck skb = skb_clone(orig_skb, GFP_ATOMIC); 47671c885808SFrancis Yan } 476837846ef0SAlexander Duyck if (!skb) 476937846ef0SAlexander Duyck return; 477037846ef0SAlexander Duyck 477149ca0d8bSWillem de Bruijn if (tsonly) { 4772fff88030SWillem de Bruijn skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & 4773fff88030SWillem de Bruijn SKBTX_ANY_TSTAMP; 477449ca0d8bSWillem de Bruijn skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 477549ca0d8bSWillem de Bruijn } 477649ca0d8bSWillem de Bruijn 477749ca0d8bSWillem de Bruijn if (hwtstamps) 477849ca0d8bSWillem de Bruijn *skb_hwtstamps(skb) = *hwtstamps; 477949ca0d8bSWillem de Bruijn else 478049ca0d8bSWillem de Bruijn skb->tstamp = ktime_get_real(); 478149ca0d8bSWillem de Bruijn 47824ef1b286SSoheil Hassas Yeganeh __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); 478337846ef0SAlexander Duyck } 4784e7fd2885SWillem de Bruijn EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 4785e7fd2885SWillem de Bruijn 4786e7fd2885SWillem de Bruijn void skb_tstamp_tx(struct sk_buff *orig_skb, 4787e7fd2885SWillem de Bruijn struct skb_shared_hwtstamps *hwtstamps) 4788e7fd2885SWillem de Bruijn { 4789e7ed11eeSYousuk Seung return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, 4790e7fd2885SWillem de Bruijn SCM_TSTAMP_SND); 
4791e7fd2885SWillem de Bruijn } 4792ac45f602SPatrick Ohly EXPORT_SYMBOL_GPL(skb_tstamp_tx); 4793ac45f602SPatrick Ohly 47946e3e939fSJohannes Berg void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 47956e3e939fSJohannes Berg { 47966e3e939fSJohannes Berg struct sock *sk = skb->sk; 47976e3e939fSJohannes Berg struct sock_exterr_skb *serr; 4798dd4f1072SEric Dumazet int err = 1; 47996e3e939fSJohannes Berg 48006e3e939fSJohannes Berg skb->wifi_acked_valid = 1; 48016e3e939fSJohannes Berg skb->wifi_acked = acked; 48026e3e939fSJohannes Berg 48036e3e939fSJohannes Berg serr = SKB_EXT_ERR(skb); 48046e3e939fSJohannes Berg memset(serr, 0, sizeof(*serr)); 48056e3e939fSJohannes Berg serr->ee.ee_errno = ENOMSG; 48066e3e939fSJohannes Berg serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 48076e3e939fSJohannes Berg 4808dd4f1072SEric Dumazet /* Take a reference to prevent skb_orphan() from freeing the socket, 4809dd4f1072SEric Dumazet * but only if the socket refcount is not zero. 4810dd4f1072SEric Dumazet */ 481141c6d650SReshetova, Elena if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 48126e3e939fSJohannes Berg err = sock_queue_err_skb(sk, skb); 4813dd4f1072SEric Dumazet sock_put(sk); 4814dd4f1072SEric Dumazet } 48156e3e939fSJohannes Berg if (err) 48166e3e939fSJohannes Berg kfree_skb(skb); 48176e3e939fSJohannes Berg } 48186e3e939fSJohannes Berg EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 48196e3e939fSJohannes Berg 4820f35d9d8aSRusty Russell /** 4821f35d9d8aSRusty Russell * skb_partial_csum_set - set up and verify partial csum values for packet 4822f35d9d8aSRusty Russell * @skb: the skb to set 4823f35d9d8aSRusty Russell * @start: the number of bytes after skb->data to start checksumming. 4824f35d9d8aSRusty Russell * @off: the offset from start to place the checksum. 4825f35d9d8aSRusty Russell * 4826f35d9d8aSRusty Russell * For untrusted partially-checksummed packets, we need to make sure the values 4827f35d9d8aSRusty Russell * for skb->csum_start and skb->csum_offset are valid so we don't oops. 4828f35d9d8aSRusty Russell * 4829f35d9d8aSRusty Russell * This function checks and sets those values and skb->ip_summed: if this 4830f35d9d8aSRusty Russell * returns false you should drop the packet. 4831f35d9d8aSRusty Russell */ 4832f35d9d8aSRusty Russell bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 4833f35d9d8aSRusty Russell { 483452b5d6f5SEric Dumazet u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); 483552b5d6f5SEric Dumazet u32 csum_start = skb_headroom(skb) + (u32)start; 483652b5d6f5SEric Dumazet 483752b5d6f5SEric Dumazet if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) { 483852b5d6f5SEric Dumazet net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", 483952b5d6f5SEric Dumazet start, off, skb_headroom(skb), skb_headlen(skb)); 4840f35d9d8aSRusty Russell return false; 4841f35d9d8aSRusty Russell } 4842f35d9d8aSRusty Russell skb->ip_summed = CHECKSUM_PARTIAL; 484352b5d6f5SEric Dumazet skb->csum_start = csum_start; 4844f35d9d8aSRusty Russell skb->csum_offset = off; 4845e5d5decaSJason Wang skb_set_transport_header(skb, start); 4846f35d9d8aSRusty Russell return true; 4847f35d9d8aSRusty Russell } 4848b4ac530fSDavid S. 
Miller EXPORT_SYMBOL_GPL(skb_partial_csum_set); 4849f35d9d8aSRusty Russell 4850ed1f50c3SPaul Durrant static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 4851ed1f50c3SPaul Durrant unsigned int max) 4852ed1f50c3SPaul Durrant { 4853ed1f50c3SPaul Durrant if (skb_headlen(skb) >= len) 4854ed1f50c3SPaul Durrant return 0; 4855ed1f50c3SPaul Durrant 4856ed1f50c3SPaul Durrant /* If we need to pullup then pullup to the max, so we 4857ed1f50c3SPaul Durrant * won't need to do it again. 4858ed1f50c3SPaul Durrant */ 4859ed1f50c3SPaul Durrant if (max > skb->len) 4860ed1f50c3SPaul Durrant max = skb->len; 4861ed1f50c3SPaul Durrant 4862ed1f50c3SPaul Durrant if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 4863ed1f50c3SPaul Durrant return -ENOMEM; 4864ed1f50c3SPaul Durrant 4865ed1f50c3SPaul Durrant if (skb_headlen(skb) < len) 4866ed1f50c3SPaul Durrant return -EPROTO; 4867ed1f50c3SPaul Durrant 4868ed1f50c3SPaul Durrant return 0; 4869ed1f50c3SPaul Durrant } 4870ed1f50c3SPaul Durrant 4871f9708b43SJan Beulich #define MAX_TCP_HDR_LEN (15 * 4) 4872f9708b43SJan Beulich 4873f9708b43SJan Beulich static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 4874f9708b43SJan Beulich typeof(IPPROTO_IP) proto, 4875f9708b43SJan Beulich unsigned int off) 4876f9708b43SJan Beulich { 4877f9708b43SJan Beulich int err; 4878f9708b43SJan Beulich 4879161d1792SKees Cook switch (proto) { 4880f9708b43SJan Beulich case IPPROTO_TCP: 4881f9708b43SJan Beulich err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 4882f9708b43SJan Beulich off + MAX_TCP_HDR_LEN); 4883f9708b43SJan Beulich if (!err && !skb_partial_csum_set(skb, off, 4884f9708b43SJan Beulich offsetof(struct tcphdr, 4885f9708b43SJan Beulich check))) 4886f9708b43SJan Beulich err = -EPROTO; 4887f9708b43SJan Beulich return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 4888f9708b43SJan Beulich 4889f9708b43SJan Beulich case IPPROTO_UDP: 4890f9708b43SJan Beulich err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 4891f9708b43SJan Beulich off + sizeof(struct udphdr)); 4892f9708b43SJan Beulich if (!err && !skb_partial_csum_set(skb, off, 4893f9708b43SJan Beulich offsetof(struct udphdr, 4894f9708b43SJan Beulich check))) 4895f9708b43SJan Beulich err = -EPROTO; 4896f9708b43SJan Beulich return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 4897f9708b43SJan Beulich } 4898f9708b43SJan Beulich 4899f9708b43SJan Beulich return ERR_PTR(-EPROTO); 4900f9708b43SJan Beulich } 4901f9708b43SJan Beulich 4902ed1f50c3SPaul Durrant /* This value should be large enough to cover a tagged ethernet header plus 4903ed1f50c3SPaul Durrant * maximally sized IP and TCP or UDP headers. 
4904ed1f50c3SPaul Durrant */ 4905ed1f50c3SPaul Durrant #define MAX_IP_HDR_LEN 128 4906ed1f50c3SPaul Durrant 4907f9708b43SJan Beulich static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 4908ed1f50c3SPaul Durrant { 4909ed1f50c3SPaul Durrant unsigned int off; 4910ed1f50c3SPaul Durrant bool fragment; 4911f9708b43SJan Beulich __sum16 *csum; 4912ed1f50c3SPaul Durrant int err; 4913ed1f50c3SPaul Durrant 4914ed1f50c3SPaul Durrant fragment = false; 4915ed1f50c3SPaul Durrant 4916ed1f50c3SPaul Durrant err = skb_maybe_pull_tail(skb, 4917ed1f50c3SPaul Durrant sizeof(struct iphdr), 4918ed1f50c3SPaul Durrant MAX_IP_HDR_LEN); 4919ed1f50c3SPaul Durrant if (err < 0) 4920ed1f50c3SPaul Durrant goto out; 4921ed1f50c3SPaul Durrant 492211f920d2SMiaohe Lin if (ip_is_fragment(ip_hdr(skb))) 4923ed1f50c3SPaul Durrant fragment = true; 4924ed1f50c3SPaul Durrant 4925ed1f50c3SPaul Durrant off = ip_hdrlen(skb); 4926ed1f50c3SPaul Durrant 4927ed1f50c3SPaul Durrant err = -EPROTO; 4928ed1f50c3SPaul Durrant 4929ed1f50c3SPaul Durrant if (fragment) 4930ed1f50c3SPaul Durrant goto out; 4931ed1f50c3SPaul Durrant 4932f9708b43SJan Beulich csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 4933f9708b43SJan Beulich if (IS_ERR(csum)) 4934f9708b43SJan Beulich return PTR_ERR(csum); 4935ed1f50c3SPaul Durrant 4936ed1f50c3SPaul Durrant if (recalculate) 4937f9708b43SJan Beulich *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 4938ed1f50c3SPaul Durrant ip_hdr(skb)->daddr, 4939ed1f50c3SPaul Durrant skb->len - off, 4940f9708b43SJan Beulich ip_hdr(skb)->protocol, 0); 4941ed1f50c3SPaul Durrant err = 0; 4942ed1f50c3SPaul Durrant 4943ed1f50c3SPaul Durrant out: 4944ed1f50c3SPaul Durrant return err; 4945ed1f50c3SPaul Durrant } 4946ed1f50c3SPaul Durrant 4947ed1f50c3SPaul Durrant /* This value should be large enough to cover a tagged ethernet header plus 4948ed1f50c3SPaul Durrant * an IPv6 header, all options, and a maximal TCP or UDP header. 
4949ed1f50c3SPaul Durrant */ 4950ed1f50c3SPaul Durrant #define MAX_IPV6_HDR_LEN 256 4951ed1f50c3SPaul Durrant 4952ed1f50c3SPaul Durrant #define OPT_HDR(type, skb, off) \ 4953ed1f50c3SPaul Durrant (type *)(skb_network_header(skb) + (off)) 4954ed1f50c3SPaul Durrant 4955ed1f50c3SPaul Durrant static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 4956ed1f50c3SPaul Durrant { 4957ed1f50c3SPaul Durrant int err; 4958ed1f50c3SPaul Durrant u8 nexthdr; 4959ed1f50c3SPaul Durrant unsigned int off; 4960ed1f50c3SPaul Durrant unsigned int len; 4961ed1f50c3SPaul Durrant bool fragment; 4962ed1f50c3SPaul Durrant bool done; 4963f9708b43SJan Beulich __sum16 *csum; 4964ed1f50c3SPaul Durrant 4965ed1f50c3SPaul Durrant fragment = false; 4966ed1f50c3SPaul Durrant done = false; 4967ed1f50c3SPaul Durrant 4968ed1f50c3SPaul Durrant off = sizeof(struct ipv6hdr); 4969ed1f50c3SPaul Durrant 4970ed1f50c3SPaul Durrant err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 4971ed1f50c3SPaul Durrant if (err < 0) 4972ed1f50c3SPaul Durrant goto out; 4973ed1f50c3SPaul Durrant 4974ed1f50c3SPaul Durrant nexthdr = ipv6_hdr(skb)->nexthdr; 4975ed1f50c3SPaul Durrant 4976ed1f50c3SPaul Durrant len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 4977ed1f50c3SPaul Durrant while (off <= len && !done) { 4978ed1f50c3SPaul Durrant switch (nexthdr) { 4979ed1f50c3SPaul Durrant case IPPROTO_DSTOPTS: 4980ed1f50c3SPaul Durrant case IPPROTO_HOPOPTS: 4981ed1f50c3SPaul Durrant case IPPROTO_ROUTING: { 4982ed1f50c3SPaul Durrant struct ipv6_opt_hdr *hp; 4983ed1f50c3SPaul Durrant 4984ed1f50c3SPaul Durrant err = skb_maybe_pull_tail(skb, 4985ed1f50c3SPaul Durrant off + 4986ed1f50c3SPaul Durrant sizeof(struct ipv6_opt_hdr), 4987ed1f50c3SPaul Durrant MAX_IPV6_HDR_LEN); 4988ed1f50c3SPaul Durrant if (err < 0) 4989ed1f50c3SPaul Durrant goto out; 4990ed1f50c3SPaul Durrant 4991ed1f50c3SPaul Durrant hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 4992ed1f50c3SPaul Durrant nexthdr = hp->nexthdr; 4993ed1f50c3SPaul Durrant off += ipv6_optlen(hp); 4994ed1f50c3SPaul Durrant break; 4995ed1f50c3SPaul Durrant } 4996ed1f50c3SPaul Durrant case IPPROTO_AH: { 4997ed1f50c3SPaul Durrant struct ip_auth_hdr *hp; 4998ed1f50c3SPaul Durrant 4999ed1f50c3SPaul Durrant err = skb_maybe_pull_tail(skb, 5000ed1f50c3SPaul Durrant off + 5001ed1f50c3SPaul Durrant sizeof(struct ip_auth_hdr), 5002ed1f50c3SPaul Durrant MAX_IPV6_HDR_LEN); 5003ed1f50c3SPaul Durrant if (err < 0) 5004ed1f50c3SPaul Durrant goto out; 5005ed1f50c3SPaul Durrant 5006ed1f50c3SPaul Durrant hp = OPT_HDR(struct ip_auth_hdr, skb, off); 5007ed1f50c3SPaul Durrant nexthdr = hp->nexthdr; 5008ed1f50c3SPaul Durrant off += ipv6_authlen(hp); 5009ed1f50c3SPaul Durrant break; 5010ed1f50c3SPaul Durrant } 5011ed1f50c3SPaul Durrant case IPPROTO_FRAGMENT: { 5012ed1f50c3SPaul Durrant struct frag_hdr *hp; 5013ed1f50c3SPaul Durrant 5014ed1f50c3SPaul Durrant err = skb_maybe_pull_tail(skb, 5015ed1f50c3SPaul Durrant off + 5016ed1f50c3SPaul Durrant sizeof(struct frag_hdr), 5017ed1f50c3SPaul Durrant MAX_IPV6_HDR_LEN); 5018ed1f50c3SPaul Durrant if (err < 0) 5019ed1f50c3SPaul Durrant goto out; 5020ed1f50c3SPaul Durrant 5021ed1f50c3SPaul Durrant hp = OPT_HDR(struct frag_hdr, skb, off); 5022ed1f50c3SPaul Durrant 5023ed1f50c3SPaul Durrant if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 5024ed1f50c3SPaul Durrant fragment = true; 5025ed1f50c3SPaul Durrant 5026ed1f50c3SPaul Durrant nexthdr = hp->nexthdr; 5027ed1f50c3SPaul Durrant off += sizeof(struct frag_hdr); 5028ed1f50c3SPaul Durrant break; 5029ed1f50c3SPaul Durrant } 
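		/* Orientation note (illustrative expansion of the length
		 * helpers used above, per include/net/ipv6.h):
		 *
		 *	ipv6_optlen(hp)  == ((hp)->hdrlen + 1) << 3
		 *	ipv6_authlen(hp) == ((hp)->hdrlen + 2) << 2
		 *
		 * i.e. generic option headers count their length in 8-byte
		 * units while AH counts 4-byte units, each excluding a
		 * fixed leading chunk.
		 */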
5030ed1f50c3SPaul Durrant default: 5031ed1f50c3SPaul Durrant done = true; 5032ed1f50c3SPaul Durrant break; 5033ed1f50c3SPaul Durrant } 5034ed1f50c3SPaul Durrant } 5035ed1f50c3SPaul Durrant 5036ed1f50c3SPaul Durrant err = -EPROTO; 5037ed1f50c3SPaul Durrant 5038ed1f50c3SPaul Durrant if (!done || fragment) 5039ed1f50c3SPaul Durrant goto out; 5040ed1f50c3SPaul Durrant 5041f9708b43SJan Beulich csum = skb_checksum_setup_ip(skb, nexthdr, off); 5042f9708b43SJan Beulich if (IS_ERR(csum)) 5043f9708b43SJan Beulich return PTR_ERR(csum); 5044ed1f50c3SPaul Durrant 5045ed1f50c3SPaul Durrant if (recalculate) 5046f9708b43SJan Beulich *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5047ed1f50c3SPaul Durrant &ipv6_hdr(skb)->daddr, 5048f9708b43SJan Beulich skb->len - off, nexthdr, 0); 5049ed1f50c3SPaul Durrant err = 0; 5050ed1f50c3SPaul Durrant 5051ed1f50c3SPaul Durrant out: 5052ed1f50c3SPaul Durrant return err; 5053ed1f50c3SPaul Durrant } 5054ed1f50c3SPaul Durrant 5055ed1f50c3SPaul Durrant /** 5056ed1f50c3SPaul Durrant * skb_checksum_setup - set up partial checksum offset 5057ed1f50c3SPaul Durrant * @skb: the skb to set up 5058ed1f50c3SPaul Durrant * @recalculate: if true the pseudo-header checksum will be recalculated 5059ed1f50c3SPaul Durrant */ 5060ed1f50c3SPaul Durrant int skb_checksum_setup(struct sk_buff *skb, bool recalculate) 5061ed1f50c3SPaul Durrant { 5062ed1f50c3SPaul Durrant int err; 5063ed1f50c3SPaul Durrant 5064ed1f50c3SPaul Durrant switch (skb->protocol) { 5065ed1f50c3SPaul Durrant case htons(ETH_P_IP): 5066f9708b43SJan Beulich err = skb_checksum_setup_ipv4(skb, recalculate); 5067ed1f50c3SPaul Durrant break; 5068ed1f50c3SPaul Durrant 5069ed1f50c3SPaul Durrant case htons(ETH_P_IPV6): 5070ed1f50c3SPaul Durrant err = skb_checksum_setup_ipv6(skb, recalculate); 5071ed1f50c3SPaul Durrant break; 5072ed1f50c3SPaul Durrant 5073ed1f50c3SPaul Durrant default: 5074ed1f50c3SPaul Durrant err = -EPROTO; 5075ed1f50c3SPaul Durrant break; 5076ed1f50c3SPaul Durrant } 5077ed1f50c3SPaul Durrant 5078ed1f50c3SPaul Durrant return err; 5079ed1f50c3SPaul Durrant } 5080ed1f50c3SPaul Durrant EXPORT_SYMBOL(skb_checksum_setup); 5081ed1f50c3SPaul Durrant 50829afd85c9SLinus Lüssing /** 50839afd85c9SLinus Lüssing * skb_checksum_maybe_trim - maybe trims the given skb 50849afd85c9SLinus Lüssing * @skb: the skb to check 50859afd85c9SLinus Lüssing * @transport_len: the data length beyond the network header 50869afd85c9SLinus Lüssing * 50879afd85c9SLinus Lüssing * Checks whether the given skb has data beyond the given transport length. 50889afd85c9SLinus Lüssing * If so, returns a cloned skb trimmed to this transport length. 50899afd85c9SLinus Lüssing * Otherwise returns the provided skb. Returns NULL in error cases 50909afd85c9SLinus Lüssing * (e.g. transport_len exceeds skb length or out-of-memory). 50919afd85c9SLinus Lüssing * 5092a516993fSLinus Lüssing * Caller needs to set the skb transport header and free any returned skb if it 5093a516993fSLinus Lüssing * differs from the provided skb. 
50949afd85c9SLinus Lüssing */ 50959afd85c9SLinus Lüssing static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 50969afd85c9SLinus Lüssing unsigned int transport_len) 50979afd85c9SLinus Lüssing { 50989afd85c9SLinus Lüssing struct sk_buff *skb_chk; 50999afd85c9SLinus Lüssing unsigned int len = skb_transport_offset(skb) + transport_len; 51009afd85c9SLinus Lüssing int ret; 51019afd85c9SLinus Lüssing 5102a516993fSLinus Lüssing if (skb->len < len) 51039afd85c9SLinus Lüssing return NULL; 5104a516993fSLinus Lüssing else if (skb->len == len) 51059afd85c9SLinus Lüssing return skb; 51069afd85c9SLinus Lüssing 51079afd85c9SLinus Lüssing skb_chk = skb_clone(skb, GFP_ATOMIC); 51089afd85c9SLinus Lüssing if (!skb_chk) 51099afd85c9SLinus Lüssing return NULL; 51109afd85c9SLinus Lüssing 51119afd85c9SLinus Lüssing ret = pskb_trim_rcsum(skb_chk, len); 51129afd85c9SLinus Lüssing if (ret) { 51139afd85c9SLinus Lüssing kfree_skb(skb_chk); 51149afd85c9SLinus Lüssing return NULL; 51159afd85c9SLinus Lüssing } 51169afd85c9SLinus Lüssing 51179afd85c9SLinus Lüssing return skb_chk; 51189afd85c9SLinus Lüssing } 51199afd85c9SLinus Lüssing 51209afd85c9SLinus Lüssing /** 51219afd85c9SLinus Lüssing * skb_checksum_trimmed - validate checksum of an skb 51229afd85c9SLinus Lüssing * @skb: the skb to check 51239afd85c9SLinus Lüssing * @transport_len: the data length beyond the network header 51249afd85c9SLinus Lüssing * @skb_chkf: checksum function to use 51259afd85c9SLinus Lüssing * 51269afd85c9SLinus Lüssing * Applies the given checksum function skb_chkf to the provided skb. 51279afd85c9SLinus Lüssing * Returns a checked and maybe trimmed skb. Returns NULL on error. 51289afd85c9SLinus Lüssing * 51299afd85c9SLinus Lüssing * If the skb has data beyond the given transport length, then a 51309afd85c9SLinus Lüssing * trimmed & cloned skb is checked and returned. 51319afd85c9SLinus Lüssing * 5132a516993fSLinus Lüssing * Caller needs to set the skb transport header and free any returned skb if it 5133a516993fSLinus Lüssing * differs from the provided skb. 
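 *
 * Minimal calling sketch (skb_chkf and transport_len are the caller's;
 * the IGMP/MLD input paths follow this shape):
 *
 *	skb_set_transport_header(skb, ip_hdrlen(skb));
 *	skb_chk = skb_checksum_trimmed(skb, transport_len, skb_chkf);
 *	if (!skb_chk)
 *		return -EINVAL;
 *	...parse skb_chk...
 *	if (skb_chk != skb)
 *		kfree_skb(skb_chk);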
51349afd85c9SLinus Lüssing */ 51359afd85c9SLinus Lüssing struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 51369afd85c9SLinus Lüssing unsigned int transport_len, 51379afd85c9SLinus Lüssing __sum16(*skb_chkf)(struct sk_buff *skb)) 51389afd85c9SLinus Lüssing { 51399afd85c9SLinus Lüssing struct sk_buff *skb_chk; 51409afd85c9SLinus Lüssing unsigned int offset = skb_transport_offset(skb); 5141fcba67c9SLinus Lüssing __sum16 ret; 51429afd85c9SLinus Lüssing 51439afd85c9SLinus Lüssing skb_chk = skb_checksum_maybe_trim(skb, transport_len); 51449afd85c9SLinus Lüssing if (!skb_chk) 5145a516993fSLinus Lüssing goto err; 51469afd85c9SLinus Lüssing 5147a516993fSLinus Lüssing if (!pskb_may_pull(skb_chk, offset)) 5148a516993fSLinus Lüssing goto err; 51499afd85c9SLinus Lüssing 51509b368814SLinus Lüssing skb_pull_rcsum(skb_chk, offset); 51519afd85c9SLinus Lüssing ret = skb_chkf(skb_chk); 51529b368814SLinus Lüssing skb_push_rcsum(skb_chk, offset); 51539afd85c9SLinus Lüssing 5154a516993fSLinus Lüssing if (ret) 5155a516993fSLinus Lüssing goto err; 51569afd85c9SLinus Lüssing 51579afd85c9SLinus Lüssing return skb_chk; 5158a516993fSLinus Lüssing 5159a516993fSLinus Lüssing err: 5160a516993fSLinus Lüssing if (skb_chk && skb_chk != skb) 5161a516993fSLinus Lüssing kfree_skb(skb_chk); 5162a516993fSLinus Lüssing 5163a516993fSLinus Lüssing return NULL; 5164a516993fSLinus Lüssing 51659afd85c9SLinus Lüssing } 51669afd85c9SLinus Lüssing EXPORT_SYMBOL(skb_checksum_trimmed); 51679afd85c9SLinus Lüssing 51684497b076SBen Hutchings void __skb_warn_lro_forwarding(const struct sk_buff *skb) 51694497b076SBen Hutchings { 5170e87cc472SJoe Perches net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 5171e87cc472SJoe Perches skb->dev->name); 51724497b076SBen Hutchings } 51734497b076SBen Hutchings EXPORT_SYMBOL(__skb_warn_lro_forwarding); 5174bad43ca8SEric Dumazet 5175bad43ca8SEric Dumazet void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 5176bad43ca8SEric Dumazet { 51773d861f66SEric Dumazet if (head_stolen) { 51783d861f66SEric Dumazet skb_release_head_state(skb); 5179bad43ca8SEric Dumazet kmem_cache_free(skbuff_head_cache, skb); 51803d861f66SEric Dumazet } else { 5181bad43ca8SEric Dumazet __kfree_skb(skb); 5182bad43ca8SEric Dumazet } 51833d861f66SEric Dumazet } 5184bad43ca8SEric Dumazet EXPORT_SYMBOL(kfree_skb_partial); 5185bad43ca8SEric Dumazet 5186bad43ca8SEric Dumazet /** 5187bad43ca8SEric Dumazet * skb_try_coalesce - try to merge skb to prior one 5188bad43ca8SEric Dumazet * @to: prior buffer 5189bad43ca8SEric Dumazet * @from: buffer to add 5190bad43ca8SEric Dumazet * @fragstolen: pointer to boolean 5191c6c4b97cSRandy Dunlap * @delta_truesize: how much more was allocated than was requested 5192bad43ca8SEric Dumazet */ 5193bad43ca8SEric Dumazet bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 5194bad43ca8SEric Dumazet bool *fragstolen, int *delta_truesize) 5195bad43ca8SEric Dumazet { 5196c818fa9eSEric Dumazet struct skb_shared_info *to_shinfo, *from_shinfo; 5197bad43ca8SEric Dumazet int i, delta, len = from->len; 5198bad43ca8SEric Dumazet 5199bad43ca8SEric Dumazet *fragstolen = false; 5200bad43ca8SEric Dumazet 5201bad43ca8SEric Dumazet if (skb_cloned(to)) 5202bad43ca8SEric Dumazet return false; 5203bad43ca8SEric Dumazet 5204bad43ca8SEric Dumazet if (len <= skb_tailroom(to)) { 5205e93a0435SEric Dumazet if (len) 5206bad43ca8SEric Dumazet BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 5207bad43ca8SEric Dumazet *delta_truesize = 0; 5208bad43ca8SEric Dumazet 
return true;
5209bad43ca8SEric Dumazet 	}
5210bad43ca8SEric Dumazet 
5211c818fa9eSEric Dumazet 	to_shinfo = skb_shinfo(to);
5212c818fa9eSEric Dumazet 	from_shinfo = skb_shinfo(from);
5213c818fa9eSEric Dumazet 	if (to_shinfo->frag_list || from_shinfo->frag_list)
5214bad43ca8SEric Dumazet 		return false;
52151f8b977aSWillem de Bruijn 	if (skb_zcopy(to) || skb_zcopy(from))
52161f8b977aSWillem de Bruijn 		return false;
5217bad43ca8SEric Dumazet 
5218bad43ca8SEric Dumazet 	if (skb_headlen(from) != 0) {
5219bad43ca8SEric Dumazet 		struct page *page;
5220bad43ca8SEric Dumazet 		unsigned int offset;
5221bad43ca8SEric Dumazet 
5222c818fa9eSEric Dumazet 		if (to_shinfo->nr_frags +
5223c818fa9eSEric Dumazet 		    from_shinfo->nr_frags >= MAX_SKB_FRAGS)
5224bad43ca8SEric Dumazet 			return false;
5225bad43ca8SEric Dumazet 
5226bad43ca8SEric Dumazet 		if (skb_head_is_locked(from))
5227bad43ca8SEric Dumazet 			return false;
5228bad43ca8SEric Dumazet 
5229bad43ca8SEric Dumazet 		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
5230bad43ca8SEric Dumazet 
5231bad43ca8SEric Dumazet 		page = virt_to_head_page(from->head);
5232bad43ca8SEric Dumazet 		offset = from->data - (unsigned char *)page_address(page);
5233bad43ca8SEric Dumazet 
5234c818fa9eSEric Dumazet 		skb_fill_page_desc(to, to_shinfo->nr_frags,
5235bad43ca8SEric Dumazet 				   page, offset, skb_headlen(from));
5236bad43ca8SEric Dumazet 		*fragstolen = true;
5237bad43ca8SEric Dumazet 	} else {
5238c818fa9eSEric Dumazet 		if (to_shinfo->nr_frags +
5239c818fa9eSEric Dumazet 		    from_shinfo->nr_frags > MAX_SKB_FRAGS)
5240bad43ca8SEric Dumazet 			return false;
5241bad43ca8SEric Dumazet 
5242f4b549a5SWeiping Pan 		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
5243bad43ca8SEric Dumazet 	}
5244bad43ca8SEric Dumazet 
5245bad43ca8SEric Dumazet 	WARN_ON_ONCE(delta < len);
5246bad43ca8SEric Dumazet 
5247c818fa9eSEric Dumazet 	memcpy(to_shinfo->frags + to_shinfo->nr_frags,
5248c818fa9eSEric Dumazet 	       from_shinfo->frags,
5249c818fa9eSEric Dumazet 	       from_shinfo->nr_frags * sizeof(skb_frag_t));
5250c818fa9eSEric Dumazet 	to_shinfo->nr_frags += from_shinfo->nr_frags;
5251bad43ca8SEric Dumazet 
5252bad43ca8SEric Dumazet 	if (!skb_cloned(from))
5253c818fa9eSEric Dumazet 		from_shinfo->nr_frags = 0;
5254bad43ca8SEric Dumazet 
52558ea853fdSLi RongQing 	/* if the skb is not cloned this does nothing
52568ea853fdSLi RongQing 	 * since we set nr_frags to 0.
52578ea853fdSLi RongQing 	 */
5258c818fa9eSEric Dumazet 	for (i = 0; i < from_shinfo->nr_frags; i++)
5259c818fa9eSEric Dumazet 		__skb_frag_ref(&from_shinfo->frags[i]);
5260bad43ca8SEric Dumazet 
5261bad43ca8SEric Dumazet 	to->truesize += delta;
5262bad43ca8SEric Dumazet 	to->len += len;
5263bad43ca8SEric Dumazet 	to->data_len += len;
5264bad43ca8SEric Dumazet 
5265bad43ca8SEric Dumazet 	*delta_truesize = delta;
5266bad43ca8SEric Dumazet 	return true;
5267bad43ca8SEric Dumazet }
5268bad43ca8SEric Dumazet EXPORT_SYMBOL(skb_try_coalesce);
5269621e84d6SNicolas Dichtel 
5270621e84d6SNicolas Dichtel /**
52718b27f277SNicolas Dichtel  * skb_scrub_packet - scrub an skb
5272621e84d6SNicolas Dichtel  *
5273621e84d6SNicolas Dichtel  * @skb: buffer to clean
52748b27f277SNicolas Dichtel  * @xnet: packet is crossing netns
5275621e84d6SNicolas Dichtel  *
52768b27f277SNicolas Dichtel  * skb_scrub_packet can be used after encapsulating or decapsulating a packet
52778b27f277SNicolas Dichtel  * into/from a tunnel. Some information has to be cleared during these
52788b27f277SNicolas Dichtel  * operations.
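 *
 * A typical tunnel-receive call looks like this (a sketch modelled on
 * ip_tunnel_rcv(); the tunnel pointer is illustrative):
 *
 *	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
 *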
52798b27f277SNicolas Dichtel * skb_scrub_packet can also be used to clean a skb before injecting it in 52808b27f277SNicolas Dichtel * another namespace (@xnet == true). We have to clear all information in the 52818b27f277SNicolas Dichtel * skb that could impact namespace isolation. 5282621e84d6SNicolas Dichtel */ 52838b27f277SNicolas Dichtel void skb_scrub_packet(struct sk_buff *skb, bool xnet) 5284621e84d6SNicolas Dichtel { 5285621e84d6SNicolas Dichtel skb->pkt_type = PACKET_HOST; 5286621e84d6SNicolas Dichtel skb->skb_iif = 0; 528760ff7467SWANG Cong skb->ignore_df = 0; 5288621e84d6SNicolas Dichtel skb_dst_drop(skb); 5289174e2381SFlorian Westphal skb_ext_reset(skb); 5290895b5c9fSFlorian Westphal nf_reset_ct(skb); 5291621e84d6SNicolas Dichtel nf_reset_trace(skb); 5292213dd74aSHerbert Xu 52936f9a5069SPetr Machata #ifdef CONFIG_NET_SWITCHDEV 52946f9a5069SPetr Machata skb->offload_fwd_mark = 0; 5295875e8939SIdo Schimmel skb->offload_l3_fwd_mark = 0; 52966f9a5069SPetr Machata #endif 52976f9a5069SPetr Machata 5298213dd74aSHerbert Xu if (!xnet) 5299213dd74aSHerbert Xu return; 5300213dd74aSHerbert Xu 53012b5ec1a5SYe Yin ipvs_reset(skb); 5302213dd74aSHerbert Xu skb->mark = 0; 5303c47d8c2fSJesus Sanchez-Palencia skb->tstamp = 0; 5304621e84d6SNicolas Dichtel } 5305621e84d6SNicolas Dichtel EXPORT_SYMBOL_GPL(skb_scrub_packet); 5306de960aa9SFlorian Westphal 5307de960aa9SFlorian Westphal /** 5308de960aa9SFlorian Westphal * skb_gso_transport_seglen - Return length of individual segments of a gso packet 5309de960aa9SFlorian Westphal * 5310de960aa9SFlorian Westphal * @skb: GSO skb 5311de960aa9SFlorian Westphal * 5312de960aa9SFlorian Westphal * skb_gso_transport_seglen is used to determine the real size of the 5313de960aa9SFlorian Westphal * individual segments, including Layer4 headers (TCP/UDP). 5314de960aa9SFlorian Westphal * 5315de960aa9SFlorian Westphal * The MAC/L2 or network (IP, IPv6) headers are not accounted for. 5316de960aa9SFlorian Westphal */ 5317a4a77718SDaniel Axtens static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) 5318de960aa9SFlorian Westphal { 5319de960aa9SFlorian Westphal const struct skb_shared_info *shinfo = skb_shinfo(skb); 5320f993bc25SFlorian Westphal unsigned int thlen = 0; 5321f993bc25SFlorian Westphal 5322f993bc25SFlorian Westphal if (skb->encapsulation) { 5323f993bc25SFlorian Westphal thlen = skb_inner_transport_header(skb) - 5324f993bc25SFlorian Westphal skb_transport_header(skb); 5325de960aa9SFlorian Westphal 5326de960aa9SFlorian Westphal if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 5327f993bc25SFlorian Westphal thlen += inner_tcp_hdrlen(skb); 5328f993bc25SFlorian Westphal } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 5329f993bc25SFlorian Westphal thlen = tcp_hdrlen(skb); 53301dd27cdeSDaniel Axtens } else if (unlikely(skb_is_gso_sctp(skb))) { 533190017accSMarcelo Ricardo Leitner thlen = sizeof(struct sctphdr); 5332ee80d1ebSWillem de Bruijn } else if (shinfo->gso_type & SKB_GSO_UDP_L4) { 5333ee80d1ebSWillem de Bruijn thlen = sizeof(struct udphdr); 5334f993bc25SFlorian Westphal } 53356d39d589SFlorian Westphal /* UFO sets gso_size to the size of the fragmentation 53366d39d589SFlorian Westphal * payload, i.e. the size of the L4 (UDP) header is already 53376d39d589SFlorian Westphal * accounted for. 
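	 *
	 * Worked example (illustrative): a TCPv4 GSO skb carrying an MSS
	 * (gso_size) of 1448 with a 32-byte TCP header (timestamps on)
	 * yields 32 + 1448 = 1480 bytes per segment at this layer.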
53386d39d589SFlorian Westphal */ 5339f993bc25SFlorian Westphal return thlen + shinfo->gso_size; 5340de960aa9SFlorian Westphal } 5341a4a77718SDaniel Axtens 5342a4a77718SDaniel Axtens /** 5343a4a77718SDaniel Axtens * skb_gso_network_seglen - Return length of individual segments of a gso packet 5344a4a77718SDaniel Axtens * 5345a4a77718SDaniel Axtens * @skb: GSO skb 5346a4a77718SDaniel Axtens * 5347a4a77718SDaniel Axtens * skb_gso_network_seglen is used to determine the real size of the 5348a4a77718SDaniel Axtens * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). 5349a4a77718SDaniel Axtens * 5350a4a77718SDaniel Axtens * The MAC/L2 header is not accounted for. 5351a4a77718SDaniel Axtens */ 5352a4a77718SDaniel Axtens static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) 5353a4a77718SDaniel Axtens { 5354a4a77718SDaniel Axtens unsigned int hdr_len = skb_transport_header(skb) - 5355a4a77718SDaniel Axtens skb_network_header(skb); 5356a4a77718SDaniel Axtens 5357a4a77718SDaniel Axtens return hdr_len + skb_gso_transport_seglen(skb); 5358a4a77718SDaniel Axtens } 5359a4a77718SDaniel Axtens 5360a4a77718SDaniel Axtens /** 5361a4a77718SDaniel Axtens * skb_gso_mac_seglen - Return length of individual segments of a gso packet 5362a4a77718SDaniel Axtens * 5363a4a77718SDaniel Axtens * @skb: GSO skb 5364a4a77718SDaniel Axtens * 5365a4a77718SDaniel Axtens * skb_gso_mac_seglen is used to determine the real size of the 5366a4a77718SDaniel Axtens * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 5367a4a77718SDaniel Axtens * headers (TCP/UDP). 5368a4a77718SDaniel Axtens */ 5369a4a77718SDaniel Axtens static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) 5370a4a77718SDaniel Axtens { 5371a4a77718SDaniel Axtens unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 5372a4a77718SDaniel Axtens 5373a4a77718SDaniel Axtens return hdr_len + skb_gso_transport_seglen(skb); 5374a4a77718SDaniel Axtens } 53750d5501c1SVlad Yasevich 5376ae7ef81eSMarcelo Ricardo Leitner /** 53772b16f048SDaniel Axtens * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS 53782b16f048SDaniel Axtens * 53792b16f048SDaniel Axtens * There are a couple of instances where we have a GSO skb, and we 53802b16f048SDaniel Axtens * want to determine what size it would be after it is segmented. 53812b16f048SDaniel Axtens * 53822b16f048SDaniel Axtens * We might want to check: 53832b16f048SDaniel Axtens * - L3+L4+payload size (e.g. IP forwarding) 53842b16f048SDaniel Axtens * - L2+L3+L4+payload size (e.g. sanity check before passing to driver) 53852b16f048SDaniel Axtens * 53862b16f048SDaniel Axtens * This is a helper to do that correctly considering GSO_BY_FRAGS. 53872b16f048SDaniel Axtens * 538849682bfaSMathieu Malaterre * @skb: GSO skb 538949682bfaSMathieu Malaterre * 53902b16f048SDaniel Axtens * @seg_len: The segmented length (from skb_gso_*_seglen). In the 53912b16f048SDaniel Axtens * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS]. 53922b16f048SDaniel Axtens * 53932b16f048SDaniel Axtens * @max_len: The maximum permissible length. 53942b16f048SDaniel Axtens * 53952b16f048SDaniel Axtens * Returns true if the segmented length <= max length. 
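 *
 * Illustrative forwarding-path use (a sketch; the IPv4 forwarding MTU
 * check is built on the helper below):
 *
 *	if (skb_is_gso(skb) &&
 *	    !skb_gso_validate_network_len(skb, mtu))
 *		goto reply_frag_needed;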
53962b16f048SDaniel Axtens */ 53972b16f048SDaniel Axtens static inline bool skb_gso_size_check(const struct sk_buff *skb, 53982b16f048SDaniel Axtens unsigned int seg_len, 53992b16f048SDaniel Axtens unsigned int max_len) { 54002b16f048SDaniel Axtens const struct skb_shared_info *shinfo = skb_shinfo(skb); 54012b16f048SDaniel Axtens const struct sk_buff *iter; 54022b16f048SDaniel Axtens 54032b16f048SDaniel Axtens if (shinfo->gso_size != GSO_BY_FRAGS) 54042b16f048SDaniel Axtens return seg_len <= max_len; 54052b16f048SDaniel Axtens 54062b16f048SDaniel Axtens /* Undo this so we can re-use header sizes */ 54072b16f048SDaniel Axtens seg_len -= GSO_BY_FRAGS; 54082b16f048SDaniel Axtens 54092b16f048SDaniel Axtens skb_walk_frags(skb, iter) { 54102b16f048SDaniel Axtens if (seg_len + skb_headlen(iter) > max_len) 54112b16f048SDaniel Axtens return false; 54122b16f048SDaniel Axtens } 54132b16f048SDaniel Axtens 54142b16f048SDaniel Axtens return true; 54152b16f048SDaniel Axtens } 54162b16f048SDaniel Axtens 54172b16f048SDaniel Axtens /** 5418779b7931SDaniel Axtens * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU? 5419ae7ef81eSMarcelo Ricardo Leitner * 5420ae7ef81eSMarcelo Ricardo Leitner * @skb: GSO skb 542176f21b99SDavid S. Miller * @mtu: MTU to validate against 5422ae7ef81eSMarcelo Ricardo Leitner * 5423779b7931SDaniel Axtens * skb_gso_validate_network_len validates if a given skb will fit a 5424779b7931SDaniel Axtens * wanted MTU once split. It considers L3 headers, L4 headers, and the 5425779b7931SDaniel Axtens * payload. 5426ae7ef81eSMarcelo Ricardo Leitner */ 5427779b7931SDaniel Axtens bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) 5428ae7ef81eSMarcelo Ricardo Leitner { 54292b16f048SDaniel Axtens return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); 5430ae7ef81eSMarcelo Ricardo Leitner } 5431779b7931SDaniel Axtens EXPORT_SYMBOL_GPL(skb_gso_validate_network_len); 5432ae7ef81eSMarcelo Ricardo Leitner 54332b16f048SDaniel Axtens /** 54342b16f048SDaniel Axtens * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? 54352b16f048SDaniel Axtens * 54362b16f048SDaniel Axtens * @skb: GSO skb 54372b16f048SDaniel Axtens * @len: length to validate against 54382b16f048SDaniel Axtens * 54392b16f048SDaniel Axtens * skb_gso_validate_mac_len validates if a given skb will fit a wanted 54402b16f048SDaniel Axtens * length once split, including L2, L3 and L4 headers and the payload. 
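 *
 * For instance (a sketch in the spirit of the TBF qdisc): a shaper with
 * a hard maximum frame size can only accept and software-segment a GSO
 * skb when
 *
 *	skb_gso_validate_mac_len(skb, max_size)
 *
 * holds, since every resulting frame then fits within max_size bytes.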
54412b16f048SDaniel Axtens */ 54422b16f048SDaniel Axtens bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len) 54432b16f048SDaniel Axtens { 54442b16f048SDaniel Axtens return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len); 54452b16f048SDaniel Axtens } 54462b16f048SDaniel Axtens EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len); 54472b16f048SDaniel Axtens 54480d5501c1SVlad Yasevich static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 54490d5501c1SVlad Yasevich { 5450d85e8be2SYuya Kusakabe int mac_len, meta_len; 5451d85e8be2SYuya Kusakabe void *meta; 54524bbb3e0eSToshiaki Makita 54530d5501c1SVlad Yasevich if (skb_cow(skb, skb_headroom(skb)) < 0) { 54540d5501c1SVlad Yasevich kfree_skb(skb); 54550d5501c1SVlad Yasevich return NULL; 54560d5501c1SVlad Yasevich } 54570d5501c1SVlad Yasevich 54584bbb3e0eSToshiaki Makita mac_len = skb->data - skb_mac_header(skb); 5459ae474573SToshiaki Makita if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { 54604bbb3e0eSToshiaki Makita memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), 54614bbb3e0eSToshiaki Makita mac_len - VLAN_HLEN - ETH_TLEN); 5462ae474573SToshiaki Makita } 5463d85e8be2SYuya Kusakabe 5464d85e8be2SYuya Kusakabe meta_len = skb_metadata_len(skb); 5465d85e8be2SYuya Kusakabe if (meta_len) { 5466d85e8be2SYuya Kusakabe meta = skb_metadata_end(skb) - meta_len; 5467d85e8be2SYuya Kusakabe memmove(meta + VLAN_HLEN, meta, meta_len); 5468d85e8be2SYuya Kusakabe } 5469d85e8be2SYuya Kusakabe 54700d5501c1SVlad Yasevich skb->mac_header += VLAN_HLEN; 54710d5501c1SVlad Yasevich return skb; 54720d5501c1SVlad Yasevich } 54730d5501c1SVlad Yasevich 54740d5501c1SVlad Yasevich struct sk_buff *skb_vlan_untag(struct sk_buff *skb) 54750d5501c1SVlad Yasevich { 54760d5501c1SVlad Yasevich struct vlan_hdr *vhdr; 54770d5501c1SVlad Yasevich u16 vlan_tci; 54780d5501c1SVlad Yasevich 5479df8a39deSJiri Pirko if (unlikely(skb_vlan_tag_present(skb))) { 54800d5501c1SVlad Yasevich /* vlan_tci is already set-up so leave this for another time */ 54810d5501c1SVlad Yasevich return skb; 54820d5501c1SVlad Yasevich } 54830d5501c1SVlad Yasevich 54840d5501c1SVlad Yasevich skb = skb_share_check(skb, GFP_ATOMIC); 54850d5501c1SVlad Yasevich if (unlikely(!skb)) 54860d5501c1SVlad Yasevich goto err_free; 548755eff0ebSMiaohe Lin /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). 
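 * VLAN_HLEN is 4, so the pull below asks for six linear bytes: the tag
 * itself plus the 16-bit encapsulated ethertype/length field that
 * vlan_set_encap_proto() reads.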
*/ 548855eff0ebSMiaohe Lin if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) 54890d5501c1SVlad Yasevich goto err_free; 54900d5501c1SVlad Yasevich 54910d5501c1SVlad Yasevich vhdr = (struct vlan_hdr *)skb->data; 54920d5501c1SVlad Yasevich vlan_tci = ntohs(vhdr->h_vlan_TCI); 54930d5501c1SVlad Yasevich __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); 54940d5501c1SVlad Yasevich 54950d5501c1SVlad Yasevich skb_pull_rcsum(skb, VLAN_HLEN); 54960d5501c1SVlad Yasevich vlan_set_encap_proto(skb, vhdr); 54970d5501c1SVlad Yasevich 54980d5501c1SVlad Yasevich skb = skb_reorder_vlan_header(skb); 54990d5501c1SVlad Yasevich if (unlikely(!skb)) 55000d5501c1SVlad Yasevich goto err_free; 55010d5501c1SVlad Yasevich 55020d5501c1SVlad Yasevich skb_reset_network_header(skb); 55038be33ecfSAlexander Lobakin if (!skb_transport_header_was_set(skb)) 55040d5501c1SVlad Yasevich skb_reset_transport_header(skb); 55050d5501c1SVlad Yasevich skb_reset_mac_len(skb); 55060d5501c1SVlad Yasevich 55070d5501c1SVlad Yasevich return skb; 55080d5501c1SVlad Yasevich 55090d5501c1SVlad Yasevich err_free: 55100d5501c1SVlad Yasevich kfree_skb(skb); 55110d5501c1SVlad Yasevich return NULL; 55120d5501c1SVlad Yasevich } 55130d5501c1SVlad Yasevich EXPORT_SYMBOL(skb_vlan_untag); 55142e4e4410SEric Dumazet 5515e2195121SJiri Pirko int skb_ensure_writable(struct sk_buff *skb, int write_len) 5516e2195121SJiri Pirko { 5517e2195121SJiri Pirko if (!pskb_may_pull(skb, write_len)) 5518e2195121SJiri Pirko return -ENOMEM; 5519e2195121SJiri Pirko 5520e2195121SJiri Pirko if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) 5521e2195121SJiri Pirko return 0; 5522e2195121SJiri Pirko 5523e2195121SJiri Pirko return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 5524e2195121SJiri Pirko } 5525e2195121SJiri Pirko EXPORT_SYMBOL(skb_ensure_writable); 5526e2195121SJiri Pirko 5527bfca4c52SShmulik Ladkani /* remove VLAN header from packet and update csum accordingly. 
5528bfca4c52SShmulik Ladkani * expects a non skb_vlan_tag_present skb with a vlan tag payload 5529bfca4c52SShmulik Ladkani */ 5530bfca4c52SShmulik Ladkani int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) 553193515d53SJiri Pirko { 553293515d53SJiri Pirko struct vlan_hdr *vhdr; 5533b6a79208SShmulik Ladkani int offset = skb->data - skb_mac_header(skb); 553493515d53SJiri Pirko int err; 553593515d53SJiri Pirko 5536b6a79208SShmulik Ladkani if (WARN_ONCE(offset, 5537b6a79208SShmulik Ladkani "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", 5538b6a79208SShmulik Ladkani offset)) { 5539b6a79208SShmulik Ladkani return -EINVAL; 5540b6a79208SShmulik Ladkani } 5541b6a79208SShmulik Ladkani 554293515d53SJiri Pirko err = skb_ensure_writable(skb, VLAN_ETH_HLEN); 554393515d53SJiri Pirko if (unlikely(err)) 5544b6a79208SShmulik Ladkani return err; 554593515d53SJiri Pirko 554693515d53SJiri Pirko skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 554793515d53SJiri Pirko 554893515d53SJiri Pirko vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); 554993515d53SJiri Pirko *vlan_tci = ntohs(vhdr->h_vlan_TCI); 555093515d53SJiri Pirko 555193515d53SJiri Pirko memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); 555293515d53SJiri Pirko __skb_pull(skb, VLAN_HLEN); 555393515d53SJiri Pirko 555493515d53SJiri Pirko vlan_set_encap_proto(skb, vhdr); 555593515d53SJiri Pirko skb->mac_header += VLAN_HLEN; 555693515d53SJiri Pirko 555793515d53SJiri Pirko if (skb_network_offset(skb) < ETH_HLEN) 555893515d53SJiri Pirko skb_set_network_header(skb, ETH_HLEN); 555993515d53SJiri Pirko 556093515d53SJiri Pirko skb_reset_mac_len(skb); 556193515d53SJiri Pirko 556293515d53SJiri Pirko return err; 556393515d53SJiri Pirko } 5564bfca4c52SShmulik Ladkani EXPORT_SYMBOL(__skb_vlan_pop); 556593515d53SJiri Pirko 5566b6a79208SShmulik Ladkani /* Pop a vlan tag either from hwaccel or from payload. 5567b6a79208SShmulik Ladkani * Expects skb->data at mac header. 5568b6a79208SShmulik Ladkani */ 556993515d53SJiri Pirko int skb_vlan_pop(struct sk_buff *skb) 557093515d53SJiri Pirko { 557193515d53SJiri Pirko u16 vlan_tci; 557293515d53SJiri Pirko __be16 vlan_proto; 557393515d53SJiri Pirko int err; 557493515d53SJiri Pirko 5575df8a39deSJiri Pirko if (likely(skb_vlan_tag_present(skb))) { 5576b1817524SMichał Mirosław __vlan_hwaccel_clear_tag(skb); 557793515d53SJiri Pirko } else { 5578ecf4ee41SShmulik Ladkani if (unlikely(!eth_type_vlan(skb->protocol))) 557993515d53SJiri Pirko return 0; 558093515d53SJiri Pirko 558193515d53SJiri Pirko err = __skb_vlan_pop(skb, &vlan_tci); 558293515d53SJiri Pirko if (err) 558393515d53SJiri Pirko return err; 558493515d53SJiri Pirko } 558593515d53SJiri Pirko /* move next vlan tag to hw accel tag */ 5586ecf4ee41SShmulik Ladkani if (likely(!eth_type_vlan(skb->protocol))) 558793515d53SJiri Pirko return 0; 558893515d53SJiri Pirko 558993515d53SJiri Pirko vlan_proto = skb->protocol; 559093515d53SJiri Pirko err = __skb_vlan_pop(skb, &vlan_tci); 559193515d53SJiri Pirko if (unlikely(err)) 559293515d53SJiri Pirko return err; 559393515d53SJiri Pirko 559493515d53SJiri Pirko __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 559593515d53SJiri Pirko return 0; 559693515d53SJiri Pirko } 559793515d53SJiri Pirko EXPORT_SYMBOL(skb_vlan_pop); 559893515d53SJiri Pirko 5599b6a79208SShmulik Ladkani /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present). 5600b6a79208SShmulik Ladkani * Expects skb->data at mac header. 
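 *
 * Typical call (a sketch, in the style of the vlan manipulation
 * actions; vid is the caller's tag value):
 *
 *	err = skb_vlan_push(skb, htons(ETH_P_8021Q), vid);
 *	if (err)
 *		goto drop;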
5601b6a79208SShmulik Ladkani */ 560293515d53SJiri Pirko int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) 560393515d53SJiri Pirko { 5604df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) { 5605b6a79208SShmulik Ladkani int offset = skb->data - skb_mac_header(skb); 560693515d53SJiri Pirko int err; 560793515d53SJiri Pirko 5608b6a79208SShmulik Ladkani if (WARN_ONCE(offset, 5609b6a79208SShmulik Ladkani "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", 5610b6a79208SShmulik Ladkani offset)) { 5611b6a79208SShmulik Ladkani return -EINVAL; 5612b6a79208SShmulik Ladkani } 5613b6a79208SShmulik Ladkani 561493515d53SJiri Pirko err = __vlan_insert_tag(skb, skb->vlan_proto, 5615df8a39deSJiri Pirko skb_vlan_tag_get(skb)); 5616b6a79208SShmulik Ladkani if (err) 561793515d53SJiri Pirko return err; 56189241e2dfSDaniel Borkmann 561993515d53SJiri Pirko skb->protocol = skb->vlan_proto; 562093515d53SJiri Pirko skb->mac_len += VLAN_HLEN; 562193515d53SJiri Pirko 56226b83d28aSDaniel Borkmann skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 562393515d53SJiri Pirko } 562493515d53SJiri Pirko __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 562593515d53SJiri Pirko return 0; 562693515d53SJiri Pirko } 562793515d53SJiri Pirko EXPORT_SYMBOL(skb_vlan_push); 562893515d53SJiri Pirko 562919fbcb36SGuillaume Nault /** 563019fbcb36SGuillaume Nault * skb_eth_pop() - Drop the Ethernet header at the head of a packet 563119fbcb36SGuillaume Nault * 563219fbcb36SGuillaume Nault * @skb: Socket buffer to modify 563319fbcb36SGuillaume Nault * 563419fbcb36SGuillaume Nault * Drop the Ethernet header of @skb. 563519fbcb36SGuillaume Nault * 563619fbcb36SGuillaume Nault * Expects that skb->data points to the mac header and that no VLAN tags are 563719fbcb36SGuillaume Nault * present. 563819fbcb36SGuillaume Nault * 563919fbcb36SGuillaume Nault * Returns 0 on success, -errno otherwise. 564019fbcb36SGuillaume Nault */ 564119fbcb36SGuillaume Nault int skb_eth_pop(struct sk_buff *skb) 564219fbcb36SGuillaume Nault { 564319fbcb36SGuillaume Nault if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || 564419fbcb36SGuillaume Nault skb_network_offset(skb) < ETH_HLEN) 564519fbcb36SGuillaume Nault return -EPROTO; 564619fbcb36SGuillaume Nault 564719fbcb36SGuillaume Nault skb_pull_rcsum(skb, ETH_HLEN); 564819fbcb36SGuillaume Nault skb_reset_mac_header(skb); 564919fbcb36SGuillaume Nault skb_reset_mac_len(skb); 565019fbcb36SGuillaume Nault 565119fbcb36SGuillaume Nault return 0; 565219fbcb36SGuillaume Nault } 565319fbcb36SGuillaume Nault EXPORT_SYMBOL(skb_eth_pop); 565419fbcb36SGuillaume Nault 565519fbcb36SGuillaume Nault /** 565619fbcb36SGuillaume Nault * skb_eth_push() - Add a new Ethernet header at the head of a packet 565719fbcb36SGuillaume Nault * 565819fbcb36SGuillaume Nault * @skb: Socket buffer to modify 565919fbcb36SGuillaume Nault * @dst: Destination MAC address of the new header 566019fbcb36SGuillaume Nault * @src: Source MAC address of the new header 566119fbcb36SGuillaume Nault * 566219fbcb36SGuillaume Nault * Prepend @skb with a new Ethernet header. 566319fbcb36SGuillaume Nault * 566419fbcb36SGuillaume Nault * Expects that skb->data points to the mac header, which must be empty. 566519fbcb36SGuillaume Nault * 566619fbcb36SGuillaume Nault * Returns 0 on success, -errno otherwise. 
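 *
 * Sketch of a pop/push round trip (illustrative, in the style of the
 * Ethernet pop/push datapath actions; new_dst and new_src are the
 * caller's addresses):
 *
 *	if (skb_eth_pop(skb))
 *		goto drop;
 *	...rewrite or re-encapsulate...
 *	if (skb_eth_push(skb, new_dst, new_src))
 *		goto drop;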
566719fbcb36SGuillaume Nault */ 566819fbcb36SGuillaume Nault int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, 566919fbcb36SGuillaume Nault const unsigned char *src) 567019fbcb36SGuillaume Nault { 567119fbcb36SGuillaume Nault struct ethhdr *eth; 567219fbcb36SGuillaume Nault int err; 567319fbcb36SGuillaume Nault 567419fbcb36SGuillaume Nault if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) 567519fbcb36SGuillaume Nault return -EPROTO; 567619fbcb36SGuillaume Nault 567719fbcb36SGuillaume Nault err = skb_cow_head(skb, sizeof(*eth)); 567819fbcb36SGuillaume Nault if (err < 0) 567919fbcb36SGuillaume Nault return err; 568019fbcb36SGuillaume Nault 568119fbcb36SGuillaume Nault skb_push(skb, sizeof(*eth)); 568219fbcb36SGuillaume Nault skb_reset_mac_header(skb); 568319fbcb36SGuillaume Nault skb_reset_mac_len(skb); 568419fbcb36SGuillaume Nault 568519fbcb36SGuillaume Nault eth = eth_hdr(skb); 568619fbcb36SGuillaume Nault ether_addr_copy(eth->h_dest, dst); 568719fbcb36SGuillaume Nault ether_addr_copy(eth->h_source, src); 568819fbcb36SGuillaume Nault eth->h_proto = skb->protocol; 568919fbcb36SGuillaume Nault 569019fbcb36SGuillaume Nault skb_postpush_rcsum(skb, eth, sizeof(*eth)); 569119fbcb36SGuillaume Nault 569219fbcb36SGuillaume Nault return 0; 569319fbcb36SGuillaume Nault } 569419fbcb36SGuillaume Nault EXPORT_SYMBOL(skb_eth_push); 569519fbcb36SGuillaume Nault 56968822e270SJohn Hurley /* Update the ethertype of hdr and the skb csum value if required. */ 56978822e270SJohn Hurley static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, 56988822e270SJohn Hurley __be16 ethertype) 56998822e270SJohn Hurley { 57008822e270SJohn Hurley if (skb->ip_summed == CHECKSUM_COMPLETE) { 57018822e270SJohn Hurley __be16 diff[] = { ~hdr->h_proto, ethertype }; 57028822e270SJohn Hurley 57038822e270SJohn Hurley skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 57048822e270SJohn Hurley } 57058822e270SJohn Hurley 57068822e270SJohn Hurley hdr->h_proto = ethertype; 57078822e270SJohn Hurley } 57088822e270SJohn Hurley 57098822e270SJohn Hurley /** 5710e7dbfed1SMartin Varghese * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of 5711e7dbfed1SMartin Varghese * the packet 57128822e270SJohn Hurley * 57138822e270SJohn Hurley * @skb: buffer 57148822e270SJohn Hurley * @mpls_lse: MPLS label stack entry to push 57158822e270SJohn Hurley * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) 5716fa4e0f88SDavide Caratti * @mac_len: length of the MAC header 5717e7dbfed1SMartin Varghese * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is 5718e7dbfed1SMartin Varghese * ethernet 57198822e270SJohn Hurley * 57208822e270SJohn Hurley * Expects skb->data at mac header. 57218822e270SJohn Hurley * 57228822e270SJohn Hurley * Returns 0 on success, -errno otherwise. 57238822e270SJohn Hurley */ 5724fa4e0f88SDavide Caratti int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, 5725d04ac224SMartin Varghese int mac_len, bool ethernet) 57268822e270SJohn Hurley { 57278822e270SJohn Hurley struct mpls_shim_hdr *lse; 57288822e270SJohn Hurley int err; 57298822e270SJohn Hurley 57308822e270SJohn Hurley if (unlikely(!eth_p_mpls(mpls_proto))) 57318822e270SJohn Hurley return -EINVAL; 57328822e270SJohn Hurley 57338822e270SJohn Hurley /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. 
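 *
 * Callers build @mpls_lse themselves; e.g. label 100, bottom-of-stack,
 * TTL 64 encodes (using the masks/shifts from uapi/linux/mpls.h, shown
 * for illustration) as
 *
 *	cpu_to_be32((100 << MPLS_LS_LABEL_SHIFT) |
 *		    MPLS_LS_S_MASK | (64 << MPLS_LS_TTL_SHIFT));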
*/ 57348822e270SJohn Hurley if (skb->encapsulation) 57358822e270SJohn Hurley return -EINVAL; 57368822e270SJohn Hurley 57378822e270SJohn Hurley err = skb_cow_head(skb, MPLS_HLEN); 57388822e270SJohn Hurley if (unlikely(err)) 57398822e270SJohn Hurley return err; 57408822e270SJohn Hurley 57418822e270SJohn Hurley if (!skb->inner_protocol) { 5742e7dbfed1SMartin Varghese skb_set_inner_network_header(skb, skb_network_offset(skb)); 57438822e270SJohn Hurley skb_set_inner_protocol(skb, skb->protocol); 57448822e270SJohn Hurley } 57458822e270SJohn Hurley 57468822e270SJohn Hurley skb_push(skb, MPLS_HLEN); 57478822e270SJohn Hurley memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), 5748fa4e0f88SDavide Caratti mac_len); 57498822e270SJohn Hurley skb_reset_mac_header(skb); 5750fa4e0f88SDavide Caratti skb_set_network_header(skb, mac_len); 5751e7dbfed1SMartin Varghese skb_reset_mac_len(skb); 57528822e270SJohn Hurley 57538822e270SJohn Hurley lse = mpls_hdr(skb); 57548822e270SJohn Hurley lse->label_stack_entry = mpls_lse; 57558822e270SJohn Hurley skb_postpush_rcsum(skb, lse, MPLS_HLEN); 57568822e270SJohn Hurley 57574296adc3SGuillaume Nault if (ethernet && mac_len >= ETH_HLEN) 57588822e270SJohn Hurley skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); 57598822e270SJohn Hurley skb->protocol = mpls_proto; 57608822e270SJohn Hurley 57618822e270SJohn Hurley return 0; 57628822e270SJohn Hurley } 57638822e270SJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_push); 57648822e270SJohn Hurley 57652e4e4410SEric Dumazet /** 5766ed246ceeSJohn Hurley * skb_mpls_pop() - pop the outermost MPLS header 5767ed246ceeSJohn Hurley * 5768ed246ceeSJohn Hurley * @skb: buffer 5769ed246ceeSJohn Hurley * @next_proto: ethertype of header after popped MPLS header 5770fa4e0f88SDavide Caratti * @mac_len: length of the MAC header 577176f99f98SMartin Varghese * @ethernet: flag to indicate if the packet is ethernet 5772ed246ceeSJohn Hurley * 5773ed246ceeSJohn Hurley * Expects skb->data at mac header. 5774ed246ceeSJohn Hurley * 5775ed246ceeSJohn Hurley * Returns 0 on success, -errno otherwise. 5776ed246ceeSJohn Hurley */ 5777040b5cfbSMartin Varghese int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, 5778040b5cfbSMartin Varghese bool ethernet) 5779ed246ceeSJohn Hurley { 5780ed246ceeSJohn Hurley int err; 5781ed246ceeSJohn Hurley 5782ed246ceeSJohn Hurley if (unlikely(!eth_p_mpls(skb->protocol))) 5783dedc5a08SDavide Caratti return 0; 5784ed246ceeSJohn Hurley 5785fa4e0f88SDavide Caratti err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); 5786ed246ceeSJohn Hurley if (unlikely(err)) 5787ed246ceeSJohn Hurley return err; 5788ed246ceeSJohn Hurley 5789ed246ceeSJohn Hurley skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); 5790ed246ceeSJohn Hurley memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), 5791fa4e0f88SDavide Caratti mac_len); 5792ed246ceeSJohn Hurley 5793ed246ceeSJohn Hurley __skb_pull(skb, MPLS_HLEN); 5794ed246ceeSJohn Hurley skb_reset_mac_header(skb); 5795fa4e0f88SDavide Caratti skb_set_network_header(skb, mac_len); 5796ed246ceeSJohn Hurley 57974296adc3SGuillaume Nault if (ethernet && mac_len >= ETH_HLEN) { 5798ed246ceeSJohn Hurley struct ethhdr *hdr; 5799ed246ceeSJohn Hurley 5800ed246ceeSJohn Hurley /* use mpls_hdr() to get ethertype to account for VLANs. 
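 * In other words, stepping back ETH_HLEN from the network header makes
 * h_proto land on the two bytes immediately preceding the payload, the
 * innermost ethertype, however many VLAN tags precede it.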
*/ 5801ed246ceeSJohn Hurley hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); 5802ed246ceeSJohn Hurley skb_mod_eth_type(skb, hdr, next_proto); 5803ed246ceeSJohn Hurley } 5804ed246ceeSJohn Hurley skb->protocol = next_proto; 5805ed246ceeSJohn Hurley 5806ed246ceeSJohn Hurley return 0; 5807ed246ceeSJohn Hurley } 5808ed246ceeSJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_pop); 5809ed246ceeSJohn Hurley 5810ed246ceeSJohn Hurley /** 5811d27cf5c5SJohn Hurley * skb_mpls_update_lse() - modify outermost MPLS header and update csum 5812d27cf5c5SJohn Hurley * 5813d27cf5c5SJohn Hurley * @skb: buffer 5814d27cf5c5SJohn Hurley * @mpls_lse: new MPLS label stack entry to update to 5815d27cf5c5SJohn Hurley * 5816d27cf5c5SJohn Hurley * Expects skb->data at mac header. 5817d27cf5c5SJohn Hurley * 5818d27cf5c5SJohn Hurley * Returns 0 on success, -errno otherwise. 5819d27cf5c5SJohn Hurley */ 5820d27cf5c5SJohn Hurley int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) 5821d27cf5c5SJohn Hurley { 5822d27cf5c5SJohn Hurley int err; 5823d27cf5c5SJohn Hurley 5824d27cf5c5SJohn Hurley if (unlikely(!eth_p_mpls(skb->protocol))) 5825d27cf5c5SJohn Hurley return -EINVAL; 5826d27cf5c5SJohn Hurley 5827d27cf5c5SJohn Hurley err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); 5828d27cf5c5SJohn Hurley if (unlikely(err)) 5829d27cf5c5SJohn Hurley return err; 5830d27cf5c5SJohn Hurley 5831d27cf5c5SJohn Hurley if (skb->ip_summed == CHECKSUM_COMPLETE) { 5832d27cf5c5SJohn Hurley __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; 5833d27cf5c5SJohn Hurley 5834d27cf5c5SJohn Hurley skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 5835d27cf5c5SJohn Hurley } 5836d27cf5c5SJohn Hurley 5837d27cf5c5SJohn Hurley mpls_hdr(skb)->label_stack_entry = mpls_lse; 5838d27cf5c5SJohn Hurley 5839d27cf5c5SJohn Hurley return 0; 5840d27cf5c5SJohn Hurley } 5841d27cf5c5SJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_update_lse); 5842d27cf5c5SJohn Hurley 5843d27cf5c5SJohn Hurley /** 58442a2ea508SJohn Hurley * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header 58452a2ea508SJohn Hurley * 58462a2ea508SJohn Hurley * @skb: buffer 58472a2ea508SJohn Hurley * 58482a2ea508SJohn Hurley * Expects skb->data at mac header. 58492a2ea508SJohn Hurley * 58502a2ea508SJohn Hurley * Returns 0 on success, -errno otherwise. 
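 * Note: a TTL that would reach zero is treated as an error; the label
 * stack entry is left unmodified and -EINVAL is returned, so that callers
 * can drop the packet.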
58512a2ea508SJohn Hurley */
58522a2ea508SJohn Hurley int skb_mpls_dec_ttl(struct sk_buff *skb)
58532a2ea508SJohn Hurley {
58542a2ea508SJohn Hurley u32 lse;
58552a2ea508SJohn Hurley u8 ttl;
58562a2ea508SJohn Hurley 
58572a2ea508SJohn Hurley if (unlikely(!eth_p_mpls(skb->protocol)))
58582a2ea508SJohn Hurley return -EINVAL;
58592a2ea508SJohn Hurley 
586013de4ed9SDavide Caratti if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
586113de4ed9SDavide Caratti return -ENOMEM;
586213de4ed9SDavide Caratti 
58632a2ea508SJohn Hurley lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
58642a2ea508SJohn Hurley ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
58652a2ea508SJohn Hurley if (!--ttl)
58662a2ea508SJohn Hurley return -EINVAL;
58672a2ea508SJohn Hurley 
58682a2ea508SJohn Hurley lse &= ~MPLS_LS_TTL_MASK;
58692a2ea508SJohn Hurley lse |= ttl << MPLS_LS_TTL_SHIFT;
58702a2ea508SJohn Hurley 
58712a2ea508SJohn Hurley return skb_mpls_update_lse(skb, cpu_to_be32(lse));
58722a2ea508SJohn Hurley }
58732a2ea508SJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
58742a2ea508SJohn Hurley 
58752a2ea508SJohn Hurley /**
58762e4e4410SEric Dumazet * alloc_skb_with_frags - allocate skb with page frags
58772e4e4410SEric Dumazet *
5878de3f0d0eSMasanari Iida * @header_len: size of linear part
5879de3f0d0eSMasanari Iida * @data_len: needed length in frags
5880de3f0d0eSMasanari Iida * @max_page_order: max page order desired.
5881de3f0d0eSMasanari Iida * @errcode: pointer to error code if any
5882de3f0d0eSMasanari Iida * @gfp_mask: allocation mask
58832e4e4410SEric Dumazet *
58842e4e4410SEric Dumazet * This can be used to allocate a paged skb, given a maximal order for frags.
58852e4e4410SEric Dumazet */
58862e4e4410SEric Dumazet struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
58872e4e4410SEric Dumazet unsigned long data_len,
58882e4e4410SEric Dumazet int max_page_order,
58892e4e4410SEric Dumazet int *errcode,
58902e4e4410SEric Dumazet gfp_t gfp_mask)
58912e4e4410SEric Dumazet {
58922e4e4410SEric Dumazet int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
58932e4e4410SEric Dumazet unsigned long chunk;
58942e4e4410SEric Dumazet struct sk_buff *skb;
58952e4e4410SEric Dumazet struct page *page;
58962e4e4410SEric Dumazet int i;
58972e4e4410SEric Dumazet 
58982e4e4410SEric Dumazet *errcode = -EMSGSIZE;
58992e4e4410SEric Dumazet /* Note this test could be relaxed, if we succeed in allocating
59002e4e4410SEric Dumazet * high order pages...
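 * (npages is counted in base pages here; with order-N allocations a single
 * frag covers 1 << N pages, so data_len could still fit within
 * MAX_SKB_FRAGS frags even when npages exceeds that limit.)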
59012e4e4410SEric Dumazet */ 59022e4e4410SEric Dumazet if (npages > MAX_SKB_FRAGS) 59032e4e4410SEric Dumazet return NULL; 59042e4e4410SEric Dumazet 59052e4e4410SEric Dumazet *errcode = -ENOBUFS; 5906f8c468e8SDavid Rientjes skb = alloc_skb(header_len, gfp_mask); 59072e4e4410SEric Dumazet if (!skb) 59082e4e4410SEric Dumazet return NULL; 59092e4e4410SEric Dumazet 59102e4e4410SEric Dumazet skb->truesize += npages << PAGE_SHIFT; 59112e4e4410SEric Dumazet 59122e4e4410SEric Dumazet for (i = 0; npages > 0; i++) { 59132e4e4410SEric Dumazet int order = max_page_order; 59142e4e4410SEric Dumazet 59152e4e4410SEric Dumazet while (order) { 59162e4e4410SEric Dumazet if (npages >= 1 << order) { 5917d0164adcSMel Gorman page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | 59182e4e4410SEric Dumazet __GFP_COMP | 5919d14b56f5SMichal Hocko __GFP_NOWARN, 59202e4e4410SEric Dumazet order); 59212e4e4410SEric Dumazet if (page) 59222e4e4410SEric Dumazet goto fill_page; 59232e4e4410SEric Dumazet /* Do not retry other high order allocations */ 59242e4e4410SEric Dumazet order = 1; 59252e4e4410SEric Dumazet max_page_order = 0; 59262e4e4410SEric Dumazet } 59272e4e4410SEric Dumazet order--; 59282e4e4410SEric Dumazet } 59292e4e4410SEric Dumazet page = alloc_page(gfp_mask); 59302e4e4410SEric Dumazet if (!page) 59312e4e4410SEric Dumazet goto failure; 59322e4e4410SEric Dumazet fill_page: 59332e4e4410SEric Dumazet chunk = min_t(unsigned long, data_len, 59342e4e4410SEric Dumazet PAGE_SIZE << order); 59352e4e4410SEric Dumazet skb_fill_page_desc(skb, i, page, 0, chunk); 59362e4e4410SEric Dumazet data_len -= chunk; 59372e4e4410SEric Dumazet npages -= 1 << order; 59382e4e4410SEric Dumazet } 59392e4e4410SEric Dumazet return skb; 59402e4e4410SEric Dumazet 59412e4e4410SEric Dumazet failure: 59422e4e4410SEric Dumazet kfree_skb(skb); 59432e4e4410SEric Dumazet return NULL; 59442e4e4410SEric Dumazet } 59452e4e4410SEric Dumazet EXPORT_SYMBOL(alloc_skb_with_frags); 59466fa01ccdSSowmini Varadhan 59476fa01ccdSSowmini Varadhan /* carve out the first off bytes from skb when off < headlen */ 59486fa01ccdSSowmini Varadhan static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, 59496fa01ccdSSowmini Varadhan const int headlen, gfp_t gfp_mask) 59506fa01ccdSSowmini Varadhan { 59516fa01ccdSSowmini Varadhan int i; 59526fa01ccdSSowmini Varadhan int size = skb_end_offset(skb); 59536fa01ccdSSowmini Varadhan int new_hlen = headlen - off; 59546fa01ccdSSowmini Varadhan u8 *data; 59556fa01ccdSSowmini Varadhan 59566fa01ccdSSowmini Varadhan size = SKB_DATA_ALIGN(size); 59576fa01ccdSSowmini Varadhan 59586fa01ccdSSowmini Varadhan if (skb_pfmemalloc(skb)) 59596fa01ccdSSowmini Varadhan gfp_mask |= __GFP_MEMALLOC; 59606fa01ccdSSowmini Varadhan data = kmalloc_reserve(size + 59616fa01ccdSSowmini Varadhan SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 59626fa01ccdSSowmini Varadhan gfp_mask, NUMA_NO_NODE, NULL); 59636fa01ccdSSowmini Varadhan if (!data) 59646fa01ccdSSowmini Varadhan return -ENOMEM; 59656fa01ccdSSowmini Varadhan 59666fa01ccdSSowmini Varadhan size = SKB_WITH_OVERHEAD(ksize(data)); 59676fa01ccdSSowmini Varadhan 59686fa01ccdSSowmini Varadhan /* Copy real data, and all frags */ 59696fa01ccdSSowmini Varadhan skb_copy_from_linear_data_offset(skb, off, data, new_hlen); 59706fa01ccdSSowmini Varadhan skb->len -= off; 59716fa01ccdSSowmini Varadhan 59726fa01ccdSSowmini Varadhan memcpy((struct skb_shared_info *)(data + size), 59736fa01ccdSSowmini Varadhan skb_shinfo(skb), 59746fa01ccdSSowmini Varadhan offsetof(struct skb_shared_info, 
59756fa01ccdSSowmini Varadhan frags[skb_shinfo(skb)->nr_frags]));
59766fa01ccdSSowmini Varadhan if (skb_cloned(skb)) {
59776fa01ccdSSowmini Varadhan /* drop the old head gracefully */
59786fa01ccdSSowmini Varadhan if (skb_orphan_frags(skb, gfp_mask)) {
59796fa01ccdSSowmini Varadhan kfree(data);
59806fa01ccdSSowmini Varadhan return -ENOMEM;
59816fa01ccdSSowmini Varadhan }
59826fa01ccdSSowmini Varadhan for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
59836fa01ccdSSowmini Varadhan skb_frag_ref(skb, i);
59846fa01ccdSSowmini Varadhan if (skb_has_frag_list(skb))
59856fa01ccdSSowmini Varadhan skb_clone_fraglist(skb);
59866fa01ccdSSowmini Varadhan skb_release_data(skb);
59876fa01ccdSSowmini Varadhan } else {
59886fa01ccdSSowmini Varadhan /* we can reuse the existing refcount - all we did was
59896fa01ccdSSowmini Varadhan * relocate values
59906fa01ccdSSowmini Varadhan */
59916fa01ccdSSowmini Varadhan skb_free_head(skb);
59926fa01ccdSSowmini Varadhan }
59936fa01ccdSSowmini Varadhan 
59946fa01ccdSSowmini Varadhan skb->head = data;
59956fa01ccdSSowmini Varadhan skb->data = data;
59966fa01ccdSSowmini Varadhan skb->head_frag = 0;
59976fa01ccdSSowmini Varadhan #ifdef NET_SKBUFF_DATA_USES_OFFSET
59986fa01ccdSSowmini Varadhan skb->end = size;
59996fa01ccdSSowmini Varadhan #else
60006fa01ccdSSowmini Varadhan skb->end = skb->head + size;
60016fa01ccdSSowmini Varadhan #endif
60026fa01ccdSSowmini Varadhan skb_set_tail_pointer(skb, skb_headlen(skb));
60036fa01ccdSSowmini Varadhan skb_headers_offset_update(skb, 0);
60046fa01ccdSSowmini Varadhan skb->cloned = 0;
60056fa01ccdSSowmini Varadhan skb->hdr_len = 0;
60066fa01ccdSSowmini Varadhan skb->nohdr = 0;
60076fa01ccdSSowmini Varadhan atomic_set(&skb_shinfo(skb)->dataref, 1);
60086fa01ccdSSowmini Varadhan 
60096fa01ccdSSowmini Varadhan return 0;
60106fa01ccdSSowmini Varadhan }
60116fa01ccdSSowmini Varadhan 
60126fa01ccdSSowmini Varadhan static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
60136fa01ccdSSowmini Varadhan 
60146fa01ccdSSowmini Varadhan /* carve out the first eat bytes from skb's frag_list. May recurse into
60156fa01ccdSSowmini Varadhan * pskb_carve()
60166fa01ccdSSowmini Varadhan */
60176fa01ccdSSowmini Varadhan static int pskb_carve_frag_list(struct sk_buff *skb,
60186fa01ccdSSowmini Varadhan struct skb_shared_info *shinfo, int eat,
60196fa01ccdSSowmini Varadhan gfp_t gfp_mask)
60206fa01ccdSSowmini Varadhan {
60216fa01ccdSSowmini Varadhan struct sk_buff *list = shinfo->frag_list;
60226fa01ccdSSowmini Varadhan struct sk_buff *clone = NULL;
60236fa01ccdSSowmini Varadhan struct sk_buff *insp = NULL;
60246fa01ccdSSowmini Varadhan 
60256fa01ccdSSowmini Varadhan do {
60266fa01ccdSSowmini Varadhan if (!list) {
60276fa01ccdSSowmini Varadhan pr_err("Not enough bytes to eat. Want %d\n", eat);
60286fa01ccdSSowmini Varadhan return -EFAULT;
60296fa01ccdSSowmini Varadhan }
60306fa01ccdSSowmini Varadhan if (list->len <= eat) {
60316fa01ccdSSowmini Varadhan /* Eaten as whole. */
60326fa01ccdSSowmini Varadhan eat -= list->len;
60336fa01ccdSSowmini Varadhan list = list->next;
60346fa01ccdSSowmini Varadhan insp = list;
60356fa01ccdSSowmini Varadhan } else {
60366fa01ccdSSowmini Varadhan /* Eaten partially.
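 * The split point lands inside this skb: clone it first if it is
 * shared, so the carve below does not mutate data that other users
 * of the skb still see.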
*/ 60376fa01ccdSSowmini Varadhan if (skb_shared(list)) { 60386fa01ccdSSowmini Varadhan clone = skb_clone(list, gfp_mask); 60396fa01ccdSSowmini Varadhan if (!clone) 60406fa01ccdSSowmini Varadhan return -ENOMEM; 60416fa01ccdSSowmini Varadhan insp = list->next; 60426fa01ccdSSowmini Varadhan list = clone; 60436fa01ccdSSowmini Varadhan } else { 60446fa01ccdSSowmini Varadhan /* This may be pulled without problems. */ 60456fa01ccdSSowmini Varadhan insp = list; 60466fa01ccdSSowmini Varadhan } 60476fa01ccdSSowmini Varadhan if (pskb_carve(list, eat, gfp_mask) < 0) { 60486fa01ccdSSowmini Varadhan kfree_skb(clone); 60496fa01ccdSSowmini Varadhan return -ENOMEM; 60506fa01ccdSSowmini Varadhan } 60516fa01ccdSSowmini Varadhan break; 60526fa01ccdSSowmini Varadhan } 60536fa01ccdSSowmini Varadhan } while (eat); 60546fa01ccdSSowmini Varadhan 60556fa01ccdSSowmini Varadhan /* Free pulled out fragments. */ 60566fa01ccdSSowmini Varadhan while ((list = shinfo->frag_list) != insp) { 60576fa01ccdSSowmini Varadhan shinfo->frag_list = list->next; 60586fa01ccdSSowmini Varadhan kfree_skb(list); 60596fa01ccdSSowmini Varadhan } 60606fa01ccdSSowmini Varadhan /* And insert new clone at head. */ 60616fa01ccdSSowmini Varadhan if (clone) { 60626fa01ccdSSowmini Varadhan clone->next = list; 60636fa01ccdSSowmini Varadhan shinfo->frag_list = clone; 60646fa01ccdSSowmini Varadhan } 60656fa01ccdSSowmini Varadhan return 0; 60666fa01ccdSSowmini Varadhan } 60676fa01ccdSSowmini Varadhan 60686fa01ccdSSowmini Varadhan /* carve off first len bytes from skb. Split line (off) is in the 60696fa01ccdSSowmini Varadhan * non-linear part of skb 60706fa01ccdSSowmini Varadhan */ 60716fa01ccdSSowmini Varadhan static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, 60726fa01ccdSSowmini Varadhan int pos, gfp_t gfp_mask) 60736fa01ccdSSowmini Varadhan { 60746fa01ccdSSowmini Varadhan int i, k = 0; 60756fa01ccdSSowmini Varadhan int size = skb_end_offset(skb); 60766fa01ccdSSowmini Varadhan u8 *data; 60776fa01ccdSSowmini Varadhan const int nfrags = skb_shinfo(skb)->nr_frags; 60786fa01ccdSSowmini Varadhan struct skb_shared_info *shinfo; 60796fa01ccdSSowmini Varadhan 60806fa01ccdSSowmini Varadhan size = SKB_DATA_ALIGN(size); 60816fa01ccdSSowmini Varadhan 60826fa01ccdSSowmini Varadhan if (skb_pfmemalloc(skb)) 60836fa01ccdSSowmini Varadhan gfp_mask |= __GFP_MEMALLOC; 60846fa01ccdSSowmini Varadhan data = kmalloc_reserve(size + 60856fa01ccdSSowmini Varadhan SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 60866fa01ccdSSowmini Varadhan gfp_mask, NUMA_NO_NODE, NULL); 60876fa01ccdSSowmini Varadhan if (!data) 60886fa01ccdSSowmini Varadhan return -ENOMEM; 60896fa01ccdSSowmini Varadhan 60906fa01ccdSSowmini Varadhan size = SKB_WITH_OVERHEAD(ksize(data)); 60916fa01ccdSSowmini Varadhan 60926fa01ccdSSowmini Varadhan memcpy((struct skb_shared_info *)(data + size), 6093e3ec1e8cSMiaohe Lin skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); 60946fa01ccdSSowmini Varadhan if (skb_orphan_frags(skb, gfp_mask)) { 60956fa01ccdSSowmini Varadhan kfree(data); 60966fa01ccdSSowmini Varadhan return -ENOMEM; 60976fa01ccdSSowmini Varadhan } 60986fa01ccdSSowmini Varadhan shinfo = (struct skb_shared_info *)(data + size); 60996fa01ccdSSowmini Varadhan for (i = 0; i < nfrags; i++) { 61006fa01ccdSSowmini Varadhan int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); 61016fa01ccdSSowmini Varadhan 61026fa01ccdSSowmini Varadhan if (pos + fsize > off) { 61036fa01ccdSSowmini Varadhan shinfo->frags[k] = skb_shinfo(skb)->frags[i]; 61046fa01ccdSSowmini Varadhan 
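			/* This frag reaches past the split point, so it is
			 * kept in the new shinfo; if the split lands inside
			 * it (pos < off), its head is trimmed just below.
			 */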
61056fa01ccdSSowmini Varadhan if (pos < off) {
61066fa01ccdSSowmini Varadhan /* Split frag.
61076fa01ccdSSowmini Varadhan * We have two variants in this case:
61086fa01ccdSSowmini Varadhan * 1. Move the whole frag to the second
61096fa01ccdSSowmini Varadhan *    part, if it is possible. E.g.
61106fa01ccdSSowmini Varadhan *    this approach is mandatory for TUX,
61116fa01ccdSSowmini Varadhan *    where splitting is expensive.
61126fa01ccdSSowmini Varadhan * 2. Split the frag accurately. This is what we do here.
61136fa01ccdSSowmini Varadhan */
6114b54c9d5bSJonathan Lemon skb_frag_off_add(&shinfo->frags[0], off - pos);
61156fa01ccdSSowmini Varadhan skb_frag_size_sub(&shinfo->frags[0], off - pos);
61166fa01ccdSSowmini Varadhan }
61176fa01ccdSSowmini Varadhan skb_frag_ref(skb, i);
61186fa01ccdSSowmini Varadhan k++;
61196fa01ccdSSowmini Varadhan }
61206fa01ccdSSowmini Varadhan pos += fsize;
61216fa01ccdSSowmini Varadhan }
61226fa01ccdSSowmini Varadhan shinfo->nr_frags = k;
61236fa01ccdSSowmini Varadhan if (skb_has_frag_list(skb))
61246fa01ccdSSowmini Varadhan skb_clone_fraglist(skb);
61256fa01ccdSSowmini Varadhan 
61266fa01ccdSSowmini Varadhan /* split line is in frag list */
6127eabe8618SMiaohe Lin if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
6128eabe8618SMiaohe Lin /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
6129eabe8618SMiaohe Lin if (skb_has_frag_list(skb))
6130eabe8618SMiaohe Lin kfree_skb_list(skb_shinfo(skb)->frag_list);
6131eabe8618SMiaohe Lin kfree(data);
6132eabe8618SMiaohe Lin return -ENOMEM;
61336fa01ccdSSowmini Varadhan }
61346fa01ccdSSowmini Varadhan skb_release_data(skb);
61356fa01ccdSSowmini Varadhan 
61366fa01ccdSSowmini Varadhan skb->head = data;
61376fa01ccdSSowmini Varadhan skb->head_frag = 0;
61386fa01ccdSSowmini Varadhan skb->data = data;
61396fa01ccdSSowmini Varadhan #ifdef NET_SKBUFF_DATA_USES_OFFSET
61406fa01ccdSSowmini Varadhan skb->end = size;
61416fa01ccdSSowmini Varadhan #else
61426fa01ccdSSowmini Varadhan skb->end = skb->head + size;
61436fa01ccdSSowmini Varadhan #endif
61446fa01ccdSSowmini Varadhan skb_reset_tail_pointer(skb);
61456fa01ccdSSowmini Varadhan skb_headers_offset_update(skb, 0);
61466fa01ccdSSowmini Varadhan skb->cloned = 0;
61476fa01ccdSSowmini Varadhan skb->hdr_len = 0;
61486fa01ccdSSowmini Varadhan skb->nohdr = 0;
61496fa01ccdSSowmini Varadhan skb->len -= off;
61506fa01ccdSSowmini Varadhan skb->data_len = skb->len;
61516fa01ccdSSowmini Varadhan atomic_set(&skb_shinfo(skb)->dataref, 1);
61526fa01ccdSSowmini Varadhan return 0;
61536fa01ccdSSowmini Varadhan }
61546fa01ccdSSowmini Varadhan 
61556fa01ccdSSowmini Varadhan /* remove len bytes from the beginning of the skb */
61566fa01ccdSSowmini Varadhan static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
61576fa01ccdSSowmini Varadhan {
61586fa01ccdSSowmini Varadhan int headlen = skb_headlen(skb);
61596fa01ccdSSowmini Varadhan 
61606fa01ccdSSowmini Varadhan if (len < headlen)
61616fa01ccdSSowmini Varadhan return pskb_carve_inside_header(skb, len, headlen, gfp);
61626fa01ccdSSowmini Varadhan else
61636fa01ccdSSowmini Varadhan return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
61646fa01ccdSSowmini Varadhan }
61656fa01ccdSSowmini Varadhan 
61666fa01ccdSSowmini Varadhan /* Extract to_copy bytes starting at off from skb, and return this in
61676fa01ccdSSowmini Varadhan * a new skb
61686fa01ccdSSowmini Varadhan */
61696fa01ccdSSowmini Varadhan struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
61706fa01ccdSSowmini Varadhan int to_copy, gfp_t gfp)
61716fa01ccdSSowmini Varadhan {
61726fa01ccdSSowmini Varadhan struct sk_buff *clone = skb_clone(skb, gfp);
61736fa01ccdSSowmini Varadhan 
61746fa01ccdSSowmini Varadhan if (!clone)
61756fa01ccdSSowmini Varadhan return NULL;
61766fa01ccdSSowmini Varadhan 
61776fa01ccdSSowmini Varadhan if (pskb_carve(clone, off, gfp) < 0 ||
61786fa01ccdSSowmini Varadhan pskb_trim(clone, to_copy)) {
61796fa01ccdSSowmini Varadhan kfree_skb(clone);
61806fa01ccdSSowmini Varadhan return NULL;
61816fa01ccdSSowmini Varadhan }
61826fa01ccdSSowmini Varadhan return clone;
61836fa01ccdSSowmini Varadhan }
61846fa01ccdSSowmini Varadhan EXPORT_SYMBOL(pskb_extract);
6185c8c8b127SEric Dumazet 
6186c8c8b127SEric Dumazet /**
6187c8c8b127SEric Dumazet * skb_condense - try to get rid of fragments/frag_list if possible
6188c8c8b127SEric Dumazet * @skb: buffer
6189c8c8b127SEric Dumazet *
6190c8c8b127SEric Dumazet * Can be used to save memory before skb is added to a busy queue.
6191c8c8b127SEric Dumazet * If packet has bytes in frags and enough tail room in skb->head,
6192c8c8b127SEric Dumazet * pull all of them, so that we can free the frags right now and adjust
6193c8c8b127SEric Dumazet * truesize.
6194c8c8b127SEric Dumazet * Notes:
6195c8c8b127SEric Dumazet * We do not reallocate skb->head thus cannot fail.
6196c8c8b127SEric Dumazet * Caller must re-evaluate skb->truesize if needed.
6197c8c8b127SEric Dumazet */
6198c8c8b127SEric Dumazet void skb_condense(struct sk_buff *skb)
6199c8c8b127SEric Dumazet {
62003174fed9SEric Dumazet if (skb->data_len) {
62013174fed9SEric Dumazet if (skb->data_len > skb->end - skb->tail ||
6202c8c8b127SEric Dumazet skb_cloned(skb))
6203c8c8b127SEric Dumazet return;
6204c8c8b127SEric Dumazet 
6205c8c8b127SEric Dumazet /* Nice, we can free page frag(s) right now */
6206c8c8b127SEric Dumazet __pskb_pull_tail(skb, skb->data_len);
62073174fed9SEric Dumazet }
62083174fed9SEric Dumazet /* At this point, skb->truesize might be overestimated,
62093174fed9SEric Dumazet * because skb had a fragment, and fragments do not tell
62103174fed9SEric Dumazet * their truesize.
62113174fed9SEric Dumazet * When we pulled its content into skb->head, the fragment
62123174fed9SEric Dumazet * was freed, but __pskb_pull_tail() could not possibly
62133174fed9SEric Dumazet * adjust skb->truesize, not knowing the frag truesize.
6214c8c8b127SEric Dumazet */
6215c8c8b127SEric Dumazet skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
6216c8c8b127SEric Dumazet }
6217df5042f4SFlorian Westphal 
6218df5042f4SFlorian Westphal #ifdef CONFIG_SKB_EXTENSIONS
6219df5042f4SFlorian Westphal static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
6220df5042f4SFlorian Westphal {
6221df5042f4SFlorian Westphal return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
6222df5042f4SFlorian Westphal }
6223df5042f4SFlorian Westphal 
62248b69a803SPaolo Abeni /**
62258b69a803SPaolo Abeni * __skb_ext_alloc - allocate a new skb extensions storage
62268b69a803SPaolo Abeni *
62274930f483SFlorian Westphal * @flags: See kmalloc().
62284930f483SFlorian Westphal *
62298b69a803SPaolo Abeni * Returns the newly allocated pointer. The pointer can later be attached
62308b69a803SPaolo Abeni * to a skb via __skb_ext_set().
62318b69a803SPaolo Abeni * Note: the caller must treat the skb_ext as opaque data.
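 *
 * A minimal usage sketch (illustrative only; SKB_EXT_MPTCP stands in for
 * whatever extension id the caller actually needs):
 *
 *	struct skb_ext *ext = __skb_ext_alloc(GFP_ATOMIC);
 *
 *	if (ext)
 *		__skb_ext_set(skb, SKB_EXT_MPTCP, ext);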
62328b69a803SPaolo Abeni */ 62334930f483SFlorian Westphal struct skb_ext *__skb_ext_alloc(gfp_t flags) 6234df5042f4SFlorian Westphal { 62354930f483SFlorian Westphal struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags); 6236df5042f4SFlorian Westphal 6237df5042f4SFlorian Westphal if (new) { 6238df5042f4SFlorian Westphal memset(new->offset, 0, sizeof(new->offset)); 6239df5042f4SFlorian Westphal refcount_set(&new->refcnt, 1); 6240df5042f4SFlorian Westphal } 6241df5042f4SFlorian Westphal 6242df5042f4SFlorian Westphal return new; 6243df5042f4SFlorian Westphal } 6244df5042f4SFlorian Westphal 62454165079bSFlorian Westphal static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old, 62464165079bSFlorian Westphal unsigned int old_active) 6247df5042f4SFlorian Westphal { 6248df5042f4SFlorian Westphal struct skb_ext *new; 6249df5042f4SFlorian Westphal 6250df5042f4SFlorian Westphal if (refcount_read(&old->refcnt) == 1) 6251df5042f4SFlorian Westphal return old; 6252df5042f4SFlorian Westphal 6253df5042f4SFlorian Westphal new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC); 6254df5042f4SFlorian Westphal if (!new) 6255df5042f4SFlorian Westphal return NULL; 6256df5042f4SFlorian Westphal 6257df5042f4SFlorian Westphal memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE); 6258df5042f4SFlorian Westphal refcount_set(&new->refcnt, 1); 6259df5042f4SFlorian Westphal 62604165079bSFlorian Westphal #ifdef CONFIG_XFRM 62614165079bSFlorian Westphal if (old_active & (1 << SKB_EXT_SEC_PATH)) { 62624165079bSFlorian Westphal struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH); 62634165079bSFlorian Westphal unsigned int i; 62644165079bSFlorian Westphal 62654165079bSFlorian Westphal for (i = 0; i < sp->len; i++) 62664165079bSFlorian Westphal xfrm_state_hold(sp->xvec[i]); 62674165079bSFlorian Westphal } 62684165079bSFlorian Westphal #endif 6269df5042f4SFlorian Westphal __skb_ext_put(old); 6270df5042f4SFlorian Westphal return new; 6271df5042f4SFlorian Westphal } 6272df5042f4SFlorian Westphal 6273df5042f4SFlorian Westphal /** 62748b69a803SPaolo Abeni * __skb_ext_set - attach the specified extension storage to this skb 62758b69a803SPaolo Abeni * @skb: buffer 62768b69a803SPaolo Abeni * @id: extension id 62778b69a803SPaolo Abeni * @ext: extension storage previously allocated via __skb_ext_alloc() 62788b69a803SPaolo Abeni * 62798b69a803SPaolo Abeni * Existing extensions, if any, are cleared. 62808b69a803SPaolo Abeni * 62818b69a803SPaolo Abeni * Returns the pointer to the extension. 
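 * Note: @ext's refcount is not incremented here; the reference is handed
 * over to @skb, so the caller must not pair this call with __skb_ext_put().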
62828b69a803SPaolo Abeni */
62838b69a803SPaolo Abeni void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
62848b69a803SPaolo Abeni struct skb_ext *ext)
62858b69a803SPaolo Abeni {
62868b69a803SPaolo Abeni unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext);
62878b69a803SPaolo Abeni 
62888b69a803SPaolo Abeni skb_ext_put(skb);
62898b69a803SPaolo Abeni newlen = newoff + skb_ext_type_len[id];
62908b69a803SPaolo Abeni ext->chunks = newlen;
62918b69a803SPaolo Abeni ext->offset[id] = newoff;
62928b69a803SPaolo Abeni skb->extensions = ext;
62938b69a803SPaolo Abeni skb->active_extensions = 1 << id;
62948b69a803SPaolo Abeni return skb_ext_get_ptr(ext, id);
62958b69a803SPaolo Abeni }
62968b69a803SPaolo Abeni 
62978b69a803SPaolo Abeni /**
6298df5042f4SFlorian Westphal * skb_ext_add - allocate space for given extension, COW if needed
6299df5042f4SFlorian Westphal * @skb: buffer
6300df5042f4SFlorian Westphal * @id: extension to allocate space for
6301df5042f4SFlorian Westphal *
6302df5042f4SFlorian Westphal * Allocates enough space for the given extension.
6303df5042f4SFlorian Westphal * If the extension is already present, a pointer to that extension
6304df5042f4SFlorian Westphal * is returned.
6305df5042f4SFlorian Westphal *
6306df5042f4SFlorian Westphal * If the skb was cloned, COW applies and the returned memory can be
6307df5042f4SFlorian Westphal * modified without changing the extension space of cloned buffers.
6308df5042f4SFlorian Westphal *
6309df5042f4SFlorian Westphal * Returns pointer to the extension or NULL on allocation failure.
6310df5042f4SFlorian Westphal */
6311df5042f4SFlorian Westphal void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
6312df5042f4SFlorian Westphal {
6313df5042f4SFlorian Westphal struct skb_ext *new, *old = NULL;
6314df5042f4SFlorian Westphal unsigned int newlen, newoff;
6315df5042f4SFlorian Westphal 
6316df5042f4SFlorian Westphal if (skb->active_extensions) {
6317df5042f4SFlorian Westphal old = skb->extensions;
6318df5042f4SFlorian Westphal 
63194165079bSFlorian Westphal new = skb_ext_maybe_cow(old, skb->active_extensions);
6320df5042f4SFlorian Westphal if (!new)
6321df5042f4SFlorian Westphal return NULL;
6322df5042f4SFlorian Westphal 
6323682ec859SPaolo Abeni if (__skb_ext_exist(new, id))
6324df5042f4SFlorian Westphal goto set_active;
6325df5042f4SFlorian Westphal 
6326e94e50bdSPaolo Abeni newoff = new->chunks;
6327df5042f4SFlorian Westphal } else {
6328df5042f4SFlorian Westphal newoff = SKB_EXT_CHUNKSIZEOF(*new);
6329df5042f4SFlorian Westphal 
63304930f483SFlorian Westphal new = __skb_ext_alloc(GFP_ATOMIC);
6331df5042f4SFlorian Westphal if (!new)
6332df5042f4SFlorian Westphal return NULL;
6333df5042f4SFlorian Westphal }
6334df5042f4SFlorian Westphal 
6335df5042f4SFlorian Westphal newlen = newoff + skb_ext_type_len[id];
6336df5042f4SFlorian Westphal new->chunks = newlen;
6337df5042f4SFlorian Westphal new->offset[id] = newoff;
6338df5042f4SFlorian Westphal set_active:
6339682ec859SPaolo Abeni skb->extensions = new;
6340df5042f4SFlorian Westphal skb->active_extensions |= 1 << id;
6341df5042f4SFlorian Westphal return skb_ext_get_ptr(new, id);
6342df5042f4SFlorian Westphal }
6343df5042f4SFlorian Westphal EXPORT_SYMBOL(skb_ext_add);
6344df5042f4SFlorian Westphal 
63454165079bSFlorian Westphal #ifdef CONFIG_XFRM
63464165079bSFlorian Westphal static void skb_ext_put_sp(struct sec_path *sp)
63474165079bSFlorian Westphal {
63484165079bSFlorian Westphal unsigned int i;
63494165079bSFlorian Westphal 
63504165079bSFlorian Westphal for (i = 0; i < sp->len; i++)
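		/* drop the reference this sec path holds on each xfrm state */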
63514165079bSFlorian Westphal xfrm_state_put(sp->xvec[i]);
63524165079bSFlorian Westphal }
63534165079bSFlorian Westphal #endif
63544165079bSFlorian Westphal 
6355df5042f4SFlorian Westphal void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
6356df5042f4SFlorian Westphal {
6357df5042f4SFlorian Westphal struct skb_ext *ext = skb->extensions;
6358df5042f4SFlorian Westphal 
6359df5042f4SFlorian Westphal skb->active_extensions &= ~(1 << id);
6360df5042f4SFlorian Westphal if (skb->active_extensions == 0) {
6361df5042f4SFlorian Westphal skb->extensions = NULL;
6362df5042f4SFlorian Westphal __skb_ext_put(ext);
63634165079bSFlorian Westphal #ifdef CONFIG_XFRM
63644165079bSFlorian Westphal } else if (id == SKB_EXT_SEC_PATH &&
63654165079bSFlorian Westphal refcount_read(&ext->refcnt) == 1) {
63664165079bSFlorian Westphal struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);
63674165079bSFlorian Westphal 
63684165079bSFlorian Westphal skb_ext_put_sp(sp);
63694165079bSFlorian Westphal sp->len = 0;
63704165079bSFlorian Westphal #endif
6371df5042f4SFlorian Westphal }
6372df5042f4SFlorian Westphal }
6373df5042f4SFlorian Westphal EXPORT_SYMBOL(__skb_ext_del);
6374df5042f4SFlorian Westphal 
6375df5042f4SFlorian Westphal void __skb_ext_put(struct skb_ext *ext)
6376df5042f4SFlorian Westphal {
6377df5042f4SFlorian Westphal /* If this is the last clone, nothing can increment
6378df5042f4SFlorian Westphal * it after check passes. Avoids one atomic op.
6379df5042f4SFlorian Westphal */
6380df5042f4SFlorian Westphal if (refcount_read(&ext->refcnt) == 1)
6381df5042f4SFlorian Westphal goto free_now;
6382df5042f4SFlorian Westphal 
6383df5042f4SFlorian Westphal if (!refcount_dec_and_test(&ext->refcnt))
6384df5042f4SFlorian Westphal return;
6385df5042f4SFlorian Westphal free_now:
63864165079bSFlorian Westphal #ifdef CONFIG_XFRM
63874165079bSFlorian Westphal if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
63884165079bSFlorian Westphal skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
63894165079bSFlorian Westphal #endif
63904165079bSFlorian Westphal 
6391df5042f4SFlorian Westphal kmem_cache_free(skbuff_ext_cache, ext);
6392df5042f4SFlorian Westphal }
6393df5042f4SFlorian Westphal EXPORT_SYMBOL(__skb_ext_put);
6394df5042f4SFlorian Westphal #endif /* CONFIG_SKB_EXTENSIONS */
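/* Illustrative usage of the extension API above (a sketch, not part of this
 * file; TC_SKB_EXT is one example id, available under CONFIG_NET_TC_SKB_EXT,
 * and chain_index is a hypothetical local variable):
 *
 *	struct tc_skb_ext *tc_ext = skb_ext_add(skb, TC_SKB_EXT);
 *
 *	if (!tc_ext)
 *		return -ENOMEM;
 *	tc_ext->chain = chain_index;
 *	...
 *	skb_ext_del(skb, TC_SKB_EXT);
 */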