// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman :	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>
#include <net/page_pool.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>

#include "datagram.h"

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 * skb_panic - private function for out-of-line support
 * @skb:  buffer
 * @sz:   size
 * @addr: address
 * @msg:  skb_over_panic or skb_under_panic
 *
 * Out-of-line support for skb_put() and skb_push().
 * Called via the wrapper skb_over_panic() or skb_under_panic().
 * Keep out of line to prevent kernel bloat.
 * __builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

#define NAPI_SKB_CACHE_SIZE	64
#define NAPI_SKB_CACHE_BULK	16
#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)

struct napi_alloc_cache {
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
				unsigned int align_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
}

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	fragsz = SKB_DATA_ALIGN(fragsz);

	return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);

void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	struct page_frag_cache *nc;
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);
	if (in_irq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
	} else {
		local_bh_disable();
		data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(__netdev_alloc_frag_align);
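
/* Illustrative sketch, not part of this file: drivers typically reach the
 * two helpers above through the napi_alloc_frag() and netdev_alloc_frag()
 * wrappers declared in <linux/skbuff.h>. A minimal RX buffer refill could
 * look roughly like this; example_refill_rx_buf() and EXAMPLE_BUF_SZ are
 * hypothetical names:
 *
 *	static void *example_refill_rx_buf(void)
 *	{
 *		void *buf = napi_alloc_frag(EXAMPLE_BUF_SZ); // softirq context
 *
 *		if (!buf)
 *			return NULL;
 *		// ... map buf for DMA and post it to the RX ring ...
 *		// an unused buffer is returned with skb_free_frag(buf)
 *		return buf;
 *	}
 */
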
static struct sk_buff *napi_skb_cache_get(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;

	if (unlikely(!nc->skb_count))
		nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
						      GFP_ATOMIC,
						      NAPI_SKB_CACHE_BULK,
						      nc->skb_cache);
	if (unlikely(!nc->skb_count))
		return NULL;

	skb = nc->skb_cache[--nc->skb_count];
	kasan_unpoison_object_data(skbuff_head_cache, skb);

	return skb;
}

/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	unsigned int size = frag_size ? : ksize(data);

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	skb_set_kcov_handle(skb, kcov_common_handle());
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 * Before IO, the driver allocates only the data buffer where the NIC
 * puts the incoming frame.
 * Driver should add room at head (NET_SKB_PAD) and
 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 * After IO, driver calls build_skb(), to allocate sk_buff and populate it
 * before giving packet to stack.
 * RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc().
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);

/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	__build_skb_around(skb, data, frag_size);

	if (frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);
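
/* Illustrative sketch, not part of this file: the usual build_skb() RX
 * pattern described in the __build_skb() notes above. Names such as
 * example_rx_to_stack() and the truesize/len variables are hypothetical:
 *
 *	static int example_rx_to_stack(struct net_device *dev, void *data,
 *				       unsigned int truesize, unsigned int len)
 *	{
 *		struct sk_buff *skb = build_skb(data, truesize);
 *
 *		if (unlikely(!skb)) {
 *			skb_free_frag(data);
 *			return -ENOMEM;
 *		}
 *		skb_reserve(skb, NET_SKB_PAD);	// headroom added beforehand
 *		skb_put(skb, len);		// bytes the NIC actually wrote
 *		skb->protocol = eth_type_trans(skb, dev);
 *		return netif_receive_skb(skb);
 *	}
 */
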
/**
 * __napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Version of __build_skb() that uses NAPI percpu caches to obtain
 * skbuff_head instead of an in-place allocation.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = napi_skb_cache_get();
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/**
 * napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Version of __napi_build_skb() that takes care of skb->head_frag
 * and skb->pfmemalloc when the data is a page or page fragment.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __napi_build_skb(data, frag_size);

	if (likely(skb) && frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}

	return skb;
}
EXPORT_SYMBOL(napi_build_skb);
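
/* Illustrative sketch, not part of this file: from a NAPI poll handler the
 * build_skb() pattern above uses napi_build_skb() instead, so the
 * skbuff_head comes from the percpu cache; this must only run in
 * softirq/NAPI context:
 *
 *	skb = napi_build_skb(data, truesize);
 *	if (unlikely(!skb)) {
 *		skb_free_frag(data);
 *		return;
 *	}
 *	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
 *	skb_put(skb, len);
 *	napi_gro_receive(napi, skb);
 */
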
/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
			     bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 */

/**
 * __alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @gfp_mask: allocation mask
 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *	instead of head cache and allocate a cloned (child) skb.
 *	If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *	allocations in case the data is required for writeback
 * @node: numa node to allocate memory on
 *
 * Allocate a new &sk_buff. The returned buffer has no headroom and a
 * tail room of at least size bytes. The object has a reference count
 * of one. The return is the buffer. On a failure the return is %NULL.
 *
 * Buffers may only be allocated from interrupts using a @gfp_mask of
 * %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
		skb = napi_skb_cache_get();
	else
		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
	if (unlikely(!skb))
		return NULL;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (unlikely(!data))
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, 0);
	skb->pfmemalloc = pfmemalloc;

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}

	return skb;

nodata:
	kmem_cache_free(cache, skb);
	return NULL;
}
EXPORT_SYMBOL(__alloc_skb);
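
/* Illustrative sketch, not part of this file: most callers use the
 * alloc_skb() wrapper from <linux/skbuff.h>, which calls __alloc_skb()
 * with flags 0 and NUMA_NO_NODE. The hdr_len/payload values below are
 * hypothetical:
 *
 *	skb = alloc_skb(hdr_len + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hdr_len);		// headroom for headers
 *	skb_put_data(skb, payload, payload_len);// tail room holds the data
 */
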
/**
 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has NET_SKB_PAD headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_irq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
	} else {
		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
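
/* Illustrative sketch, not part of this file: the common wrapper is
 * netdev_alloc_skb() (GFP_ATOMIC implied), and the NET_SKB_PAD headroom is
 * already reserved on return, so callers only reserve what they need
 * beyond that. rx_len is a hypothetical length:
 *
 *	skb = netdev_alloc_skb(dev, rx_len);
 *	if (unlikely(!skb))
 *		return NULL;		// out of memory, drop the frame
 *	// ... copy or DMA rx_len bytes into skb->data ...
 *	skb_put(skb, rx_len);		// account the received bytes
 */
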
/**
 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 * @napi: napi instance this buffer was allocated for
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 * Allocate a new sk_buff for use in NAPI receive. This buffer will
 * attempt to allocate the head from a special reserved region used
 * only for NAPI Rx allocation. By doing this we can save several
 * CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
				  NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	nc = this_cpu_ptr(&napi_alloc_cache);
	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __napi_build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);
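
/* Illustrative sketch, not part of this file: a driver appending a received
 * page fragment to an skb built around a small linear header. The off/size
 * values are hypothetical; truesize should account for the whole buffer
 * slice backing the fragment, not just the bytes used:
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, size,
 *			PAGE_SIZE / 2);	// truesize of the ring slot
 */
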
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head))
			return;
		skb_free_frag(head);
	} else {
		kfree(head);
	}
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		goto exit;

	skb_zcopy_clear(skb, true);

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_free_head(skb);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
	 * bit is only set on the head though, so in order to avoid races
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and its
	 * dataref set to 0, which will trigger the recycling.
	 */
	skb->pp_recycle = 0;
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb.
 */
void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);
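
/* Illustrative sketch, not part of this file: each skb_get() takes a
 * reference that must be balanced by kfree_skb()/consume_skb(); only the
 * final put reaches __kfree_skb() above:
 *
 *	struct sk_buff *hold = skb_get(skb);	// users refcount 1 -> 2
 *	// ... use hold while another path may free skb ...
 *	kfree_skb(hold);			// 2 -> 1, nothing freed yet
 */
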
/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);

/* Dump skb information and contents.
 *
 * Must only be called from net_ratelimit()-ed paths.
 *
 * Dumps whole packets if full_pkt, only headers otherwise.
 */
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct net_device *dev = skb->dev;
	struct sock *sk = skb->sk;
	struct sk_buff *list_skb;
	bool has_mac, has_trans;
	int headroom, tailroom;
	int i, len, seg_len;

	if (full_pkt)
		len = skb->len;
	else
		len = min_t(int, skb->len, MAX_HEADER + 128);

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	has_mac = skb_mac_header_was_set(skb);
	has_trans = skb_transport_header_was_set(skb);

	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
	       "mac=(%d,%d) net=(%d,%d) trans=%d\n"
	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
	       "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
	       level, skb->len, headroom, skb_headlen(skb), tailroom,
	       has_mac ? skb->mac_header : -1,
	       has_mac ? skb_mac_header_len(skb) : -1,
	       skb->network_header,
	       has_trans ? skb_network_header_len(skb) : -1,
	       has_trans ? skb->transport_header : -1,
	       sh->tx_flags, sh->nr_frags,
	       sh->gso_size, sh->gso_type, sh->gso_segs,
	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
	       skb->csum_valid, skb->csum_level,
	       skb->hash, skb->sw_hash, skb->l4_hash,
	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);

	if (dev)
		printk("%sdev name=%s feat=0x%pNF\n",
		       level, dev->name, &dev->features);
	if (sk)
		printk("%ssk family=%hu type=%u proto=%u\n",
		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);

	if (full_pkt && headroom)
		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->head, headroom, false);

	seg_len = min_t(int, skb_headlen(skb), len);
	if (seg_len)
		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->data, seg_len, false);
	len -= seg_len;

	if (full_pkt && tailroom)
		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb_tail_pointer(skb), tailroom, false);

	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(frag, skb_frag_off(frag),
				      skb_frag_size(frag), p, p_off, p_len,
				      copied) {
			seg_len = min_t(int, p_len, len);
			vaddr = kmap_atomic(p);
			print_hex_dump(level, "skb frag:     ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, vaddr + p_off, seg_len, false);
			kunmap_atomic(vaddr);
			len -= seg_len;
			if (!len)
				break;
		}
	}

	if (full_pkt && skb_has_frag_list(skb)) {
		printk("skb fraglist:\n");
		skb_walk_frags(skb, list_skb)
			skb_dump(level, list_skb, true);
	}
}
EXPORT_SYMBOL(skb_dump);
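
/* Illustrative sketch, not part of this file: skb_dump() is meant for
 * rate-limited debug paths, e.g. when a driver meets a malformed skb:
 *
 *	if (net_ratelimit())
 *		skb_dump(KERN_ERR, skb, false);	// headers only
 */
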
/**
 * skb_tx_error - report an sk_buff xmit error
 * @skb: buffer that triggered an error
 *
 * Report xmit error if a device callback is tracking this skb.
 * skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	skb_zcopy_clear(skb, true);
}
EXPORT_SYMBOL(skb_tx_error);

#ifdef CONFIG_TRACEPOINTS
/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb(), but kfree_skb() assumes that
 *	the frame is being dropped after a failure and notes that via its
 *	tracepoint.
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
#endif

/**
 *	__consume_stateless_skb - free an skbuff, assuming it is stateless
 *	@skb: buffer to free
 *
 *	Like consume_skb(), but this variant assumes that this is the last
 *	skb reference and all the head states have already been dropped.
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb);
	skb_release_data(skb);
	kfree_skbmem(skb);
}

static void napi_skb_cache_put(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	u32 i;

	kasan_poison_object_data(skbuff_head_cache, skb);
	nc->skb_cache[nc->skb_count++] = skb;

	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
			kasan_unpoison_object_data(skbuff_head_cache,
						   nc->skb_cache[i]);

		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_HALF,
				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
		nc->skb_count = NAPI_SKB_CACHE_HALF;
	}
}

void __kfree_skb_defer(struct sk_buff *skb)
{
	skb_release_all(skb);
	napi_skb_cache_put(skb);
}

void napi_skb_free_stolen_head(struct sk_buff *skb)
{
	nf_reset_ct(skb);
	skb_dst_drop(skb);
	skb_ext_put(skb);
	napi_skb_cache_put(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	/* Zero budget indicates a non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	lockdep_assert_in_softirq();

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb);

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	skb_release_all(skb);
	napi_skb_cache_put(skb);
}
EXPORT_SYMBOL(napi_consume_skb);
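
/* Illustrative sketch, not part of this file: TX completion from a NAPI
 * poll handler passes its budget through, so zero-budget (netpoll) callers
 * fall back to dev_consume_skb_any() as handled above. example_clean_tx(),
 * tx_desc_done() and ring_next_skb() are hypothetical helpers:
 *
 *	static void example_clean_tx(struct example_ring *ring, int budget)
 *	{
 *		while (tx_desc_done(ring)) {
 *			struct sk_buff *skb = ring_next_skb(ring);
 *
 *			napi_consume_skb(skb, budget);
 *		}
 *	}
 */
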
/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	/* We do not copy old->sk */
	new->dev = old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
	__skb_ext_copy(new, old);
	__nf_copy(new, old, false);

	/* Note : this field could be in headers_start/headers_end section
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->peeked = 0;
	C(pfmemalloc);
	C(pp_recycle);
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	refcount_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}
1101da29e4b4SJakub Kicinski n->destructor = NULL; 1102da29e4b4SJakub Kicinski 1103da29e4b4SJakub Kicinski return n; 1104da29e4b4SJakub Kicinski } 1105da29e4b4SJakub Kicinski EXPORT_SYMBOL_GPL(alloc_skb_for_msg); 1106da29e4b4SJakub Kicinski 1107da29e4b4SJakub Kicinski /** 1108e0053ec0SHerbert Xu * skb_morph - morph one skb into another 1109e0053ec0SHerbert Xu * @dst: the skb to receive the contents 1110e0053ec0SHerbert Xu * @src: the skb to supply the contents 1111e0053ec0SHerbert Xu * 1112e0053ec0SHerbert Xu * This is identical to skb_clone except that the target skb is 1113e0053ec0SHerbert Xu * supplied by the user. 1114e0053ec0SHerbert Xu * 1115e0053ec0SHerbert Xu * The target skb is returned upon exit. 1116e0053ec0SHerbert Xu */ 1117e0053ec0SHerbert Xu struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) 1118e0053ec0SHerbert Xu { 11192d4baff8SHerbert Xu skb_release_all(dst); 1120e0053ec0SHerbert Xu return __skb_clone(dst, src); 1121e0053ec0SHerbert Xu } 1122e0053ec0SHerbert Xu EXPORT_SYMBOL_GPL(skb_morph); 1123e0053ec0SHerbert Xu 11246f89dbceSSowmini Varadhan int mm_account_pinned_pages(struct mmpin *mmp, size_t size) 1125a91dbff5SWillem de Bruijn { 1126a91dbff5SWillem de Bruijn unsigned long max_pg, num_pg, new_pg, old_pg; 1127a91dbff5SWillem de Bruijn struct user_struct *user; 1128a91dbff5SWillem de Bruijn 1129a91dbff5SWillem de Bruijn if (capable(CAP_IPC_LOCK) || !size) 1130a91dbff5SWillem de Bruijn return 0; 1131a91dbff5SWillem de Bruijn 1132a91dbff5SWillem de Bruijn num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */ 1133a91dbff5SWillem de Bruijn max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 1134a91dbff5SWillem de Bruijn user = mmp->user ? : current_user(); 1135a91dbff5SWillem de Bruijn 1136a91dbff5SWillem de Bruijn do { 1137a91dbff5SWillem de Bruijn old_pg = atomic_long_read(&user->locked_vm); 1138a91dbff5SWillem de Bruijn new_pg = old_pg + num_pg; 1139a91dbff5SWillem de Bruijn if (new_pg > max_pg) 1140a91dbff5SWillem de Bruijn return -ENOBUFS; 1141a91dbff5SWillem de Bruijn } while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) != 1142a91dbff5SWillem de Bruijn old_pg); 1143a91dbff5SWillem de Bruijn 1144a91dbff5SWillem de Bruijn if (!mmp->user) { 1145a91dbff5SWillem de Bruijn mmp->user = get_uid(user); 1146a91dbff5SWillem de Bruijn mmp->num_pg = num_pg; 1147a91dbff5SWillem de Bruijn } else { 1148a91dbff5SWillem de Bruijn mmp->num_pg += num_pg; 1149a91dbff5SWillem de Bruijn } 1150a91dbff5SWillem de Bruijn 1151a91dbff5SWillem de Bruijn return 0; 1152a91dbff5SWillem de Bruijn } 11536f89dbceSSowmini Varadhan EXPORT_SYMBOL_GPL(mm_account_pinned_pages); 1154a91dbff5SWillem de Bruijn 11556f89dbceSSowmini Varadhan void mm_unaccount_pinned_pages(struct mmpin *mmp) 1156a91dbff5SWillem de Bruijn { 1157a91dbff5SWillem de Bruijn if (mmp->user) { 1158a91dbff5SWillem de Bruijn atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); 1159a91dbff5SWillem de Bruijn free_uid(mmp->user); 1160a91dbff5SWillem de Bruijn } 1161a91dbff5SWillem de Bruijn } 11626f89dbceSSowmini Varadhan EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages); 1163a91dbff5SWillem de Bruijn 11648c793822SJonathan Lemon struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size) 116552267790SWillem de Bruijn { 116652267790SWillem de Bruijn struct ubuf_info *uarg; 116752267790SWillem de Bruijn struct sk_buff *skb; 116852267790SWillem de Bruijn 116952267790SWillem de Bruijn WARN_ON_ONCE(!in_task()); 117052267790SWillem de Bruijn 117152267790SWillem de Bruijn skb = sock_omalloc(sk, 0, GFP_KERNEL); 
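	/* The notification skb doubles as backing storage for the
	 * ubuf_info: uarg is carved out of skb->cb below, so freeing this
	 * skb also frees the uarg (see skb_from_uarg()).
	 */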
117252267790SWillem de Bruijn if (!skb) 117352267790SWillem de Bruijn return NULL; 117452267790SWillem de Bruijn 117552267790SWillem de Bruijn BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); 117652267790SWillem de Bruijn uarg = (void *)skb->cb; 1177a91dbff5SWillem de Bruijn uarg->mmp.user = NULL; 1178a91dbff5SWillem de Bruijn 1179a91dbff5SWillem de Bruijn if (mm_account_pinned_pages(&uarg->mmp, size)) { 1180a91dbff5SWillem de Bruijn kfree_skb(skb); 1181a91dbff5SWillem de Bruijn return NULL; 1182a91dbff5SWillem de Bruijn } 118352267790SWillem de Bruijn 11848c793822SJonathan Lemon uarg->callback = msg_zerocopy_callback; 11854ab6c99dSWillem de Bruijn uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; 11864ab6c99dSWillem de Bruijn uarg->len = 1; 11874ab6c99dSWillem de Bruijn uarg->bytelen = size; 118852267790SWillem de Bruijn uarg->zerocopy = 1; 118904c2d33eSJonathan Lemon uarg->flags = SKBFL_ZEROCOPY_FRAG; 1190c1d1b437SEric Dumazet refcount_set(&uarg->refcnt, 1); 119152267790SWillem de Bruijn sock_hold(sk); 119252267790SWillem de Bruijn 119352267790SWillem de Bruijn return uarg; 119452267790SWillem de Bruijn } 11958c793822SJonathan Lemon EXPORT_SYMBOL_GPL(msg_zerocopy_alloc); 119652267790SWillem de Bruijn 119752267790SWillem de Bruijn static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg) 119852267790SWillem de Bruijn { 119952267790SWillem de Bruijn return container_of((void *)uarg, struct sk_buff, cb); 120052267790SWillem de Bruijn } 120152267790SWillem de Bruijn 12028c793822SJonathan Lemon struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, 12034ab6c99dSWillem de Bruijn struct ubuf_info *uarg) 12044ab6c99dSWillem de Bruijn { 12054ab6c99dSWillem de Bruijn if (uarg) { 12064ab6c99dSWillem de Bruijn const u32 byte_limit = 1 << 19; /* limit to a few TSO */ 12074ab6c99dSWillem de Bruijn u32 bytelen, next; 12084ab6c99dSWillem de Bruijn 12094ab6c99dSWillem de Bruijn /* realloc only when socket is locked (TCP, UDP cork), 12104ab6c99dSWillem de Bruijn * so uarg->len and sk_zckey access is serialized 12114ab6c99dSWillem de Bruijn */ 12124ab6c99dSWillem de Bruijn if (!sock_owned_by_user(sk)) { 12134ab6c99dSWillem de Bruijn WARN_ON_ONCE(1); 12144ab6c99dSWillem de Bruijn return NULL; 12154ab6c99dSWillem de Bruijn } 12164ab6c99dSWillem de Bruijn 12174ab6c99dSWillem de Bruijn bytelen = uarg->bytelen + size; 12184ab6c99dSWillem de Bruijn if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) { 12194ab6c99dSWillem de Bruijn /* TCP can create new skb to attach new uarg */ 12204ab6c99dSWillem de Bruijn if (sk->sk_type == SOCK_STREAM) 12214ab6c99dSWillem de Bruijn goto new_alloc; 12224ab6c99dSWillem de Bruijn return NULL; 12234ab6c99dSWillem de Bruijn } 12244ab6c99dSWillem de Bruijn 12254ab6c99dSWillem de Bruijn next = (u32)atomic_read(&sk->sk_zckey); 12264ab6c99dSWillem de Bruijn if ((u32)(uarg->id + uarg->len) == next) { 1227a91dbff5SWillem de Bruijn if (mm_account_pinned_pages(&uarg->mmp, size)) 1228a91dbff5SWillem de Bruijn return NULL; 12294ab6c99dSWillem de Bruijn uarg->len++; 12304ab6c99dSWillem de Bruijn uarg->bytelen = bytelen; 12314ab6c99dSWillem de Bruijn atomic_set(&sk->sk_zckey, ++next); 1232100f6d8eSWillem de Bruijn 1233100f6d8eSWillem de Bruijn /* no extra ref when appending to datagram (MSG_MORE) */ 1234100f6d8eSWillem de Bruijn if (sk->sk_type == SOCK_STREAM) 12358e044917SJonathan Lemon net_zcopy_get(uarg); 1236100f6d8eSWillem de Bruijn 12374ab6c99dSWillem de Bruijn return uarg; 12384ab6c99dSWillem de Bruijn } 12394ab6c99dSWillem de Bruijn } 
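	/* The existing uarg (if any) could not be extended in place, so
	 * fall through and set up a fresh notification.
	 */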
12404ab6c99dSWillem de Bruijn 12414ab6c99dSWillem de Bruijn new_alloc: 12428c793822SJonathan Lemon return msg_zerocopy_alloc(sk, size); 12434ab6c99dSWillem de Bruijn } 12448c793822SJonathan Lemon EXPORT_SYMBOL_GPL(msg_zerocopy_realloc); 12454ab6c99dSWillem de Bruijn 12464ab6c99dSWillem de Bruijn static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) 12474ab6c99dSWillem de Bruijn { 12484ab6c99dSWillem de Bruijn struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); 12494ab6c99dSWillem de Bruijn u32 old_lo, old_hi; 12504ab6c99dSWillem de Bruijn u64 sum_len; 12514ab6c99dSWillem de Bruijn 12524ab6c99dSWillem de Bruijn old_lo = serr->ee.ee_info; 12534ab6c99dSWillem de Bruijn old_hi = serr->ee.ee_data; 12544ab6c99dSWillem de Bruijn sum_len = old_hi - old_lo + 1ULL + len; 12554ab6c99dSWillem de Bruijn 12564ab6c99dSWillem de Bruijn if (sum_len >= (1ULL << 32)) 12574ab6c99dSWillem de Bruijn return false; 12584ab6c99dSWillem de Bruijn 12594ab6c99dSWillem de Bruijn if (lo != old_hi + 1) 12604ab6c99dSWillem de Bruijn return false; 12614ab6c99dSWillem de Bruijn 12624ab6c99dSWillem de Bruijn serr->ee.ee_data += len; 12634ab6c99dSWillem de Bruijn return true; 12644ab6c99dSWillem de Bruijn } 12654ab6c99dSWillem de Bruijn 12668c793822SJonathan Lemon static void __msg_zerocopy_callback(struct ubuf_info *uarg) 126752267790SWillem de Bruijn { 12684ab6c99dSWillem de Bruijn struct sk_buff *tail, *skb = skb_from_uarg(uarg); 126952267790SWillem de Bruijn struct sock_exterr_skb *serr; 127052267790SWillem de Bruijn struct sock *sk = skb->sk; 12714ab6c99dSWillem de Bruijn struct sk_buff_head *q; 12724ab6c99dSWillem de Bruijn unsigned long flags; 12733bdd5ee0SWillem de Bruijn bool is_zerocopy; 12744ab6c99dSWillem de Bruijn u32 lo, hi; 12754ab6c99dSWillem de Bruijn u16 len; 127652267790SWillem de Bruijn 1277ccaffff1SWillem de Bruijn mm_unaccount_pinned_pages(&uarg->mmp); 1278ccaffff1SWillem de Bruijn 12794ab6c99dSWillem de Bruijn /* if !len, there was only 1 call, and it was aborted 12804ab6c99dSWillem de Bruijn * so do not queue a completion notification 12814ab6c99dSWillem de Bruijn */ 12824ab6c99dSWillem de Bruijn if (!uarg->len || sock_flag(sk, SOCK_DEAD)) 128352267790SWillem de Bruijn goto release; 128452267790SWillem de Bruijn 12854ab6c99dSWillem de Bruijn len = uarg->len; 12864ab6c99dSWillem de Bruijn lo = uarg->id; 12874ab6c99dSWillem de Bruijn hi = uarg->id + len - 1; 12883bdd5ee0SWillem de Bruijn is_zerocopy = uarg->zerocopy; 12894ab6c99dSWillem de Bruijn 129052267790SWillem de Bruijn serr = SKB_EXT_ERR(skb); 129152267790SWillem de Bruijn memset(serr, 0, sizeof(*serr)); 129252267790SWillem de Bruijn serr->ee.ee_errno = 0; 129352267790SWillem de Bruijn serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; 12944ab6c99dSWillem de Bruijn serr->ee.ee_data = hi; 12954ab6c99dSWillem de Bruijn serr->ee.ee_info = lo; 12963bdd5ee0SWillem de Bruijn if (!is_zerocopy) 129752267790SWillem de Bruijn serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; 129852267790SWillem de Bruijn 12994ab6c99dSWillem de Bruijn q = &sk->sk_error_queue; 13004ab6c99dSWillem de Bruijn spin_lock_irqsave(&q->lock, flags); 13014ab6c99dSWillem de Bruijn tail = skb_peek_tail(q); 13024ab6c99dSWillem de Bruijn if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || 13034ab6c99dSWillem de Bruijn !skb_zerocopy_notify_extend(tail, lo, len)) { 13044ab6c99dSWillem de Bruijn __skb_queue_tail(q, skb); 130552267790SWillem de Bruijn skb = NULL; 13064ab6c99dSWillem de Bruijn } 13074ab6c99dSWillem de Bruijn spin_unlock_irqrestore(&q->lock, 
flags); 130852267790SWillem de Bruijn 1309e3ae2365SAlexander Aring sk_error_report(sk); 131052267790SWillem de Bruijn 131152267790SWillem de Bruijn release: 131252267790SWillem de Bruijn consume_skb(skb); 131352267790SWillem de Bruijn sock_put(sk); 131452267790SWillem de Bruijn } 131575518851SJonathan Lemon 13168c793822SJonathan Lemon void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, 131736177832SJonathan Lemon bool success) 131875518851SJonathan Lemon { 131975518851SJonathan Lemon uarg->zerocopy = uarg->zerocopy & success; 132075518851SJonathan Lemon 132175518851SJonathan Lemon if (refcount_dec_and_test(&uarg->refcnt)) 13228c793822SJonathan Lemon __msg_zerocopy_callback(uarg); 132375518851SJonathan Lemon } 13248c793822SJonathan Lemon EXPORT_SYMBOL_GPL(msg_zerocopy_callback); 132552267790SWillem de Bruijn 13268c793822SJonathan Lemon void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref) 132752267790SWillem de Bruijn { 132852267790SWillem de Bruijn struct sock *sk = skb_from_uarg(uarg)->sk; 132952267790SWillem de Bruijn 133052267790SWillem de Bruijn atomic_dec(&sk->sk_zckey); 13314ab6c99dSWillem de Bruijn uarg->len--; 133252267790SWillem de Bruijn 133352900d22SWillem de Bruijn if (have_uref) 13348c793822SJonathan Lemon msg_zerocopy_callback(NULL, uarg, true); 133552267790SWillem de Bruijn } 13368c793822SJonathan Lemon EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort); 133752267790SWillem de Bruijn 1338b5947e5dSWillem de Bruijn int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len) 1339b5947e5dSWillem de Bruijn { 1340b5947e5dSWillem de Bruijn return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len); 1341b5947e5dSWillem de Bruijn } 1342b5947e5dSWillem de Bruijn EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram); 1343b5947e5dSWillem de Bruijn 134452267790SWillem de Bruijn int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, 134552267790SWillem de Bruijn struct msghdr *msg, int len, 134652267790SWillem de Bruijn struct ubuf_info *uarg) 134752267790SWillem de Bruijn { 13484ab6c99dSWillem de Bruijn struct ubuf_info *orig_uarg = skb_zcopy(skb); 134952267790SWillem de Bruijn struct iov_iter orig_iter = msg->msg_iter; 135052267790SWillem de Bruijn int err, orig_len = skb->len; 135152267790SWillem de Bruijn 13524ab6c99dSWillem de Bruijn /* An skb can only point to one uarg. This edge case happens when 13534ab6c99dSWillem de Bruijn * TCP appends to an skb, but zerocopy_realloc triggered a new alloc. 13544ab6c99dSWillem de Bruijn */ 13554ab6c99dSWillem de Bruijn if (orig_uarg && uarg != orig_uarg) 13564ab6c99dSWillem de Bruijn return -EEXIST; 13574ab6c99dSWillem de Bruijn 135852267790SWillem de Bruijn err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len); 135952267790SWillem de Bruijn if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { 136054d43117SWillem de Bruijn struct sock *save_sk = skb->sk; 136154d43117SWillem de Bruijn 136252267790SWillem de Bruijn /* Streams do not free skb on error. Reset to prev state. 
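		 * Note that ___pskb_trim() below runs with skb->sk
		 * temporarily set to @sk; the original owner is restored
		 * right afterwards.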
*/ 136352267790SWillem de Bruijn msg->msg_iter = orig_iter; 136454d43117SWillem de Bruijn skb->sk = sk; 136552267790SWillem de Bruijn ___pskb_trim(skb, orig_len); 136654d43117SWillem de Bruijn skb->sk = save_sk; 136752267790SWillem de Bruijn return err; 136852267790SWillem de Bruijn } 136952267790SWillem de Bruijn 137052900d22SWillem de Bruijn skb_zcopy_set(skb, uarg, NULL); 137152267790SWillem de Bruijn return skb->len - orig_len; 137252267790SWillem de Bruijn } 137352267790SWillem de Bruijn EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream); 137452267790SWillem de Bruijn 13751f8b977aSWillem de Bruijn static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, 137652267790SWillem de Bruijn gfp_t gfp_mask) 137752267790SWillem de Bruijn { 137852267790SWillem de Bruijn if (skb_zcopy(orig)) { 137952267790SWillem de Bruijn if (skb_zcopy(nskb)) { 138052267790SWillem de Bruijn /* !gfp_mask callers are verified to !skb_zcopy(nskb) */ 138152267790SWillem de Bruijn if (!gfp_mask) { 138252267790SWillem de Bruijn WARN_ON_ONCE(1); 138352267790SWillem de Bruijn return -ENOMEM; 138452267790SWillem de Bruijn } 138552267790SWillem de Bruijn if (skb_uarg(nskb) == skb_uarg(orig)) 138652267790SWillem de Bruijn return 0; 138752267790SWillem de Bruijn if (skb_copy_ubufs(nskb, GFP_ATOMIC)) 138852267790SWillem de Bruijn return -EIO; 138952267790SWillem de Bruijn } 139052900d22SWillem de Bruijn skb_zcopy_set(nskb, skb_uarg(orig), NULL); 139152267790SWillem de Bruijn } 139252267790SWillem de Bruijn return 0; 139352267790SWillem de Bruijn } 139452267790SWillem de Bruijn 13952c53040fSBen Hutchings /** 13962c53040fSBen Hutchings * skb_copy_ubufs - copy userspace skb frags buffers to kernel 139748c83012SMichael S. Tsirkin * @skb: the skb to modify 139848c83012SMichael S. Tsirkin * @gfp_mask: allocation priority 139948c83012SMichael S. Tsirkin * 140006b4feb3SJonathan Lemon * This must be called on skb with SKBFL_ZEROCOPY_ENABLE. 140148c83012SMichael S. Tsirkin * It will copy all frags into kernel and drop the reference 140248c83012SMichael S. Tsirkin * to userspace pages. 140348c83012SMichael S. Tsirkin * 140448c83012SMichael S. Tsirkin * If this function is called from an interrupt gfp_mask() must be 140548c83012SMichael S. Tsirkin * %GFP_ATOMIC. 140648c83012SMichael S. Tsirkin * 140748c83012SMichael S. Tsirkin * Returns 0 on success or a negative error code on failure 140848c83012SMichael S. Tsirkin * to allocate kernel memory to copy to. 140948c83012SMichael S. Tsirkin */ 141048c83012SMichael S. 
Tsirkin int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 1411a6686f2fSShirley Ma { 1412a6686f2fSShirley Ma int num_frags = skb_shinfo(skb)->nr_frags; 1413a6686f2fSShirley Ma struct page *page, *head = NULL; 14143ece7826SWillem de Bruijn int i, new_frags; 14153ece7826SWillem de Bruijn u32 d_off; 1416a6686f2fSShirley Ma 14173ece7826SWillem de Bruijn if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) 14183ece7826SWillem de Bruijn return -EINVAL; 14193ece7826SWillem de Bruijn 1420f72c4ac6SWillem de Bruijn if (!num_frags) 1421f72c4ac6SWillem de Bruijn goto release; 1422f72c4ac6SWillem de Bruijn 14233ece7826SWillem de Bruijn new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT; 14243ece7826SWillem de Bruijn for (i = 0; i < new_frags; i++) { 142502756ed4SKrishna Kumar page = alloc_page(gfp_mask); 1426a6686f2fSShirley Ma if (!page) { 1427a6686f2fSShirley Ma while (head) { 142840dadff2SSunghan Suh struct page *next = (struct page *)page_private(head); 1429a6686f2fSShirley Ma put_page(head); 1430a6686f2fSShirley Ma head = next; 1431a6686f2fSShirley Ma } 1432a6686f2fSShirley Ma return -ENOMEM; 1433a6686f2fSShirley Ma } 14343ece7826SWillem de Bruijn set_page_private(page, (unsigned long)head); 14353ece7826SWillem de Bruijn head = page; 14363ece7826SWillem de Bruijn } 14373ece7826SWillem de Bruijn 14383ece7826SWillem de Bruijn page = head; 14393ece7826SWillem de Bruijn d_off = 0; 14403ece7826SWillem de Bruijn for (i = 0; i < num_frags; i++) { 14413ece7826SWillem de Bruijn skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 14423ece7826SWillem de Bruijn u32 p_off, p_len, copied; 14433ece7826SWillem de Bruijn struct page *p; 14443ece7826SWillem de Bruijn u8 *vaddr; 1445c613c209SWillem de Bruijn 1446b54c9d5bSJonathan Lemon skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), 1447c613c209SWillem de Bruijn p, p_off, p_len, copied) { 14483ece7826SWillem de Bruijn u32 copy, done = 0; 1449c613c209SWillem de Bruijn vaddr = kmap_atomic(p); 14503ece7826SWillem de Bruijn 14513ece7826SWillem de Bruijn while (done < p_len) { 14523ece7826SWillem de Bruijn if (d_off == PAGE_SIZE) { 14533ece7826SWillem de Bruijn d_off = 0; 14543ece7826SWillem de Bruijn page = (struct page *)page_private(page); 14553ece7826SWillem de Bruijn } 14563ece7826SWillem de Bruijn copy = min_t(u32, PAGE_SIZE - d_off, p_len - done); 14573ece7826SWillem de Bruijn memcpy(page_address(page) + d_off, 14583ece7826SWillem de Bruijn vaddr + p_off + done, copy); 14593ece7826SWillem de Bruijn done += copy; 14603ece7826SWillem de Bruijn d_off += copy; 14613ece7826SWillem de Bruijn } 146251c56b00SEric Dumazet kunmap_atomic(vaddr); 1463c613c209SWillem de Bruijn } 1464a6686f2fSShirley Ma } 1465a6686f2fSShirley Ma 1466a6686f2fSShirley Ma /* skb frags release userspace buffers */ 146702756ed4SKrishna Kumar for (i = 0; i < num_frags; i++) 1468a8605c60SIan Campbell skb_frag_unref(skb, i); 1469a6686f2fSShirley Ma 1470a6686f2fSShirley Ma /* skb frags point to kernel buffers */ 14713ece7826SWillem de Bruijn for (i = 0; i < new_frags - 1; i++) { 14723ece7826SWillem de Bruijn __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE); 147340dadff2SSunghan Suh head = (struct page *)page_private(head); 1474a6686f2fSShirley Ma } 14753ece7826SWillem de Bruijn __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off); 14763ece7826SWillem de Bruijn skb_shinfo(skb)->nr_frags = new_frags; 147748c83012SMichael S. 
Tsirkin 1478b90ddd56SWillem de Bruijn release: 14791f8b977aSWillem de Bruijn skb_zcopy_clear(skb, false); 1480a6686f2fSShirley Ma return 0; 1481a6686f2fSShirley Ma } 1482dcc0fb78SMichael S. Tsirkin EXPORT_SYMBOL_GPL(skb_copy_ubufs); 1483a6686f2fSShirley Ma 1484e0053ec0SHerbert Xu /** 1485e0053ec0SHerbert Xu * skb_clone - duplicate an sk_buff 1486e0053ec0SHerbert Xu * @skb: buffer to clone 1487e0053ec0SHerbert Xu * @gfp_mask: allocation priority 1488e0053ec0SHerbert Xu * 1489e0053ec0SHerbert Xu * Duplicate an &sk_buff. The new one is not owned by a socket. Both 1490e0053ec0SHerbert Xu * copies share the same packet data but not structure. The new 1491e0053ec0SHerbert Xu * buffer has a reference count of 1. If the allocation fails the 1492e0053ec0SHerbert Xu * function returns %NULL otherwise the new buffer is returned. 1493e0053ec0SHerbert Xu * 1494e0053ec0SHerbert Xu * If this function is called from an interrupt gfp_mask() must be 1495e0053ec0SHerbert Xu * %GFP_ATOMIC. 1496e0053ec0SHerbert Xu */ 1497e0053ec0SHerbert Xu 1498e0053ec0SHerbert Xu struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) 1499e0053ec0SHerbert Xu { 1500d0bf4a9eSEric Dumazet struct sk_buff_fclones *fclones = container_of(skb, 1501d0bf4a9eSEric Dumazet struct sk_buff_fclones, 1502d0bf4a9eSEric Dumazet skb1); 15036ffe75ebSEric Dumazet struct sk_buff *n; 1504e0053ec0SHerbert Xu 150570008aa5SMichael S. Tsirkin if (skb_orphan_frags(skb, gfp_mask)) 1506a6686f2fSShirley Ma return NULL; 1507a6686f2fSShirley Ma 1508e0053ec0SHerbert Xu if (skb->fclone == SKB_FCLONE_ORIG && 15092638595aSReshetova, Elena refcount_read(&fclones->fclone_ref) == 1) { 15106ffe75ebSEric Dumazet n = &fclones->skb2; 15112638595aSReshetova, Elena refcount_set(&fclones->fclone_ref, 2); 1512e0053ec0SHerbert Xu } else { 1513c93bdd0eSMel Gorman if (skb_pfmemalloc(skb)) 1514c93bdd0eSMel Gorman gfp_mask |= __GFP_MEMALLOC; 1515c93bdd0eSMel Gorman 1516e0053ec0SHerbert Xu n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); 1517e0053ec0SHerbert Xu if (!n) 1518e0053ec0SHerbert Xu return NULL; 1519fe55f6d5SVegard Nossum 1520e0053ec0SHerbert Xu n->fclone = SKB_FCLONE_UNAVAILABLE; 1521e0053ec0SHerbert Xu } 1522e0053ec0SHerbert Xu 1523e0053ec0SHerbert Xu return __skb_clone(n, skb); 15241da177e4SLinus Torvalds } 1525b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_clone); 15261da177e4SLinus Torvalds 1527b0768a86SToshiaki Makita void skb_headers_offset_update(struct sk_buff *skb, int off) 1528f5b17294SPravin B Shelar { 1529030737bcSEric Dumazet /* Only adjust this if it actually is csum_start rather than csum */ 1530030737bcSEric Dumazet if (skb->ip_summed == CHECKSUM_PARTIAL) 1531030737bcSEric Dumazet skb->csum_start += off; 1532f5b17294SPravin B Shelar /* {transport,network,mac}_header and tail are relative to skb->head */ 1533f5b17294SPravin B Shelar skb->transport_header += off; 1534f5b17294SPravin B Shelar skb->network_header += off; 1535f5b17294SPravin B Shelar if (skb_mac_header_was_set(skb)) 1536f5b17294SPravin B Shelar skb->mac_header += off; 1537f5b17294SPravin B Shelar skb->inner_transport_header += off; 1538f5b17294SPravin B Shelar skb->inner_network_header += off; 1539aefbd2b3SPravin B Shelar skb->inner_mac_header += off; 1540f5b17294SPravin B Shelar } 1541b0768a86SToshiaki Makita EXPORT_SYMBOL(skb_headers_offset_update); 1542f5b17294SPravin B Shelar 154308303c18SIlya Lesokhin void skb_copy_header(struct sk_buff *new, const struct sk_buff *old) 15441da177e4SLinus Torvalds { 1545dec18810SHerbert Xu __copy_skb_header(new, old); 1546dec18810SHerbert Xu 15477967168cSHerbert Xu skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 15487967168cSHerbert Xu skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 15497967168cSHerbert Xu skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 15501da177e4SLinus Torvalds } 155108303c18SIlya Lesokhin EXPORT_SYMBOL(skb_copy_header); 15521da177e4SLinus Torvalds 1553c93bdd0eSMel Gorman static inline int skb_alloc_rx_flag(const struct sk_buff *skb) 1554c93bdd0eSMel Gorman { 1555c93bdd0eSMel Gorman if (skb_pfmemalloc(skb)) 1556c93bdd0eSMel Gorman return SKB_ALLOC_RX; 1557c93bdd0eSMel Gorman return 0; 1558c93bdd0eSMel Gorman } 1559c93bdd0eSMel Gorman 15601da177e4SLinus Torvalds /** 15611da177e4SLinus Torvalds * skb_copy - create private copy of an sk_buff 15621da177e4SLinus Torvalds * @skb: buffer to copy 15631da177e4SLinus Torvalds * @gfp_mask: allocation priority 15641da177e4SLinus Torvalds * 15651da177e4SLinus Torvalds * Make a copy of both an &sk_buff and its data. This is used when the 15661da177e4SLinus Torvalds * caller wishes to modify the data and needs a private copy of the 15671da177e4SLinus Torvalds * data to alter. Returns %NULL on failure or the pointer to the buffer 15681da177e4SLinus Torvalds * on success. The returned buffer has a reference count of 1. 15691da177e4SLinus Torvalds * 15701da177e4SLinus Torvalds * As a by-product, this function converts a non-linear &sk_buff into a 15711da177e4SLinus Torvalds * linear one, so that the &sk_buff becomes completely private and the 15721da177e4SLinus Torvalds * caller may modify all the data of the returned buffer. This means the 15731da177e4SLinus Torvalds * function is not recommended in circumstances where only the 15741da177e4SLinus Torvalds * header is going to be modified. Use pskb_copy() instead.
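 *
 * A minimal usage sketch (illustrative only; hypothetical caller in
 * atomic context):
 *
 *	nskb = skb_copy(skb, GFP_ATOMIC);
 *	if (!nskb)
 *		return -ENOMEM;
 *	memset(nskb->data, 0, nskb->len);
 *
 * Because the copy is linear and private, rewriting its entire payload,
 * as above, is safe.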
15751da177e4SLinus Torvalds */ 15761da177e4SLinus Torvalds 1577dd0fc66fSAl Viro struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 15781da177e4SLinus Torvalds { 15796602cebbSEric Dumazet int headerlen = skb_headroom(skb); 1580ec47ea82SAlexander Duyck unsigned int size = skb_end_offset(skb) + skb->data_len; 1581c93bdd0eSMel Gorman struct sk_buff *n = __alloc_skb(size, gfp_mask, 1582c93bdd0eSMel Gorman skb_alloc_rx_flag(skb), NUMA_NO_NODE); 15836602cebbSEric Dumazet 15841da177e4SLinus Torvalds if (!n) 15851da177e4SLinus Torvalds return NULL; 15861da177e4SLinus Torvalds 15871da177e4SLinus Torvalds /* Set the data pointer */ 15881da177e4SLinus Torvalds skb_reserve(n, headerlen); 15891da177e4SLinus Torvalds /* Set the tail pointer and length */ 15901da177e4SLinus Torvalds skb_put(n, skb->len); 15911da177e4SLinus Torvalds 15929f77fad3STim Hansen BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); 15931da177e4SLinus Torvalds 159408303c18SIlya Lesokhin skb_copy_header(n, skb); 15951da177e4SLinus Torvalds return n; 15961da177e4SLinus Torvalds } 1597b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy); 15981da177e4SLinus Torvalds 15991da177e4SLinus Torvalds /** 1600bad93e9dSOctavian Purdila * __pskb_copy_fclone - create a copy of an sk_buff with a private head. 16011da177e4SLinus Torvalds * @skb: buffer to copy 1602117632e6SEric Dumazet * @headroom: headroom of new skb 16031da177e4SLinus Torvalds * @gfp_mask: allocation priority 1604bad93e9dSOctavian Purdila * @fclone: if true, allocate the copy of the skb from the fclone 1605bad93e9dSOctavian Purdila * cache instead of the head cache; it is recommended to set this 1606bad93e9dSOctavian Purdila * to true for the cases where the copy will likely be cloned 16071da177e4SLinus Torvalds * 16081da177e4SLinus Torvalds * Make a copy of both an &sk_buff and part of its data, located 16091da177e4SLinus Torvalds * in the header. Fragmented data remains shared. This is used when 16101da177e4SLinus Torvalds * the caller wishes to modify only the header of an &sk_buff and needs 16111da177e4SLinus Torvalds * a private copy of the header to alter. Returns %NULL on failure 16121da177e4SLinus Torvalds * or the pointer to the buffer on success. 16131da177e4SLinus Torvalds * The returned buffer has a reference count of 1. 16141da177e4SLinus Torvalds */ 16151da177e4SLinus Torvalds 1616bad93e9dSOctavian Purdila struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, 1617bad93e9dSOctavian Purdila gfp_t gfp_mask, bool fclone) 16181da177e4SLinus Torvalds { 1619117632e6SEric Dumazet unsigned int size = skb_headlen(skb) + headroom; 1620bad93e9dSOctavian Purdila int flags = skb_alloc_rx_flag(skb) | (fclone ?
SKB_ALLOC_FCLONE : 0); 1621bad93e9dSOctavian Purdila struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); 16226602cebbSEric Dumazet 16231da177e4SLinus Torvalds if (!n) 16241da177e4SLinus Torvalds goto out; 16251da177e4SLinus Torvalds 16261da177e4SLinus Torvalds /* Set the data pointer */ 1627117632e6SEric Dumazet skb_reserve(n, headroom); 16281da177e4SLinus Torvalds /* Set the tail pointer and length */ 16291da177e4SLinus Torvalds skb_put(n, skb_headlen(skb)); 16301da177e4SLinus Torvalds /* Copy the bytes */ 1631d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data(skb, n->data, n->len); 16321da177e4SLinus Torvalds 163325f484a6SHerbert Xu n->truesize += skb->data_len; 16341da177e4SLinus Torvalds n->data_len = skb->data_len; 16351da177e4SLinus Torvalds n->len = skb->len; 16361da177e4SLinus Torvalds 16371da177e4SLinus Torvalds if (skb_shinfo(skb)->nr_frags) { 16381da177e4SLinus Torvalds int i; 16391da177e4SLinus Torvalds 16401f8b977aSWillem de Bruijn if (skb_orphan_frags(skb, gfp_mask) || 16411f8b977aSWillem de Bruijn skb_zerocopy_clone(n, skb, gfp_mask)) { 16421511022cSDan Carpenter kfree_skb(n); 16431511022cSDan Carpenter n = NULL; 1644a6686f2fSShirley Ma goto out; 1645a6686f2fSShirley Ma } 16461da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 16471da177e4SLinus Torvalds skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 1648ea2ab693SIan Campbell skb_frag_ref(skb, i); 16491da177e4SLinus Torvalds } 16501da177e4SLinus Torvalds skb_shinfo(n)->nr_frags = i; 16511da177e4SLinus Torvalds } 16521da177e4SLinus Torvalds 165321dc3301SDavid S. Miller if (skb_has_frag_list(skb)) { 16541da177e4SLinus Torvalds skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 16551da177e4SLinus Torvalds skb_clone_fraglist(n); 16561da177e4SLinus Torvalds } 16571da177e4SLinus Torvalds 165808303c18SIlya Lesokhin skb_copy_header(n, skb); 16591da177e4SLinus Torvalds out: 16601da177e4SLinus Torvalds return n; 16611da177e4SLinus Torvalds } 1662bad93e9dSOctavian Purdila EXPORT_SYMBOL(__pskb_copy_fclone); 16631da177e4SLinus Torvalds 16641da177e4SLinus Torvalds /** 16651da177e4SLinus Torvalds * pskb_expand_head - reallocate header of &sk_buff 16661da177e4SLinus Torvalds * @skb: buffer to reallocate 16671da177e4SLinus Torvalds * @nhead: room to add at head 16681da177e4SLinus Torvalds * @ntail: room to add at tail 16691da177e4SLinus Torvalds * @gfp_mask: allocation priority 16701da177e4SLinus Torvalds * 1671bc32383cSMathias Krause * Expands (or creates an identical copy, if @nhead and @ntail are zero) 1672bc32383cSMathias Krause * the header of @skb. The &sk_buff itself is not changed and MUST have 16731da177e4SLinus Torvalds * a reference count of 1. Returns zero on success, or a negative error 16741da177e4SLinus Torvalds * code if the expansion failed; in the latter case, the &sk_buff is not changed. 16751da177e4SLinus Torvalds * 16761da177e4SLinus Torvalds * All the pointers pointing into the skb header may change and must be 16771da177e4SLinus Torvalds * reloaded after a call to this function.
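 *
 * A hedged example (illustrative; %TUNNEL_HLEN is a made-up constant
 * standing in for the caller's encapsulation header length):
 *
 *	if (skb_headroom(skb) < TUNNEL_HLEN &&
 *	    pskb_expand_head(skb, TUNNEL_HLEN - skb_headroom(skb),
 *			     0, GFP_ATOMIC))
 *		goto drop;
 *	hdr = skb_push(skb, TUNNEL_HLEN);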
16781da177e4SLinus Torvalds */ 16791da177e4SLinus Torvalds 168086a76cafSVictor Fusco int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 1681dd0fc66fSAl Viro gfp_t gfp_mask) 16821da177e4SLinus Torvalds { 1683158f323bSEric Dumazet int i, osize = skb_end_offset(skb); 1684158f323bSEric Dumazet int size = osize + nhead + ntail; 16851da177e4SLinus Torvalds long off; 1686158f323bSEric Dumazet u8 *data; 16871da177e4SLinus Torvalds 16884edd87adSHerbert Xu BUG_ON(nhead < 0); 16894edd87adSHerbert Xu 16909f77fad3STim Hansen BUG_ON(skb_shared(skb)); 16911da177e4SLinus Torvalds 16921da177e4SLinus Torvalds size = SKB_DATA_ALIGN(size); 16931da177e4SLinus Torvalds 1694c93bdd0eSMel Gorman if (skb_pfmemalloc(skb)) 1695c93bdd0eSMel Gorman gfp_mask |= __GFP_MEMALLOC; 1696c93bdd0eSMel Gorman data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 1697c93bdd0eSMel Gorman gfp_mask, NUMA_NO_NODE, NULL); 16981da177e4SLinus Torvalds if (!data) 16991da177e4SLinus Torvalds goto nodata; 170087151b86SEric Dumazet size = SKB_WITH_OVERHEAD(ksize(data)); 17011da177e4SLinus Torvalds 17021da177e4SLinus Torvalds /* Copy only real data... and, alas, header. This should be 17036602cebbSEric Dumazet * optimized for the cases when header is void. 17046602cebbSEric Dumazet */ 17056602cebbSEric Dumazet memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); 17066602cebbSEric Dumazet 17076602cebbSEric Dumazet memcpy((struct skb_shared_info *)(data + size), 17086602cebbSEric Dumazet skb_shinfo(skb), 1709fed66381SEric Dumazet offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 17101da177e4SLinus Torvalds 17113e24591aSAlexander Duyck /* 17123e24591aSAlexander Duyck * if shinfo is shared we must drop the old head gracefully, but if it 17133e24591aSAlexander Duyck * is not we can just drop the old head and let the existing refcount 17143e24591aSAlexander Duyck * be since all we did is relocate the values 17153e24591aSAlexander Duyck */ 17163e24591aSAlexander Duyck if (skb_cloned(skb)) { 171770008aa5SMichael S. Tsirkin if (skb_orphan_frags(skb, gfp_mask)) 1718a6686f2fSShirley Ma goto nofrags; 17191f8b977aSWillem de Bruijn if (skb_zcopy(skb)) 1720c1d1b437SEric Dumazet refcount_inc(&skb_uarg(skb)->refcnt); 17211da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1722ea2ab693SIan Campbell skb_frag_ref(skb, i); 17231da177e4SLinus Torvalds 172421dc3301SDavid S. 
Miller if (skb_has_frag_list(skb)) 17251da177e4SLinus Torvalds skb_clone_fraglist(skb); 17261da177e4SLinus Torvalds 17271da177e4SLinus Torvalds skb_release_data(skb); 17283e24591aSAlexander Duyck } else { 17293e24591aSAlexander Duyck skb_free_head(skb); 17301fd63041SEric Dumazet } 17311da177e4SLinus Torvalds off = (data + nhead) - skb->head; 17321da177e4SLinus Torvalds 17331da177e4SLinus Torvalds skb->head = data; 1734d3836f21SEric Dumazet skb->head_frag = 0; 17351da177e4SLinus Torvalds skb->data += off; 17364305b541SArnaldo Carvalho de Melo #ifdef NET_SKBUFF_DATA_USES_OFFSET 17374305b541SArnaldo Carvalho de Melo skb->end = size; 173856eb8882SPatrick McHardy off = nhead; 17394305b541SArnaldo Carvalho de Melo #else 17404305b541SArnaldo Carvalho de Melo skb->end = skb->head + size; 174156eb8882SPatrick McHardy #endif 174227a884dcSArnaldo Carvalho de Melo skb->tail += off; 1743b41abb42SPeter Pan(潘卫平) skb_headers_offset_update(skb, nhead); 17441da177e4SLinus Torvalds skb->cloned = 0; 1745334a8132SPatrick McHardy skb->hdr_len = 0; 17461da177e4SLinus Torvalds skb->nohdr = 0; 17471da177e4SLinus Torvalds atomic_set(&skb_shinfo(skb)->dataref, 1); 1748158f323bSEric Dumazet 1749de8f3a83SDaniel Borkmann skb_metadata_clear(skb); 1750de8f3a83SDaniel Borkmann 1751158f323bSEric Dumazet /* It is not generally safe to change skb->truesize. 1752158f323bSEric Dumazet * For the moment, we only really care about the rx path, or 1753158f323bSEric Dumazet * the case where the skb is orphaned (not attached to a socket). 1754158f323bSEric Dumazet */ 1755158f323bSEric Dumazet if (!skb->sk || skb->destructor == sock_edemux) 1756158f323bSEric Dumazet skb->truesize += size - osize; 1757158f323bSEric Dumazet 17581da177e4SLinus Torvalds return 0; 17591da177e4SLinus Torvalds 1760a6686f2fSShirley Ma nofrags: 1761a6686f2fSShirley Ma kfree(data); 17621da177e4SLinus Torvalds nodata: 17631da177e4SLinus Torvalds return -ENOMEM; 17641da177e4SLinus Torvalds } 1765b4ac530fSDavid S. Miller EXPORT_SYMBOL(pskb_expand_head); 17661da177e4SLinus Torvalds 17671da177e4SLinus Torvalds /* Make a private copy of the skb with a writable head and some headroom */ 17681da177e4SLinus Torvalds 17691da177e4SLinus Torvalds struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) 17701da177e4SLinus Torvalds { 17711da177e4SLinus Torvalds struct sk_buff *skb2; 17721da177e4SLinus Torvalds int delta = headroom - skb_headroom(skb); 17731da177e4SLinus Torvalds 17741da177e4SLinus Torvalds if (delta <= 0) 17751da177e4SLinus Torvalds skb2 = pskb_copy(skb, GFP_ATOMIC); 17761da177e4SLinus Torvalds else { 17771da177e4SLinus Torvalds skb2 = skb_clone(skb, GFP_ATOMIC); 17781da177e4SLinus Torvalds if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, 17791da177e4SLinus Torvalds GFP_ATOMIC)) { 17801da177e4SLinus Torvalds kfree_skb(skb2); 17811da177e4SLinus Torvalds skb2 = NULL; 17821da177e4SLinus Torvalds } 17831da177e4SLinus Torvalds } 17841da177e4SLinus Torvalds return skb2; 17851da177e4SLinus Torvalds } 1786b4ac530fSDavid S.
Miller EXPORT_SYMBOL(skb_realloc_headroom); 17871da177e4SLinus Torvalds 17881da177e4SLinus Torvalds /** 17891da177e4SLinus Torvalds * skb_copy_expand - copy and expand sk_buff 17901da177e4SLinus Torvalds * @skb: buffer to copy 17911da177e4SLinus Torvalds * @newheadroom: new free bytes at head 17921da177e4SLinus Torvalds * @newtailroom: new free bytes at tail 17931da177e4SLinus Torvalds * @gfp_mask: allocation priority 17941da177e4SLinus Torvalds * 17951da177e4SLinus Torvalds * Make a copy of both an &sk_buff and its data and while doing so 17961da177e4SLinus Torvalds * allocate additional space. 17971da177e4SLinus Torvalds * 17981da177e4SLinus Torvalds * This is used when the caller wishes to modify the data and needs a 17991da177e4SLinus Torvalds * private copy of the data to alter as well as more space for new fields. 18001da177e4SLinus Torvalds * Returns %NULL on failure or the pointer to the buffer 18011da177e4SLinus Torvalds * on success. The returned buffer has a reference count of 1. 18021da177e4SLinus Torvalds * 18031da177e4SLinus Torvalds * You must pass %GFP_ATOMIC as the allocation priority if this function 18041da177e4SLinus Torvalds * is called from an interrupt. 18051da177e4SLinus Torvalds */ 18061da177e4SLinus Torvalds struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 180786a76cafSVictor Fusco int newheadroom, int newtailroom, 1808dd0fc66fSAl Viro gfp_t gfp_mask) 18091da177e4SLinus Torvalds { 18101da177e4SLinus Torvalds /* 18111da177e4SLinus Torvalds * Allocate the copy buffer 18121da177e4SLinus Torvalds */ 1813c93bdd0eSMel Gorman struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, 1814c93bdd0eSMel Gorman gfp_mask, skb_alloc_rx_flag(skb), 1815c93bdd0eSMel Gorman NUMA_NO_NODE); 1816efd1e8d5SPatrick McHardy int oldheadroom = skb_headroom(skb); 18171da177e4SLinus Torvalds int head_copy_len, head_copy_off; 18181da177e4SLinus Torvalds 18191da177e4SLinus Torvalds if (!n) 18201da177e4SLinus Torvalds return NULL; 18211da177e4SLinus Torvalds 18221da177e4SLinus Torvalds skb_reserve(n, newheadroom); 18231da177e4SLinus Torvalds 18241da177e4SLinus Torvalds /* Set the tail pointer and length */ 18251da177e4SLinus Torvalds skb_put(n, skb->len); 18261da177e4SLinus Torvalds 1827efd1e8d5SPatrick McHardy head_copy_len = oldheadroom; 18281da177e4SLinus Torvalds head_copy_off = 0; 18291da177e4SLinus Torvalds if (newheadroom <= head_copy_len) 18301da177e4SLinus Torvalds head_copy_len = newheadroom; 18311da177e4SLinus Torvalds else 18321da177e4SLinus Torvalds head_copy_off = newheadroom - head_copy_len; 18331da177e4SLinus Torvalds 18341da177e4SLinus Torvalds /* Copy the linear header and data. */ 18359f77fad3STim Hansen BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 18369f77fad3STim Hansen skb->len + head_copy_len)); 18371da177e4SLinus Torvalds 183808303c18SIlya Lesokhin skb_copy_header(n, skb); 18391da177e4SLinus Torvalds 1840030737bcSEric Dumazet skb_headers_offset_update(n, newheadroom - oldheadroom); 1841efd1e8d5SPatrick McHardy 18421da177e4SLinus Torvalds return n; 18431da177e4SLinus Torvalds } 1844b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_copy_expand); 18451da177e4SLinus Torvalds 18461da177e4SLinus Torvalds /** 1847cd0a137aSFlorian Fainelli * __skb_pad - zero pad the tail of an skb 18481da177e4SLinus Torvalds * @skb: buffer to pad 18491da177e4SLinus Torvalds * @pad: space to pad 1850cd0a137aSFlorian Fainelli * @free_on_error: free buffer on error 18511da177e4SLinus Torvalds * 18521da177e4SLinus Torvalds * Ensure that a buffer is followed by a padding area that is zero 18531da177e4SLinus Torvalds * filled. Used by network drivers which may DMA or transfer data 18541da177e4SLinus Torvalds * beyond the buffer end onto the wire. 18551da177e4SLinus Torvalds * 1856cd0a137aSFlorian Fainelli * May return error in out of memory cases. The skb is freed on error 1857cd0a137aSFlorian Fainelli * if @free_on_error is true. 18581da177e4SLinus Torvalds */ 18591da177e4SLinus Torvalds 1860cd0a137aSFlorian Fainelli int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) 18611da177e4SLinus Torvalds { 18625b057c6bSHerbert Xu int err; 18635b057c6bSHerbert Xu int ntail; 18641da177e4SLinus Torvalds 18651da177e4SLinus Torvalds /* If the skbuff is non linear tailroom is always zero.. */ 18665b057c6bSHerbert Xu if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 18671da177e4SLinus Torvalds memset(skb->data+skb->len, 0, pad); 18685b057c6bSHerbert Xu return 0; 18691da177e4SLinus Torvalds } 18701da177e4SLinus Torvalds 18714305b541SArnaldo Carvalho de Melo ntail = skb->data_len + pad - (skb->end - skb->tail); 18725b057c6bSHerbert Xu if (likely(skb_cloned(skb) || ntail > 0)) { 18735b057c6bSHerbert Xu err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 18745b057c6bSHerbert Xu if (unlikely(err)) 18755b057c6bSHerbert Xu goto free_skb; 18765b057c6bSHerbert Xu } 18775b057c6bSHerbert Xu 18785b057c6bSHerbert Xu /* FIXME: The use of this function with non-linear skb's really needs 18795b057c6bSHerbert Xu * to be audited. 18805b057c6bSHerbert Xu */ 18815b057c6bSHerbert Xu err = skb_linearize(skb); 18825b057c6bSHerbert Xu if (unlikely(err)) 18835b057c6bSHerbert Xu goto free_skb; 18845b057c6bSHerbert Xu 18855b057c6bSHerbert Xu memset(skb->data + skb->len, 0, pad); 18865b057c6bSHerbert Xu return 0; 18875b057c6bSHerbert Xu 18885b057c6bSHerbert Xu free_skb: 1889cd0a137aSFlorian Fainelli if (free_on_error) 18901da177e4SLinus Torvalds kfree_skb(skb); 18915b057c6bSHerbert Xu return err; 18921da177e4SLinus Torvalds } 1893cd0a137aSFlorian Fainelli EXPORT_SYMBOL(__skb_pad); 18941da177e4SLinus Torvalds 18950dde3e16SIlpo Järvinen /** 18960c7ddf36SMathias Krause * pskb_put - add data to the tail of a potentially fragmented buffer 18970c7ddf36SMathias Krause * @skb: start of the buffer to use 18980c7ddf36SMathias Krause * @tail: tail fragment of the buffer to use 18990c7ddf36SMathias Krause * @len: amount of data to add 19000c7ddf36SMathias Krause * 19010c7ddf36SMathias Krause * This function extends the used data area of the potentially 19020c7ddf36SMathias Krause * fragmented buffer. @tail must be the last fragment of @skb -- or 19030c7ddf36SMathias Krause * @skb itself. If this would exceed the total buffer size the kernel 19040c7ddf36SMathias Krause * will panic. A pointer to the first byte of the extra data is 19050c7ddf36SMathias Krause * returned. 
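 *
 * Sketch (illustrative; @tail and @trailer_len are assumed to be
 * tracked by the caller, e.g. when appending a trailer to the last
 * fragment of a chained buffer):
 *
 *	p = pskb_put(skb, tail, trailer_len);
 *	memset(p, 0, trailer_len);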
19060c7ddf36SMathias Krause */ 19070c7ddf36SMathias Krause 19084df864c1SJohannes Berg void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) 19090c7ddf36SMathias Krause { 19100c7ddf36SMathias Krause if (tail != skb) { 19110c7ddf36SMathias Krause skb->data_len += len; 19120c7ddf36SMathias Krause skb->len += len; 19130c7ddf36SMathias Krause } 19140c7ddf36SMathias Krause return skb_put(tail, len); 19150c7ddf36SMathias Krause } 19160c7ddf36SMathias Krause EXPORT_SYMBOL_GPL(pskb_put); 19170c7ddf36SMathias Krause 19180c7ddf36SMathias Krause /** 19190dde3e16SIlpo Järvinen * skb_put - add data to a buffer 19200dde3e16SIlpo Järvinen * @skb: buffer to use 19210dde3e16SIlpo Järvinen * @len: amount of data to add 19220dde3e16SIlpo Järvinen * 19230dde3e16SIlpo Järvinen * This function extends the used data area of the buffer. If this would 19240dde3e16SIlpo Järvinen * exceed the total buffer size the kernel will panic. A pointer to the 19250dde3e16SIlpo Järvinen * first byte of the extra data is returned. 19260dde3e16SIlpo Järvinen */ 19274df864c1SJohannes Berg void *skb_put(struct sk_buff *skb, unsigned int len) 19280dde3e16SIlpo Järvinen { 19294df864c1SJohannes Berg void *tmp = skb_tail_pointer(skb); 19300dde3e16SIlpo Järvinen SKB_LINEAR_ASSERT(skb); 19310dde3e16SIlpo Järvinen skb->tail += len; 19320dde3e16SIlpo Järvinen skb->len += len; 19330dde3e16SIlpo Järvinen if (unlikely(skb->tail > skb->end)) 19340dde3e16SIlpo Järvinen skb_over_panic(skb, len, __builtin_return_address(0)); 19350dde3e16SIlpo Järvinen return tmp; 19360dde3e16SIlpo Järvinen } 19370dde3e16SIlpo Järvinen EXPORT_SYMBOL(skb_put); 19380dde3e16SIlpo Järvinen 19396be8ac2fSIlpo Järvinen /** 1940c2aa270aSIlpo Järvinen * skb_push - add data to the start of a buffer 1941c2aa270aSIlpo Järvinen * @skb: buffer to use 1942c2aa270aSIlpo Järvinen * @len: amount of data to add 1943c2aa270aSIlpo Järvinen * 1944c2aa270aSIlpo Järvinen * This function extends the used data area of the buffer at the buffer 1945c2aa270aSIlpo Järvinen * start. If this would exceed the total buffer headroom the kernel will 1946c2aa270aSIlpo Järvinen * panic. A pointer to the first byte of the extra data is returned. 1947c2aa270aSIlpo Järvinen */ 1948d58ff351SJohannes Berg void *skb_push(struct sk_buff *skb, unsigned int len) 1949c2aa270aSIlpo Järvinen { 1950c2aa270aSIlpo Järvinen skb->data -= len; 1951c2aa270aSIlpo Järvinen skb->len += len; 1952c2aa270aSIlpo Järvinen if (unlikely(skb->data < skb->head)) 1953c2aa270aSIlpo Järvinen skb_under_panic(skb, len, __builtin_return_address(0)); 1954c2aa270aSIlpo Järvinen return skb->data; 1955c2aa270aSIlpo Järvinen } 1956c2aa270aSIlpo Järvinen EXPORT_SYMBOL(skb_push); 1957c2aa270aSIlpo Järvinen 1958c2aa270aSIlpo Järvinen /** 19596be8ac2fSIlpo Järvinen * skb_pull - remove data from the start of a buffer 19606be8ac2fSIlpo Järvinen * @skb: buffer to use 19616be8ac2fSIlpo Järvinen * @len: amount of data to remove 19626be8ac2fSIlpo Järvinen * 19636be8ac2fSIlpo Järvinen * This function removes data from the start of a buffer, returning 19646be8ac2fSIlpo Järvinen * the memory to the headroom. A pointer to the next data in the buffer 19656be8ac2fSIlpo Järvinen * is returned. Once the data has been pulled future pushes will overwrite 19666be8ac2fSIlpo Järvinen * the old data. 19676be8ac2fSIlpo Järvinen */ 1968af72868bSJohannes Berg void *skb_pull(struct sk_buff *skb, unsigned int len) 19696be8ac2fSIlpo Järvinen { 197047d29646SDavid S. 
Miller return skb_pull_inline(skb, len); 19716be8ac2fSIlpo Järvinen } 19726be8ac2fSIlpo Järvinen EXPORT_SYMBOL(skb_pull); 19736be8ac2fSIlpo Järvinen 1974419ae74eSIlpo Järvinen /** 1975419ae74eSIlpo Järvinen * skb_trim - remove end from a buffer 1976419ae74eSIlpo Järvinen * @skb: buffer to alter 1977419ae74eSIlpo Järvinen * @len: new length 1978419ae74eSIlpo Järvinen * 1979419ae74eSIlpo Järvinen * Cut the length of a buffer down by removing data from the tail. If 1980419ae74eSIlpo Järvinen * the buffer is already under the length specified it is not modified. 1981419ae74eSIlpo Järvinen * The skb must be linear. 1982419ae74eSIlpo Järvinen */ 1983419ae74eSIlpo Järvinen void skb_trim(struct sk_buff *skb, unsigned int len) 1984419ae74eSIlpo Järvinen { 1985419ae74eSIlpo Järvinen if (skb->len > len) 1986419ae74eSIlpo Järvinen __skb_trim(skb, len); 1987419ae74eSIlpo Järvinen } 1988419ae74eSIlpo Järvinen EXPORT_SYMBOL(skb_trim); 1989419ae74eSIlpo Järvinen 19903cc0e873SHerbert Xu /* Trims skb to length len. It can change skb pointers. 19911da177e4SLinus Torvalds */ 19921da177e4SLinus Torvalds 19933cc0e873SHerbert Xu int ___pskb_trim(struct sk_buff *skb, unsigned int len) 19941da177e4SLinus Torvalds { 199527b437c8SHerbert Xu struct sk_buff **fragp; 199627b437c8SHerbert Xu struct sk_buff *frag; 19971da177e4SLinus Torvalds int offset = skb_headlen(skb); 19981da177e4SLinus Torvalds int nfrags = skb_shinfo(skb)->nr_frags; 19991da177e4SLinus Torvalds int i; 200027b437c8SHerbert Xu int err; 200127b437c8SHerbert Xu 200227b437c8SHerbert Xu if (skb_cloned(skb) && 200327b437c8SHerbert Xu unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 200427b437c8SHerbert Xu return err; 20051da177e4SLinus Torvalds 2006f4d26fb3SHerbert Xu i = 0; 2007f4d26fb3SHerbert Xu if (offset >= len) 2008f4d26fb3SHerbert Xu goto drop_pages; 2009f4d26fb3SHerbert Xu 2010f4d26fb3SHerbert Xu for (; i < nfrags; i++) { 20119e903e08SEric Dumazet int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 201227b437c8SHerbert Xu 201327b437c8SHerbert Xu if (end < len) { 20141da177e4SLinus Torvalds offset = end; 201527b437c8SHerbert Xu continue; 20161da177e4SLinus Torvalds } 20171da177e4SLinus Torvalds 20189e903e08SEric Dumazet skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 201927b437c8SHerbert Xu 2020f4d26fb3SHerbert Xu drop_pages: 202127b437c8SHerbert Xu skb_shinfo(skb)->nr_frags = i; 202227b437c8SHerbert Xu 202327b437c8SHerbert Xu for (; i < nfrags; i++) 2024ea2ab693SIan Campbell skb_frag_unref(skb, i); 202527b437c8SHerbert Xu 202621dc3301SDavid S. 
Miller if (skb_has_frag_list(skb)) 202727b437c8SHerbert Xu skb_drop_fraglist(skb); 2028f4d26fb3SHerbert Xu goto done; 202927b437c8SHerbert Xu } 203027b437c8SHerbert Xu 203127b437c8SHerbert Xu for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 203227b437c8SHerbert Xu fragp = &frag->next) { 203327b437c8SHerbert Xu int end = offset + frag->len; 203427b437c8SHerbert Xu 203527b437c8SHerbert Xu if (skb_shared(frag)) { 203627b437c8SHerbert Xu struct sk_buff *nfrag; 203727b437c8SHerbert Xu 203827b437c8SHerbert Xu nfrag = skb_clone(frag, GFP_ATOMIC); 203927b437c8SHerbert Xu if (unlikely(!nfrag)) 204027b437c8SHerbert Xu return -ENOMEM; 204127b437c8SHerbert Xu 204227b437c8SHerbert Xu nfrag->next = frag->next; 204385bb2a60SEric Dumazet consume_skb(frag); 204427b437c8SHerbert Xu frag = nfrag; 204527b437c8SHerbert Xu *fragp = frag; 204627b437c8SHerbert Xu } 204727b437c8SHerbert Xu 204827b437c8SHerbert Xu if (end < len) { 204927b437c8SHerbert Xu offset = end; 205027b437c8SHerbert Xu continue; 205127b437c8SHerbert Xu } 205227b437c8SHerbert Xu 205327b437c8SHerbert Xu if (end > len && 205427b437c8SHerbert Xu unlikely((err = pskb_trim(frag, len - offset)))) 205527b437c8SHerbert Xu return err; 205627b437c8SHerbert Xu 205727b437c8SHerbert Xu if (frag->next) 205827b437c8SHerbert Xu skb_drop_list(&frag->next); 205927b437c8SHerbert Xu break; 206027b437c8SHerbert Xu } 206127b437c8SHerbert Xu 2062f4d26fb3SHerbert Xu done: 206327b437c8SHerbert Xu if (len > skb_headlen(skb)) { 20641da177e4SLinus Torvalds skb->data_len -= skb->len - len; 20651da177e4SLinus Torvalds skb->len = len; 20661da177e4SLinus Torvalds } else { 20671da177e4SLinus Torvalds skb->len = len; 20681da177e4SLinus Torvalds skb->data_len = 0; 206927a884dcSArnaldo Carvalho de Melo skb_set_tail_pointer(skb, len); 20701da177e4SLinus Torvalds } 20711da177e4SLinus Torvalds 2072c21b48ccSEric Dumazet if (!skb->sk || skb->destructor == sock_edemux) 2073c21b48ccSEric Dumazet skb_condense(skb); 20741da177e4SLinus Torvalds return 0; 20751da177e4SLinus Torvalds } 2076b4ac530fSDavid S. Miller EXPORT_SYMBOL(___pskb_trim); 20771da177e4SLinus Torvalds 207888078d98SEric Dumazet /* Note : use pskb_trim_rcsum() instead of calling this directly 207988078d98SEric Dumazet */ 208088078d98SEric Dumazet int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) 208188078d98SEric Dumazet { 208288078d98SEric Dumazet if (skb->ip_summed == CHECKSUM_COMPLETE) { 208388078d98SEric Dumazet int delta = skb->len - len; 208488078d98SEric Dumazet 2085d55bef50SDimitris Michailidis skb->csum = csum_block_sub(skb->csum, 2086d55bef50SDimitris Michailidis skb_checksum(skb, len, delta, 0), 2087d55bef50SDimitris Michailidis len); 208854970a2fSVasily Averin } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 208954970a2fSVasily Averin int hdlen = (len > skb_headlen(skb)) ? 
skb_headlen(skb) : len; 209054970a2fSVasily Averin int offset = skb_checksum_start_offset(skb) + skb->csum_offset; 209154970a2fSVasily Averin 209254970a2fSVasily Averin if (offset + sizeof(__sum16) > hdlen) 209354970a2fSVasily Averin return -EINVAL; 209488078d98SEric Dumazet } 209588078d98SEric Dumazet return __pskb_trim(skb, len); 209688078d98SEric Dumazet } 209788078d98SEric Dumazet EXPORT_SYMBOL(pskb_trim_rcsum_slow); 209888078d98SEric Dumazet 20991da177e4SLinus Torvalds /** 21001da177e4SLinus Torvalds * __pskb_pull_tail - advance tail of skb header 21011da177e4SLinus Torvalds * @skb: buffer to reallocate 21021da177e4SLinus Torvalds * @delta: number of bytes to advance tail 21031da177e4SLinus Torvalds * 21041da177e4SLinus Torvalds * The function makes sense only on a fragmented &sk_buff: 21051da177e4SLinus Torvalds * it expands the header, moving its tail forward and copying the necessary 21061da177e4SLinus Torvalds * data from the fragmented part. 21071da177e4SLinus Torvalds * 21081da177e4SLinus Torvalds * The &sk_buff MUST have a reference count of 1. 21091da177e4SLinus Torvalds * 21101da177e4SLinus Torvalds * Returns %NULL (and the &sk_buff is unchanged) if the pull failed, 21111da177e4SLinus Torvalds * or the value of the new tail of the skb in the case of success. 21121da177e4SLinus Torvalds * 21131da177e4SLinus Torvalds * All the pointers pointing into the skb header may change and must be 21141da177e4SLinus Torvalds * reloaded after a call to this function. 21151da177e4SLinus Torvalds */ 21161da177e4SLinus Torvalds 21171da177e4SLinus Torvalds /* Moves the tail of the skb head forward, copying data from the fragmented 21181da177e4SLinus Torvalds * part when necessary. 21191da177e4SLinus Torvalds * 1. It may fail due to malloc failure. 21201da177e4SLinus Torvalds * 2. It may change skb pointers. 21211da177e4SLinus Torvalds * 21221da177e4SLinus Torvalds * It is pretty complicated. Luckily, it is called only in exceptional cases. 21231da177e4SLinus Torvalds */ 2124af72868bSJohannes Berg void *__pskb_pull_tail(struct sk_buff *skb, int delta) 21251da177e4SLinus Torvalds { 21261da177e4SLinus Torvalds /* If the skb does not have enough free space at the tail, get a new one, 21271da177e4SLinus Torvalds * plus 128 bytes for future expansions. If we have enough 21281da177e4SLinus Torvalds * room at the tail, reallocate without expansion only if the skb is cloned. 21291da177e4SLinus Torvalds */ 21304305b541SArnaldo Carvalho de Melo int i, k, eat = (skb->tail + delta) - skb->end; 21311da177e4SLinus Torvalds 21321da177e4SLinus Torvalds if (eat > 0 || skb_cloned(skb)) { 21331da177e4SLinus Torvalds if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 21341da177e4SLinus Torvalds GFP_ATOMIC)) 21351da177e4SLinus Torvalds return NULL; 21361da177e4SLinus Torvalds } 21371da177e4SLinus Torvalds 21389f77fad3STim Hansen BUG_ON(skb_copy_bits(skb, skb_headlen(skb), 21399f77fad3STim Hansen skb_tail_pointer(skb), delta)); 21401da177e4SLinus Torvalds 21411da177e4SLinus Torvalds /* Optimization: no fragments, no reason to preestimate the 21421da177e4SLinus Torvalds * size of pulled pages. Superb. 21431da177e4SLinus Torvalds */ 214421dc3301SDavid S. Miller if (!skb_has_frag_list(skb)) 21451da177e4SLinus Torvalds goto pull_pages; 21461da177e4SLinus Torvalds 21471da177e4SLinus Torvalds /* Estimate the size of pulled pages.
*/ 21481da177e4SLinus Torvalds eat = delta; 21491da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 21509e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 21519e903e08SEric Dumazet 21529e903e08SEric Dumazet if (size >= eat) 21531da177e4SLinus Torvalds goto pull_pages; 21549e903e08SEric Dumazet eat -= size; 21551da177e4SLinus Torvalds } 21561da177e4SLinus Torvalds 21571da177e4SLinus Torvalds /* If we need to update the frag list, we are in trouble. 215809001b03SWenhua Shi * Certainly, it is possible to add an offset to the skb data, 21591da177e4SLinus Torvalds * but taking into account that pulling is expected to 21601da177e4SLinus Torvalds * be a very rare operation, it is worth fighting against 21611da177e4SLinus Torvalds * further bloating of the skb head and crucifying ourselves here instead. 21621da177e4SLinus Torvalds * Pure masochism, indeed. 8)8) 21631da177e4SLinus Torvalds */ 21641da177e4SLinus Torvalds if (eat) { 21651da177e4SLinus Torvalds struct sk_buff *list = skb_shinfo(skb)->frag_list; 21661da177e4SLinus Torvalds struct sk_buff *clone = NULL; 21671da177e4SLinus Torvalds struct sk_buff *insp = NULL; 21681da177e4SLinus Torvalds 21691da177e4SLinus Torvalds do { 21701da177e4SLinus Torvalds if (list->len <= eat) { 21711da177e4SLinus Torvalds /* Eaten as a whole. */ 21721da177e4SLinus Torvalds eat -= list->len; 21731da177e4SLinus Torvalds list = list->next; 21741da177e4SLinus Torvalds insp = list; 21751da177e4SLinus Torvalds } else { 21761da177e4SLinus Torvalds /* Eaten partially. */ 21771da177e4SLinus Torvalds 21781da177e4SLinus Torvalds if (skb_shared(list)) { 21791da177e4SLinus Torvalds /* Sucks! We need to fork the list. :-( */ 21801da177e4SLinus Torvalds clone = skb_clone(list, GFP_ATOMIC); 21811da177e4SLinus Torvalds if (!clone) 21821da177e4SLinus Torvalds return NULL; 21831da177e4SLinus Torvalds insp = list->next; 21841da177e4SLinus Torvalds list = clone; 21851da177e4SLinus Torvalds } else { 21861da177e4SLinus Torvalds /* This may be pulled without 21871da177e4SLinus Torvalds * problems. */ 21881da177e4SLinus Torvalds insp = list; 21891da177e4SLinus Torvalds } 21901da177e4SLinus Torvalds if (!pskb_pull(list, eat)) { 21911da177e4SLinus Torvalds kfree_skb(clone); 21921da177e4SLinus Torvalds return NULL; 21931da177e4SLinus Torvalds } 21941da177e4SLinus Torvalds break; 21951da177e4SLinus Torvalds } 21961da177e4SLinus Torvalds } while (eat); 21971da177e4SLinus Torvalds 21981da177e4SLinus Torvalds /* Free the pulled-out fragments. */ 21991da177e4SLinus Torvalds while ((list = skb_shinfo(skb)->frag_list) != insp) { 22001da177e4SLinus Torvalds skb_shinfo(skb)->frag_list = list->next; 22011da177e4SLinus Torvalds kfree_skb(list); 22021da177e4SLinus Torvalds } 22031da177e4SLinus Torvalds /* And insert the new clone at the head. */ 22041da177e4SLinus Torvalds if (clone) { 22051da177e4SLinus Torvalds clone->next = list; 22061da177e4SLinus Torvalds skb_shinfo(skb)->frag_list = clone; 22071da177e4SLinus Torvalds } 22081da177e4SLinus Torvalds } 22091da177e4SLinus Torvalds /* Success! Now we may commit changes to the skb data.
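 * Tail and data_len are updated below; once nothing is left outside
 * the linear area, the zerocopy state is dropped as well.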
*/ 22101da177e4SLinus Torvalds 22111da177e4SLinus Torvalds pull_pages: 22121da177e4SLinus Torvalds eat = delta; 22131da177e4SLinus Torvalds k = 0; 22141da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 22159e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 22169e903e08SEric Dumazet 22179e903e08SEric Dumazet if (size <= eat) { 2218ea2ab693SIan Campbell skb_frag_unref(skb, i); 22199e903e08SEric Dumazet eat -= size; 22201da177e4SLinus Torvalds } else { 2221b54c9d5bSJonathan Lemon skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2222b54c9d5bSJonathan Lemon 2223b54c9d5bSJonathan Lemon *frag = skb_shinfo(skb)->frags[i]; 22241da177e4SLinus Torvalds if (eat) { 2225b54c9d5bSJonathan Lemon skb_frag_off_add(frag, eat); 2226b54c9d5bSJonathan Lemon skb_frag_size_sub(frag, eat); 22273ccc6c6fSlinzhang if (!i) 22283ccc6c6fSlinzhang goto end; 22291da177e4SLinus Torvalds eat = 0; 22301da177e4SLinus Torvalds } 22311da177e4SLinus Torvalds k++; 22321da177e4SLinus Torvalds } 22331da177e4SLinus Torvalds } 22341da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = k; 22351da177e4SLinus Torvalds 22363ccc6c6fSlinzhang end: 22371da177e4SLinus Torvalds skb->tail += delta; 22381da177e4SLinus Torvalds skb->data_len -= delta; 22391da177e4SLinus Torvalds 22401f8b977aSWillem de Bruijn if (!skb->data_len) 22411f8b977aSWillem de Bruijn skb_zcopy_clear(skb, false); 22421f8b977aSWillem de Bruijn 224327a884dcSArnaldo Carvalho de Melo return skb_tail_pointer(skb); 22441da177e4SLinus Torvalds } 2245b4ac530fSDavid S. Miller EXPORT_SYMBOL(__pskb_pull_tail); 22461da177e4SLinus Torvalds 224722019b17SEric Dumazet /** 224822019b17SEric Dumazet * skb_copy_bits - copy bits from skb to kernel buffer 224922019b17SEric Dumazet * @skb: source skb 225022019b17SEric Dumazet * @offset: offset in source 225122019b17SEric Dumazet * @to: destination buffer 225222019b17SEric Dumazet * @len: number of bytes to copy 225322019b17SEric Dumazet * 225422019b17SEric Dumazet * Copy the specified number of bytes from the source skb to the 225522019b17SEric Dumazet * destination buffer. 225622019b17SEric Dumazet * 225722019b17SEric Dumazet * CAUTION ! : 225822019b17SEric Dumazet * If its prototype is ever changed, 225922019b17SEric Dumazet * check arch/{*}/net/{*}.S files, 226022019b17SEric Dumazet * since it is called from BPF assembly code. 226122019b17SEric Dumazet */ 22621da177e4SLinus Torvalds int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 22631da177e4SLinus Torvalds { 22641a028e50SDavid S. Miller int start = skb_headlen(skb); 2265fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 2266fbb398a8SDavid S. Miller int i, copy; 22671da177e4SLinus Torvalds 22681da177e4SLinus Torvalds if (offset > (int)skb->len - len) 22691da177e4SLinus Torvalds goto fault; 22701da177e4SLinus Torvalds 22711da177e4SLinus Torvalds /* Copy header. */ 22721a028e50SDavid S. Miller if ((copy = start - offset) > 0) { 22731da177e4SLinus Torvalds if (copy > len) 22741da177e4SLinus Torvalds copy = len; 2275d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, offset, to, copy); 22761da177e4SLinus Torvalds if ((len -= copy) == 0) 22771da177e4SLinus Torvalds return 0; 22781da177e4SLinus Torvalds offset += copy; 22791da177e4SLinus Torvalds to += copy; 22801da177e4SLinus Torvalds } 22811da177e4SLinus Torvalds 22821da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 22831a028e50SDavid S. 
Miller int end; 228451c56b00SEric Dumazet skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 22851da177e4SLinus Torvalds 2286547b792cSIlpo Järvinen WARN_ON(start > offset + len); 22871a028e50SDavid S. Miller 228851c56b00SEric Dumazet end = start + skb_frag_size(f); 22891da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 2290c613c209SWillem de Bruijn u32 p_off, p_len, copied; 2291c613c209SWillem de Bruijn struct page *p; 22921da177e4SLinus Torvalds u8 *vaddr; 22931da177e4SLinus Torvalds 22941da177e4SLinus Torvalds if (copy > len) 22951da177e4SLinus Torvalds copy = len; 22961da177e4SLinus Torvalds 2297c613c209SWillem de Bruijn skb_frag_foreach_page(f, 2298b54c9d5bSJonathan Lemon skb_frag_off(f) + offset - start, 2299c613c209SWillem de Bruijn copy, p, p_off, p_len, copied) { 2300c613c209SWillem de Bruijn vaddr = kmap_atomic(p); 2301c613c209SWillem de Bruijn memcpy(to + copied, vaddr + p_off, p_len); 230251c56b00SEric Dumazet kunmap_atomic(vaddr); 2303c613c209SWillem de Bruijn } 23041da177e4SLinus Torvalds 23051da177e4SLinus Torvalds if ((len -= copy) == 0) 23061da177e4SLinus Torvalds return 0; 23071da177e4SLinus Torvalds offset += copy; 23081da177e4SLinus Torvalds to += copy; 23091da177e4SLinus Torvalds } 23101a028e50SDavid S. Miller start = end; 23111da177e4SLinus Torvalds } 23121da177e4SLinus Torvalds 2313fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 23141a028e50SDavid S. Miller int end; 23151da177e4SLinus Torvalds 2316547b792cSIlpo Järvinen WARN_ON(start > offset + len); 23171a028e50SDavid S. Miller 2318fbb398a8SDavid S. Miller end = start + frag_iter->len; 23191da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 23201da177e4SLinus Torvalds if (copy > len) 23211da177e4SLinus Torvalds copy = len; 2322fbb398a8SDavid S. Miller if (skb_copy_bits(frag_iter, offset - start, to, copy)) 23231da177e4SLinus Torvalds goto fault; 23241da177e4SLinus Torvalds if ((len -= copy) == 0) 23251da177e4SLinus Torvalds return 0; 23261da177e4SLinus Torvalds offset += copy; 23271da177e4SLinus Torvalds to += copy; 23281da177e4SLinus Torvalds } 23291a028e50SDavid S. Miller start = end; 23301da177e4SLinus Torvalds } 2331a6686f2fSShirley Ma 23321da177e4SLinus Torvalds if (!len) 23331da177e4SLinus Torvalds return 0; 23341da177e4SLinus Torvalds 23351da177e4SLinus Torvalds fault: 23361da177e4SLinus Torvalds return -EFAULT; 23371da177e4SLinus Torvalds } 2338b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_bits); 23391da177e4SLinus Torvalds 23409c55e01cSJens Axboe /* 23419c55e01cSJens Axboe * Callback from splice_to_pipe(), if we need to release some pages 23429c55e01cSJens Axboe * at the end of the spd in case we errored out in filling the pipe. 23439c55e01cSJens Axboe */ 23449c55e01cSJens Axboe static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 23459c55e01cSJens Axboe { 23468b9d3728SJarek Poplawski put_page(spd->pages[i]); 23478b9d3728SJarek Poplawski } 23489c55e01cSJens Axboe 2349a108d5f3SDavid S.
Miller static struct page *linear_to_page(struct page *page, unsigned int *len, 23504fb66994SJarek Poplawski unsigned int *offset, 235118aafc62SEric Dumazet struct sock *sk) 23528b9d3728SJarek Poplawski { 23535640f768SEric Dumazet struct page_frag *pfrag = sk_page_frag(sk); 23548b9d3728SJarek Poplawski 23555640f768SEric Dumazet if (!sk_page_frag_refill(sk, pfrag)) 23568b9d3728SJarek Poplawski return NULL; 23574fb66994SJarek Poplawski 23585640f768SEric Dumazet *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 23594fb66994SJarek Poplawski 23605640f768SEric Dumazet memcpy(page_address(pfrag->page) + pfrag->offset, 23615640f768SEric Dumazet page_address(page) + *offset, *len); 23625640f768SEric Dumazet *offset = pfrag->offset; 23635640f768SEric Dumazet pfrag->offset += *len; 23644fb66994SJarek Poplawski 23655640f768SEric Dumazet return pfrag->page; 23669c55e01cSJens Axboe } 23679c55e01cSJens Axboe 236841c73a0dSEric Dumazet static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 236941c73a0dSEric Dumazet struct page *page, 237041c73a0dSEric Dumazet unsigned int offset) 237141c73a0dSEric Dumazet { 237241c73a0dSEric Dumazet return spd->nr_pages && 237341c73a0dSEric Dumazet spd->pages[spd->nr_pages - 1] == page && 237441c73a0dSEric Dumazet (spd->partial[spd->nr_pages - 1].offset + 237541c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len == offset); 237641c73a0dSEric Dumazet } 237741c73a0dSEric Dumazet 23789c55e01cSJens Axboe /* 23799c55e01cSJens Axboe * Fill page/offset/length into spd, if it can hold more pages. 23809c55e01cSJens Axboe */ 2381a108d5f3SDavid S. Miller static bool spd_fill_page(struct splice_pipe_desc *spd, 238235f3d14dSJens Axboe struct pipe_inode_info *pipe, struct page *page, 23834fb66994SJarek Poplawski unsigned int *len, unsigned int offset, 238418aafc62SEric Dumazet bool linear, 23857a67e56fSJarek Poplawski struct sock *sk) 23869c55e01cSJens Axboe { 238741c73a0dSEric Dumazet if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 2388a108d5f3SDavid S. Miller return true; 23899c55e01cSJens Axboe 23908b9d3728SJarek Poplawski if (linear) { 239118aafc62SEric Dumazet page = linear_to_page(page, len, &offset, sk); 23928b9d3728SJarek Poplawski if (!page) 2393a108d5f3SDavid S. Miller return true; 239441c73a0dSEric Dumazet } 239541c73a0dSEric Dumazet if (spd_can_coalesce(spd, page, offset)) { 239641c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len += *len; 2397a108d5f3SDavid S. Miller return false; 239841c73a0dSEric Dumazet } 23998b9d3728SJarek Poplawski get_page(page); 24009c55e01cSJens Axboe spd->pages[spd->nr_pages] = page; 24014fb66994SJarek Poplawski spd->partial[spd->nr_pages].len = *len; 24029c55e01cSJens Axboe spd->partial[spd->nr_pages].offset = offset; 24039c55e01cSJens Axboe spd->nr_pages++; 24048b9d3728SJarek Poplawski 2405a108d5f3SDavid S. Miller return false; 24069c55e01cSJens Axboe } 24079c55e01cSJens Axboe 2408a108d5f3SDavid S. Miller static bool __splice_segment(struct page *page, unsigned int poff, 24092870c43dSOctavian Purdila unsigned int plen, unsigned int *off, 241018aafc62SEric Dumazet unsigned int *len, 2411d7ccf7c0SEric Dumazet struct splice_pipe_desc *spd, bool linear, 241235f3d14dSJens Axboe struct sock *sk, 241335f3d14dSJens Axboe struct pipe_inode_info *pipe) 24149c55e01cSJens Axboe { 24152870c43dSOctavian Purdila if (!*len) 2416a108d5f3SDavid S. 
Miller return true; 24179c55e01cSJens Axboe 24182870c43dSOctavian Purdila /* skip this segment if already processed */ 24192870c43dSOctavian Purdila if (*off >= plen) { 24202870c43dSOctavian Purdila *off -= plen; 2421a108d5f3SDavid S. Miller return false; 24222870c43dSOctavian Purdila } 24232870c43dSOctavian Purdila 24242870c43dSOctavian Purdila /* ignore any bits we already processed */ 24259ca1b22dSEric Dumazet poff += *off; 24269ca1b22dSEric Dumazet plen -= *off; 24272870c43dSOctavian Purdila *off = 0; 24282870c43dSOctavian Purdila 242918aafc62SEric Dumazet do { 243018aafc62SEric Dumazet unsigned int flen = min(*len, plen); 24312870c43dSOctavian Purdila 243218aafc62SEric Dumazet if (spd_fill_page(spd, pipe, page, &flen, poff, 243318aafc62SEric Dumazet linear, sk)) 2434a108d5f3SDavid S. Miller return true; 243518aafc62SEric Dumazet poff += flen; 243618aafc62SEric Dumazet plen -= flen; 24372870c43dSOctavian Purdila *len -= flen; 243818aafc62SEric Dumazet } while (*len && plen); 24392870c43dSOctavian Purdila 2440a108d5f3SDavid S. Miller return false; 2441db43a282SOctavian Purdila } 24429c55e01cSJens Axboe 24439c55e01cSJens Axboe /* 2444a108d5f3SDavid S. Miller * Map linear and fragment data from the skb to spd. It reports true if the 24452870c43dSOctavian Purdila * pipe is full or if we already spliced the requested length. 24469c55e01cSJens Axboe */ 2447a108d5f3SDavid S. Miller static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 244835f3d14dSJens Axboe unsigned int *offset, unsigned int *len, 244935f3d14dSJens Axboe struct splice_pipe_desc *spd, struct sock *sk) 24502870c43dSOctavian Purdila { 24512870c43dSOctavian Purdila int seg; 2452fa9835e5STom Herbert struct sk_buff *iter; 24539c55e01cSJens Axboe 24541d0c0b32SEric Dumazet /* map the linear part : 24552996d31fSAlexander Duyck * If skb->head_frag is set, this 'linear' part is backed by a 24562996d31fSAlexander Duyck * fragment, and if the head is not shared with any clones then 24572996d31fSAlexander Duyck * we can avoid a copy since we own the head portion of this page. 24589c55e01cSJens Axboe */ 24592870c43dSOctavian Purdila if (__splice_segment(virt_to_page(skb->data), 24602870c43dSOctavian Purdila (unsigned long) skb->data & (PAGE_SIZE - 1), 24612870c43dSOctavian Purdila skb_headlen(skb), 246218aafc62SEric Dumazet offset, len, spd, 24633a7c1ee4SAlexander Duyck skb_head_is_locked(skb), 24641d0c0b32SEric Dumazet sk, pipe)) 2465a108d5f3SDavid S. Miller return true; 24669c55e01cSJens Axboe 24679c55e01cSJens Axboe /* 24689c55e01cSJens Axboe * then map the fragments 24699c55e01cSJens Axboe */ 24709c55e01cSJens Axboe for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 24719c55e01cSJens Axboe const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 24729c55e01cSJens Axboe 2473ea2ab693SIan Campbell if (__splice_segment(skb_frag_page(f), 2474b54c9d5bSJonathan Lemon skb_frag_off(f), skb_frag_size(f), 247518aafc62SEric Dumazet offset, len, spd, false, sk, pipe)) 2476a108d5f3SDavid S. Miller return true; 24779c55e01cSJens Axboe } 24789c55e01cSJens Axboe 2479fa9835e5STom Herbert skb_walk_frags(skb, iter) { 2480fa9835e5STom Herbert if (*offset >= iter->len) { 2481fa9835e5STom Herbert *offset -= iter->len; 2482fa9835e5STom Herbert continue; 2483fa9835e5STom Herbert } 2484fa9835e5STom Herbert /* __skb_splice_bits() only fails if the output has no room 2485fa9835e5STom Herbert * left, so no point in going over the frag_list for the error 2486fa9835e5STom Herbert * case. 
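 * A %true return from __skb_splice_bits() therefore means "the pipe is
 * full", not that an error occurred; skb_splice_bits() below still
 * splices whatever was queued in the descriptor so far.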
2487fa9835e5STom Herbert */ 2488fa9835e5STom Herbert if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) 2489fa9835e5STom Herbert return true; 2490fa9835e5STom Herbert } 2491fa9835e5STom Herbert 2492a108d5f3SDavid S. Miller return false; 24939c55e01cSJens Axboe } 24949c55e01cSJens Axboe 24959c55e01cSJens Axboe /* 24969c55e01cSJens Axboe * Map data from the skb to a pipe. Should handle the linear part, 2497fa9835e5STom Herbert * the fragments, and the frag list. 24989c55e01cSJens Axboe */ 2499a60e3cc7SHannes Frederic Sowa int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 25009c55e01cSJens Axboe struct pipe_inode_info *pipe, unsigned int tlen, 250125869262SAl Viro unsigned int flags) 25029c55e01cSJens Axboe { 250341c73a0dSEric Dumazet struct partial_page partial[MAX_SKB_FRAGS]; 250441c73a0dSEric Dumazet struct page *pages[MAX_SKB_FRAGS]; 25059c55e01cSJens Axboe struct splice_pipe_desc spd = { 25069c55e01cSJens Axboe .pages = pages, 25079c55e01cSJens Axboe .partial = partial, 2508047fe360SEric Dumazet .nr_pages_max = MAX_SKB_FRAGS, 250928a625cbSMiklos Szeredi .ops = &nosteal_pipe_buf_ops, 25109c55e01cSJens Axboe .spd_release = sock_spd_release, 25119c55e01cSJens Axboe }; 251235f3d14dSJens Axboe int ret = 0; 251335f3d14dSJens Axboe 2514fa9835e5STom Herbert __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); 25159c55e01cSJens Axboe 2516a60e3cc7SHannes Frederic Sowa if (spd.nr_pages) 251725869262SAl Viro ret = splice_to_pipe(pipe, &spd); 25189c55e01cSJens Axboe 251935f3d14dSJens Axboe return ret; 25209c55e01cSJens Axboe } 25212b514574SHannes Frederic Sowa EXPORT_SYMBOL_GPL(skb_splice_bits); 25229c55e01cSJens Axboe 25230739cd28SCong Wang static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg, 25240739cd28SCong Wang struct kvec *vec, size_t num, size_t size) 25250739cd28SCong Wang { 25260739cd28SCong Wang struct socket *sock = sk->sk_socket; 25270739cd28SCong Wang 25280739cd28SCong Wang if (!sock) 25290739cd28SCong Wang return -EINVAL; 25300739cd28SCong Wang return kernel_sendmsg(sock, msg, vec, num, size); 25310739cd28SCong Wang } 25320739cd28SCong Wang 25330739cd28SCong Wang static int sendpage_unlocked(struct sock *sk, struct page *page, int offset, 25340739cd28SCong Wang size_t size, int flags) 25350739cd28SCong Wang { 25360739cd28SCong Wang struct socket *sock = sk->sk_socket; 25370739cd28SCong Wang 25380739cd28SCong Wang if (!sock) 25390739cd28SCong Wang return -EINVAL; 25400739cd28SCong Wang return kernel_sendpage(sock, page, offset, size, flags); 25410739cd28SCong Wang } 25420739cd28SCong Wang 25430739cd28SCong Wang typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg, 25440739cd28SCong Wang struct kvec *vec, size_t num, size_t size); 25450739cd28SCong Wang typedef int (*sendpage_func)(struct sock *sk, struct page *page, int offset, 25460739cd28SCong Wang size_t size, int flags); 25470739cd28SCong Wang static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, 25480739cd28SCong Wang int len, sendmsg_func sendmsg, sendpage_func sendpage) 254920bf50deSTom Herbert { 255020bf50deSTom Herbert unsigned int orig_len = len; 255120bf50deSTom Herbert struct sk_buff *head = skb; 255220bf50deSTom Herbert unsigned short fragidx; 255320bf50deSTom Herbert int slen, ret; 255420bf50deSTom Herbert 255520bf50deSTom Herbert do_frag_list: 255620bf50deSTom Herbert 255720bf50deSTom Herbert /* Deal with head data */ 255820bf50deSTom Herbert while (offset < skb_headlen(skb) && len) { 255920bf50deSTom Herbert struct kvec kv;
256020bf50deSTom Herbert struct msghdr msg; 256120bf50deSTom Herbert 256220bf50deSTom Herbert slen = min_t(int, len, skb_headlen(skb) - offset); 256320bf50deSTom Herbert kv.iov_base = skb->data + offset; 2564db5980d8SJohn Fastabend kv.iov_len = slen; 256520bf50deSTom Herbert memset(&msg, 0, sizeof(msg)); 2566bd95e678SJohn Fastabend msg.msg_flags = MSG_DONTWAIT; 256720bf50deSTom Herbert 25680739cd28SCong Wang ret = INDIRECT_CALL_2(sendmsg, kernel_sendmsg_locked, 25690739cd28SCong Wang sendmsg_unlocked, sk, &msg, &kv, 1, slen); 257020bf50deSTom Herbert if (ret <= 0) 257120bf50deSTom Herbert goto error; 257220bf50deSTom Herbert 257320bf50deSTom Herbert offset += ret; 257420bf50deSTom Herbert len -= ret; 257520bf50deSTom Herbert } 257620bf50deSTom Herbert 257720bf50deSTom Herbert /* All the data was skb head? */ 257820bf50deSTom Herbert if (!len) 257920bf50deSTom Herbert goto out; 258020bf50deSTom Herbert 258120bf50deSTom Herbert /* Make offset relative to start of frags */ 258220bf50deSTom Herbert offset -= skb_headlen(skb); 258320bf50deSTom Herbert 258420bf50deSTom Herbert /* Find where we are in frag list */ 258520bf50deSTom Herbert for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 258620bf50deSTom Herbert skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 258720bf50deSTom Herbert 2588d8e18a51SMatthew Wilcox (Oracle) if (offset < skb_frag_size(frag)) 258920bf50deSTom Herbert break; 259020bf50deSTom Herbert 2591d8e18a51SMatthew Wilcox (Oracle) offset -= skb_frag_size(frag); 259220bf50deSTom Herbert } 259320bf50deSTom Herbert 259420bf50deSTom Herbert for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 259520bf50deSTom Herbert skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 259620bf50deSTom Herbert 2597d8e18a51SMatthew Wilcox (Oracle) slen = min_t(size_t, len, skb_frag_size(frag) - offset); 259820bf50deSTom Herbert 259920bf50deSTom Herbert while (slen) { 26000739cd28SCong Wang ret = INDIRECT_CALL_2(sendpage, kernel_sendpage_locked, 26010739cd28SCong Wang sendpage_unlocked, sk, 26020739cd28SCong Wang skb_frag_page(frag), 2603b54c9d5bSJonathan Lemon skb_frag_off(frag) + offset, 260420bf50deSTom Herbert slen, MSG_DONTWAIT); 260520bf50deSTom Herbert if (ret <= 0) 260620bf50deSTom Herbert goto error; 260720bf50deSTom Herbert 260820bf50deSTom Herbert len -= ret; 260920bf50deSTom Herbert offset += ret; 261020bf50deSTom Herbert slen -= ret; 261120bf50deSTom Herbert } 261220bf50deSTom Herbert 261320bf50deSTom Herbert offset = 0; 261420bf50deSTom Herbert } 261520bf50deSTom Herbert 261620bf50deSTom Herbert if (len) { 261720bf50deSTom Herbert /* Process any frag lists */ 261820bf50deSTom Herbert 261920bf50deSTom Herbert if (skb == head) { 262020bf50deSTom Herbert if (skb_has_frag_list(skb)) { 262120bf50deSTom Herbert skb = skb_shinfo(skb)->frag_list; 262220bf50deSTom Herbert goto do_frag_list; 262320bf50deSTom Herbert } 262420bf50deSTom Herbert } else if (skb->next) { 262520bf50deSTom Herbert skb = skb->next; 262620bf50deSTom Herbert goto do_frag_list; 262720bf50deSTom Herbert } 262820bf50deSTom Herbert } 262920bf50deSTom Herbert 263020bf50deSTom Herbert out: 263120bf50deSTom Herbert return orig_len - len; 263220bf50deSTom Herbert 263320bf50deSTom Herbert error: 263420bf50deSTom Herbert return orig_len == len ? ret : orig_len - len; 263520bf50deSTom Herbert } 26360739cd28SCong Wang 26370739cd28SCong Wang /* Send skb data on a socket. Socket must be locked. 
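 *
 * Minimal usage sketch (hypothetical caller, error handling elided):
 *
 *	lock_sock(sk);
 *	ret = skb_send_sock_locked(sk, skb, 0, skb->len);
 *	release_sock(sk);
 *
 * On failure the return value is the number of bytes already sent, or
 * the negative error code when nothing was sent at all.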
*/ 26380739cd28SCong Wang int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, 26390739cd28SCong Wang int len) 26400739cd28SCong Wang { 26410739cd28SCong Wang return __skb_send_sock(sk, skb, offset, len, kernel_sendmsg_locked, 26420739cd28SCong Wang kernel_sendpage_locked); 26430739cd28SCong Wang } 264420bf50deSTom Herbert EXPORT_SYMBOL_GPL(skb_send_sock_locked); 264520bf50deSTom Herbert 26460739cd28SCong Wang /* Send skb data on a socket. Socket must be unlocked. */ 26470739cd28SCong Wang int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) 26480739cd28SCong Wang { 26490739cd28SCong Wang return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, 26500739cd28SCong Wang sendpage_unlocked); 26510739cd28SCong Wang } 26520739cd28SCong Wang 2653357b40a1SHerbert Xu /** 2654357b40a1SHerbert Xu * skb_store_bits - store bits from kernel buffer to skb 2655357b40a1SHerbert Xu * @skb: destination buffer 2656357b40a1SHerbert Xu * @offset: offset in destination 2657357b40a1SHerbert Xu * @from: source buffer 2658357b40a1SHerbert Xu * @len: number of bytes to copy 2659357b40a1SHerbert Xu * 2660357b40a1SHerbert Xu * Copy the specified number of bytes from the source buffer to the 2661357b40a1SHerbert Xu * destination skb. This function handles all the messy bits of 2662357b40a1SHerbert Xu * traversing fragment lists and such. 2663357b40a1SHerbert Xu */ 2664357b40a1SHerbert Xu 26650c6fcc8aSStephen Hemminger int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 2666357b40a1SHerbert Xu { 26671a028e50SDavid S. Miller int start = skb_headlen(skb); 2668fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 2669fbb398a8SDavid S. Miller int i, copy; 2670357b40a1SHerbert Xu 2671357b40a1SHerbert Xu if (offset > (int)skb->len - len) 2672357b40a1SHerbert Xu goto fault; 2673357b40a1SHerbert Xu 26741a028e50SDavid S. Miller if ((copy = start - offset) > 0) { 2675357b40a1SHerbert Xu if (copy > len) 2676357b40a1SHerbert Xu copy = len; 267727d7ff46SArnaldo Carvalho de Melo skb_copy_to_linear_data_offset(skb, offset, from, copy); 2678357b40a1SHerbert Xu if ((len -= copy) == 0) 2679357b40a1SHerbert Xu return 0; 2680357b40a1SHerbert Xu offset += copy; 2681357b40a1SHerbert Xu from += copy; 2682357b40a1SHerbert Xu } 2683357b40a1SHerbert Xu 2684357b40a1SHerbert Xu for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2685357b40a1SHerbert Xu skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 26861a028e50SDavid S. Miller int end; 2687357b40a1SHerbert Xu 2688547b792cSIlpo Järvinen WARN_ON(start > offset + len); 26891a028e50SDavid S. 
Miller 26909e903e08SEric Dumazet end = start + skb_frag_size(frag); 2691357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 2692c613c209SWillem de Bruijn u32 p_off, p_len, copied; 2693c613c209SWillem de Bruijn struct page *p; 2694357b40a1SHerbert Xu u8 *vaddr; 2695357b40a1SHerbert Xu 2696357b40a1SHerbert Xu if (copy > len) 2697357b40a1SHerbert Xu copy = len; 2698357b40a1SHerbert Xu 2699c613c209SWillem de Bruijn skb_frag_foreach_page(frag, 2700b54c9d5bSJonathan Lemon skb_frag_off(frag) + offset - start, 2701c613c209SWillem de Bruijn copy, p, p_off, p_len, copied) { 2702c613c209SWillem de Bruijn vaddr = kmap_atomic(p); 2703c613c209SWillem de Bruijn memcpy(vaddr + p_off, from + copied, p_len); 270451c56b00SEric Dumazet kunmap_atomic(vaddr); 2705c613c209SWillem de Bruijn } 2706357b40a1SHerbert Xu 2707357b40a1SHerbert Xu if ((len -= copy) == 0) 2708357b40a1SHerbert Xu return 0; 2709357b40a1SHerbert Xu offset += copy; 2710357b40a1SHerbert Xu from += copy; 2711357b40a1SHerbert Xu } 27121a028e50SDavid S. Miller start = end; 2713357b40a1SHerbert Xu } 2714357b40a1SHerbert Xu 2715fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 27161a028e50SDavid S. Miller int end; 2717357b40a1SHerbert Xu 2718547b792cSIlpo Järvinen WARN_ON(start > offset + len); 27191a028e50SDavid S. Miller 2720fbb398a8SDavid S. Miller end = start + frag_iter->len; 2721357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 2722357b40a1SHerbert Xu if (copy > len) 2723357b40a1SHerbert Xu copy = len; 2724fbb398a8SDavid S. Miller if (skb_store_bits(frag_iter, offset - start, 27251a028e50SDavid S. Miller from, copy)) 2726357b40a1SHerbert Xu goto fault; 2727357b40a1SHerbert Xu if ((len -= copy) == 0) 2728357b40a1SHerbert Xu return 0; 2729357b40a1SHerbert Xu offset += copy; 2730357b40a1SHerbert Xu from += copy; 2731357b40a1SHerbert Xu } 27321a028e50SDavid S. Miller start = end; 2733357b40a1SHerbert Xu } 2734357b40a1SHerbert Xu if (!len) 2735357b40a1SHerbert Xu return 0; 2736357b40a1SHerbert Xu 2737357b40a1SHerbert Xu fault: 2738357b40a1SHerbert Xu return -EFAULT; 2739357b40a1SHerbert Xu } 2740357b40a1SHerbert Xu EXPORT_SYMBOL(skb_store_bits); 2741357b40a1SHerbert Xu 27421da177e4SLinus Torvalds /* Checksum skb data. */ 27432817a336SDaniel Borkmann __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 27442817a336SDaniel Borkmann __wsum csum, const struct skb_checksum_ops *ops) 27451da177e4SLinus Torvalds { 27461a028e50SDavid S. Miller int start = skb_headlen(skb); 27471a028e50SDavid S. Miller int i, copy = start - offset; 2748fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 27491da177e4SLinus Torvalds int pos = 0; 27501da177e4SLinus Torvalds 27511da177e4SLinus Torvalds /* Checksum header. */ 27521da177e4SLinus Torvalds if (copy > 0) { 27531da177e4SLinus Torvalds if (copy > len) 27541da177e4SLinus Torvalds copy = len; 27552544af03SMatteo Croce csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, 27562544af03SMatteo Croce skb->data + offset, copy, csum); 27571da177e4SLinus Torvalds if ((len -= copy) == 0) 27581da177e4SLinus Torvalds return csum; 27591da177e4SLinus Torvalds offset += copy; 27601da177e4SLinus Torvalds pos = copy; 27611da177e4SLinus Torvalds } 27621da177e4SLinus Torvalds 27631da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 27641a028e50SDavid S. Miller int end; 276551c56b00SEric Dumazet skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 27661da177e4SLinus Torvalds 2767547b792cSIlpo Järvinen WARN_ON(start > offset + len); 27681a028e50SDavid S. 
Miller 276951c56b00SEric Dumazet end = start + skb_frag_size(frag); 27701da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 2771c613c209SWillem de Bruijn u32 p_off, p_len, copied; 2772c613c209SWillem de Bruijn struct page *p; 277344bb9363SAl Viro __wsum csum2; 27741da177e4SLinus Torvalds u8 *vaddr; 27751da177e4SLinus Torvalds 27761da177e4SLinus Torvalds if (copy > len) 27771da177e4SLinus Torvalds copy = len; 2778c613c209SWillem de Bruijn 2779c613c209SWillem de Bruijn skb_frag_foreach_page(frag, 2780b54c9d5bSJonathan Lemon skb_frag_off(frag) + offset - start, 2781c613c209SWillem de Bruijn copy, p, p_off, p_len, copied) { 2782c613c209SWillem de Bruijn vaddr = kmap_atomic(p); 27832544af03SMatteo Croce csum2 = INDIRECT_CALL_1(ops->update, 27842544af03SMatteo Croce csum_partial_ext, 27852544af03SMatteo Croce vaddr + p_off, p_len, 0); 278651c56b00SEric Dumazet kunmap_atomic(vaddr); 27872544af03SMatteo Croce csum = INDIRECT_CALL_1(ops->combine, 27882544af03SMatteo Croce csum_block_add_ext, csum, 27892544af03SMatteo Croce csum2, pos, p_len); 2790c613c209SWillem de Bruijn pos += p_len; 2791c613c209SWillem de Bruijn } 2792c613c209SWillem de Bruijn 27931da177e4SLinus Torvalds if (!(len -= copy)) 27941da177e4SLinus Torvalds return csum; 27951da177e4SLinus Torvalds offset += copy; 27961da177e4SLinus Torvalds } 27971a028e50SDavid S. Miller start = end; 27981da177e4SLinus Torvalds } 27991da177e4SLinus Torvalds 2800fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 28011a028e50SDavid S. Miller int end; 28021da177e4SLinus Torvalds 2803547b792cSIlpo Järvinen WARN_ON(start > offset + len); 28041a028e50SDavid S. Miller 2805fbb398a8SDavid S. Miller end = start + frag_iter->len; 28061da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 28075f92a738SAl Viro __wsum csum2; 28081da177e4SLinus Torvalds if (copy > len) 28091da177e4SLinus Torvalds copy = len; 28102817a336SDaniel Borkmann csum2 = __skb_checksum(frag_iter, offset - start, 28112817a336SDaniel Borkmann copy, 0, ops); 28122544af03SMatteo Croce csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, 28132544af03SMatteo Croce csum, csum2, pos, copy); 28141da177e4SLinus Torvalds if ((len -= copy) == 0) 28151da177e4SLinus Torvalds return csum; 28161da177e4SLinus Torvalds offset += copy; 28171da177e4SLinus Torvalds pos += copy; 28181da177e4SLinus Torvalds } 28191a028e50SDavid S. Miller start = end; 28201da177e4SLinus Torvalds } 282109a62660SKris Katterjohn BUG_ON(len); 28221da177e4SLinus Torvalds 28231da177e4SLinus Torvalds return csum; 28241da177e4SLinus Torvalds } 28252817a336SDaniel Borkmann EXPORT_SYMBOL(__skb_checksum); 28262817a336SDaniel Borkmann 28272817a336SDaniel Borkmann __wsum skb_checksum(const struct sk_buff *skb, int offset, 28282817a336SDaniel Borkmann int len, __wsum csum) 28292817a336SDaniel Borkmann { 28302817a336SDaniel Borkmann const struct skb_checksum_ops ops = { 2831cea80ea8SDaniel Borkmann .update = csum_partial_ext, 28322817a336SDaniel Borkmann .combine = csum_block_add_ext, 28332817a336SDaniel Borkmann }; 28342817a336SDaniel Borkmann 28352817a336SDaniel Borkmann return __skb_checksum(skb, offset, len, csum, &ops); 28362817a336SDaniel Borkmann } 2837b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_checksum); 28381da177e4SLinus Torvalds 28391da177e4SLinus Torvalds /* Both of above in one bottle. */ 28401da177e4SLinus Torvalds 284181d77662SAl Viro __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 28428d5930dfSAl Viro u8 *to, int len) 28431da177e4SLinus Torvalds { 28441a028e50SDavid S. 
Miller int start = skb_headlen(skb); 28451a028e50SDavid S. Miller int i, copy = start - offset; 2846fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 28471da177e4SLinus Torvalds int pos = 0; 28488d5930dfSAl Viro __wsum csum = 0; 28491da177e4SLinus Torvalds 28501da177e4SLinus Torvalds /* Copy header. */ 28511da177e4SLinus Torvalds if (copy > 0) { 28521da177e4SLinus Torvalds if (copy > len) 28531da177e4SLinus Torvalds copy = len; 28541da177e4SLinus Torvalds csum = csum_partial_copy_nocheck(skb->data + offset, to, 2855cc44c17bSAl Viro copy); 28561da177e4SLinus Torvalds if ((len -= copy) == 0) 28571da177e4SLinus Torvalds return csum; 28581da177e4SLinus Torvalds offset += copy; 28591da177e4SLinus Torvalds to += copy; 28601da177e4SLinus Torvalds pos = copy; 28611da177e4SLinus Torvalds } 28621da177e4SLinus Torvalds 28631da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 28641a028e50SDavid S. Miller int end; 28651da177e4SLinus Torvalds 2866547b792cSIlpo Järvinen WARN_ON(start > offset + len); 28671a028e50SDavid S. Miller 28689e903e08SEric Dumazet end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 28691da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 2870c613c209SWillem de Bruijn skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2871c613c209SWillem de Bruijn u32 p_off, p_len, copied; 2872c613c209SWillem de Bruijn struct page *p; 28735084205fSAl Viro __wsum csum2; 28741da177e4SLinus Torvalds u8 *vaddr; 28751da177e4SLinus Torvalds 28761da177e4SLinus Torvalds if (copy > len) 28771da177e4SLinus Torvalds copy = len; 2878c613c209SWillem de Bruijn 2879c613c209SWillem de Bruijn skb_frag_foreach_page(frag, 2880b54c9d5bSJonathan Lemon skb_frag_off(frag) + offset - start, 2881c613c209SWillem de Bruijn copy, p, p_off, p_len, copied) { 2882c613c209SWillem de Bruijn vaddr = kmap_atomic(p); 2883c613c209SWillem de Bruijn csum2 = csum_partial_copy_nocheck(vaddr + p_off, 2884c613c209SWillem de Bruijn to + copied, 2885cc44c17bSAl Viro p_len); 288651c56b00SEric Dumazet kunmap_atomic(vaddr); 28871da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 2888c613c209SWillem de Bruijn pos += p_len; 2889c613c209SWillem de Bruijn } 2890c613c209SWillem de Bruijn 28911da177e4SLinus Torvalds if (!(len -= copy)) 28921da177e4SLinus Torvalds return csum; 28931da177e4SLinus Torvalds offset += copy; 28941da177e4SLinus Torvalds to += copy; 28951da177e4SLinus Torvalds } 28961a028e50SDavid S. Miller start = end; 28971da177e4SLinus Torvalds } 28981da177e4SLinus Torvalds 2899fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 290081d77662SAl Viro __wsum csum2; 29011a028e50SDavid S. Miller int end; 29021da177e4SLinus Torvalds 2903547b792cSIlpo Järvinen WARN_ON(start > offset + len); 29041a028e50SDavid S. Miller 2905fbb398a8SDavid S. Miller end = start + frag_iter->len; 29061da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 29071da177e4SLinus Torvalds if (copy > len) 29081da177e4SLinus Torvalds copy = len; 2909fbb398a8SDavid S. Miller csum2 = skb_copy_and_csum_bits(frag_iter, 29101a028e50SDavid S. Miller offset - start, 29118d5930dfSAl Viro to, copy); 29121da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 29131da177e4SLinus Torvalds if ((len -= copy) == 0) 29141da177e4SLinus Torvalds return csum; 29151da177e4SLinus Torvalds offset += copy; 29161da177e4SLinus Torvalds to += copy; 29171da177e4SLinus Torvalds pos += copy; 29181da177e4SLinus Torvalds } 29191a028e50SDavid S. 
Miller start = end; 29201da177e4SLinus Torvalds } 292109a62660SKris Katterjohn BUG_ON(len); 29221da177e4SLinus Torvalds return csum; 29231da177e4SLinus Torvalds } 2924b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_bits); 29251da177e4SLinus Torvalds 292649f8e832SCong Wang __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) 292749f8e832SCong Wang { 292849f8e832SCong Wang __sum16 sum; 292949f8e832SCong Wang 293049f8e832SCong Wang sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); 293114641931SCong Wang /* See comments in __skb_checksum_complete(). */ 293249f8e832SCong Wang if (likely(!sum)) { 293349f8e832SCong Wang if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 293449f8e832SCong Wang !skb->csum_complete_sw) 29357fe50ac8SCong Wang netdev_rx_csum_fault(skb->dev, skb); 293649f8e832SCong Wang } 293749f8e832SCong Wang if (!skb_shared(skb)) 293849f8e832SCong Wang skb->csum_valid = !sum; 293949f8e832SCong Wang return sum; 294049f8e832SCong Wang } 294149f8e832SCong Wang EXPORT_SYMBOL(__skb_checksum_complete_head); 294249f8e832SCong Wang 294314641931SCong Wang /* This function assumes skb->csum already holds the pseudo header's checksum, 294414641931SCong Wang * which has been changed from the hardware checksum, for example, by 294514641931SCong Wang * __skb_checksum_validate_complete(). And the original skb->csum must 294614641931SCong Wang * have been validated unsuccessfully for the CHECKSUM_COMPLETE case. 294714641931SCong Wang * 294814641931SCong Wang * It returns non-zero if the recomputed checksum is still invalid, otherwise 294914641931SCong Wang * zero. The new checksum is stored back into skb->csum unless the skb is 295014641931SCong Wang * shared. 295114641931SCong Wang */ 295249f8e832SCong Wang __sum16 __skb_checksum_complete(struct sk_buff *skb) 295349f8e832SCong Wang { 295449f8e832SCong Wang __wsum csum; 295549f8e832SCong Wang __sum16 sum; 295649f8e832SCong Wang 295749f8e832SCong Wang csum = skb_checksum(skb, 0, skb->len, 0); 295849f8e832SCong Wang 295949f8e832SCong Wang sum = csum_fold(csum_add(skb->csum, csum)); 296014641931SCong Wang /* This check is inverted, because we already know the hardware 296114641931SCong Wang * checksum was invalid before calling this function. So, if the 296214641931SCong Wang * re-computed checksum is valid instead, then we have a mismatch 296314641931SCong Wang * between the original skb->csum and skb_checksum(). This means either 296414641931SCong Wang * the original hardware checksum is incorrect or we screwed up skb->csum 296514641931SCong Wang * when moving skb->data around.
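 * When the recomputed sum turns out to be valid, the hardware sum (or
 * our own skb->csum bookkeeping) was wrong: netdev_rx_csum_fault() logs
 * the device, but zero is still returned, i.e. callers treat the packet
 * as good because the software sum is authoritative.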
296614641931SCong Wang */ 296749f8e832SCong Wang if (likely(!sum)) { 296849f8e832SCong Wang if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 296949f8e832SCong Wang !skb->csum_complete_sw) 29707fe50ac8SCong Wang netdev_rx_csum_fault(skb->dev, skb); 297149f8e832SCong Wang } 297249f8e832SCong Wang 297349f8e832SCong Wang if (!skb_shared(skb)) { 297449f8e832SCong Wang /* Save full packet checksum */ 297549f8e832SCong Wang skb->csum = csum; 297649f8e832SCong Wang skb->ip_summed = CHECKSUM_COMPLETE; 297749f8e832SCong Wang skb->csum_complete_sw = 1; 297849f8e832SCong Wang skb->csum_valid = !sum; 297949f8e832SCong Wang } 298049f8e832SCong Wang 298149f8e832SCong Wang return sum; 298249f8e832SCong Wang } 298349f8e832SCong Wang EXPORT_SYMBOL(__skb_checksum_complete); 298449f8e832SCong Wang 29859617813dSDavide Caratti static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) 29869617813dSDavide Caratti { 29879617813dSDavide Caratti net_warn_ratelimited( 29889617813dSDavide Caratti "%s: attempt to compute crc32c without libcrc32c.ko\n", 29899617813dSDavide Caratti __func__); 29909617813dSDavide Caratti return 0; 29919617813dSDavide Caratti } 29929617813dSDavide Caratti 29939617813dSDavide Caratti static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, 29949617813dSDavide Caratti int offset, int len) 29959617813dSDavide Caratti { 29969617813dSDavide Caratti net_warn_ratelimited( 29979617813dSDavide Caratti "%s: attempt to compute crc32c without libcrc32c.ko\n", 29989617813dSDavide Caratti __func__); 29999617813dSDavide Caratti return 0; 30009617813dSDavide Caratti } 30019617813dSDavide Caratti 30029617813dSDavide Caratti static const struct skb_checksum_ops default_crc32c_ops = { 30039617813dSDavide Caratti .update = warn_crc32c_csum_update, 30049617813dSDavide Caratti .combine = warn_crc32c_csum_combine, 30059617813dSDavide Caratti }; 30069617813dSDavide Caratti 30079617813dSDavide Caratti const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = 30089617813dSDavide Caratti &default_crc32c_ops; 30099617813dSDavide Caratti EXPORT_SYMBOL(crc32c_csum_stub); 30109617813dSDavide Caratti 3011af2806f8SThomas Graf /** 3012af2806f8SThomas Graf * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 3013af2806f8SThomas Graf * @from: source buffer 3014af2806f8SThomas Graf * 3015af2806f8SThomas Graf * Calculates the amount of linear headroom needed in the 'to' skb passed 3016af2806f8SThomas Graf * into skb_zerocopy(). 
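 *
 * Illustrative pairing with skb_zerocopy() (hypothetical extra
 * allocation size, error handling elided):
 *
 *	hlen = skb_zerocopy_headlen(from);
 *	to = alloc_skb(hlen + extra, GFP_ATOMIC);
 *	if (to)
 *		err = skb_zerocopy(to, from, from->len, hlen);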
3017af2806f8SThomas Graf */ 3018af2806f8SThomas Graf unsigned int 3019af2806f8SThomas Graf skb_zerocopy_headlen(const struct sk_buff *from) 3020af2806f8SThomas Graf { 3021af2806f8SThomas Graf unsigned int hlen = 0; 3022af2806f8SThomas Graf 3023af2806f8SThomas Graf if (!from->head_frag || 3024af2806f8SThomas Graf skb_headlen(from) < L1_CACHE_BYTES || 3025*a17ad096SPravin B Shelar skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { 3026af2806f8SThomas Graf hlen = skb_headlen(from); 3027*a17ad096SPravin B Shelar if (!hlen) 3028*a17ad096SPravin B Shelar hlen = from->len; 3029*a17ad096SPravin B Shelar } 3030af2806f8SThomas Graf 3031af2806f8SThomas Graf if (skb_has_frag_list(from)) 3032af2806f8SThomas Graf hlen = from->len; 3033af2806f8SThomas Graf 3034af2806f8SThomas Graf return hlen; 3035af2806f8SThomas Graf } 3036af2806f8SThomas Graf EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 3037af2806f8SThomas Graf 3038af2806f8SThomas Graf /** 3039af2806f8SThomas Graf * skb_zerocopy - Zero copy skb to skb 3040af2806f8SThomas Graf * @to: destination buffer 30417fceb4deSMasanari Iida * @from: source buffer 3042af2806f8SThomas Graf * @len: number of bytes to copy from source buffer 3043af2806f8SThomas Graf * @hlen: size of linear headroom in destination buffer 3044af2806f8SThomas Graf * 3045af2806f8SThomas Graf * Copies up to @len bytes from @from to @to by creating references 3046af2806f8SThomas Graf * to the frags in the source buffer. 3047af2806f8SThomas Graf * 3048af2806f8SThomas Graf * The @hlen as calculated by skb_zerocopy_headlen() specifies the 3049af2806f8SThomas Graf * headroom in the @to buffer. 305036d5fe6aSZoltan Kiss * 305136d5fe6aSZoltan Kiss * Return value: 305236d5fe6aSZoltan Kiss * 0: everything is OK 305336d5fe6aSZoltan Kiss * -ENOMEM: couldn't orphan frags of @from due to lack of memory 305436d5fe6aSZoltan Kiss * -EFAULT: skb_copy_bits() found some problem with skb geometry 3055af2806f8SThomas Graf */ 305636d5fe6aSZoltan Kiss int 305736d5fe6aSZoltan Kiss skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 3058af2806f8SThomas Graf { 3059af2806f8SThomas Graf int i, j = 0; 3060af2806f8SThomas Graf int plen = 0; /* length of skb->head fragment */ 306136d5fe6aSZoltan Kiss int ret; 3062af2806f8SThomas Graf struct page *page; 3063af2806f8SThomas Graf unsigned int offset; 3064af2806f8SThomas Graf 3065af2806f8SThomas Graf BUG_ON(!from->head_frag && !hlen); 3066af2806f8SThomas Graf 3067af2806f8SThomas Graf /* don't bother with small payloads */ 306836d5fe6aSZoltan Kiss if (len <= skb_tailroom(to)) 306936d5fe6aSZoltan Kiss return skb_copy_bits(from, 0, skb_put(to, len), len); 3070af2806f8SThomas Graf 3071af2806f8SThomas Graf if (hlen) { 307236d5fe6aSZoltan Kiss ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 307336d5fe6aSZoltan Kiss if (unlikely(ret)) 307436d5fe6aSZoltan Kiss return ret; 3075af2806f8SThomas Graf len -= hlen; 3076af2806f8SThomas Graf } else { 3077af2806f8SThomas Graf plen = min_t(int, skb_headlen(from), len); 3078af2806f8SThomas Graf if (plen) { 3079af2806f8SThomas Graf page = virt_to_head_page(from->head); 3080af2806f8SThomas Graf offset = from->data - (unsigned char *)page_address(page); 3081af2806f8SThomas Graf __skb_fill_page_desc(to, 0, page, offset, plen); 3082af2806f8SThomas Graf get_page(page); 3083af2806f8SThomas Graf j = 1; 3084af2806f8SThomas Graf len -= plen; 3085af2806f8SThomas Graf } 3086af2806f8SThomas Graf } 3087af2806f8SThomas Graf 3088af2806f8SThomas Graf to->truesize += len + plen; 3089af2806f8SThomas Graf to->len += len + plen;
3090af2806f8SThomas Graf to->data_len += len + plen; 3091af2806f8SThomas Graf 309236d5fe6aSZoltan Kiss if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { 309336d5fe6aSZoltan Kiss skb_tx_error(from); 309436d5fe6aSZoltan Kiss return -ENOMEM; 309536d5fe6aSZoltan Kiss } 30961f8b977aSWillem de Bruijn skb_zerocopy_clone(to, from, GFP_ATOMIC); 309736d5fe6aSZoltan Kiss 3098af2806f8SThomas Graf for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 3099d8e18a51SMatthew Wilcox (Oracle) int size; 3100d8e18a51SMatthew Wilcox (Oracle) 3101af2806f8SThomas Graf if (!len) 3102af2806f8SThomas Graf break; 3103af2806f8SThomas Graf skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 3104d8e18a51SMatthew Wilcox (Oracle) size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), 3105d8e18a51SMatthew Wilcox (Oracle) len); 3106d8e18a51SMatthew Wilcox (Oracle) skb_frag_size_set(&skb_shinfo(to)->frags[j], size); 3107d8e18a51SMatthew Wilcox (Oracle) len -= size; 3108af2806f8SThomas Graf skb_frag_ref(to, j); 3109af2806f8SThomas Graf j++; 3110af2806f8SThomas Graf } 3111af2806f8SThomas Graf skb_shinfo(to)->nr_frags = j; 311236d5fe6aSZoltan Kiss 311336d5fe6aSZoltan Kiss return 0; 3114af2806f8SThomas Graf } 3115af2806f8SThomas Graf EXPORT_SYMBOL_GPL(skb_zerocopy); 3116af2806f8SThomas Graf 31171da177e4SLinus Torvalds void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 31181da177e4SLinus Torvalds { 3119d3bc23e7SAl Viro __wsum csum; 31201da177e4SLinus Torvalds long csstart; 31211da177e4SLinus Torvalds 312284fa7933SPatrick McHardy if (skb->ip_summed == CHECKSUM_PARTIAL) 312355508d60SMichał Mirosław csstart = skb_checksum_start_offset(skb); 31241da177e4SLinus Torvalds else 31251da177e4SLinus Torvalds csstart = skb_headlen(skb); 31261da177e4SLinus Torvalds 312709a62660SKris Katterjohn BUG_ON(csstart > skb_headlen(skb)); 31281da177e4SLinus Torvalds 3129d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data(skb, to, csstart); 31301da177e4SLinus Torvalds 31311da177e4SLinus Torvalds csum = 0; 31321da177e4SLinus Torvalds if (csstart != skb->len) 31331da177e4SLinus Torvalds csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 31348d5930dfSAl Viro skb->len - csstart); 31351da177e4SLinus Torvalds 313684fa7933SPatrick McHardy if (skb->ip_summed == CHECKSUM_PARTIAL) { 3137ff1dcadbSAl Viro long csstuff = csstart + skb->csum_offset; 31381da177e4SLinus Torvalds 3139d3bc23e7SAl Viro *((__sum16 *)(to + csstuff)) = csum_fold(csum); 31401da177e4SLinus Torvalds } 31411da177e4SLinus Torvalds } 3142b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_dev); 31431da177e4SLinus Torvalds 31441da177e4SLinus Torvalds /** 31451da177e4SLinus Torvalds * skb_dequeue - remove from the head of the queue 31461da177e4SLinus Torvalds * @list: list to dequeue from 31471da177e4SLinus Torvalds * 31481da177e4SLinus Torvalds * Remove the head of the list. The list lock is taken so the function 31491da177e4SLinus Torvalds * may be used safely with other locking list functions. The head item is 31501da177e4SLinus Torvalds * returned or %NULL if the list is empty. 
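 *
 * Typical drain loop (cf. skb_queue_purge() below):
 *
 *	while ((skb = skb_dequeue(list)) != NULL)
 *		kfree_skb(skb);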
31511da177e4SLinus Torvalds */ 31521da177e4SLinus Torvalds 31531da177e4SLinus Torvalds struct sk_buff *skb_dequeue(struct sk_buff_head *list) 31541da177e4SLinus Torvalds { 31551da177e4SLinus Torvalds unsigned long flags; 31561da177e4SLinus Torvalds struct sk_buff *result; 31571da177e4SLinus Torvalds 31581da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 31591da177e4SLinus Torvalds result = __skb_dequeue(list); 31601da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 31611da177e4SLinus Torvalds return result; 31621da177e4SLinus Torvalds } 3163b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue); 31641da177e4SLinus Torvalds 31651da177e4SLinus Torvalds /** 31661da177e4SLinus Torvalds * skb_dequeue_tail - remove from the tail of the queue 31671da177e4SLinus Torvalds * @list: list to dequeue from 31681da177e4SLinus Torvalds * 31691da177e4SLinus Torvalds * Remove the tail of the list. The list lock is taken so the function 31701da177e4SLinus Torvalds * may be used safely with other locking list functions. The tail item is 31711da177e4SLinus Torvalds * returned or %NULL if the list is empty. 31721da177e4SLinus Torvalds */ 31731da177e4SLinus Torvalds struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 31741da177e4SLinus Torvalds { 31751da177e4SLinus Torvalds unsigned long flags; 31761da177e4SLinus Torvalds struct sk_buff *result; 31771da177e4SLinus Torvalds 31781da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 31791da177e4SLinus Torvalds result = __skb_dequeue_tail(list); 31801da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 31811da177e4SLinus Torvalds return result; 31821da177e4SLinus Torvalds } 3183b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue_tail); 31841da177e4SLinus Torvalds 31851da177e4SLinus Torvalds /** 31861da177e4SLinus Torvalds * skb_queue_purge - empty a list 31871da177e4SLinus Torvalds * @list: list to empty 31881da177e4SLinus Torvalds * 31891da177e4SLinus Torvalds * Delete all buffers on an &sk_buff list. Each buffer is removed from 31901da177e4SLinus Torvalds * the list and one reference dropped. This function takes the list 31911da177e4SLinus Torvalds * lock and is atomic with respect to other list locking functions. 31921da177e4SLinus Torvalds */ 31931da177e4SLinus Torvalds void skb_queue_purge(struct sk_buff_head *list) 31941da177e4SLinus Torvalds { 31951da177e4SLinus Torvalds struct sk_buff *skb; 31961da177e4SLinus Torvalds while ((skb = skb_dequeue(list)) != NULL) 31971da177e4SLinus Torvalds kfree_skb(skb); 31981da177e4SLinus Torvalds } 3199b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_purge); 32001da177e4SLinus Torvalds 32011da177e4SLinus Torvalds /** 32029f5afeaeSYaogong Wang * skb_rbtree_purge - empty a skb rbtree 32039f5afeaeSYaogong Wang * @root: root of the rbtree to empty 3204385114deSPeter Oskolkov * Return value: the sum of truesizes of all purged skbs. 32059f5afeaeSYaogong Wang * 32069f5afeaeSYaogong Wang * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from 32079f5afeaeSYaogong Wang * the list and one reference dropped. This function does not take 32089f5afeaeSYaogong Wang * any lock. Synchronization should be handled by the caller (e.g., TCP 32099f5afeaeSYaogong Wang * out-of-order queue is protected by the socket lock). 
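 *
 * Returning the summed truesize lets the caller adjust its memory
 * accounting in one step rather than per purged buffer.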
32109f5afeaeSYaogong Wang */ 3211385114deSPeter Oskolkov unsigned int skb_rbtree_purge(struct rb_root *root) 32129f5afeaeSYaogong Wang { 32137c90584cSEric Dumazet struct rb_node *p = rb_first(root); 3214385114deSPeter Oskolkov unsigned int sum = 0; 32159f5afeaeSYaogong Wang 32167c90584cSEric Dumazet while (p) { 32177c90584cSEric Dumazet struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); 32187c90584cSEric Dumazet 32197c90584cSEric Dumazet p = rb_next(p); 32207c90584cSEric Dumazet rb_erase(&skb->rbnode, root); 3221385114deSPeter Oskolkov sum += skb->truesize; 32229f5afeaeSYaogong Wang kfree_skb(skb); 32237c90584cSEric Dumazet } 3224385114deSPeter Oskolkov return sum; 32259f5afeaeSYaogong Wang } 32269f5afeaeSYaogong Wang 32279f5afeaeSYaogong Wang /** 32281da177e4SLinus Torvalds * skb_queue_head - queue a buffer at the list head 32291da177e4SLinus Torvalds * @list: list to use 32301da177e4SLinus Torvalds * @newsk: buffer to queue 32311da177e4SLinus Torvalds * 32321da177e4SLinus Torvalds * Queue a buffer at the start of the list. This function takes the 32331da177e4SLinus Torvalds * list lock and can be used safely with other locking &sk_buff 32341da177e4SLinus Torvalds * functions. 32351da177e4SLinus Torvalds * 32361da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 32371da177e4SLinus Torvalds */ 32381da177e4SLinus Torvalds void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 32391da177e4SLinus Torvalds { 32401da177e4SLinus Torvalds unsigned long flags; 32411da177e4SLinus Torvalds 32421da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 32431da177e4SLinus Torvalds __skb_queue_head(list, newsk); 32441da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 32451da177e4SLinus Torvalds } 3246b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_head); 32471da177e4SLinus Torvalds 32481da177e4SLinus Torvalds /** 32491da177e4SLinus Torvalds * skb_queue_tail - queue a buffer at the list tail 32501da177e4SLinus Torvalds * @list: list to use 32511da177e4SLinus Torvalds * @newsk: buffer to queue 32521da177e4SLinus Torvalds * 32531da177e4SLinus Torvalds * Queue a buffer at the tail of the list. This function takes the 32541da177e4SLinus Torvalds * list lock and can be used safely with other locking &sk_buff 32551da177e4SLinus Torvalds * functions. 32561da177e4SLinus Torvalds * 32571da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 32581da177e4SLinus Torvalds */ 32591da177e4SLinus Torvalds void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 32601da177e4SLinus Torvalds { 32611da177e4SLinus Torvalds unsigned long flags; 32621da177e4SLinus Torvalds 32631da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 32641da177e4SLinus Torvalds __skb_queue_tail(list, newsk); 32651da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 32661da177e4SLinus Torvalds } 3267b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_tail); 32688728b834SDavid S. Miller 32691da177e4SLinus Torvalds /** 32701da177e4SLinus Torvalds * skb_unlink - remove a buffer from a list 32711da177e4SLinus Torvalds * @skb: buffer to remove 32728728b834SDavid S. Miller * @list: list to use 32731da177e4SLinus Torvalds * 32748728b834SDavid S. Miller * Remove a packet from a list. The list locks are taken and this 32758728b834SDavid S. Miller * function is atomic with respect to other list locked calls. 32761da177e4SLinus Torvalds * 32778728b834SDavid S. Miller * You must know what list the SKB is on.
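 *
 * Example (hypothetical queue; the skb is known to be on it):
 *
 *	skb_unlink(skb, &sk->sk_receive_queue);
 *	kfree_skb(skb);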
32781da177e4SLinus Torvalds */ 32798728b834SDavid S. Miller void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 32801da177e4SLinus Torvalds { 32811da177e4SLinus Torvalds unsigned long flags; 32821da177e4SLinus Torvalds 32831da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 32848728b834SDavid S. Miller __skb_unlink(skb, list); 32851da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 32861da177e4SLinus Torvalds } 3287b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_unlink); 32881da177e4SLinus Torvalds 32891da177e4SLinus Torvalds /** 32901da177e4SLinus Torvalds * skb_append - append a buffer 32911da177e4SLinus Torvalds * @old: buffer to insert after 32921da177e4SLinus Torvalds * @newsk: buffer to insert 32938728b834SDavid S. Miller * @list: list to use 32941da177e4SLinus Torvalds * 32951da177e4SLinus Torvalds * Place a packet after a given packet in a list. The list locks are taken 32961da177e4SLinus Torvalds * and this function is atomic with respect to other list locked calls. 32971da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 32981da177e4SLinus Torvalds */ 32998728b834SDavid S. Miller void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 33001da177e4SLinus Torvalds { 33011da177e4SLinus Torvalds unsigned long flags; 33021da177e4SLinus Torvalds 33038728b834SDavid S. Miller spin_lock_irqsave(&list->lock, flags); 33047de6c033SGerrit Renker __skb_queue_after(list, old, newsk); 33058728b834SDavid S. Miller spin_unlock_irqrestore(&list->lock, flags); 33061da177e4SLinus Torvalds } 3307b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append); 33081da177e4SLinus Torvalds 33091da177e4SLinus Torvalds static inline void skb_split_inside_header(struct sk_buff *skb, 33101da177e4SLinus Torvalds struct sk_buff* skb1, 33111da177e4SLinus Torvalds const u32 len, const int pos) 33121da177e4SLinus Torvalds { 33131da177e4SLinus Torvalds int i; 33141da177e4SLinus Torvalds 3315d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 3316d626f62bSArnaldo Carvalho de Melo pos - len); 33171da177e4SLinus Torvalds /* And move data appendix as is. 
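 * The page frag descriptors are handed over to skb1 wholesale below;
 * only the linear header bytes above required an actual copy.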
*/ 33181da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 33191da177e4SLinus Torvalds skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 33201da177e4SLinus Torvalds 33211da177e4SLinus Torvalds skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 33221da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = 0; 33231da177e4SLinus Torvalds skb1->data_len = skb->data_len; 33241da177e4SLinus Torvalds skb1->len += skb1->data_len; 33251da177e4SLinus Torvalds skb->data_len = 0; 33261da177e4SLinus Torvalds skb->len = len; 332727a884dcSArnaldo Carvalho de Melo skb_set_tail_pointer(skb, len); 33281da177e4SLinus Torvalds } 33291da177e4SLinus Torvalds 33301da177e4SLinus Torvalds static inline void skb_split_no_header(struct sk_buff *skb, 33311da177e4SLinus Torvalds struct sk_buff* skb1, 33321da177e4SLinus Torvalds const u32 len, int pos) 33331da177e4SLinus Torvalds { 33341da177e4SLinus Torvalds int i, k = 0; 33351da177e4SLinus Torvalds const int nfrags = skb_shinfo(skb)->nr_frags; 33361da177e4SLinus Torvalds 33371da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = 0; 33381da177e4SLinus Torvalds skb1->len = skb1->data_len = skb->len - len; 33391da177e4SLinus Torvalds skb->len = len; 33401da177e4SLinus Torvalds skb->data_len = len - pos; 33411da177e4SLinus Torvalds 33421da177e4SLinus Torvalds for (i = 0; i < nfrags; i++) { 33439e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 33441da177e4SLinus Torvalds 33451da177e4SLinus Torvalds if (pos + size > len) { 33461da177e4SLinus Torvalds skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 33471da177e4SLinus Torvalds 33481da177e4SLinus Torvalds if (pos < len) { 33491da177e4SLinus Torvalds /* Split frag. 33501da177e4SLinus Torvalds * We have two variants in this case: 33511da177e4SLinus Torvalds * 1. Move the whole frag to the second 33521da177e4SLinus Torvalds * part, if it is possible. F.e. 33531da177e4SLinus Torvalds * this approach is mandatory for TUX, 33541da177e4SLinus Torvalds * where splitting is expensive. 33551da177e4SLinus Torvalds * 2. Split accurately. This is what we do here. 33561da177e4SLinus Torvalds */ 3357ea2ab693SIan Campbell skb_frag_ref(skb, i); 3358b54c9d5bSJonathan Lemon skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos); 33599e903e08SEric Dumazet skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 33609e903e08SEric Dumazet skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 33611da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags++; 33621da177e4SLinus Torvalds } 33631da177e4SLinus Torvalds k++; 33641da177e4SLinus Torvalds } else 33651da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags++; 33661da177e4SLinus Torvalds pos += size; 33671da177e4SLinus Torvalds } 33681da177e4SLinus Torvalds skb_shinfo(skb1)->nr_frags = k; 33691da177e4SLinus Torvalds } 33701da177e4SLinus Torvalds 33711da177e4SLinus Torvalds /** 33721da177e4SLinus Torvalds * skb_split - Split fragmented skb to two parts at length len.
33731da177e4SLinus Torvalds * @skb: the buffer to split 33741da177e4SLinus Torvalds * @skb1: the buffer to receive the second part 33751da177e4SLinus Torvalds * @len: new length for skb 33761da177e4SLinus Torvalds */ 33771da177e4SLinus Torvalds void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 33781da177e4SLinus Torvalds { 33791da177e4SLinus Torvalds int pos = skb_headlen(skb); 33801da177e4SLinus Torvalds 338106b4feb3SJonathan Lemon skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG; 33821f8b977aSWillem de Bruijn skb_zerocopy_clone(skb1, skb, 0); 33831da177e4SLinus Torvalds if (len < pos) /* Split line is inside header. */ 33841da177e4SLinus Torvalds skb_split_inside_header(skb, skb1, len, pos); 33851da177e4SLinus Torvalds else /* Second chunk has no header, nothing to copy. */ 33861da177e4SLinus Torvalds skb_split_no_header(skb, skb1, len, pos); 33871da177e4SLinus Torvalds } 3388b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_split); 33891da177e4SLinus Torvalds 33909f782db3SIlpo Järvinen /* Shifting from/to a cloned skb is a no-go. 33919f782db3SIlpo Järvinen * 33929f782db3SIlpo Järvinen * Caller cannot keep skb_shinfo related pointers past calling here! 33939f782db3SIlpo Järvinen */ 3394832d11c5SIlpo Järvinen static int skb_prepare_for_shift(struct sk_buff *skb) 3395832d11c5SIlpo Järvinen { 3396097b9146SMarco Elver int ret = 0; 3397097b9146SMarco Elver 3398097b9146SMarco Elver if (skb_cloned(skb)) { 3399097b9146SMarco Elver /* Save and restore truesize: pskb_expand_head() may reallocate 3400097b9146SMarco Elver * memory where ksize(kmalloc(S)) != ksize(kmalloc(S)), but we 3401097b9146SMarco Elver * cannot change truesize at this point. 3402097b9146SMarco Elver */ 3403097b9146SMarco Elver unsigned int save_truesize = skb->truesize; 3404097b9146SMarco Elver 3405097b9146SMarco Elver ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 3406097b9146SMarco Elver skb->truesize = save_truesize; 3407097b9146SMarco Elver } 3408097b9146SMarco Elver return ret; 3409832d11c5SIlpo Järvinen } 3410832d11c5SIlpo Järvinen 3411832d11c5SIlpo Järvinen /** 3412832d11c5SIlpo Järvinen * skb_shift - Shifts paged data partially from skb to another 3413832d11c5SIlpo Järvinen * @tgt: buffer into which tail data gets added 3414832d11c5SIlpo Järvinen * @skb: buffer from which the paged data comes 3415832d11c5SIlpo Järvinen * @shiftlen: shift up to this many bytes 3416832d11c5SIlpo Järvinen * 3417832d11c5SIlpo Järvinen * Attempts to shift up to shiftlen worth of bytes, which may be less than 341820e994a0SFeng King * the length of the skb, from skb to tgt. Returns the number of bytes shifted. 3419832d11c5SIlpo Järvinen * It's up to the caller to free skb if everything was shifted. 3420832d11c5SIlpo Järvinen * 3421832d11c5SIlpo Järvinen * If @tgt runs out of frags, the whole operation is aborted. 3422832d11c5SIlpo Järvinen * 3423832d11c5SIlpo Järvinen * The skb cannot include anything but paged data, while tgt is allowed 3424832d11c5SIlpo Järvinen * to have non-paged data as well. 3425832d11c5SIlpo Järvinen * 3426832d11c5SIlpo Järvinen * TODO: full sized shift could be optimized but that would need 3427832d11c5SIlpo Järvinen * a specialized skb free'er to handle frags without up-to-date nr_frags.
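 *
 * A minimal usage sketch (hypothetical caller that owns both skbs; the
 * SACK processing in tcp_input.c is the in-tree user this is modeled on):
 *
 *	unsigned int want = skb->len;
 *	int moved = skb_shift(tgt, skb, want);
 *
 *	if (moved == want)
 *		consume_skb(skb);	// fully drained; we held the last ref
 *	else if (!moved)
 *		;			// nothing shifted, skb is unchanged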
3428832d11c5SIlpo Järvinen */ 3429832d11c5SIlpo Järvinen int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 3430832d11c5SIlpo Järvinen { 3431832d11c5SIlpo Järvinen int from, to, merge, todo; 3432d8e18a51SMatthew Wilcox (Oracle) skb_frag_t *fragfrom, *fragto; 3433832d11c5SIlpo Järvinen 3434832d11c5SIlpo Järvinen BUG_ON(shiftlen > skb->len); 3435f8071cdeSEric Dumazet 3436f8071cdeSEric Dumazet if (skb_headlen(skb)) 3437f8071cdeSEric Dumazet return 0; 34381f8b977aSWillem de Bruijn if (skb_zcopy(tgt) || skb_zcopy(skb)) 34391f8b977aSWillem de Bruijn return 0; 3440832d11c5SIlpo Järvinen 3441832d11c5SIlpo Järvinen todo = shiftlen; 3442832d11c5SIlpo Järvinen from = 0; 3443832d11c5SIlpo Järvinen to = skb_shinfo(tgt)->nr_frags; 3444832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 3445832d11c5SIlpo Järvinen 3446832d11c5SIlpo Järvinen /* Actual merge is delayed until the point when we know we can 3447832d11c5SIlpo Järvinen * commit all, so that we don't have to undo partial changes 3448832d11c5SIlpo Järvinen */ 3449832d11c5SIlpo Järvinen if (!to || 3450ea2ab693SIan Campbell !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 3451b54c9d5bSJonathan Lemon skb_frag_off(fragfrom))) { 3452832d11c5SIlpo Järvinen merge = -1; 3453832d11c5SIlpo Järvinen } else { 3454832d11c5SIlpo Järvinen merge = to - 1; 3455832d11c5SIlpo Järvinen 34569e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 3457832d11c5SIlpo Järvinen if (todo < 0) { 3458832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || 3459832d11c5SIlpo Järvinen skb_prepare_for_shift(tgt)) 3460832d11c5SIlpo Järvinen return 0; 3461832d11c5SIlpo Järvinen 34629f782db3SIlpo Järvinen /* All previous frag pointers might be stale! */ 34639f782db3SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 3464832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 3465832d11c5SIlpo Järvinen 34669e903e08SEric Dumazet skb_frag_size_add(fragto, shiftlen); 34679e903e08SEric Dumazet skb_frag_size_sub(fragfrom, shiftlen); 3468b54c9d5bSJonathan Lemon skb_frag_off_add(fragfrom, shiftlen); 3469832d11c5SIlpo Järvinen 3470832d11c5SIlpo Järvinen goto onlymerged; 3471832d11c5SIlpo Järvinen } 3472832d11c5SIlpo Järvinen 3473832d11c5SIlpo Järvinen from++; 3474832d11c5SIlpo Järvinen } 3475832d11c5SIlpo Järvinen 3476832d11c5SIlpo Järvinen /* Skip full, not-fitting skb to avoid expensive operations */ 3477832d11c5SIlpo Järvinen if ((shiftlen == skb->len) && 3478832d11c5SIlpo Järvinen (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 3479832d11c5SIlpo Järvinen return 0; 3480832d11c5SIlpo Järvinen 3481832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 3482832d11c5SIlpo Järvinen return 0; 3483832d11c5SIlpo Järvinen 3484832d11c5SIlpo Järvinen while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 3485832d11c5SIlpo Järvinen if (to == MAX_SKB_FRAGS) 3486832d11c5SIlpo Järvinen return 0; 3487832d11c5SIlpo Järvinen 3488832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 3489832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[to]; 3490832d11c5SIlpo Järvinen 34919e903e08SEric Dumazet if (todo >= skb_frag_size(fragfrom)) { 3492832d11c5SIlpo Järvinen *fragto = *fragfrom; 34939e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 3494832d11c5SIlpo Järvinen from++; 3495832d11c5SIlpo Järvinen to++; 3496832d11c5SIlpo Järvinen 3497832d11c5SIlpo Järvinen } else { 3498ea2ab693SIan Campbell __skb_frag_ref(fragfrom); 3499b54c9d5bSJonathan Lemon skb_frag_page_copy(fragto, fragfrom); 3500b54c9d5bSJonathan 
Lemon skb_frag_off_copy(fragto, fragfrom); 35019e903e08SEric Dumazet skb_frag_size_set(fragto, todo); 3502832d11c5SIlpo Järvinen 3503b54c9d5bSJonathan Lemon skb_frag_off_add(fragfrom, todo); 35049e903e08SEric Dumazet skb_frag_size_sub(fragfrom, todo); 3505832d11c5SIlpo Järvinen todo = 0; 3506832d11c5SIlpo Järvinen 3507832d11c5SIlpo Järvinen to++; 3508832d11c5SIlpo Järvinen break; 3509832d11c5SIlpo Järvinen } 3510832d11c5SIlpo Järvinen } 3511832d11c5SIlpo Järvinen 3512832d11c5SIlpo Järvinen /* Ready to "commit" this state change to tgt */ 3513832d11c5SIlpo Järvinen skb_shinfo(tgt)->nr_frags = to; 3514832d11c5SIlpo Järvinen 3515832d11c5SIlpo Järvinen if (merge >= 0) { 3516832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[0]; 3517832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 3518832d11c5SIlpo Järvinen 35199e903e08SEric Dumazet skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 35206a5bcd84SIlias Apalodimas __skb_frag_unref(fragfrom, skb->pp_recycle); 3521832d11c5SIlpo Järvinen } 3522832d11c5SIlpo Järvinen 3523832d11c5SIlpo Järvinen /* Reposition in the original skb */ 3524832d11c5SIlpo Järvinen to = 0; 3525832d11c5SIlpo Järvinen while (from < skb_shinfo(skb)->nr_frags) 3526832d11c5SIlpo Järvinen skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 3527832d11c5SIlpo Järvinen skb_shinfo(skb)->nr_frags = to; 3528832d11c5SIlpo Järvinen 3529832d11c5SIlpo Järvinen BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 3530832d11c5SIlpo Järvinen 3531832d11c5SIlpo Järvinen onlymerged: 3532832d11c5SIlpo Järvinen /* Most likely the tgt won't ever need its checksum anymore, skb on 3533832d11c5SIlpo Järvinen * the other hand might need it if it needs to be resent 3534832d11c5SIlpo Järvinen */ 3535832d11c5SIlpo Järvinen tgt->ip_summed = CHECKSUM_PARTIAL; 3536832d11c5SIlpo Järvinen skb->ip_summed = CHECKSUM_PARTIAL; 3537832d11c5SIlpo Järvinen 3538832d11c5SIlpo Järvinen /* Yak, is it really working this way? Some helper please? */ 3539832d11c5SIlpo Järvinen skb->len -= shiftlen; 3540832d11c5SIlpo Järvinen skb->data_len -= shiftlen; 3541832d11c5SIlpo Järvinen skb->truesize -= shiftlen; 3542832d11c5SIlpo Järvinen tgt->len += shiftlen; 3543832d11c5SIlpo Järvinen tgt->data_len += shiftlen; 3544832d11c5SIlpo Järvinen tgt->truesize += shiftlen; 3545832d11c5SIlpo Järvinen 3546832d11c5SIlpo Järvinen return shiftlen; 3547832d11c5SIlpo Järvinen } 3548832d11c5SIlpo Järvinen 3549677e90edSThomas Graf /** 3550677e90edSThomas Graf * skb_prepare_seq_read - Prepare a sequential read of skb data 3551677e90edSThomas Graf * @skb: the buffer to read 3552677e90edSThomas Graf * @from: lower offset of data to be read 3553677e90edSThomas Graf * @to: upper offset of data to be read 3554677e90edSThomas Graf * @st: state variable 3555677e90edSThomas Graf * 3556677e90edSThomas Graf * Initializes the specified state variable. Must be called before 3557677e90edSThomas Graf * invoking skb_seq_read() for the first time. 3558677e90edSThomas Graf */ 3559677e90edSThomas Graf void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 3560677e90edSThomas Graf unsigned int to, struct skb_seq_state *st) 3561677e90edSThomas Graf { 3562677e90edSThomas Graf st->lower_offset = from; 3563677e90edSThomas Graf st->upper_offset = to; 3564677e90edSThomas Graf st->root_skb = st->cur_skb = skb; 3565677e90edSThomas Graf st->frag_idx = st->stepped_offset = 0; 3566677e90edSThomas Graf st->frag_data = NULL; 356797550f6fSWillem de Bruijn st->frag_off = 0; 3568677e90edSThomas Graf } 3569b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_prepare_seq_read); 3570677e90edSThomas Graf 3571677e90edSThomas Graf /** 3572677e90edSThomas Graf * skb_seq_read - Sequentially read skb data 3573677e90edSThomas Graf * @consumed: number of bytes consumed by the caller so far 3574677e90edSThomas Graf * @data: destination pointer for data to be returned 3575677e90edSThomas Graf * @st: state variable 3576677e90edSThomas Graf * 3577bc32383cSMathias Krause * Reads a block of skb data at @consumed relative to the 3578677e90edSThomas Graf * lower offset specified to skb_prepare_seq_read(). Assigns 3579bc32383cSMathias Krause * the head of the data block to @data and returns the length 3580677e90edSThomas Graf * of the block or 0 if the end of the skb data or the upper 3581677e90edSThomas Graf * offset has been reached. 3582677e90edSThomas Graf * 3583677e90edSThomas Graf * The caller is not required to consume all of the data 3584bc32383cSMathias Krause * returned, i.e. @consumed is typically set to the number 3585677e90edSThomas Graf * of bytes already consumed and the next call to 3586677e90edSThomas Graf * skb_seq_read() will return the remaining part of the block. 3587677e90edSThomas Graf * 358825985edcSLucas De Marchi * Note 1: The size of each block of data returned can be arbitrary, 3589e793c0f7SMasanari Iida * this limitation is the cost for zerocopy sequential 3590677e90edSThomas Graf * reads of potentially non linear data. 3591677e90edSThomas Graf * 3592bc2cda1eSRandy Dunlap * Note 2: Fragment lists within fragments are not implemented 3593677e90edSThomas Graf * at the moment, state->root_skb could be replaced with 3594677e90edSThomas Graf * a stack for this purpose. 3595677e90edSThomas Graf */ 3596677e90edSThomas Graf unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 3597677e90edSThomas Graf struct skb_seq_state *st) 3598677e90edSThomas Graf { 3599677e90edSThomas Graf unsigned int block_limit, abs_offset = consumed + st->lower_offset; 3600677e90edSThomas Graf skb_frag_t *frag; 3601677e90edSThomas Graf 3602aeb193eaSWedson Almeida Filho if (unlikely(abs_offset >= st->upper_offset)) { 3603aeb193eaSWedson Almeida Filho if (st->frag_data) { 3604aeb193eaSWedson Almeida Filho kunmap_atomic(st->frag_data); 3605aeb193eaSWedson Almeida Filho st->frag_data = NULL; 3606aeb193eaSWedson Almeida Filho } 3607677e90edSThomas Graf return 0; 3608aeb193eaSWedson Almeida Filho } 3609677e90edSThomas Graf 3610677e90edSThomas Graf next_skb: 361195e3b24cSHerbert Xu block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 3612677e90edSThomas Graf 3613995b3379SThomas Chenault if (abs_offset < block_limit && !st->frag_data) { 361495e3b24cSHerbert Xu *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 3615677e90edSThomas Graf return block_limit - abs_offset; 3616677e90edSThomas Graf } 3617677e90edSThomas Graf 3618677e90edSThomas Graf if (st->frag_idx == 0 && !st->frag_data) 3619677e90edSThomas Graf st->stepped_offset += skb_headlen(st->cur_skb); 3620677e90edSThomas Graf 3621677e90edSThomas Graf while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 362297550f6fSWillem de Bruijn unsigned int pg_idx, pg_off, pg_sz; 3623677e90edSThomas Graf 362497550f6fSWillem de Bruijn frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 362597550f6fSWillem de Bruijn 362697550f6fSWillem de Bruijn pg_idx = 0; 362797550f6fSWillem de Bruijn pg_off = skb_frag_off(frag); 362897550f6fSWillem de Bruijn pg_sz = skb_frag_size(frag); 362997550f6fSWillem de Bruijn 363097550f6fSWillem de Bruijn if 
(skb_frag_must_loop(skb_frag_page(frag))) { 363197550f6fSWillem de Bruijn pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; 363297550f6fSWillem de Bruijn pg_off = offset_in_page(pg_off + st->frag_off); 363397550f6fSWillem de Bruijn pg_sz = min_t(unsigned int, pg_sz - st->frag_off, 363497550f6fSWillem de Bruijn PAGE_SIZE - pg_off); 363597550f6fSWillem de Bruijn } 363697550f6fSWillem de Bruijn 363797550f6fSWillem de Bruijn block_limit = pg_sz + st->stepped_offset; 3638677e90edSThomas Graf if (abs_offset < block_limit) { 3639677e90edSThomas Graf if (!st->frag_data) 364097550f6fSWillem de Bruijn st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); 3641677e90edSThomas Graf 364297550f6fSWillem de Bruijn *data = (u8 *)st->frag_data + pg_off + 3643677e90edSThomas Graf (abs_offset - st->stepped_offset); 3644677e90edSThomas Graf 3645677e90edSThomas Graf return block_limit - abs_offset; 3646677e90edSThomas Graf } 3647677e90edSThomas Graf 3648677e90edSThomas Graf if (st->frag_data) { 364951c56b00SEric Dumazet kunmap_atomic(st->frag_data); 3650677e90edSThomas Graf st->frag_data = NULL; 3651677e90edSThomas Graf } 3652677e90edSThomas Graf 365397550f6fSWillem de Bruijn st->stepped_offset += pg_sz; 365497550f6fSWillem de Bruijn st->frag_off += pg_sz; 365597550f6fSWillem de Bruijn if (st->frag_off == skb_frag_size(frag)) { 365697550f6fSWillem de Bruijn st->frag_off = 0; 3657677e90edSThomas Graf st->frag_idx++; 365897550f6fSWillem de Bruijn } 3659677e90edSThomas Graf } 3660677e90edSThomas Graf 36615b5a60daSOlaf Kirch if (st->frag_data) { 366251c56b00SEric Dumazet kunmap_atomic(st->frag_data); 36635b5a60daSOlaf Kirch st->frag_data = NULL; 36645b5a60daSOlaf Kirch } 36655b5a60daSOlaf Kirch 366621dc3301SDavid S. Miller if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 3667677e90edSThomas Graf st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 366895e3b24cSHerbert Xu st->frag_idx = 0; 3669677e90edSThomas Graf goto next_skb; 367071b3346dSShyam Iyer } else if (st->cur_skb->next) { 367171b3346dSShyam Iyer st->cur_skb = st->cur_skb->next; 367271b3346dSShyam Iyer st->frag_idx = 0; 3673677e90edSThomas Graf goto next_skb; 3674677e90edSThomas Graf } 3675677e90edSThomas Graf 3676677e90edSThomas Graf return 0; 3677677e90edSThomas Graf } 3678b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_seq_read); 3679677e90edSThomas Graf 3680677e90edSThomas Graf /** 3681677e90edSThomas Graf * skb_abort_seq_read - Abort a sequential read of skb data 3682677e90edSThomas Graf * @st: state variable 3683677e90edSThomas Graf * 3684677e90edSThomas Graf * Must be called if skb_seq_read() was not called until it 3685677e90edSThomas Graf * returned 0. 3686677e90edSThomas Graf */ 3687677e90edSThomas Graf void skb_abort_seq_read(struct skb_seq_state *st) 3688677e90edSThomas Graf { 3689677e90edSThomas Graf if (st->frag_data) 369051c56b00SEric Dumazet kunmap_atomic(st->frag_data); 3691677e90edSThomas Graf } 3692b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_abort_seq_read); 3693677e90edSThomas Graf 36943fc7e8a6SThomas Graf #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 36953fc7e8a6SThomas Graf 36963fc7e8a6SThomas Graf static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 36973fc7e8a6SThomas Graf struct ts_config *conf, 36983fc7e8a6SThomas Graf struct ts_state *state) 36993fc7e8a6SThomas Graf { 37003fc7e8a6SThomas Graf return skb_seq_read(offset, text, TS_SKB_CB(state)); 37013fc7e8a6SThomas Graf } 37023fc7e8a6SThomas Graf 37033fc7e8a6SThomas Graf static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 37043fc7e8a6SThomas Graf { 37053fc7e8a6SThomas Graf skb_abort_seq_read(TS_SKB_CB(state)); 37063fc7e8a6SThomas Graf } 37073fc7e8a6SThomas Graf 37083fc7e8a6SThomas Graf /** 37093fc7e8a6SThomas Graf * skb_find_text - Find a text pattern in skb data 37103fc7e8a6SThomas Graf * @skb: the buffer to look in 37113fc7e8a6SThomas Graf * @from: search offset 37123fc7e8a6SThomas Graf * @to: search limit 37133fc7e8a6SThomas Graf * @config: textsearch configuration 37143fc7e8a6SThomas Graf * 37153fc7e8a6SThomas Graf * Finds a pattern in the skb data according to the specified 37163fc7e8a6SThomas Graf * textsearch configuration. Use textsearch_next() to retrieve 37173fc7e8a6SThomas Graf * subsequent occurrences of the pattern. Returns the offset 37183fc7e8a6SThomas Graf * to the first occurrence or UINT_MAX if no match was found. 37193fc7e8a6SThomas Graf */ 37203fc7e8a6SThomas Graf unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 3721059a2440SBojan Prtvar unsigned int to, struct ts_config *config) 37223fc7e8a6SThomas Graf { 3723059a2440SBojan Prtvar struct ts_state state; 3724f72b948dSPhil Oester unsigned int ret; 3725f72b948dSPhil Oester 3726b228c9b0SWillem de Bruijn BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb)); 3727b228c9b0SWillem de Bruijn 37283fc7e8a6SThomas Graf config->get_next_block = skb_ts_get_next_block; 37293fc7e8a6SThomas Graf config->finish = skb_ts_finish; 37303fc7e8a6SThomas Graf 3731059a2440SBojan Prtvar skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 37323fc7e8a6SThomas Graf 3733059a2440SBojan Prtvar ret = textsearch_find(config, &state); 3734f72b948dSPhil Oester return (ret <= to - from ? ret : UINT_MAX); 37353fc7e8a6SThomas Graf } 3736b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_find_text); 37373fc7e8a6SThomas Graf 3738be12a1feSHannes Frederic Sowa int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 3739be12a1feSHannes Frederic Sowa int offset, size_t size) 3740be12a1feSHannes Frederic Sowa { 3741be12a1feSHannes Frederic Sowa int i = skb_shinfo(skb)->nr_frags; 3742be12a1feSHannes Frederic Sowa 3743be12a1feSHannes Frederic Sowa if (skb_can_coalesce(skb, i, page, offset)) { 3744be12a1feSHannes Frederic Sowa skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); 3745be12a1feSHannes Frederic Sowa } else if (i < MAX_SKB_FRAGS) { 3746be12a1feSHannes Frederic Sowa get_page(page); 3747be12a1feSHannes Frederic Sowa skb_fill_page_desc(skb, i, page, offset, size); 3748be12a1feSHannes Frederic Sowa } else { 3749be12a1feSHannes Frederic Sowa return -EMSGSIZE; 3750be12a1feSHannes Frederic Sowa } 3751be12a1feSHannes Frederic Sowa 3752be12a1feSHannes Frederic Sowa return 0; 3753be12a1feSHannes Frederic Sowa } 3754be12a1feSHannes Frederic Sowa EXPORT_SYMBOL_GPL(skb_append_pagefrags); 3755be12a1feSHannes Frederic Sowa 3756cbb042f9SHerbert Xu /** 3757cbb042f9SHerbert Xu * skb_pull_rcsum - pull skb and update receive checksum 3758cbb042f9SHerbert Xu * @skb: buffer to update 3759cbb042f9SHerbert Xu * @len: length of data pulled 3760cbb042f9SHerbert Xu * 3761cbb042f9SHerbert Xu * This function performs an skb_pull on the packet and updates 3762fee54fa5SUrs Thuermann * the CHECKSUM_COMPLETE checksum. It should be used on 376384fa7933SPatrick McHardy * receive path processing instead of skb_pull unless you know 376484fa7933SPatrick McHardy * that the checksum difference is zero (e.g., a valid IP header) 376584fa7933SPatrick McHardy * or you are setting ip_summed to CHECKSUM_NONE. 3766cbb042f9SHerbert Xu */ 3767af72868bSJohannes Berg void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 3768cbb042f9SHerbert Xu { 376931b33dfbSPravin B Shelar unsigned char *data = skb->data; 377031b33dfbSPravin B Shelar 3771cbb042f9SHerbert Xu BUG_ON(len > skb->len); 377231b33dfbSPravin B Shelar __skb_pull(skb, len); 377331b33dfbSPravin B Shelar skb_postpull_rcsum(skb, data, len); 377431b33dfbSPravin B Shelar return skb->data; 3775cbb042f9SHerbert Xu } 3776f94691acSArnaldo Carvalho de Melo EXPORT_SYMBOL_GPL(skb_pull_rcsum); 3777f94691acSArnaldo Carvalho de Melo 377813acc94eSYonghong Song static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) 377913acc94eSYonghong Song { 378013acc94eSYonghong Song skb_frag_t head_frag; 378113acc94eSYonghong Song struct page *page; 378213acc94eSYonghong Song 378313acc94eSYonghong Song page = virt_to_head_page(frag_skb->head); 3784d8e18a51SMatthew Wilcox (Oracle) __skb_frag_set_page(&head_frag, page); 3785b54c9d5bSJonathan Lemon skb_frag_off_set(&head_frag, frag_skb->data - 3786b54c9d5bSJonathan Lemon (unsigned char *)page_address(page)); 3787d8e18a51SMatthew Wilcox (Oracle) skb_frag_size_set(&head_frag, skb_headlen(frag_skb)); 378813acc94eSYonghong Song return head_frag; 378913acc94eSYonghong Song } 379013acc94eSYonghong Song 37913a1296a3SSteffen Klassert struct sk_buff *skb_segment_list(struct sk_buff *skb, 37923a1296a3SSteffen Klassert netdev_features_t features, 37933a1296a3SSteffen Klassert unsigned int offset) 37943a1296a3SSteffen Klassert { 37953a1296a3SSteffen Klassert struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; 37963a1296a3SSteffen Klassert unsigned int tnl_hlen = skb_tnl_header_len(skb); 37973a1296a3SSteffen Klassert unsigned int delta_truesize = 0; 37983a1296a3SSteffen Klassert unsigned 
int delta_len = 0; 37993a1296a3SSteffen Klassert struct sk_buff *tail = NULL; 380053475c5dSDongseok Yi struct sk_buff *nskb, *tmp; 380153475c5dSDongseok Yi int err; 38023a1296a3SSteffen Klassert 38033a1296a3SSteffen Klassert skb_push(skb, -skb_network_offset(skb) + offset); 38043a1296a3SSteffen Klassert 38053a1296a3SSteffen Klassert skb_shinfo(skb)->frag_list = NULL; 38063a1296a3SSteffen Klassert 38073a1296a3SSteffen Klassert do { 38083a1296a3SSteffen Klassert nskb = list_skb; 38093a1296a3SSteffen Klassert list_skb = list_skb->next; 38103a1296a3SSteffen Klassert 381153475c5dSDongseok Yi err = 0; 381253475c5dSDongseok Yi if (skb_shared(nskb)) { 381353475c5dSDongseok Yi tmp = skb_clone(nskb, GFP_ATOMIC); 381453475c5dSDongseok Yi if (tmp) { 381553475c5dSDongseok Yi consume_skb(nskb); 381653475c5dSDongseok Yi nskb = tmp; 381753475c5dSDongseok Yi err = skb_unclone(nskb, GFP_ATOMIC); 381853475c5dSDongseok Yi } else { 381953475c5dSDongseok Yi err = -ENOMEM; 382053475c5dSDongseok Yi } 382153475c5dSDongseok Yi } 382253475c5dSDongseok Yi 38233a1296a3SSteffen Klassert if (!tail) 38243a1296a3SSteffen Klassert skb->next = nskb; 38253a1296a3SSteffen Klassert else 38263a1296a3SSteffen Klassert tail->next = nskb; 38273a1296a3SSteffen Klassert 382853475c5dSDongseok Yi if (unlikely(err)) { 382953475c5dSDongseok Yi nskb->next = list_skb; 383053475c5dSDongseok Yi goto err_linearize; 383153475c5dSDongseok Yi } 383253475c5dSDongseok Yi 38333a1296a3SSteffen Klassert tail = nskb; 38343a1296a3SSteffen Klassert 38353a1296a3SSteffen Klassert delta_len += nskb->len; 38363a1296a3SSteffen Klassert delta_truesize += nskb->truesize; 38373a1296a3SSteffen Klassert 38383a1296a3SSteffen Klassert skb_push(nskb, -skb_network_offset(nskb) + offset); 38393a1296a3SSteffen Klassert 3840cf673ed0SFlorian Westphal skb_release_head_state(nskb); 38413a1296a3SSteffen Klassert __copy_skb_header(nskb, skb); 38423a1296a3SSteffen Klassert 38433a1296a3SSteffen Klassert skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); 38443a1296a3SSteffen Klassert skb_copy_from_linear_data_offset(skb, -tnl_hlen, 38453a1296a3SSteffen Klassert nskb->data - tnl_hlen, 38463a1296a3SSteffen Klassert offset + tnl_hlen); 38473a1296a3SSteffen Klassert 38483a1296a3SSteffen Klassert if (skb_needs_linearize(nskb, features) && 38493a1296a3SSteffen Klassert __skb_linearize(nskb)) 38503a1296a3SSteffen Klassert goto err_linearize; 38513a1296a3SSteffen Klassert 38523a1296a3SSteffen Klassert } while (list_skb); 38533a1296a3SSteffen Klassert 38543a1296a3SSteffen Klassert skb->truesize = skb->truesize - delta_truesize; 38553a1296a3SSteffen Klassert skb->data_len = skb->data_len - delta_len; 38563a1296a3SSteffen Klassert skb->len = skb->len - delta_len; 38573a1296a3SSteffen Klassert 38583a1296a3SSteffen Klassert skb_gso_reset(skb); 38593a1296a3SSteffen Klassert 38603a1296a3SSteffen Klassert skb->prev = tail; 38613a1296a3SSteffen Klassert 38623a1296a3SSteffen Klassert if (skb_needs_linearize(skb, features) && 38633a1296a3SSteffen Klassert __skb_linearize(skb)) 38643a1296a3SSteffen Klassert goto err_linearize; 38653a1296a3SSteffen Klassert 38663a1296a3SSteffen Klassert skb_get(skb); 38673a1296a3SSteffen Klassert 38683a1296a3SSteffen Klassert return skb; 38693a1296a3SSteffen Klassert 38703a1296a3SSteffen Klassert err_linearize: 38713a1296a3SSteffen Klassert kfree_skb_list(skb->next); 38723a1296a3SSteffen Klassert skb->next = NULL; 38733a1296a3SSteffen Klassert return ERR_PTR(-ENOMEM); 38743a1296a3SSteffen Klassert } 38753a1296a3SSteffen Klassert 
EXPORT_SYMBOL_GPL(skb_segment_list); 38763a1296a3SSteffen Klassert 38773a1296a3SSteffen Klassert int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb) 38783a1296a3SSteffen Klassert { 38793a1296a3SSteffen Klassert if (unlikely(p->len + skb->len >= 65536)) 38803a1296a3SSteffen Klassert return -E2BIG; 38813a1296a3SSteffen Klassert 38823a1296a3SSteffen Klassert if (NAPI_GRO_CB(p)->last == p) 38833a1296a3SSteffen Klassert skb_shinfo(p)->frag_list = skb; 38843a1296a3SSteffen Klassert else 38853a1296a3SSteffen Klassert NAPI_GRO_CB(p)->last->next = skb; 38863a1296a3SSteffen Klassert 38873a1296a3SSteffen Klassert skb_pull(skb, skb_gro_offset(skb)); 38883a1296a3SSteffen Klassert 38893a1296a3SSteffen Klassert NAPI_GRO_CB(p)->last = skb; 38903a1296a3SSteffen Klassert NAPI_GRO_CB(p)->count++; 38913a1296a3SSteffen Klassert p->data_len += skb->len; 38923a1296a3SSteffen Klassert p->truesize += skb->truesize; 38933a1296a3SSteffen Klassert p->len += skb->len; 38943a1296a3SSteffen Klassert 38953a1296a3SSteffen Klassert NAPI_GRO_CB(skb)->same_flow = 1; 38963a1296a3SSteffen Klassert 38973a1296a3SSteffen Klassert return 0; 38983a1296a3SSteffen Klassert } 38993a1296a3SSteffen Klassert 3900f4c50d99SHerbert Xu /** 3901f4c50d99SHerbert Xu * skb_segment - Perform protocol segmentation on skb. 3902df5771ffSMichael S. Tsirkin * @head_skb: buffer to segment 3903576a30ebSHerbert Xu * @features: features for the output path (see dev->features) 3904f4c50d99SHerbert Xu * 3905f4c50d99SHerbert Xu * This function performs segmentation on the given skb. It returns 39064c821d75SBen Hutchings * a pointer to the first in a list of new skbs for the segments. 39074c821d75SBen Hutchings * In case of error it returns ERR_PTR(err). 3908f4c50d99SHerbert Xu */ 3909df5771ffSMichael S. Tsirkin struct sk_buff *skb_segment(struct sk_buff *head_skb, 3910df5771ffSMichael S. Tsirkin netdev_features_t features) 3911f4c50d99SHerbert Xu { 3912f4c50d99SHerbert Xu struct sk_buff *segs = NULL; 3913f4c50d99SHerbert Xu struct sk_buff *tail = NULL; 39141a4cedafSMichael S. Tsirkin struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 3915df5771ffSMichael S. Tsirkin skb_frag_t *frag = skb_shinfo(head_skb)->frags; 3916df5771ffSMichael S. Tsirkin unsigned int mss = skb_shinfo(head_skb)->gso_size; 3917df5771ffSMichael S. Tsirkin unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 39181fd819ecSMichael S. Tsirkin struct sk_buff *frag_skb = head_skb; 3919f4c50d99SHerbert Xu unsigned int offset = doffset; 3920df5771ffSMichael S. Tsirkin unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 3921802ab55aSAlexander Duyck unsigned int partial_segs = 0; 3922f4c50d99SHerbert Xu unsigned int headroom; 3923802ab55aSAlexander Duyck unsigned int len = head_skb->len; 3924ec5f0615SPravin B Shelar __be16 proto; 392536c98382SAlexander Duyck bool csum, sg; 3926df5771ffSMichael S. Tsirkin int nfrags = skb_shinfo(head_skb)->nr_frags; 3927f4c50d99SHerbert Xu int err = -ENOMEM; 3928f4c50d99SHerbert Xu int i = 0; 3929f4c50d99SHerbert Xu int pos; 3930f4c50d99SHerbert Xu 39313dcbdb13SShmulik Ladkani if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) && 39323dcbdb13SShmulik Ladkani (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) { 39333dcbdb13SShmulik Ladkani /* gso_size is untrusted, and we have a frag_list with a linear 39343dcbdb13SShmulik Ladkani * non head_frag head. 
39353dcbdb13SShmulik Ladkani * 39363dcbdb13SShmulik Ladkani * (we assume checking the first list_skb member suffices; 39373dcbdb13SShmulik Ladkani * i.e. if any of the list_skb members has a non-head_frag 39383dcbdb13SShmulik Ladkani * head, then the first one does too). 39393dcbdb13SShmulik Ladkani * 39403dcbdb13SShmulik Ladkani * If head_skb's headlen does not fit requested gso_size, it 39413dcbdb13SShmulik Ladkani * means that the frag_list members do NOT terminate on exact 39423dcbdb13SShmulik Ladkani * gso_size boundaries. Hence we cannot perform skb_frag_t page 39433dcbdb13SShmulik Ladkani * sharing. Therefore we must fall back to copying the frag_list 39443dcbdb13SShmulik Ladkani * skbs; we do so by disabling SG. 39453dcbdb13SShmulik Ladkani */ 39463dcbdb13SShmulik Ladkani if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) 39473dcbdb13SShmulik Ladkani features &= ~NETIF_F_SG; 39483dcbdb13SShmulik Ladkani } 39493dcbdb13SShmulik Ladkani 39505882a07cSWei-Chun Chao __skb_push(head_skb, doffset); 39512f631133SMiaohe Lin proto = skb_network_protocol(head_skb, NULL); 3952ec5f0615SPravin B Shelar if (unlikely(!proto)) 3953ec5f0615SPravin B Shelar return ERR_PTR(-EINVAL); 3954ec5f0615SPravin B Shelar 395536c98382SAlexander Duyck sg = !!(features & NETIF_F_SG); 3956f245d079SAlexander Duyck csum = !!can_checksum_protocol(features, proto); 39577e2b10c1STom Herbert 395807b26c94SSteffen Klassert if (sg && csum && (mss != GSO_BY_FRAGS)) { 395907b26c94SSteffen Klassert if (!(features & NETIF_F_GSO_PARTIAL)) { 396007b26c94SSteffen Klassert struct sk_buff *iter; 396143170c4eSIlan Tayari unsigned int frag_len; 396207b26c94SSteffen Klassert 396307b26c94SSteffen Klassert if (!list_skb || 396407b26c94SSteffen Klassert !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 396507b26c94SSteffen Klassert goto normal; 396607b26c94SSteffen Klassert 396743170c4eSIlan Tayari /* If we get here then all the required 396843170c4eSIlan Tayari * GSO features except frag_list are supported. 396943170c4eSIlan Tayari * Try to split the SKB into multiple GSO SKBs 397043170c4eSIlan Tayari * with no frag_list. 397143170c4eSIlan Tayari * Currently we can do that only when the buffers don't 397243170c4eSIlan Tayari * have a linear part and all the buffers except 397343170c4eSIlan Tayari * the last are of the same length. 397407b26c94SSteffen Klassert */ 397543170c4eSIlan Tayari frag_len = list_skb->len; 397607b26c94SSteffen Klassert skb_walk_frags(head_skb, iter) { 397743170c4eSIlan Tayari if (frag_len != iter->len && iter->next) 397843170c4eSIlan Tayari goto normal; 3979eaffadbbSIlan Tayari if (skb_headlen(iter) && !iter->head_frag) 398007b26c94SSteffen Klassert goto normal; 398107b26c94SSteffen Klassert 398207b26c94SSteffen Klassert len -= iter->len; 398307b26c94SSteffen Klassert } 398443170c4eSIlan Tayari 398543170c4eSIlan Tayari if (len != frag_len) 398643170c4eSIlan Tayari goto normal; 398707b26c94SSteffen Klassert } 398807b26c94SSteffen Klassert 3989802ab55aSAlexander Duyck /* GSO partial only requires that we trim off any excess that 3990802ab55aSAlexander Duyck * doesn't fit into an MSS sized block, so take care of that 3991802ab55aSAlexander Duyck * now.
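 *
 * Worked example (hypothetical numbers): with len = 65000 and
 * mss = 1448, partial_segs below becomes 44 and mss is scaled to
 * 44 * 1448 = 63712; the remaining 1288 bytes end up in a final,
 * shorter segment.
 *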
3992802ab55aSAlexander Duyck */ 3993802ab55aSAlexander Duyck partial_segs = len / mss; 3994d7fb5a80SAlexander Duyck if (partial_segs > 1) 3995802ab55aSAlexander Duyck mss *= partial_segs; 3996d7fb5a80SAlexander Duyck else 3997d7fb5a80SAlexander Duyck partial_segs = 0; 3998802ab55aSAlexander Duyck } 3999802ab55aSAlexander Duyck 400007b26c94SSteffen Klassert normal: 4001df5771ffSMichael S. Tsirkin headroom = skb_headroom(head_skb); 4002df5771ffSMichael S. Tsirkin pos = skb_headlen(head_skb); 4003f4c50d99SHerbert Xu 4004f4c50d99SHerbert Xu do { 4005f4c50d99SHerbert Xu struct sk_buff *nskb; 40068cb19905SMichael S. Tsirkin skb_frag_t *nskb_frag; 4007c8884eddSHerbert Xu int hsize; 4008f4c50d99SHerbert Xu int size; 4009f4c50d99SHerbert Xu 40103953c46cSMarcelo Ricardo Leitner if (unlikely(mss == GSO_BY_FRAGS)) { 40113953c46cSMarcelo Ricardo Leitner len = list_skb->len; 40123953c46cSMarcelo Ricardo Leitner } else { 4013df5771ffSMichael S. Tsirkin len = head_skb->len - offset; 4014f4c50d99SHerbert Xu if (len > mss) 4015f4c50d99SHerbert Xu len = mss; 40163953c46cSMarcelo Ricardo Leitner } 4017f4c50d99SHerbert Xu 4018df5771ffSMichael S. Tsirkin hsize = skb_headlen(head_skb) - offset; 4019f4c50d99SHerbert Xu 4020dbd50f23SXin Long if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) && 40211a4cedafSMichael S. Tsirkin (skb_headlen(list_skb) == len || sg)) { 40221a4cedafSMichael S. Tsirkin BUG_ON(skb_headlen(list_skb) > len); 402389319d38SHerbert Xu 40249d8506ccSHerbert Xu i = 0; 40251a4cedafSMichael S. Tsirkin nfrags = skb_shinfo(list_skb)->nr_frags; 40261a4cedafSMichael S. Tsirkin frag = skb_shinfo(list_skb)->frags; 40271fd819ecSMichael S. Tsirkin frag_skb = list_skb; 40281a4cedafSMichael S. Tsirkin pos += skb_headlen(list_skb); 40299d8506ccSHerbert Xu 40309d8506ccSHerbert Xu while (pos < offset + len) { 40319d8506ccSHerbert Xu BUG_ON(i >= nfrags); 40329d8506ccSHerbert Xu 40334e1beba1SMichael S. Tsirkin size = skb_frag_size(frag); 40349d8506ccSHerbert Xu if (pos + size > offset + len) 40359d8506ccSHerbert Xu break; 40369d8506ccSHerbert Xu 40379d8506ccSHerbert Xu i++; 40389d8506ccSHerbert Xu pos += size; 40394e1beba1SMichael S. Tsirkin frag++; 40409d8506ccSHerbert Xu } 40419d8506ccSHerbert Xu 40421a4cedafSMichael S. Tsirkin nskb = skb_clone(list_skb, GFP_ATOMIC); 40431a4cedafSMichael S. Tsirkin list_skb = list_skb->next; 404489319d38SHerbert Xu 4045f4c50d99SHerbert Xu if (unlikely(!nskb)) 4046f4c50d99SHerbert Xu goto err; 4047f4c50d99SHerbert Xu 40489d8506ccSHerbert Xu if (unlikely(pskb_trim(nskb, len))) { 40499d8506ccSHerbert Xu kfree_skb(nskb); 40509d8506ccSHerbert Xu goto err; 40519d8506ccSHerbert Xu } 40529d8506ccSHerbert Xu 4053ec47ea82SAlexander Duyck hsize = skb_end_offset(nskb); 405489319d38SHerbert Xu if (skb_cow_head(nskb, doffset + headroom)) { 405589319d38SHerbert Xu kfree_skb(nskb); 405689319d38SHerbert Xu goto err; 405789319d38SHerbert Xu } 405889319d38SHerbert Xu 4059ec47ea82SAlexander Duyck nskb->truesize += skb_end_offset(nskb) - hsize; 406089319d38SHerbert Xu skb_release_head_state(nskb); 406189319d38SHerbert Xu __skb_push(nskb, doffset); 406289319d38SHerbert Xu } else { 406300b229f7SPaolo Abeni if (hsize < 0) 406400b229f7SPaolo Abeni hsize = 0; 4065dbd50f23SXin Long if (hsize > len || !sg) 4066dbd50f23SXin Long hsize = len; 4067dbd50f23SXin Long 4068c93bdd0eSMel Gorman nskb = __alloc_skb(hsize + doffset + headroom, 4069df5771ffSMichael S. 
Tsirkin GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 4070c93bdd0eSMel Gorman NUMA_NO_NODE); 407189319d38SHerbert Xu 407289319d38SHerbert Xu if (unlikely(!nskb)) 407389319d38SHerbert Xu goto err; 407489319d38SHerbert Xu 407589319d38SHerbert Xu skb_reserve(nskb, headroom); 407689319d38SHerbert Xu __skb_put(nskb, doffset); 407789319d38SHerbert Xu } 407889319d38SHerbert Xu 4079f4c50d99SHerbert Xu if (segs) 4080f4c50d99SHerbert Xu tail->next = nskb; 4081f4c50d99SHerbert Xu else 4082f4c50d99SHerbert Xu segs = nskb; 4083f4c50d99SHerbert Xu tail = nskb; 4084f4c50d99SHerbert Xu 4085df5771ffSMichael S. Tsirkin __copy_skb_header(nskb, head_skb); 4086f4c50d99SHerbert Xu 4087030737bcSEric Dumazet skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 4088fcdfe3a7SVlad Yasevich skb_reset_mac_len(nskb); 408968c33163SPravin B Shelar 4090df5771ffSMichael S. Tsirkin skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 409168c33163SPravin B Shelar nskb->data - tnl_hlen, 409268c33163SPravin B Shelar doffset + tnl_hlen); 409389319d38SHerbert Xu 40949d8506ccSHerbert Xu if (nskb->len == len + doffset) 40951cdbcb79SSimon Horman goto perform_csum_check; 409689319d38SHerbert Xu 40977fbeffedSAlexander Duyck if (!sg) { 40981454c9faSYadu Kishore if (!csum) { 40997fbeffedSAlexander Duyck if (!nskb->remcsum_offload) 41006f85a124SHerbert Xu nskb->ip_summed = CHECKSUM_NONE; 410176443456SAlexander Duyck SKB_GSO_CB(nskb)->csum = 410276443456SAlexander Duyck skb_copy_and_csum_bits(head_skb, offset, 41031454c9faSYadu Kishore skb_put(nskb, 41041454c9faSYadu Kishore len), 41058d5930dfSAl Viro len); 41067e2b10c1STom Herbert SKB_GSO_CB(nskb)->csum_start = 4107de843723STom Herbert skb_headroom(nskb) + doffset; 41081454c9faSYadu Kishore } else { 41091454c9faSYadu Kishore skb_copy_bits(head_skb, offset, 41101454c9faSYadu Kishore skb_put(nskb, len), 41111454c9faSYadu Kishore len); 41121454c9faSYadu Kishore } 4113f4c50d99SHerbert Xu continue; 4114f4c50d99SHerbert Xu } 4115f4c50d99SHerbert Xu 41168cb19905SMichael S. Tsirkin nskb_frag = skb_shinfo(nskb)->frags; 4117f4c50d99SHerbert Xu 4118df5771ffSMichael S. Tsirkin skb_copy_from_linear_data_offset(head_skb, offset, 4119d626f62bSArnaldo Carvalho de Melo skb_put(nskb, hsize), hsize); 4120f4c50d99SHerbert Xu 412106b4feb3SJonathan Lemon skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & 412206b4feb3SJonathan Lemon SKBFL_SHARED_FRAG; 4123cef401deSEric Dumazet 4124bf5c25d6SWillem de Bruijn if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || 4125bf5c25d6SWillem de Bruijn skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) 4126bf5c25d6SWillem de Bruijn goto err; 4127bf5c25d6SWillem de Bruijn 41289d8506ccSHerbert Xu while (pos < offset + len) { 41299d8506ccSHerbert Xu if (i >= nfrags) { 41309d8506ccSHerbert Xu i = 0; 41311a4cedafSMichael S. Tsirkin nfrags = skb_shinfo(list_skb)->nr_frags; 41321a4cedafSMichael S. Tsirkin frag = skb_shinfo(list_skb)->frags; 41331fd819ecSMichael S. Tsirkin frag_skb = list_skb; 413413acc94eSYonghong Song if (!skb_headlen(list_skb)) { 41359d8506ccSHerbert Xu BUG_ON(!nfrags); 413613acc94eSYonghong Song } else { 413713acc94eSYonghong Song BUG_ON(!list_skb->head_frag); 41389d8506ccSHerbert Xu 413913acc94eSYonghong Song /* to make room for head_frag. 
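 * Decrementing i and frag makes i go to -1, so the (i < 0) test
 * further down emits the linear head via skb_head_frag_to_page_desc()
 * before the real frags of this list_skb are copied.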
*/ 414013acc94eSYonghong Song i--; 414113acc94eSYonghong Song frag--; 414213acc94eSYonghong Song } 4143bf5c25d6SWillem de Bruijn if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || 4144bf5c25d6SWillem de Bruijn skb_zerocopy_clone(nskb, frag_skb, 4145bf5c25d6SWillem de Bruijn GFP_ATOMIC)) 4146bf5c25d6SWillem de Bruijn goto err; 4147bf5c25d6SWillem de Bruijn 41481a4cedafSMichael S. Tsirkin list_skb = list_skb->next; 41499d8506ccSHerbert Xu } 41509d8506ccSHerbert Xu 41519d8506ccSHerbert Xu if (unlikely(skb_shinfo(nskb)->nr_frags >= 41529d8506ccSHerbert Xu MAX_SKB_FRAGS)) { 41539d8506ccSHerbert Xu net_warn_ratelimited( 41549d8506ccSHerbert Xu "skb_segment: too many frags: %u %u\n", 41559d8506ccSHerbert Xu pos, mss); 4156ff907a11SEric Dumazet err = -EINVAL; 41579d8506ccSHerbert Xu goto err; 41589d8506ccSHerbert Xu } 41599d8506ccSHerbert Xu 416013acc94eSYonghong Song *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; 41618cb19905SMichael S. Tsirkin __skb_frag_ref(nskb_frag); 41628cb19905SMichael S. Tsirkin size = skb_frag_size(nskb_frag); 4163f4c50d99SHerbert Xu 4164f4c50d99SHerbert Xu if (pos < offset) { 4165b54c9d5bSJonathan Lemon skb_frag_off_add(nskb_frag, offset - pos); 41668cb19905SMichael S. Tsirkin skb_frag_size_sub(nskb_frag, offset - pos); 4167f4c50d99SHerbert Xu } 4168f4c50d99SHerbert Xu 416989319d38SHerbert Xu skb_shinfo(nskb)->nr_frags++; 4170f4c50d99SHerbert Xu 4171f4c50d99SHerbert Xu if (pos + size <= offset + len) { 4172f4c50d99SHerbert Xu i++; 41734e1beba1SMichael S. Tsirkin frag++; 4174f4c50d99SHerbert Xu pos += size; 4175f4c50d99SHerbert Xu } else { 41768cb19905SMichael S. Tsirkin skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); 417789319d38SHerbert Xu goto skip_fraglist; 4178f4c50d99SHerbert Xu } 4179f4c50d99SHerbert Xu 41808cb19905SMichael S. Tsirkin nskb_frag++; 4181f4c50d99SHerbert Xu } 4182f4c50d99SHerbert Xu 418389319d38SHerbert Xu skip_fraglist: 4184f4c50d99SHerbert Xu nskb->data_len = len - hsize; 4185f4c50d99SHerbert Xu nskb->len += nskb->data_len; 4186f4c50d99SHerbert Xu nskb->truesize += nskb->data_len; 4187ec5f0615SPravin B Shelar 41881cdbcb79SSimon Horman perform_csum_check: 41897fbeffedSAlexander Duyck if (!csum) { 4190ff907a11SEric Dumazet if (skb_has_shared_frag(nskb) && 4191ff907a11SEric Dumazet __skb_linearize(nskb)) 4192ddff00d4SAlexander Duyck goto err; 4193ff907a11SEric Dumazet 41947fbeffedSAlexander Duyck if (!nskb->remcsum_offload) 4195ec5f0615SPravin B Shelar nskb->ip_summed = CHECKSUM_NONE; 419676443456SAlexander Duyck SKB_GSO_CB(nskb)->csum = 419776443456SAlexander Duyck skb_checksum(nskb, doffset, 419876443456SAlexander Duyck nskb->len - doffset, 0); 41997e2b10c1STom Herbert SKB_GSO_CB(nskb)->csum_start = 42007e2b10c1STom Herbert skb_headroom(nskb) + doffset; 4201ec5f0615SPravin B Shelar } 4202df5771ffSMichael S. Tsirkin } while ((offset += len) < head_skb->len); 4203f4c50d99SHerbert Xu 4204bec3cfdcSEric Dumazet /* Some callers want to get the end of the list. 4205bec3cfdcSEric Dumazet * Put it in segs->prev to avoid walking the list. 
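 * A caller can then fetch the tail in O(1), e.g. tail = segs->prev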
4206bec3cfdcSEric Dumazet * (see validate_xmit_skb_list() for example) 4207bec3cfdcSEric Dumazet */ 4208bec3cfdcSEric Dumazet segs->prev = tail; 4209432c856fSToshiaki Makita 4210802ab55aSAlexander Duyck if (partial_segs) { 421107b26c94SSteffen Klassert struct sk_buff *iter; 4212802ab55aSAlexander Duyck int type = skb_shinfo(head_skb)->gso_type; 421307b26c94SSteffen Klassert unsigned short gso_size = skb_shinfo(head_skb)->gso_size; 4214802ab55aSAlexander Duyck 4215802ab55aSAlexander Duyck /* Update type to add partial and then remove dodgy if set */ 421607b26c94SSteffen Klassert type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; 4217802ab55aSAlexander Duyck type &= ~SKB_GSO_DODGY; 4218802ab55aSAlexander Duyck 4219802ab55aSAlexander Duyck /* Update GSO info and prepare to start updating headers on 4220802ab55aSAlexander Duyck * our way back down the stack of protocols. 4221802ab55aSAlexander Duyck */ 422207b26c94SSteffen Klassert for (iter = segs; iter; iter = iter->next) { 422307b26c94SSteffen Klassert skb_shinfo(iter)->gso_size = gso_size; 422407b26c94SSteffen Klassert skb_shinfo(iter)->gso_segs = partial_segs; 422507b26c94SSteffen Klassert skb_shinfo(iter)->gso_type = type; 422607b26c94SSteffen Klassert SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; 422707b26c94SSteffen Klassert } 422807b26c94SSteffen Klassert 422907b26c94SSteffen Klassert if (tail->len - doffset <= gso_size) 423007b26c94SSteffen Klassert skb_shinfo(tail)->gso_size = 0; 423107b26c94SSteffen Klassert else if (tail != segs) 423207b26c94SSteffen Klassert skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); 4233802ab55aSAlexander Duyck } 4234802ab55aSAlexander Duyck 4235432c856fSToshiaki Makita /* Following permits correct backpressure, for protocols 4236432c856fSToshiaki Makita * using skb_set_owner_w(). 4237432c856fSToshiaki Makita * Idea is to transfer ownership from head_skb to last segment.
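 * The three swap() calls below hand truesize, the destructor and the
 * socket reference to the tail segment, so the sock_wfree() accounting
 * fires only when the last segment is freed.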
4238432c856fSToshiaki Makita */ 4239432c856fSToshiaki Makita if (head_skb->destructor == sock_wfree) { 4240432c856fSToshiaki Makita swap(tail->truesize, head_skb->truesize); 4241432c856fSToshiaki Makita swap(tail->destructor, head_skb->destructor); 4242432c856fSToshiaki Makita swap(tail->sk, head_skb->sk); 4243432c856fSToshiaki Makita } 4244f4c50d99SHerbert Xu return segs; 4245f4c50d99SHerbert Xu 4246f4c50d99SHerbert Xu err: 4247289dccbeSEric Dumazet kfree_skb_list(segs); 4248f4c50d99SHerbert Xu return ERR_PTR(err); 4249f4c50d99SHerbert Xu } 4250f4c50d99SHerbert Xu EXPORT_SYMBOL_GPL(skb_segment); 4251f4c50d99SHerbert Xu 4252d4546c25SDavid Miller int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) 425371d93b39SHerbert Xu { 42548a29111cSEric Dumazet struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); 425567147ba9SHerbert Xu unsigned int offset = skb_gro_offset(skb); 425667147ba9SHerbert Xu unsigned int headlen = skb_headlen(skb); 42578a29111cSEric Dumazet unsigned int len = skb_gro_len(skb); 4258715dc1f3SEric Dumazet unsigned int delta_truesize; 4259d4546c25SDavid Miller struct sk_buff *lp; 426071d93b39SHerbert Xu 42610ab03f35SSteffen Klassert if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) 426271d93b39SHerbert Xu return -E2BIG; 426371d93b39SHerbert Xu 426429e98242SEric Dumazet lp = NAPI_GRO_CB(p)->last; 42658a29111cSEric Dumazet pinfo = skb_shinfo(lp); 42668a29111cSEric Dumazet 42678a29111cSEric Dumazet if (headlen <= offset) { 426842da6994SHerbert Xu skb_frag_t *frag; 426966e92fcfSHerbert Xu skb_frag_t *frag2; 42709aaa156cSHerbert Xu int i = skbinfo->nr_frags; 42719aaa156cSHerbert Xu int nr_frags = pinfo->nr_frags + i; 427242da6994SHerbert Xu 427366e92fcfSHerbert Xu if (nr_frags > MAX_SKB_FRAGS) 42748a29111cSEric Dumazet goto merge; 427581705ad1SHerbert Xu 42768a29111cSEric Dumazet offset -= headlen; 42779aaa156cSHerbert Xu pinfo->nr_frags = nr_frags; 42789aaa156cSHerbert Xu skbinfo->nr_frags = 0; 4279f5572068SHerbert Xu 42809aaa156cSHerbert Xu frag = pinfo->frags + nr_frags; 42819aaa156cSHerbert Xu frag2 = skbinfo->frags + i; 428266e92fcfSHerbert Xu do { 428366e92fcfSHerbert Xu *--frag = *--frag2; 428466e92fcfSHerbert Xu } while (--i); 428566e92fcfSHerbert Xu 4286b54c9d5bSJonathan Lemon skb_frag_off_add(frag, offset); 42879e903e08SEric Dumazet skb_frag_size_sub(frag, offset); 428866e92fcfSHerbert Xu 4289715dc1f3SEric Dumazet /* all fragments truesize : remove (head size + sk_buff) */ 4290ec47ea82SAlexander Duyck delta_truesize = skb->truesize - 4291ec47ea82SAlexander Duyck SKB_TRUESIZE(skb_end_offset(skb)); 4292715dc1f3SEric Dumazet 4293f5572068SHerbert Xu skb->truesize -= skb->data_len; 4294f5572068SHerbert Xu skb->len -= skb->data_len; 4295f5572068SHerbert Xu skb->data_len = 0; 4296f5572068SHerbert Xu 4297715dc1f3SEric Dumazet NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 42985d38a079SHerbert Xu goto done; 4299d7e8883cSEric Dumazet } else if (skb->head_frag) { 4300d7e8883cSEric Dumazet int nr_frags = pinfo->nr_frags; 4301d7e8883cSEric Dumazet skb_frag_t *frag = pinfo->frags + nr_frags; 4302d7e8883cSEric Dumazet struct page *page = virt_to_head_page(skb->head); 4303d7e8883cSEric Dumazet unsigned int first_size = headlen - offset; 4304d7e8883cSEric Dumazet unsigned int first_offset; 4305d7e8883cSEric Dumazet 4306d7e8883cSEric Dumazet if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 43078a29111cSEric Dumazet goto merge; 4308d7e8883cSEric Dumazet 4309d7e8883cSEric Dumazet first_offset = skb->data - 4310d7e8883cSEric Dumazet (unsigned char *)page_address(page) + 
4311d7e8883cSEric Dumazet offset; 4312d7e8883cSEric Dumazet 4313d7e8883cSEric Dumazet pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 4314d7e8883cSEric Dumazet 4315d8e18a51SMatthew Wilcox (Oracle) __skb_frag_set_page(frag, page); 4316b54c9d5bSJonathan Lemon skb_frag_off_set(frag, first_offset); 4317d7e8883cSEric Dumazet skb_frag_size_set(frag, first_size); 4318d7e8883cSEric Dumazet 4319d7e8883cSEric Dumazet memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 4320d7e8883cSEric Dumazet /* We don't need to clear skbinfo->nr_frags here */ 4321d7e8883cSEric Dumazet 4322715dc1f3SEric Dumazet delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 4323d7e8883cSEric Dumazet NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 4324d7e8883cSEric Dumazet goto done; 43258a29111cSEric Dumazet } 432671d93b39SHerbert Xu 432771d93b39SHerbert Xu merge: 4328715dc1f3SEric Dumazet delta_truesize = skb->truesize; 432967147ba9SHerbert Xu if (offset > headlen) { 4330d1dc7abfSMichal Schmidt unsigned int eat = offset - headlen; 4331d1dc7abfSMichal Schmidt 4332b54c9d5bSJonathan Lemon skb_frag_off_add(&skbinfo->frags[0], eat); 43339e903e08SEric Dumazet skb_frag_size_sub(&skbinfo->frags[0], eat); 4334d1dc7abfSMichal Schmidt skb->data_len -= eat; 4335d1dc7abfSMichal Schmidt skb->len -= eat; 433667147ba9SHerbert Xu offset = headlen; 433756035022SHerbert Xu } 433856035022SHerbert Xu 433967147ba9SHerbert Xu __skb_pull(skb, offset); 434056035022SHerbert Xu 434129e98242SEric Dumazet if (NAPI_GRO_CB(p)->last == p) 43428a29111cSEric Dumazet skb_shinfo(p)->frag_list = skb; 43438a29111cSEric Dumazet else 4344c3c7c254SEric Dumazet NAPI_GRO_CB(p)->last->next = skb; 4345c3c7c254SEric Dumazet NAPI_GRO_CB(p)->last = skb; 4346f4a775d1SEric Dumazet __skb_header_release(skb); 43478a29111cSEric Dumazet lp = p; 434871d93b39SHerbert Xu 43495d38a079SHerbert Xu done: 43505d38a079SHerbert Xu NAPI_GRO_CB(p)->count++; 435137fe4732SHerbert Xu p->data_len += len; 4352715dc1f3SEric Dumazet p->truesize += delta_truesize; 435337fe4732SHerbert Xu p->len += len; 43548a29111cSEric Dumazet if (lp != p) { 43558a29111cSEric Dumazet lp->data_len += len; 43568a29111cSEric Dumazet lp->truesize += delta_truesize; 43578a29111cSEric Dumazet lp->len += len; 43588a29111cSEric Dumazet } 435971d93b39SHerbert Xu NAPI_GRO_CB(skb)->same_flow = 1; 436071d93b39SHerbert Xu return 0; 436171d93b39SHerbert Xu } 436271d93b39SHerbert Xu 4363df5042f4SFlorian Westphal #ifdef CONFIG_SKB_EXTENSIONS 4364df5042f4SFlorian Westphal #define SKB_EXT_ALIGN_VALUE 8 4365df5042f4SFlorian Westphal #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) 4366df5042f4SFlorian Westphal 4367df5042f4SFlorian Westphal static const u8 skb_ext_type_len[] = { 4368df5042f4SFlorian Westphal #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4369df5042f4SFlorian Westphal [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), 4370df5042f4SFlorian Westphal #endif 43714165079bSFlorian Westphal #ifdef CONFIG_XFRM 43724165079bSFlorian Westphal [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), 43734165079bSFlorian Westphal #endif 437495a7233cSPaul Blakey #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 437595a7233cSPaul Blakey [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), 437695a7233cSPaul Blakey #endif 43773ee17bc7SMat Martineau #if IS_ENABLED(CONFIG_MPTCP) 43783ee17bc7SMat Martineau [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), 43793ee17bc7SMat Martineau #endif 4380df5042f4SFlorian Westphal }; 4381df5042f4SFlorian
Westphal 4382df5042f4SFlorian Westphal static __always_inline unsigned int skb_ext_total_length(void) 4383df5042f4SFlorian Westphal { 4384df5042f4SFlorian Westphal return SKB_EXT_CHUNKSIZEOF(struct skb_ext) + 4385df5042f4SFlorian Westphal #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4386df5042f4SFlorian Westphal skb_ext_type_len[SKB_EXT_BRIDGE_NF] + 4387df5042f4SFlorian Westphal #endif 43884165079bSFlorian Westphal #ifdef CONFIG_XFRM 43894165079bSFlorian Westphal skb_ext_type_len[SKB_EXT_SEC_PATH] + 43904165079bSFlorian Westphal #endif 439195a7233cSPaul Blakey #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 439295a7233cSPaul Blakey skb_ext_type_len[TC_SKB_EXT] + 439395a7233cSPaul Blakey #endif 43943ee17bc7SMat Martineau #if IS_ENABLED(CONFIG_MPTCP) 43953ee17bc7SMat Martineau skb_ext_type_len[SKB_EXT_MPTCP] + 43963ee17bc7SMat Martineau #endif 4397df5042f4SFlorian Westphal 0; 4398df5042f4SFlorian Westphal } 4399df5042f4SFlorian Westphal 4400df5042f4SFlorian Westphal static void skb_extensions_init(void) 4401df5042f4SFlorian Westphal { 4402df5042f4SFlorian Westphal BUILD_BUG_ON(SKB_EXT_NUM >= 8); 4403df5042f4SFlorian Westphal BUILD_BUG_ON(skb_ext_total_length() > 255); 4404df5042f4SFlorian Westphal 4405df5042f4SFlorian Westphal skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", 4406df5042f4SFlorian Westphal SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), 4407df5042f4SFlorian Westphal 0, 4408df5042f4SFlorian Westphal SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4409df5042f4SFlorian Westphal NULL); 4410df5042f4SFlorian Westphal } 4411df5042f4SFlorian Westphal #else 4412df5042f4SFlorian Westphal static void skb_extensions_init(void) {} 4413df5042f4SFlorian Westphal #endif 4414df5042f4SFlorian Westphal 44151da177e4SLinus Torvalds void __init skb_init(void) 44161da177e4SLinus Torvalds { 441779a8a642SKees Cook skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache", 44181da177e4SLinus Torvalds sizeof(struct sk_buff), 44191da177e4SLinus Torvalds 0, 4420e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 442179a8a642SKees Cook offsetof(struct sk_buff, cb), 442279a8a642SKees Cook sizeof_field(struct sk_buff, cb), 442320c2df83SPaul Mundt NULL); 4424d179cd12SDavid S. Miller skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 4425d0bf4a9eSEric Dumazet sizeof(struct sk_buff_fclones), 4426d179cd12SDavid S. Miller 0, 4427e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 442820c2df83SPaul Mundt NULL); 4429df5042f4SFlorian Westphal skb_extensions_init(); 44301da177e4SLinus Torvalds } 44311da177e4SLinus Torvalds 443251c739d1SDavid S. Miller static int 443348a1df65SJason A. Donenfeld __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, 443448a1df65SJason A. Donenfeld unsigned int recursion_level) 4435716ea3a7SDavid Howells { 44361a028e50SDavid S. Miller int start = skb_headlen(skb); 44371a028e50SDavid S. Miller int i, copy = start - offset; 4438fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 4439716ea3a7SDavid Howells int elt = 0; 4440716ea3a7SDavid Howells 444148a1df65SJason A. Donenfeld if (unlikely(recursion_level >= 24)) 444248a1df65SJason A. Donenfeld return -EMSGSIZE; 444348a1df65SJason A. 
Donenfeld 4444716ea3a7SDavid Howells if (copy > 0) { 4445716ea3a7SDavid Howells if (copy > len) 4446716ea3a7SDavid Howells copy = len; 4447642f1490SJens Axboe sg_set_buf(sg, skb->data + offset, copy); 4448716ea3a7SDavid Howells elt++; 4449716ea3a7SDavid Howells if ((len -= copy) == 0) 4450716ea3a7SDavid Howells return elt; 4451716ea3a7SDavid Howells offset += copy; 4452716ea3a7SDavid Howells } 4453716ea3a7SDavid Howells 4454716ea3a7SDavid Howells for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 44551a028e50SDavid S. Miller int end; 4456716ea3a7SDavid Howells 4457547b792cSIlpo Järvinen WARN_ON(start > offset + len); 44581a028e50SDavid S. Miller 44599e903e08SEric Dumazet end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 4460716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 4461716ea3a7SDavid Howells skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 446248a1df65SJason A. Donenfeld if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 446348a1df65SJason A. Donenfeld return -EMSGSIZE; 4464716ea3a7SDavid Howells 4465716ea3a7SDavid Howells if (copy > len) 4466716ea3a7SDavid Howells copy = len; 4467ea2ab693SIan Campbell sg_set_page(&sg[elt], skb_frag_page(frag), copy, 4468b54c9d5bSJonathan Lemon skb_frag_off(frag) + offset - start); 4469716ea3a7SDavid Howells elt++; 4470716ea3a7SDavid Howells if (!(len -= copy)) 4471716ea3a7SDavid Howells return elt; 4472716ea3a7SDavid Howells offset += copy; 4473716ea3a7SDavid Howells } 44741a028e50SDavid S. Miller start = end; 4475716ea3a7SDavid Howells } 4476716ea3a7SDavid Howells 4477fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 447848a1df65SJason A. Donenfeld int end, ret; 4479716ea3a7SDavid Howells 4480547b792cSIlpo Järvinen WARN_ON(start > offset + len); 44811a028e50SDavid S. Miller 4482fbb398a8SDavid S. Miller end = start + frag_iter->len; 4483716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 448448a1df65SJason A. Donenfeld if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 448548a1df65SJason A. Donenfeld return -EMSGSIZE; 448648a1df65SJason A. Donenfeld 4487716ea3a7SDavid Howells if (copy > len) 4488716ea3a7SDavid Howells copy = len; 448948a1df65SJason A. Donenfeld ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, 449048a1df65SJason A. Donenfeld copy, recursion_level + 1); 449148a1df65SJason A. Donenfeld if (unlikely(ret < 0)) 449248a1df65SJason A. Donenfeld return ret; 449348a1df65SJason A. Donenfeld elt += ret; 4494716ea3a7SDavid Howells if ((len -= copy) == 0) 4495716ea3a7SDavid Howells return elt; 4496716ea3a7SDavid Howells offset += copy; 4497716ea3a7SDavid Howells } 44981a028e50SDavid S. Miller start = end; 4499716ea3a7SDavid Howells } 4500716ea3a7SDavid Howells BUG_ON(len); 4501716ea3a7SDavid Howells return elt; 4502716ea3a7SDavid Howells } 4503716ea3a7SDavid Howells 450448a1df65SJason A. Donenfeld /** 450548a1df65SJason A. Donenfeld * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 450648a1df65SJason A. Donenfeld * @skb: Socket buffer containing the buffers to be mapped 450748a1df65SJason A. Donenfeld * @sg: The scatter-gather list to map into 450848a1df65SJason A. Donenfeld * @offset: The offset into the buffer's contents to start mapping 450948a1df65SJason A. Donenfeld * @len: Length of buffer space to be mapped 451048a1df65SJason A. Donenfeld * 451148a1df65SJason A. Donenfeld * Fill the specified scatter-gather list with mappings/pointers into a 451248a1df65SJason A. Donenfeld * region of the buffer space attached to a socket buffer. Returns either 451348a1df65SJason A. 
Donenfeld * the number of scatterlist items used, or -EMSGSIZE if the contents 451448a1df65SJason A. Donenfeld * could not fit. 451548a1df65SJason A. Donenfeld */ 451648a1df65SJason A. Donenfeld int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 451748a1df65SJason A. Donenfeld { 451848a1df65SJason A. Donenfeld int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); 451948a1df65SJason A. Donenfeld 452048a1df65SJason A. Donenfeld if (nsg <= 0) 452148a1df65SJason A. Donenfeld return nsg; 452248a1df65SJason A. Donenfeld 452348a1df65SJason A. Donenfeld sg_mark_end(&sg[nsg - 1]); 452448a1df65SJason A. Donenfeld 452548a1df65SJason A. Donenfeld return nsg; 452648a1df65SJason A. Donenfeld } 452748a1df65SJason A. Donenfeld EXPORT_SYMBOL_GPL(skb_to_sgvec); 452848a1df65SJason A. Donenfeld 
452925a91d8dSFan Du /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the 453025a91d8dSFan Du * given sglist without marking the sg entry that contains the last skb data 453125a91d8dSFan Du * as the end. So the caller can manipulate the sg list at will when appending 453225a91d8dSFan Du * new data after the first call, without calling sg_unmark_end to extend it. 453325a91d8dSFan Du * 453425a91d8dSFan Du * Scenario to use skb_to_sgvec_nomark: 453525a91d8dSFan Du * 1. sg_init_table 453625a91d8dSFan Du * 2. skb_to_sgvec_nomark(payload1) 453725a91d8dSFan Du * 3. skb_to_sgvec_nomark(payload2) 453825a91d8dSFan Du * 453925a91d8dSFan Du * This is equivalent to: 454025a91d8dSFan Du * 1. sg_init_table 454125a91d8dSFan Du * 2. skb_to_sgvec(payload1) 454225a91d8dSFan Du * 3. sg_unmark_end 454325a91d8dSFan Du * 4. skb_to_sgvec(payload2) 454425a91d8dSFan Du * 454525a91d8dSFan Du * When mapping multiple payloads conditionally, skb_to_sgvec_nomark 454625a91d8dSFan Du * is preferable. 454725a91d8dSFan Du */ 454825a91d8dSFan Du int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, 454925a91d8dSFan Du int offset, int len) 455025a91d8dSFan Du { 455148a1df65SJason A. Donenfeld return __skb_to_sgvec(skb, sg, offset, len, 0); 455225a91d8dSFan Du } 455325a91d8dSFan Du EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); 455425a91d8dSFan Du 455551c739d1SDavid S. Miller 455651c739d1SDavid S. Miller 
4557716ea3a7SDavid Howells /** 4558716ea3a7SDavid Howells * skb_cow_data - Check that a socket buffer's data buffers are writable 4559716ea3a7SDavid Howells * @skb: The socket buffer to check. 4560716ea3a7SDavid Howells * @tailbits: Amount of trailing space to be added 4561716ea3a7SDavid Howells * @trailer: Returned pointer to the skb where the @tailbits space begins 4562716ea3a7SDavid Howells * 4563716ea3a7SDavid Howells * Make sure that the data buffers attached to a socket buffer are 4564716ea3a7SDavid Howells * writable. If they are not, private copies are made of the data buffers 4565716ea3a7SDavid Howells * and the socket buffer is set to use these instead. 4566716ea3a7SDavid Howells * 4567716ea3a7SDavid Howells * If @tailbits is given, make sure that there is space to write @tailbits 4568716ea3a7SDavid Howells * bytes of data beyond the current end of the socket buffer. @trailer will be 4569716ea3a7SDavid Howells * set to point to the skb in which this space begins. 4570716ea3a7SDavid Howells * 4571716ea3a7SDavid Howells * The number of scatterlist elements required to completely map the 4572716ea3a7SDavid Howells * COW'd and extended socket buffer will be returned.
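 *
 * A minimal caller sketch (loosely modelled on how IPsec transforms use
 * this helper; the 16-byte trailer and all variable names are
 * illustrative, not part of the API):
 *
 *	struct sk_buff *trailer;
 *	struct scatterlist *sg;
 *	int nfrags;
 *
 *	nfrags = skb_cow_data(skb, 16, &trailer);
 *	if (nfrags < 0)
 *		return nfrags;
 *	sg = kmalloc_array(nfrags, sizeof(*sg), GFP_ATOMIC);
 *	if (!sg)
 *		return -ENOMEM;
 *	sg_init_table(sg, nfrags);
 *	nfrags = skb_to_sgvec(skb, sg, 0, skb->len);	// may be -EMSGSIZE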
4573716ea3a7SDavid Howells */ 4574716ea3a7SDavid Howells int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 4575716ea3a7SDavid Howells { 4576716ea3a7SDavid Howells int copyflag; 4577716ea3a7SDavid Howells int elt; 4578716ea3a7SDavid Howells struct sk_buff *skb1, **skb_p; 4579716ea3a7SDavid Howells 
4580716ea3a7SDavid Howells /* If skb is cloned or its head is paged, reallocate 4581716ea3a7SDavid Howells * head pulling out all the pages (pages are considered not writable 4582716ea3a7SDavid Howells * at the moment even if they are anonymous). 4583716ea3a7SDavid Howells */ 4584716ea3a7SDavid Howells if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 4585c15fc199SMiaohe Lin !__pskb_pull_tail(skb, __skb_pagelen(skb))) 4586716ea3a7SDavid Howells return -ENOMEM; 4587716ea3a7SDavid Howells 
4588716ea3a7SDavid Howells /* Easy case. Most packets will go this way. */ 458921dc3301SDavid S. Miller if (!skb_has_frag_list(skb)) { 4590716ea3a7SDavid Howells /* A little trouble: not enough space for the trailer. 4591716ea3a7SDavid Howells * This should not happen when the stack is tuned to generate 4592716ea3a7SDavid Howells * good frames. On a miss we reallocate and reserve even more 4593716ea3a7SDavid Howells * space; 128 bytes is fair. */ 4594716ea3a7SDavid Howells 
4595716ea3a7SDavid Howells if (skb_tailroom(skb) < tailbits && 4596716ea3a7SDavid Howells pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 4597716ea3a7SDavid Howells return -ENOMEM; 4598716ea3a7SDavid Howells 4599716ea3a7SDavid Howells /* Voila! */ 4600716ea3a7SDavid Howells *trailer = skb; 4601716ea3a7SDavid Howells return 1; 4602716ea3a7SDavid Howells } 4603716ea3a7SDavid Howells 
4604716ea3a7SDavid Howells /* Misery. We are in trouble, going to mince fragments... */ 4605716ea3a7SDavid Howells 4606716ea3a7SDavid Howells elt = 1; 4607716ea3a7SDavid Howells skb_p = &skb_shinfo(skb)->frag_list; 4608716ea3a7SDavid Howells copyflag = 0; 4609716ea3a7SDavid Howells 4610716ea3a7SDavid Howells while ((skb1 = *skb_p) != NULL) { 4611716ea3a7SDavid Howells int ntail = 0; 4612716ea3a7SDavid Howells 
4613716ea3a7SDavid Howells /* The fragment is partially pulled by someone, 4614716ea3a7SDavid Howells * this can happen on input. Copy it and everything 4615716ea3a7SDavid Howells * after it. */ 4616716ea3a7SDavid Howells 4617716ea3a7SDavid Howells if (skb_shared(skb1)) 4618716ea3a7SDavid Howells copyflag = 1; 4619716ea3a7SDavid Howells 4620716ea3a7SDavid Howells /* If the skb is the last, worry about trailer. */ 4621716ea3a7SDavid Howells 
4622716ea3a7SDavid Howells if (skb1->next == NULL && tailbits) { 4623716ea3a7SDavid Howells if (skb_shinfo(skb1)->nr_frags || 462421dc3301SDavid S. Miller skb_has_frag_list(skb1) || 4625716ea3a7SDavid Howells skb_tailroom(skb1) < tailbits) 4626716ea3a7SDavid Howells ntail = tailbits + 128; 4627716ea3a7SDavid Howells } 4628716ea3a7SDavid Howells 
4629716ea3a7SDavid Howells if (copyflag || 4630716ea3a7SDavid Howells skb_cloned(skb1) || 4631716ea3a7SDavid Howells ntail || 4632716ea3a7SDavid Howells skb_shinfo(skb1)->nr_frags || 463321dc3301SDavid S. Miller skb_has_frag_list(skb1)) { 4634716ea3a7SDavid Howells struct sk_buff *skb2; 4635716ea3a7SDavid Howells 4636716ea3a7SDavid Howells /* Worst case: we really do have to copy this fragment...
*/ 4637716ea3a7SDavid Howells if (ntail == 0) 4638716ea3a7SDavid Howells skb2 = skb_copy(skb1, GFP_ATOMIC); 4639716ea3a7SDavid Howells else 4640716ea3a7SDavid Howells skb2 = skb_copy_expand(skb1, 4641716ea3a7SDavid Howells skb_headroom(skb1), 4642716ea3a7SDavid Howells ntail, 4643716ea3a7SDavid Howells GFP_ATOMIC); 4644716ea3a7SDavid Howells if (unlikely(skb2 == NULL)) 4645716ea3a7SDavid Howells return -ENOMEM; 4646716ea3a7SDavid Howells 4647716ea3a7SDavid Howells if (skb1->sk) 4648716ea3a7SDavid Howells skb_set_owner_w(skb2, skb1->sk); 4649716ea3a7SDavid Howells 4650716ea3a7SDavid Howells /* Looking around. Are we still alive? 4651716ea3a7SDavid Howells * OK, link new skb, drop old one */ 4652716ea3a7SDavid Howells 4653716ea3a7SDavid Howells skb2->next = skb1->next; 4654716ea3a7SDavid Howells *skb_p = skb2; 4655716ea3a7SDavid Howells kfree_skb(skb1); 4656716ea3a7SDavid Howells skb1 = skb2; 4657716ea3a7SDavid Howells } 4658716ea3a7SDavid Howells elt++; 4659716ea3a7SDavid Howells *trailer = skb1; 4660716ea3a7SDavid Howells skb_p = &skb1->next; 4661716ea3a7SDavid Howells } 4662716ea3a7SDavid Howells 4663716ea3a7SDavid Howells return elt; 4664716ea3a7SDavid Howells } 4665b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_cow_data); 4666716ea3a7SDavid Howells 4667b1faf566SEric Dumazet static void sock_rmem_free(struct sk_buff *skb) 4668b1faf566SEric Dumazet { 4669b1faf566SEric Dumazet struct sock *sk = skb->sk; 4670b1faf566SEric Dumazet 4671b1faf566SEric Dumazet atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 4672b1faf566SEric Dumazet } 4673b1faf566SEric Dumazet 46748605330aSSoheil Hassas Yeganeh static void skb_set_err_queue(struct sk_buff *skb) 46758605330aSSoheil Hassas Yeganeh { 46768605330aSSoheil Hassas Yeganeh /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. 46778605330aSSoheil Hassas Yeganeh * So, it is safe to (mis)use it to mark skbs on the error queue. 
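 * (Readers elsewhere in this file rely on exactly this marking: an skb
 * whose pkt_type is PACKET_OUTGOING is treated as an error-queue skb;
 * see skb_is_err_queue(), which does nothing more than that test.)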
46788605330aSSoheil Hassas Yeganeh */ 46798605330aSSoheil Hassas Yeganeh skb->pkt_type = PACKET_OUTGOING; 46808605330aSSoheil Hassas Yeganeh BUILD_BUG_ON(PACKET_OUTGOING == 0); 46818605330aSSoheil Hassas Yeganeh } 46828605330aSSoheil Hassas Yeganeh 
4683b1faf566SEric Dumazet /* 4684b1faf566SEric Dumazet * Note: We don't mem charge error packets (no sk_forward_alloc changes) 4685b1faf566SEric Dumazet */ 4686b1faf566SEric Dumazet int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 4687b1faf566SEric Dumazet { 4688b1faf566SEric Dumazet if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 4689ebb3b78dSEric Dumazet (unsigned int)READ_ONCE(sk->sk_rcvbuf)) 4690b1faf566SEric Dumazet return -ENOMEM; 4691b1faf566SEric Dumazet 
4692b1faf566SEric Dumazet skb_orphan(skb); 4693b1faf566SEric Dumazet skb->sk = sk; 4694b1faf566SEric Dumazet skb->destructor = sock_rmem_free; 4695b1faf566SEric Dumazet atomic_add(skb->truesize, &sk->sk_rmem_alloc); 46968605330aSSoheil Hassas Yeganeh skb_set_err_queue(skb); 4697b1faf566SEric Dumazet 
4698abb57ea4SEric Dumazet /* before exiting rcu section, make sure dst is refcounted */ 4699abb57ea4SEric Dumazet skb_dst_force(skb); 4700abb57ea4SEric Dumazet 4701b1faf566SEric Dumazet skb_queue_tail(&sk->sk_error_queue, skb); 4702b1faf566SEric Dumazet if (!sock_flag(sk, SOCK_DEAD)) 4703e3ae2365SAlexander Aring sk_error_report(sk); 4704b1faf566SEric Dumazet return 0; 4705b1faf566SEric Dumazet } 4706b1faf566SEric Dumazet EXPORT_SYMBOL(sock_queue_err_skb); 4707b1faf566SEric Dumazet 
470883a1a1a7SSoheil Hassas Yeganeh static bool is_icmp_err_skb(const struct sk_buff *skb) 470983a1a1a7SSoheil Hassas Yeganeh { 471083a1a1a7SSoheil Hassas Yeganeh return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || 471183a1a1a7SSoheil Hassas Yeganeh SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); 471283a1a1a7SSoheil Hassas Yeganeh } 471383a1a1a7SSoheil Hassas Yeganeh 
4714364a9e93SWillem de Bruijn struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 4715364a9e93SWillem de Bruijn { 4716364a9e93SWillem de Bruijn struct sk_buff_head *q = &sk->sk_error_queue; 471783a1a1a7SSoheil Hassas Yeganeh struct sk_buff *skb, *skb_next = NULL; 471883a1a1a7SSoheil Hassas Yeganeh bool icmp_next = false; 4719997d5c3fSEric Dumazet unsigned long flags; 4720364a9e93SWillem de Bruijn 
4721997d5c3fSEric Dumazet spin_lock_irqsave(&q->lock, flags); 4722364a9e93SWillem de Bruijn skb = __skb_dequeue(q); 472338b25793SSoheil Hassas Yeganeh if (skb && (skb_next = skb_peek(q))) { 472483a1a1a7SSoheil Hassas Yeganeh icmp_next = is_icmp_err_skb(skb_next); 472538b25793SSoheil Hassas Yeganeh if (icmp_next) 4726985f7337SWillem de Bruijn sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; 472738b25793SSoheil Hassas Yeganeh } 4728997d5c3fSEric Dumazet spin_unlock_irqrestore(&q->lock, flags); 4729364a9e93SWillem de Bruijn 
473083a1a1a7SSoheil Hassas Yeganeh if (is_icmp_err_skb(skb) && !icmp_next) 473183a1a1a7SSoheil Hassas Yeganeh sk->sk_err = 0; 473283a1a1a7SSoheil Hassas Yeganeh 473383a1a1a7SSoheil Hassas Yeganeh if (skb_next) 4734e3ae2365SAlexander Aring sk_error_report(sk); 4735364a9e93SWillem de Bruijn 4736364a9e93SWillem de Bruijn return skb; 4737364a9e93SWillem de Bruijn } 4738364a9e93SWillem de Bruijn EXPORT_SYMBOL(sock_dequeue_err_skb); 4739364a9e93SWillem de Bruijn 
4740cab41c47SAlexander Duyck /** 4741cab41c47SAlexander Duyck * skb_clone_sk - create clone of skb, and take reference to socket 4742cab41c47SAlexander Duyck * @skb: the skb to clone 4743cab41c47SAlexander Duyck * 4744cab41c47SAlexander Duyck * This
function creates a clone of a buffer that holds a reference on 4745cab41c47SAlexander Duyck * sk_refcnt. Buffers created via this function are meant to be 4746cab41c47SAlexander Duyck * returned using sock_queue_err_skb, or freed via kfree_skb. 4747cab41c47SAlexander Duyck * 4748cab41c47SAlexander Duyck * When passing buffers allocated with this function to sock_queue_err_skb 4749cab41c47SAlexander Duyck * it is necessary to wrap the call with sock_hold/sock_put in order to 4750cab41c47SAlexander Duyck * prevent the socket from being released prior to being enqueued on 4751cab41c47SAlexander Duyck * the sk_error_queue. 4752cab41c47SAlexander Duyck */ 
475362bccb8cSAlexander Duyck struct sk_buff *skb_clone_sk(struct sk_buff *skb) 475462bccb8cSAlexander Duyck { 475562bccb8cSAlexander Duyck struct sock *sk = skb->sk; 475662bccb8cSAlexander Duyck struct sk_buff *clone; 475762bccb8cSAlexander Duyck 475841c6d650SReshetova, Elena if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 475962bccb8cSAlexander Duyck return NULL; 476062bccb8cSAlexander Duyck 476162bccb8cSAlexander Duyck clone = skb_clone(skb, GFP_ATOMIC); 476262bccb8cSAlexander Duyck if (!clone) { 476362bccb8cSAlexander Duyck sock_put(sk); 476462bccb8cSAlexander Duyck return NULL; 476562bccb8cSAlexander Duyck } 476662bccb8cSAlexander Duyck 476762bccb8cSAlexander Duyck clone->sk = sk; 476862bccb8cSAlexander Duyck clone->destructor = sock_efree; 476962bccb8cSAlexander Duyck 477062bccb8cSAlexander Duyck return clone; 477162bccb8cSAlexander Duyck } 477262bccb8cSAlexander Duyck EXPORT_SYMBOL(skb_clone_sk); 477362bccb8cSAlexander Duyck 
477437846ef0SAlexander Duyck static void __skb_complete_tx_timestamp(struct sk_buff *skb, 477537846ef0SAlexander Duyck struct sock *sk, 47764ef1b286SSoheil Hassas Yeganeh int tstype, 47774ef1b286SSoheil Hassas Yeganeh bool opt_stats) 4778ac45f602SPatrick Ohly { 4779ac45f602SPatrick Ohly struct sock_exterr_skb *serr; 4780ac45f602SPatrick Ohly int err; 4781ac45f602SPatrick Ohly 47824ef1b286SSoheil Hassas Yeganeh BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); 47834ef1b286SSoheil Hassas Yeganeh 
4784ac45f602SPatrick Ohly serr = SKB_EXT_ERR(skb); 4785ac45f602SPatrick Ohly memset(serr, 0, sizeof(*serr)); 4786ac45f602SPatrick Ohly serr->ee.ee_errno = ENOMSG; 4787ac45f602SPatrick Ohly serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 4788e7fd2885SWillem de Bruijn serr->ee.ee_info = tstype; 47894ef1b286SSoheil Hassas Yeganeh serr->opt_stats = opt_stats; 47901862d620SWillem de Bruijn serr->header.h4.iif = skb->dev ?
skb->dev->ifindex : 0; 47914ed2d765SWillem de Bruijn if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 479209c2d251SWillem de Bruijn serr->ee.ee_data = skb_shinfo(skb)->tskey; 4793ac5cc977SWANG Cong if (sk->sk_protocol == IPPROTO_TCP && 4794ac5cc977SWANG Cong sk->sk_type == SOCK_STREAM) 47954ed2d765SWillem de Bruijn serr->ee.ee_data -= sk->sk_tskey; 47964ed2d765SWillem de Bruijn } 479729030374SEric Dumazet 4798ac45f602SPatrick Ohly err = sock_queue_err_skb(sk, skb); 479929030374SEric Dumazet 4800ac45f602SPatrick Ohly if (err) 4801ac45f602SPatrick Ohly kfree_skb(skb); 4802ac45f602SPatrick Ohly } 480337846ef0SAlexander Duyck 4804b245be1fSWillem de Bruijn static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 4805b245be1fSWillem de Bruijn { 4806b245be1fSWillem de Bruijn bool ret; 4807b245be1fSWillem de Bruijn 4808b245be1fSWillem de Bruijn if (likely(sysctl_tstamp_allow_data || tsonly)) 4809b245be1fSWillem de Bruijn return true; 4810b245be1fSWillem de Bruijn 4811b245be1fSWillem de Bruijn read_lock_bh(&sk->sk_callback_lock); 4812b245be1fSWillem de Bruijn ret = sk->sk_socket && sk->sk_socket->file && 4813b245be1fSWillem de Bruijn file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 4814b245be1fSWillem de Bruijn read_unlock_bh(&sk->sk_callback_lock); 4815b245be1fSWillem de Bruijn return ret; 4816b245be1fSWillem de Bruijn } 4817b245be1fSWillem de Bruijn 481837846ef0SAlexander Duyck void skb_complete_tx_timestamp(struct sk_buff *skb, 481937846ef0SAlexander Duyck struct skb_shared_hwtstamps *hwtstamps) 482037846ef0SAlexander Duyck { 482137846ef0SAlexander Duyck struct sock *sk = skb->sk; 482237846ef0SAlexander Duyck 4823b245be1fSWillem de Bruijn if (!skb_may_tx_timestamp(sk, false)) 482435b99dffSWillem de Bruijn goto err; 4825b245be1fSWillem de Bruijn 48269ac25fc0SEric Dumazet /* Take a reference to prevent skb_orphan() from freeing the socket, 48279ac25fc0SEric Dumazet * but only if the socket refcount is not zero. 
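	 * (If the refcount has already dropped to zero the socket is being
	 * destroyed, and taking a new reference would resurrect it; hence
	 * refcount_inc_not_zero() below and the kfree_skb() fallback.)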
48289ac25fc0SEric Dumazet */ 482941c6d650SReshetova, Elena if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 483037846ef0SAlexander Duyck *skb_hwtstamps(skb) = *hwtstamps; 48314ef1b286SSoheil Hassas Yeganeh __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 483237846ef0SAlexander Duyck sock_put(sk); 483335b99dffSWillem de Bruijn return; 483437846ef0SAlexander Duyck } 483535b99dffSWillem de Bruijn 483635b99dffSWillem de Bruijn err: 483735b99dffSWillem de Bruijn kfree_skb(skb); 48389ac25fc0SEric Dumazet } 483937846ef0SAlexander Duyck EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 484037846ef0SAlexander Duyck 484137846ef0SAlexander Duyck void __skb_tstamp_tx(struct sk_buff *orig_skb, 4842e7ed11eeSYousuk Seung const struct sk_buff *ack_skb, 484337846ef0SAlexander Duyck struct skb_shared_hwtstamps *hwtstamps, 484437846ef0SAlexander Duyck struct sock *sk, int tstype) 484537846ef0SAlexander Duyck { 484637846ef0SAlexander Duyck struct sk_buff *skb; 48474ef1b286SSoheil Hassas Yeganeh bool tsonly, opt_stats = false; 484837846ef0SAlexander Duyck 48493a8dd971SWillem de Bruijn if (!sk) 48503a8dd971SWillem de Bruijn return; 48513a8dd971SWillem de Bruijn 4852b50a5c70SMiroslav Lichvar if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && 4853b50a5c70SMiroslav Lichvar skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) 4854b50a5c70SMiroslav Lichvar return; 4855b50a5c70SMiroslav Lichvar 48563a8dd971SWillem de Bruijn tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 48573a8dd971SWillem de Bruijn if (!skb_may_tx_timestamp(sk, tsonly)) 485837846ef0SAlexander Duyck return; 485937846ef0SAlexander Duyck 48601c885808SFrancis Yan if (tsonly) { 48611c885808SFrancis Yan #ifdef CONFIG_INET 48621c885808SFrancis Yan if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && 48631c885808SFrancis Yan sk->sk_protocol == IPPROTO_TCP && 48644ef1b286SSoheil Hassas Yeganeh sk->sk_type == SOCK_STREAM) { 4865e7ed11eeSYousuk Seung skb = tcp_get_timestamping_opt_stats(sk, orig_skb, 4866e7ed11eeSYousuk Seung ack_skb); 48674ef1b286SSoheil Hassas Yeganeh opt_stats = true; 48684ef1b286SSoheil Hassas Yeganeh } else 48691c885808SFrancis Yan #endif 48701c885808SFrancis Yan skb = alloc_skb(0, GFP_ATOMIC); 48711c885808SFrancis Yan } else { 487237846ef0SAlexander Duyck skb = skb_clone(orig_skb, GFP_ATOMIC); 48731c885808SFrancis Yan } 487437846ef0SAlexander Duyck if (!skb) 487537846ef0SAlexander Duyck return; 487637846ef0SAlexander Duyck 487749ca0d8bSWillem de Bruijn if (tsonly) { 4878fff88030SWillem de Bruijn skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & 4879fff88030SWillem de Bruijn SKBTX_ANY_TSTAMP; 488049ca0d8bSWillem de Bruijn skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 488149ca0d8bSWillem de Bruijn } 488249ca0d8bSWillem de Bruijn 488349ca0d8bSWillem de Bruijn if (hwtstamps) 488449ca0d8bSWillem de Bruijn *skb_hwtstamps(skb) = *hwtstamps; 488549ca0d8bSWillem de Bruijn else 488649ca0d8bSWillem de Bruijn skb->tstamp = ktime_get_real(); 488749ca0d8bSWillem de Bruijn 48884ef1b286SSoheil Hassas Yeganeh __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); 488937846ef0SAlexander Duyck } 4890e7fd2885SWillem de Bruijn EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 4891e7fd2885SWillem de Bruijn 4892e7fd2885SWillem de Bruijn void skb_tstamp_tx(struct sk_buff *orig_skb, 4893e7fd2885SWillem de Bruijn struct skb_shared_hwtstamps *hwtstamps) 4894e7fd2885SWillem de Bruijn { 4895e7ed11eeSYousuk Seung return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, 4896e7fd2885SWillem de Bruijn SCM_TSTAMP_SND); 
4897e7fd2885SWillem de Bruijn } 4898ac45f602SPatrick Ohly EXPORT_SYMBOL_GPL(skb_tstamp_tx); 4899ac45f602SPatrick Ohly 49006e3e939fSJohannes Berg void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 49016e3e939fSJohannes Berg { 49026e3e939fSJohannes Berg struct sock *sk = skb->sk; 49036e3e939fSJohannes Berg struct sock_exterr_skb *serr; 4904dd4f1072SEric Dumazet int err = 1; 49056e3e939fSJohannes Berg 49066e3e939fSJohannes Berg skb->wifi_acked_valid = 1; 49076e3e939fSJohannes Berg skb->wifi_acked = acked; 49086e3e939fSJohannes Berg 49096e3e939fSJohannes Berg serr = SKB_EXT_ERR(skb); 49106e3e939fSJohannes Berg memset(serr, 0, sizeof(*serr)); 49116e3e939fSJohannes Berg serr->ee.ee_errno = ENOMSG; 49126e3e939fSJohannes Berg serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 49136e3e939fSJohannes Berg 4914dd4f1072SEric Dumazet /* Take a reference to prevent skb_orphan() from freeing the socket, 4915dd4f1072SEric Dumazet * but only if the socket refcount is not zero. 4916dd4f1072SEric Dumazet */ 491741c6d650SReshetova, Elena if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 49186e3e939fSJohannes Berg err = sock_queue_err_skb(sk, skb); 4919dd4f1072SEric Dumazet sock_put(sk); 4920dd4f1072SEric Dumazet } 49216e3e939fSJohannes Berg if (err) 49226e3e939fSJohannes Berg kfree_skb(skb); 49236e3e939fSJohannes Berg } 49246e3e939fSJohannes Berg EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 49256e3e939fSJohannes Berg 4926f35d9d8aSRusty Russell /** 4927f35d9d8aSRusty Russell * skb_partial_csum_set - set up and verify partial csum values for packet 4928f35d9d8aSRusty Russell * @skb: the skb to set 4929f35d9d8aSRusty Russell * @start: the number of bytes after skb->data to start checksumming. 4930f35d9d8aSRusty Russell * @off: the offset from start to place the checksum. 4931f35d9d8aSRusty Russell * 4932f35d9d8aSRusty Russell * For untrusted partially-checksummed packets, we need to make sure the values 4933f35d9d8aSRusty Russell * for skb->csum_start and skb->csum_offset are valid so we don't oops. 4934f35d9d8aSRusty Russell * 4935f35d9d8aSRusty Russell * This function checks and sets those values and skb->ip_summed: if this 4936f35d9d8aSRusty Russell * returns false you should drop the packet. 4937f35d9d8aSRusty Russell */ 4938f35d9d8aSRusty Russell bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 4939f35d9d8aSRusty Russell { 494052b5d6f5SEric Dumazet u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); 494152b5d6f5SEric Dumazet u32 csum_start = skb_headroom(skb) + (u32)start; 494252b5d6f5SEric Dumazet 494352b5d6f5SEric Dumazet if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) { 494452b5d6f5SEric Dumazet net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", 494552b5d6f5SEric Dumazet start, off, skb_headroom(skb), skb_headlen(skb)); 4946f35d9d8aSRusty Russell return false; 4947f35d9d8aSRusty Russell } 4948f35d9d8aSRusty Russell skb->ip_summed = CHECKSUM_PARTIAL; 494952b5d6f5SEric Dumazet skb->csum_start = csum_start; 4950f35d9d8aSRusty Russell skb->csum_offset = off; 4951e5d5decaSJason Wang skb_set_transport_header(skb, start); 4952f35d9d8aSRusty Russell return true; 4953f35d9d8aSRusty Russell } 4954b4ac530fSDavid S. 
Miller EXPORT_SYMBOL_GPL(skb_partial_csum_set); 4955f35d9d8aSRusty Russell 4956ed1f50c3SPaul Durrant static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 4957ed1f50c3SPaul Durrant unsigned int max) 4958ed1f50c3SPaul Durrant { 4959ed1f50c3SPaul Durrant if (skb_headlen(skb) >= len) 4960ed1f50c3SPaul Durrant return 0; 4961ed1f50c3SPaul Durrant 4962ed1f50c3SPaul Durrant /* If we need to pullup then pullup to the max, so we 4963ed1f50c3SPaul Durrant * won't need to do it again. 4964ed1f50c3SPaul Durrant */ 4965ed1f50c3SPaul Durrant if (max > skb->len) 4966ed1f50c3SPaul Durrant max = skb->len; 4967ed1f50c3SPaul Durrant 4968ed1f50c3SPaul Durrant if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 4969ed1f50c3SPaul Durrant return -ENOMEM; 4970ed1f50c3SPaul Durrant 4971ed1f50c3SPaul Durrant if (skb_headlen(skb) < len) 4972ed1f50c3SPaul Durrant return -EPROTO; 4973ed1f50c3SPaul Durrant 4974ed1f50c3SPaul Durrant return 0; 4975ed1f50c3SPaul Durrant } 4976ed1f50c3SPaul Durrant 4977f9708b43SJan Beulich #define MAX_TCP_HDR_LEN (15 * 4) 4978f9708b43SJan Beulich 4979f9708b43SJan Beulich static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 4980f9708b43SJan Beulich typeof(IPPROTO_IP) proto, 4981f9708b43SJan Beulich unsigned int off) 4982f9708b43SJan Beulich { 4983f9708b43SJan Beulich int err; 4984f9708b43SJan Beulich 4985161d1792SKees Cook switch (proto) { 4986f9708b43SJan Beulich case IPPROTO_TCP: 4987f9708b43SJan Beulich err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 4988f9708b43SJan Beulich off + MAX_TCP_HDR_LEN); 4989f9708b43SJan Beulich if (!err && !skb_partial_csum_set(skb, off, 4990f9708b43SJan Beulich offsetof(struct tcphdr, 4991f9708b43SJan Beulich check))) 4992f9708b43SJan Beulich err = -EPROTO; 4993f9708b43SJan Beulich return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 4994f9708b43SJan Beulich 4995f9708b43SJan Beulich case IPPROTO_UDP: 4996f9708b43SJan Beulich err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 4997f9708b43SJan Beulich off + sizeof(struct udphdr)); 4998f9708b43SJan Beulich if (!err && !skb_partial_csum_set(skb, off, 4999f9708b43SJan Beulich offsetof(struct udphdr, 5000f9708b43SJan Beulich check))) 5001f9708b43SJan Beulich err = -EPROTO; 5002f9708b43SJan Beulich return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 5003f9708b43SJan Beulich } 5004f9708b43SJan Beulich 5005f9708b43SJan Beulich return ERR_PTR(-EPROTO); 5006f9708b43SJan Beulich } 5007f9708b43SJan Beulich 5008ed1f50c3SPaul Durrant /* This value should be large enough to cover a tagged ethernet header plus 5009ed1f50c3SPaul Durrant * maximally sized IP and TCP or UDP headers. 
5010ed1f50c3SPaul Durrant */ 5011ed1f50c3SPaul Durrant #define MAX_IP_HDR_LEN 128 5012ed1f50c3SPaul Durrant 5013f9708b43SJan Beulich static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 5014ed1f50c3SPaul Durrant { 5015ed1f50c3SPaul Durrant unsigned int off; 5016ed1f50c3SPaul Durrant bool fragment; 5017f9708b43SJan Beulich __sum16 *csum; 5018ed1f50c3SPaul Durrant int err; 5019ed1f50c3SPaul Durrant 5020ed1f50c3SPaul Durrant fragment = false; 5021ed1f50c3SPaul Durrant 5022ed1f50c3SPaul Durrant err = skb_maybe_pull_tail(skb, 5023ed1f50c3SPaul Durrant sizeof(struct iphdr), 5024ed1f50c3SPaul Durrant MAX_IP_HDR_LEN); 5025ed1f50c3SPaul Durrant if (err < 0) 5026ed1f50c3SPaul Durrant goto out; 5027ed1f50c3SPaul Durrant 502811f920d2SMiaohe Lin if (ip_is_fragment(ip_hdr(skb))) 5029ed1f50c3SPaul Durrant fragment = true; 5030ed1f50c3SPaul Durrant 5031ed1f50c3SPaul Durrant off = ip_hdrlen(skb); 5032ed1f50c3SPaul Durrant 5033ed1f50c3SPaul Durrant err = -EPROTO; 5034ed1f50c3SPaul Durrant 5035ed1f50c3SPaul Durrant if (fragment) 5036ed1f50c3SPaul Durrant goto out; 5037ed1f50c3SPaul Durrant 5038f9708b43SJan Beulich csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 5039f9708b43SJan Beulich if (IS_ERR(csum)) 5040f9708b43SJan Beulich return PTR_ERR(csum); 5041ed1f50c3SPaul Durrant 5042ed1f50c3SPaul Durrant if (recalculate) 5043f9708b43SJan Beulich *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 5044ed1f50c3SPaul Durrant ip_hdr(skb)->daddr, 5045ed1f50c3SPaul Durrant skb->len - off, 5046f9708b43SJan Beulich ip_hdr(skb)->protocol, 0); 5047ed1f50c3SPaul Durrant err = 0; 5048ed1f50c3SPaul Durrant 5049ed1f50c3SPaul Durrant out: 5050ed1f50c3SPaul Durrant return err; 5051ed1f50c3SPaul Durrant } 5052ed1f50c3SPaul Durrant 5053ed1f50c3SPaul Durrant /* This value should be large enough to cover a tagged ethernet header plus 5054ed1f50c3SPaul Durrant * an IPv6 header, all options, and a maximal TCP or UDP header. 
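 * (Illustrative arithmetic: 18 bytes of VLAN-tagged Ethernet plus a
 * 40-byte IPv6 header plus a 60-byte TCP header come to 118 bytes,
 * leaving 138 bytes of this 256-byte budget for extension headers.)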
5055ed1f50c3SPaul Durrant */ 5056ed1f50c3SPaul Durrant #define MAX_IPV6_HDR_LEN 256 5057ed1f50c3SPaul Durrant 5058ed1f50c3SPaul Durrant #define OPT_HDR(type, skb, off) \ 5059ed1f50c3SPaul Durrant (type *)(skb_network_header(skb) + (off)) 5060ed1f50c3SPaul Durrant 5061ed1f50c3SPaul Durrant static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 5062ed1f50c3SPaul Durrant { 5063ed1f50c3SPaul Durrant int err; 5064ed1f50c3SPaul Durrant u8 nexthdr; 5065ed1f50c3SPaul Durrant unsigned int off; 5066ed1f50c3SPaul Durrant unsigned int len; 5067ed1f50c3SPaul Durrant bool fragment; 5068ed1f50c3SPaul Durrant bool done; 5069f9708b43SJan Beulich __sum16 *csum; 5070ed1f50c3SPaul Durrant 5071ed1f50c3SPaul Durrant fragment = false; 5072ed1f50c3SPaul Durrant done = false; 5073ed1f50c3SPaul Durrant 5074ed1f50c3SPaul Durrant off = sizeof(struct ipv6hdr); 5075ed1f50c3SPaul Durrant 5076ed1f50c3SPaul Durrant err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 5077ed1f50c3SPaul Durrant if (err < 0) 5078ed1f50c3SPaul Durrant goto out; 5079ed1f50c3SPaul Durrant 5080ed1f50c3SPaul Durrant nexthdr = ipv6_hdr(skb)->nexthdr; 5081ed1f50c3SPaul Durrant 5082ed1f50c3SPaul Durrant len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 5083ed1f50c3SPaul Durrant while (off <= len && !done) { 5084ed1f50c3SPaul Durrant switch (nexthdr) { 5085ed1f50c3SPaul Durrant case IPPROTO_DSTOPTS: 5086ed1f50c3SPaul Durrant case IPPROTO_HOPOPTS: 5087ed1f50c3SPaul Durrant case IPPROTO_ROUTING: { 5088ed1f50c3SPaul Durrant struct ipv6_opt_hdr *hp; 5089ed1f50c3SPaul Durrant 5090ed1f50c3SPaul Durrant err = skb_maybe_pull_tail(skb, 5091ed1f50c3SPaul Durrant off + 5092ed1f50c3SPaul Durrant sizeof(struct ipv6_opt_hdr), 5093ed1f50c3SPaul Durrant MAX_IPV6_HDR_LEN); 5094ed1f50c3SPaul Durrant if (err < 0) 5095ed1f50c3SPaul Durrant goto out; 5096ed1f50c3SPaul Durrant 5097ed1f50c3SPaul Durrant hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 5098ed1f50c3SPaul Durrant nexthdr = hp->nexthdr; 5099ed1f50c3SPaul Durrant off += ipv6_optlen(hp); 5100ed1f50c3SPaul Durrant break; 5101ed1f50c3SPaul Durrant } 5102ed1f50c3SPaul Durrant case IPPROTO_AH: { 5103ed1f50c3SPaul Durrant struct ip_auth_hdr *hp; 5104ed1f50c3SPaul Durrant 5105ed1f50c3SPaul Durrant err = skb_maybe_pull_tail(skb, 5106ed1f50c3SPaul Durrant off + 5107ed1f50c3SPaul Durrant sizeof(struct ip_auth_hdr), 5108ed1f50c3SPaul Durrant MAX_IPV6_HDR_LEN); 5109ed1f50c3SPaul Durrant if (err < 0) 5110ed1f50c3SPaul Durrant goto out; 5111ed1f50c3SPaul Durrant 5112ed1f50c3SPaul Durrant hp = OPT_HDR(struct ip_auth_hdr, skb, off); 5113ed1f50c3SPaul Durrant nexthdr = hp->nexthdr; 5114ed1f50c3SPaul Durrant off += ipv6_authlen(hp); 5115ed1f50c3SPaul Durrant break; 5116ed1f50c3SPaul Durrant } 5117ed1f50c3SPaul Durrant case IPPROTO_FRAGMENT: { 5118ed1f50c3SPaul Durrant struct frag_hdr *hp; 5119ed1f50c3SPaul Durrant 5120ed1f50c3SPaul Durrant err = skb_maybe_pull_tail(skb, 5121ed1f50c3SPaul Durrant off + 5122ed1f50c3SPaul Durrant sizeof(struct frag_hdr), 5123ed1f50c3SPaul Durrant MAX_IPV6_HDR_LEN); 5124ed1f50c3SPaul Durrant if (err < 0) 5125ed1f50c3SPaul Durrant goto out; 5126ed1f50c3SPaul Durrant 5127ed1f50c3SPaul Durrant hp = OPT_HDR(struct frag_hdr, skb, off); 5128ed1f50c3SPaul Durrant 5129ed1f50c3SPaul Durrant if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 5130ed1f50c3SPaul Durrant fragment = true; 5131ed1f50c3SPaul Durrant 5132ed1f50c3SPaul Durrant nexthdr = hp->nexthdr; 5133ed1f50c3SPaul Durrant off += sizeof(struct frag_hdr); 5134ed1f50c3SPaul Durrant break; 5135ed1f50c3SPaul Durrant } 
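		/* Any other nexthdr is taken to be the transport protocol
		 * whose checksum we are setting up; stop walking here
		 * (the default case below ends the loop).
		 */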
5136ed1f50c3SPaul Durrant default: 5137ed1f50c3SPaul Durrant done = true; 5138ed1f50c3SPaul Durrant break; 5139ed1f50c3SPaul Durrant } 5140ed1f50c3SPaul Durrant } 5141ed1f50c3SPaul Durrant 5142ed1f50c3SPaul Durrant err = -EPROTO; 5143ed1f50c3SPaul Durrant 5144ed1f50c3SPaul Durrant if (!done || fragment) 5145ed1f50c3SPaul Durrant goto out; 5146ed1f50c3SPaul Durrant 5147f9708b43SJan Beulich csum = skb_checksum_setup_ip(skb, nexthdr, off); 5148f9708b43SJan Beulich if (IS_ERR(csum)) 5149f9708b43SJan Beulich return PTR_ERR(csum); 5150ed1f50c3SPaul Durrant 5151ed1f50c3SPaul Durrant if (recalculate) 5152f9708b43SJan Beulich *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5153ed1f50c3SPaul Durrant &ipv6_hdr(skb)->daddr, 5154f9708b43SJan Beulich skb->len - off, nexthdr, 0); 5155ed1f50c3SPaul Durrant err = 0; 5156ed1f50c3SPaul Durrant 5157ed1f50c3SPaul Durrant out: 5158ed1f50c3SPaul Durrant return err; 5159ed1f50c3SPaul Durrant } 5160ed1f50c3SPaul Durrant 5161ed1f50c3SPaul Durrant /** 5162ed1f50c3SPaul Durrant * skb_checksum_setup - set up partial checksum offset 5163ed1f50c3SPaul Durrant * @skb: the skb to set up 5164ed1f50c3SPaul Durrant * @recalculate: if true the pseudo-header checksum will be recalculated 5165ed1f50c3SPaul Durrant */ 5166ed1f50c3SPaul Durrant int skb_checksum_setup(struct sk_buff *skb, bool recalculate) 5167ed1f50c3SPaul Durrant { 5168ed1f50c3SPaul Durrant int err; 5169ed1f50c3SPaul Durrant 5170ed1f50c3SPaul Durrant switch (skb->protocol) { 5171ed1f50c3SPaul Durrant case htons(ETH_P_IP): 5172f9708b43SJan Beulich err = skb_checksum_setup_ipv4(skb, recalculate); 5173ed1f50c3SPaul Durrant break; 5174ed1f50c3SPaul Durrant 5175ed1f50c3SPaul Durrant case htons(ETH_P_IPV6): 5176ed1f50c3SPaul Durrant err = skb_checksum_setup_ipv6(skb, recalculate); 5177ed1f50c3SPaul Durrant break; 5178ed1f50c3SPaul Durrant 5179ed1f50c3SPaul Durrant default: 5180ed1f50c3SPaul Durrant err = -EPROTO; 5181ed1f50c3SPaul Durrant break; 5182ed1f50c3SPaul Durrant } 5183ed1f50c3SPaul Durrant 5184ed1f50c3SPaul Durrant return err; 5185ed1f50c3SPaul Durrant } 5186ed1f50c3SPaul Durrant EXPORT_SYMBOL(skb_checksum_setup); 5187ed1f50c3SPaul Durrant 51889afd85c9SLinus Lüssing /** 51899afd85c9SLinus Lüssing * skb_checksum_maybe_trim - maybe trims the given skb 51909afd85c9SLinus Lüssing * @skb: the skb to check 51919afd85c9SLinus Lüssing * @transport_len: the data length beyond the network header 51929afd85c9SLinus Lüssing * 51939afd85c9SLinus Lüssing * Checks whether the given skb has data beyond the given transport length. 51949afd85c9SLinus Lüssing * If so, returns a cloned skb trimmed to this transport length. 51959afd85c9SLinus Lüssing * Otherwise returns the provided skb. Returns NULL in error cases 51969afd85c9SLinus Lüssing * (e.g. transport_len exceeds skb length or out-of-memory). 51979afd85c9SLinus Lüssing * 5198a516993fSLinus Lüssing * Caller needs to set the skb transport header and free any returned skb if it 5199a516993fSLinus Lüssing * differs from the provided skb. 
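 *
 * i.e. a caller is expected to clean up along these lines (sketch):
 *
 *	skb_chk = skb_checksum_maybe_trim(skb, transport_len);
 *	...
 *	if (skb_chk && skb_chk != skb)
 *		kfree_skb(skb_chk);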
52009afd85c9SLinus Lüssing */ 52019afd85c9SLinus Lüssing static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 52029afd85c9SLinus Lüssing unsigned int transport_len) 52039afd85c9SLinus Lüssing { 52049afd85c9SLinus Lüssing struct sk_buff *skb_chk; 52059afd85c9SLinus Lüssing unsigned int len = skb_transport_offset(skb) + transport_len; 52069afd85c9SLinus Lüssing int ret; 52079afd85c9SLinus Lüssing 5208a516993fSLinus Lüssing if (skb->len < len) 52099afd85c9SLinus Lüssing return NULL; 5210a516993fSLinus Lüssing else if (skb->len == len) 52119afd85c9SLinus Lüssing return skb; 52129afd85c9SLinus Lüssing 52139afd85c9SLinus Lüssing skb_chk = skb_clone(skb, GFP_ATOMIC); 52149afd85c9SLinus Lüssing if (!skb_chk) 52159afd85c9SLinus Lüssing return NULL; 52169afd85c9SLinus Lüssing 52179afd85c9SLinus Lüssing ret = pskb_trim_rcsum(skb_chk, len); 52189afd85c9SLinus Lüssing if (ret) { 52199afd85c9SLinus Lüssing kfree_skb(skb_chk); 52209afd85c9SLinus Lüssing return NULL; 52219afd85c9SLinus Lüssing } 52229afd85c9SLinus Lüssing 52239afd85c9SLinus Lüssing return skb_chk; 52249afd85c9SLinus Lüssing } 52259afd85c9SLinus Lüssing 52269afd85c9SLinus Lüssing /** 52279afd85c9SLinus Lüssing * skb_checksum_trimmed - validate checksum of an skb 52289afd85c9SLinus Lüssing * @skb: the skb to check 52299afd85c9SLinus Lüssing * @transport_len: the data length beyond the network header 52309afd85c9SLinus Lüssing * @skb_chkf: checksum function to use 52319afd85c9SLinus Lüssing * 52329afd85c9SLinus Lüssing * Applies the given checksum function skb_chkf to the provided skb. 52339afd85c9SLinus Lüssing * Returns a checked and maybe trimmed skb. Returns NULL on error. 52349afd85c9SLinus Lüssing * 52359afd85c9SLinus Lüssing * If the skb has data beyond the given transport length, then a 52369afd85c9SLinus Lüssing * trimmed & cloned skb is checked and returned. 52379afd85c9SLinus Lüssing * 5238a516993fSLinus Lüssing * Caller needs to set the skb transport header and free any returned skb if it 5239a516993fSLinus Lüssing * differs from the provided skb. 
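 *
 * A caller sketch in the style of the IGMP snooping helpers (the
 * checksum routine named here belongs to the IPv4 IGMP code and is
 * shown purely for illustration):
 *
 *	skb_chk = skb_checksum_trimmed(skb, transport_len,
 *				       ip_mc_validate_checksum);
 *	if (!skb_chk)
 *		return -EINVAL;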
52409afd85c9SLinus Lüssing */ 52419afd85c9SLinus Lüssing struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 52429afd85c9SLinus Lüssing unsigned int transport_len, 52439afd85c9SLinus Lüssing __sum16(*skb_chkf)(struct sk_buff *skb)) 52449afd85c9SLinus Lüssing { 52459afd85c9SLinus Lüssing struct sk_buff *skb_chk; 52469afd85c9SLinus Lüssing unsigned int offset = skb_transport_offset(skb); 5247fcba67c9SLinus Lüssing __sum16 ret; 52489afd85c9SLinus Lüssing 52499afd85c9SLinus Lüssing skb_chk = skb_checksum_maybe_trim(skb, transport_len); 52509afd85c9SLinus Lüssing if (!skb_chk) 5251a516993fSLinus Lüssing goto err; 52529afd85c9SLinus Lüssing 5253a516993fSLinus Lüssing if (!pskb_may_pull(skb_chk, offset)) 5254a516993fSLinus Lüssing goto err; 52559afd85c9SLinus Lüssing 52569b368814SLinus Lüssing skb_pull_rcsum(skb_chk, offset); 52579afd85c9SLinus Lüssing ret = skb_chkf(skb_chk); 52589b368814SLinus Lüssing skb_push_rcsum(skb_chk, offset); 52599afd85c9SLinus Lüssing 5260a516993fSLinus Lüssing if (ret) 5261a516993fSLinus Lüssing goto err; 52629afd85c9SLinus Lüssing 52639afd85c9SLinus Lüssing return skb_chk; 5264a516993fSLinus Lüssing 5265a516993fSLinus Lüssing err: 5266a516993fSLinus Lüssing if (skb_chk && skb_chk != skb) 5267a516993fSLinus Lüssing kfree_skb(skb_chk); 5268a516993fSLinus Lüssing 5269a516993fSLinus Lüssing return NULL; 5270a516993fSLinus Lüssing 52719afd85c9SLinus Lüssing } 52729afd85c9SLinus Lüssing EXPORT_SYMBOL(skb_checksum_trimmed); 52739afd85c9SLinus Lüssing 52744497b076SBen Hutchings void __skb_warn_lro_forwarding(const struct sk_buff *skb) 52754497b076SBen Hutchings { 5276e87cc472SJoe Perches net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 5277e87cc472SJoe Perches skb->dev->name); 52784497b076SBen Hutchings } 52794497b076SBen Hutchings EXPORT_SYMBOL(__skb_warn_lro_forwarding); 5280bad43ca8SEric Dumazet 5281bad43ca8SEric Dumazet void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 5282bad43ca8SEric Dumazet { 52833d861f66SEric Dumazet if (head_stolen) { 52843d861f66SEric Dumazet skb_release_head_state(skb); 5285bad43ca8SEric Dumazet kmem_cache_free(skbuff_head_cache, skb); 52863d861f66SEric Dumazet } else { 5287bad43ca8SEric Dumazet __kfree_skb(skb); 5288bad43ca8SEric Dumazet } 52893d861f66SEric Dumazet } 5290bad43ca8SEric Dumazet EXPORT_SYMBOL(kfree_skb_partial); 5291bad43ca8SEric Dumazet 5292bad43ca8SEric Dumazet /** 5293bad43ca8SEric Dumazet * skb_try_coalesce - try to merge skb to prior one 5294bad43ca8SEric Dumazet * @to: prior buffer 5295bad43ca8SEric Dumazet * @from: buffer to add 5296bad43ca8SEric Dumazet * @fragstolen: pointer to boolean 5297c6c4b97cSRandy Dunlap * @delta_truesize: how much more was allocated than was requested 5298bad43ca8SEric Dumazet */ 5299bad43ca8SEric Dumazet bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 5300bad43ca8SEric Dumazet bool *fragstolen, int *delta_truesize) 5301bad43ca8SEric Dumazet { 5302c818fa9eSEric Dumazet struct skb_shared_info *to_shinfo, *from_shinfo; 5303bad43ca8SEric Dumazet int i, delta, len = from->len; 5304bad43ca8SEric Dumazet 5305bad43ca8SEric Dumazet *fragstolen = false; 5306bad43ca8SEric Dumazet 5307bad43ca8SEric Dumazet if (skb_cloned(to)) 5308bad43ca8SEric Dumazet return false; 5309bad43ca8SEric Dumazet 53106a5bcd84SIlias Apalodimas /* The page pool signature of struct page will eventually figure out 53116a5bcd84SIlias Apalodimas * which pages can be recycled or not but for now let's prohibit slab 53126a5bcd84SIlias Apalodimas * allocated and 
page_pool allocated SKBs from being coalesced. 53136a5bcd84SIlias Apalodimas */ 53146a5bcd84SIlias Apalodimas if (to->pp_recycle != from->pp_recycle) 53156a5bcd84SIlias Apalodimas return false; 53166a5bcd84SIlias Apalodimas 5317bad43ca8SEric Dumazet if (len <= skb_tailroom(to)) { 5318e93a0435SEric Dumazet if (len) 5319bad43ca8SEric Dumazet BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 5320bad43ca8SEric Dumazet *delta_truesize = 0; 5321bad43ca8SEric Dumazet return true; 5322bad43ca8SEric Dumazet } 5323bad43ca8SEric Dumazet 5324c818fa9eSEric Dumazet to_shinfo = skb_shinfo(to); 5325c818fa9eSEric Dumazet from_shinfo = skb_shinfo(from); 5326c818fa9eSEric Dumazet if (to_shinfo->frag_list || from_shinfo->frag_list) 5327bad43ca8SEric Dumazet return false; 53281f8b977aSWillem de Bruijn if (skb_zcopy(to) || skb_zcopy(from)) 53291f8b977aSWillem de Bruijn return false; 5330bad43ca8SEric Dumazet 5331bad43ca8SEric Dumazet if (skb_headlen(from) != 0) { 5332bad43ca8SEric Dumazet struct page *page; 5333bad43ca8SEric Dumazet unsigned int offset; 5334bad43ca8SEric Dumazet 5335c818fa9eSEric Dumazet if (to_shinfo->nr_frags + 5336c818fa9eSEric Dumazet from_shinfo->nr_frags >= MAX_SKB_FRAGS) 5337bad43ca8SEric Dumazet return false; 5338bad43ca8SEric Dumazet 5339bad43ca8SEric Dumazet if (skb_head_is_locked(from)) 5340bad43ca8SEric Dumazet return false; 5341bad43ca8SEric Dumazet 5342bad43ca8SEric Dumazet delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 5343bad43ca8SEric Dumazet 5344bad43ca8SEric Dumazet page = virt_to_head_page(from->head); 5345bad43ca8SEric Dumazet offset = from->data - (unsigned char *)page_address(page); 5346bad43ca8SEric Dumazet 5347c818fa9eSEric Dumazet skb_fill_page_desc(to, to_shinfo->nr_frags, 5348bad43ca8SEric Dumazet page, offset, skb_headlen(from)); 5349bad43ca8SEric Dumazet *fragstolen = true; 5350bad43ca8SEric Dumazet } else { 5351c818fa9eSEric Dumazet if (to_shinfo->nr_frags + 5352c818fa9eSEric Dumazet from_shinfo->nr_frags > MAX_SKB_FRAGS) 5353bad43ca8SEric Dumazet return false; 5354bad43ca8SEric Dumazet 5355f4b549a5SWeiping Pan delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); 5356bad43ca8SEric Dumazet } 5357bad43ca8SEric Dumazet 5358bad43ca8SEric Dumazet WARN_ON_ONCE(delta < len); 5359bad43ca8SEric Dumazet 5360c818fa9eSEric Dumazet memcpy(to_shinfo->frags + to_shinfo->nr_frags, 5361c818fa9eSEric Dumazet from_shinfo->frags, 5362c818fa9eSEric Dumazet from_shinfo->nr_frags * sizeof(skb_frag_t)); 5363c818fa9eSEric Dumazet to_shinfo->nr_frags += from_shinfo->nr_frags; 5364bad43ca8SEric Dumazet 5365bad43ca8SEric Dumazet if (!skb_cloned(from)) 5366c818fa9eSEric Dumazet from_shinfo->nr_frags = 0; 5367bad43ca8SEric Dumazet 53688ea853fdSLi RongQing /* if the skb is not cloned this does nothing 53698ea853fdSLi RongQing * since we set nr_frags to 0. 
53708ea853fdSLi RongQing */ 5371c818fa9eSEric Dumazet for (i = 0; i < from_shinfo->nr_frags; i++) 5372c818fa9eSEric Dumazet __skb_frag_ref(&from_shinfo->frags[i]); 5373bad43ca8SEric Dumazet 5374bad43ca8SEric Dumazet to->truesize += delta; 5375bad43ca8SEric Dumazet to->len += len; 5376bad43ca8SEric Dumazet to->data_len += len; 5377bad43ca8SEric Dumazet 5378bad43ca8SEric Dumazet *delta_truesize = delta; 5379bad43ca8SEric Dumazet return true; 5380bad43ca8SEric Dumazet } 5381bad43ca8SEric Dumazet EXPORT_SYMBOL(skb_try_coalesce); 5382621e84d6SNicolas Dichtel 
5383621e84d6SNicolas Dichtel /** 53848b27f277SNicolas Dichtel * skb_scrub_packet - scrub an skb 5385621e84d6SNicolas Dichtel * 5386621e84d6SNicolas Dichtel * @skb: buffer to clean 53878b27f277SNicolas Dichtel * @xnet: packet is crossing netns 5388621e84d6SNicolas Dichtel * 53898b27f277SNicolas Dichtel * skb_scrub_packet can be used after encapsulating or decapsulating a packet 53908b27f277SNicolas Dichtel * into/from a tunnel. Some information has to be cleared during these 53918b27f277SNicolas Dichtel * operations. 53928b27f277SNicolas Dichtel * skb_scrub_packet can also be used to clean an skb before injecting it into 53938b27f277SNicolas Dichtel * another namespace (@xnet == true). We have to clear all information in the 53948b27f277SNicolas Dichtel * skb that could impact namespace isolation. 5395621e84d6SNicolas Dichtel */ 
53968b27f277SNicolas Dichtel void skb_scrub_packet(struct sk_buff *skb, bool xnet) 5397621e84d6SNicolas Dichtel { 5398621e84d6SNicolas Dichtel skb->pkt_type = PACKET_HOST; 5399621e84d6SNicolas Dichtel skb->skb_iif = 0; 540060ff7467SWANG Cong skb->ignore_df = 0; 5401621e84d6SNicolas Dichtel skb_dst_drop(skb); 5402174e2381SFlorian Westphal skb_ext_reset(skb); 5403895b5c9fSFlorian Westphal nf_reset_ct(skb); 5404621e84d6SNicolas Dichtel nf_reset_trace(skb); 5405213dd74aSHerbert Xu 
54066f9a5069SPetr Machata #ifdef CONFIG_NET_SWITCHDEV 54076f9a5069SPetr Machata skb->offload_fwd_mark = 0; 5408875e8939SIdo Schimmel skb->offload_l3_fwd_mark = 0; 54096f9a5069SPetr Machata #endif 54106f9a5069SPetr Machata 5411213dd74aSHerbert Xu if (!xnet) 5412213dd74aSHerbert Xu return; 5413213dd74aSHerbert Xu 54142b5ec1a5SYe Yin ipvs_reset(skb); 5415213dd74aSHerbert Xu skb->mark = 0; 5416c47d8c2fSJesus Sanchez-Palencia skb->tstamp = 0; 5417621e84d6SNicolas Dichtel } 5418621e84d6SNicolas Dichtel EXPORT_SYMBOL_GPL(skb_scrub_packet); 5419de960aa9SFlorian Westphal 
5420de960aa9SFlorian Westphal /** 5421de960aa9SFlorian Westphal * skb_gso_transport_seglen - Return length of individual segments of a gso packet 5422de960aa9SFlorian Westphal * 5423de960aa9SFlorian Westphal * @skb: GSO skb 5424de960aa9SFlorian Westphal * 5425de960aa9SFlorian Westphal * skb_gso_transport_seglen is used to determine the real size of the 5426de960aa9SFlorian Westphal * individual segments, including Layer4 headers (TCP/UDP). 5427de960aa9SFlorian Westphal * 5428de960aa9SFlorian Westphal * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
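 *
 * For example (numbers purely illustrative): a TCP GSO skb with
 * gso_size 1448 and a 32-byte TCP header (20 bytes base plus 12 bytes
 * of timestamp option) yields 32 + 1448 = 1480 bytes per segment.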
5429de960aa9SFlorian Westphal */ 5430a4a77718SDaniel Axtens static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) 5431de960aa9SFlorian Westphal { 5432de960aa9SFlorian Westphal const struct skb_shared_info *shinfo = skb_shinfo(skb); 5433f993bc25SFlorian Westphal unsigned int thlen = 0; 5434f993bc25SFlorian Westphal 5435f993bc25SFlorian Westphal if (skb->encapsulation) { 5436f993bc25SFlorian Westphal thlen = skb_inner_transport_header(skb) - 5437f993bc25SFlorian Westphal skb_transport_header(skb); 5438de960aa9SFlorian Westphal 5439de960aa9SFlorian Westphal if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 5440f993bc25SFlorian Westphal thlen += inner_tcp_hdrlen(skb); 5441f993bc25SFlorian Westphal } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 5442f993bc25SFlorian Westphal thlen = tcp_hdrlen(skb); 54431dd27cdeSDaniel Axtens } else if (unlikely(skb_is_gso_sctp(skb))) { 544490017accSMarcelo Ricardo Leitner thlen = sizeof(struct sctphdr); 5445ee80d1ebSWillem de Bruijn } else if (shinfo->gso_type & SKB_GSO_UDP_L4) { 5446ee80d1ebSWillem de Bruijn thlen = sizeof(struct udphdr); 5447f993bc25SFlorian Westphal } 54486d39d589SFlorian Westphal /* UFO sets gso_size to the size of the fragmentation 54496d39d589SFlorian Westphal * payload, i.e. the size of the L4 (UDP) header is already 54506d39d589SFlorian Westphal * accounted for. 54516d39d589SFlorian Westphal */ 5452f993bc25SFlorian Westphal return thlen + shinfo->gso_size; 5453de960aa9SFlorian Westphal } 5454a4a77718SDaniel Axtens 5455a4a77718SDaniel Axtens /** 5456a4a77718SDaniel Axtens * skb_gso_network_seglen - Return length of individual segments of a gso packet 5457a4a77718SDaniel Axtens * 5458a4a77718SDaniel Axtens * @skb: GSO skb 5459a4a77718SDaniel Axtens * 5460a4a77718SDaniel Axtens * skb_gso_network_seglen is used to determine the real size of the 5461a4a77718SDaniel Axtens * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). 5462a4a77718SDaniel Axtens * 5463a4a77718SDaniel Axtens * The MAC/L2 header is not accounted for. 5464a4a77718SDaniel Axtens */ 5465a4a77718SDaniel Axtens static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) 5466a4a77718SDaniel Axtens { 5467a4a77718SDaniel Axtens unsigned int hdr_len = skb_transport_header(skb) - 5468a4a77718SDaniel Axtens skb_network_header(skb); 5469a4a77718SDaniel Axtens 5470a4a77718SDaniel Axtens return hdr_len + skb_gso_transport_seglen(skb); 5471a4a77718SDaniel Axtens } 5472a4a77718SDaniel Axtens 5473a4a77718SDaniel Axtens /** 5474a4a77718SDaniel Axtens * skb_gso_mac_seglen - Return length of individual segments of a gso packet 5475a4a77718SDaniel Axtens * 5476a4a77718SDaniel Axtens * @skb: GSO skb 5477a4a77718SDaniel Axtens * 5478a4a77718SDaniel Axtens * skb_gso_mac_seglen is used to determine the real size of the 5479a4a77718SDaniel Axtens * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 5480a4a77718SDaniel Axtens * headers (TCP/UDP). 
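 *
 * For example (numbers purely illustrative): a 14-byte Ethernet header
 * plus 20-byte IPv4 and TCP headers give hdr_len = 34, so gso_size 1448
 * results in 34 + 20 + 1448 = 1502 bytes on the wire per segment.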
5481a4a77718SDaniel Axtens */ 5482a4a77718SDaniel Axtens static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) 5483a4a77718SDaniel Axtens { 5484a4a77718SDaniel Axtens unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 5485a4a77718SDaniel Axtens 5486a4a77718SDaniel Axtens return hdr_len + skb_gso_transport_seglen(skb); 5487a4a77718SDaniel Axtens } 54880d5501c1SVlad Yasevich 5489ae7ef81eSMarcelo Ricardo Leitner /** 54902b16f048SDaniel Axtens * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS 54912b16f048SDaniel Axtens * 54922b16f048SDaniel Axtens * There are a couple of instances where we have a GSO skb, and we 54932b16f048SDaniel Axtens * want to determine what size it would be after it is segmented. 54942b16f048SDaniel Axtens * 54952b16f048SDaniel Axtens * We might want to check: 54962b16f048SDaniel Axtens * - L3+L4+payload size (e.g. IP forwarding) 54972b16f048SDaniel Axtens * - L2+L3+L4+payload size (e.g. sanity check before passing to driver) 54982b16f048SDaniel Axtens * 54992b16f048SDaniel Axtens * This is a helper to do that correctly considering GSO_BY_FRAGS. 55002b16f048SDaniel Axtens * 550149682bfaSMathieu Malaterre * @skb: GSO skb 550249682bfaSMathieu Malaterre * 55032b16f048SDaniel Axtens * @seg_len: The segmented length (from skb_gso_*_seglen). In the 55042b16f048SDaniel Axtens * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS]. 55052b16f048SDaniel Axtens * 55062b16f048SDaniel Axtens * @max_len: The maximum permissible length. 55072b16f048SDaniel Axtens * 55082b16f048SDaniel Axtens * Returns true if the segmented length <= max length. 55092b16f048SDaniel Axtens */ 55102b16f048SDaniel Axtens static inline bool skb_gso_size_check(const struct sk_buff *skb, 55112b16f048SDaniel Axtens unsigned int seg_len, 55122b16f048SDaniel Axtens unsigned int max_len) { 55132b16f048SDaniel Axtens const struct skb_shared_info *shinfo = skb_shinfo(skb); 55142b16f048SDaniel Axtens const struct sk_buff *iter; 55152b16f048SDaniel Axtens 55162b16f048SDaniel Axtens if (shinfo->gso_size != GSO_BY_FRAGS) 55172b16f048SDaniel Axtens return seg_len <= max_len; 55182b16f048SDaniel Axtens 55192b16f048SDaniel Axtens /* Undo this so we can re-use header sizes */ 55202b16f048SDaniel Axtens seg_len -= GSO_BY_FRAGS; 55212b16f048SDaniel Axtens 55222b16f048SDaniel Axtens skb_walk_frags(skb, iter) { 55232b16f048SDaniel Axtens if (seg_len + skb_headlen(iter) > max_len) 55242b16f048SDaniel Axtens return false; 55252b16f048SDaniel Axtens } 55262b16f048SDaniel Axtens 55272b16f048SDaniel Axtens return true; 55282b16f048SDaniel Axtens } 55292b16f048SDaniel Axtens 55302b16f048SDaniel Axtens /** 5531779b7931SDaniel Axtens * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU? 5532ae7ef81eSMarcelo Ricardo Leitner * 5533ae7ef81eSMarcelo Ricardo Leitner * @skb: GSO skb 553476f21b99SDavid S. Miller * @mtu: MTU to validate against 5535ae7ef81eSMarcelo Ricardo Leitner * 5536779b7931SDaniel Axtens * skb_gso_validate_network_len validates if a given skb will fit a 5537779b7931SDaniel Axtens * wanted MTU once split. It considers L3 headers, L4 headers, and the 5538779b7931SDaniel Axtens * payload. 
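 *
 * Typical use is an MTU check on the forwarding path, along the lines
 * of the IPv4 forwarding code (sketch):
 *
 *	if (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))
 *		goto drop;	// segments would not fit @mtu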
5539ae7ef81eSMarcelo Ricardo Leitner */ 5540779b7931SDaniel Axtens bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) 5541ae7ef81eSMarcelo Ricardo Leitner { 55422b16f048SDaniel Axtens return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); 5543ae7ef81eSMarcelo Ricardo Leitner } 5544779b7931SDaniel Axtens EXPORT_SYMBOL_GPL(skb_gso_validate_network_len); 5545ae7ef81eSMarcelo Ricardo Leitner 55462b16f048SDaniel Axtens /** 55472b16f048SDaniel Axtens * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? 55482b16f048SDaniel Axtens * 55492b16f048SDaniel Axtens * @skb: GSO skb 55502b16f048SDaniel Axtens * @len: length to validate against 55512b16f048SDaniel Axtens * 55522b16f048SDaniel Axtens * skb_gso_validate_mac_len validates if a given skb will fit a wanted 55532b16f048SDaniel Axtens * length once split, including L2, L3 and L4 headers and the payload. 55542b16f048SDaniel Axtens */ 55552b16f048SDaniel Axtens bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len) 55562b16f048SDaniel Axtens { 55572b16f048SDaniel Axtens return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len); 55582b16f048SDaniel Axtens } 55592b16f048SDaniel Axtens EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len); 55602b16f048SDaniel Axtens 55610d5501c1SVlad Yasevich static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 55620d5501c1SVlad Yasevich { 5563d85e8be2SYuya Kusakabe int mac_len, meta_len; 5564d85e8be2SYuya Kusakabe void *meta; 55654bbb3e0eSToshiaki Makita 55660d5501c1SVlad Yasevich if (skb_cow(skb, skb_headroom(skb)) < 0) { 55670d5501c1SVlad Yasevich kfree_skb(skb); 55680d5501c1SVlad Yasevich return NULL; 55690d5501c1SVlad Yasevich } 55700d5501c1SVlad Yasevich 55714bbb3e0eSToshiaki Makita mac_len = skb->data - skb_mac_header(skb); 5572ae474573SToshiaki Makita if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { 55734bbb3e0eSToshiaki Makita memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), 55744bbb3e0eSToshiaki Makita mac_len - VLAN_HLEN - ETH_TLEN); 5575ae474573SToshiaki Makita } 5576d85e8be2SYuya Kusakabe 5577d85e8be2SYuya Kusakabe meta_len = skb_metadata_len(skb); 5578d85e8be2SYuya Kusakabe if (meta_len) { 5579d85e8be2SYuya Kusakabe meta = skb_metadata_end(skb) - meta_len; 5580d85e8be2SYuya Kusakabe memmove(meta + VLAN_HLEN, meta, meta_len); 5581d85e8be2SYuya Kusakabe } 5582d85e8be2SYuya Kusakabe 55830d5501c1SVlad Yasevich skb->mac_header += VLAN_HLEN; 55840d5501c1SVlad Yasevich return skb; 55850d5501c1SVlad Yasevich } 55860d5501c1SVlad Yasevich 55870d5501c1SVlad Yasevich struct sk_buff *skb_vlan_untag(struct sk_buff *skb) 55880d5501c1SVlad Yasevich { 55890d5501c1SVlad Yasevich struct vlan_hdr *vhdr; 55900d5501c1SVlad Yasevich u16 vlan_tci; 55910d5501c1SVlad Yasevich 5592df8a39deSJiri Pirko if (unlikely(skb_vlan_tag_present(skb))) { 55930d5501c1SVlad Yasevich /* vlan_tci is already set-up so leave this for another time */ 55940d5501c1SVlad Yasevich return skb; 55950d5501c1SVlad Yasevich } 55960d5501c1SVlad Yasevich 55970d5501c1SVlad Yasevich skb = skb_share_check(skb, GFP_ATOMIC); 55980d5501c1SVlad Yasevich if (unlikely(!skb)) 55990d5501c1SVlad Yasevich goto err_free; 560055eff0ebSMiaohe Lin /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). 
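 * That is why the pskb_may_pull() below asks for VLAN_HLEN plus
 * sizeof(unsigned short) rather than just VLAN_HLEN.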
*/ 560155eff0ebSMiaohe Lin if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) 56020d5501c1SVlad Yasevich goto err_free; 56030d5501c1SVlad Yasevich 56040d5501c1SVlad Yasevich vhdr = (struct vlan_hdr *)skb->data; 56050d5501c1SVlad Yasevich vlan_tci = ntohs(vhdr->h_vlan_TCI); 56060d5501c1SVlad Yasevich __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); 56070d5501c1SVlad Yasevich 56080d5501c1SVlad Yasevich skb_pull_rcsum(skb, VLAN_HLEN); 56090d5501c1SVlad Yasevich vlan_set_encap_proto(skb, vhdr); 56100d5501c1SVlad Yasevich 56110d5501c1SVlad Yasevich skb = skb_reorder_vlan_header(skb); 56120d5501c1SVlad Yasevich if (unlikely(!skb)) 56130d5501c1SVlad Yasevich goto err_free; 56140d5501c1SVlad Yasevich 56150d5501c1SVlad Yasevich skb_reset_network_header(skb); 56168be33ecfSAlexander Lobakin if (!skb_transport_header_was_set(skb)) 56170d5501c1SVlad Yasevich skb_reset_transport_header(skb); 56180d5501c1SVlad Yasevich skb_reset_mac_len(skb); 56190d5501c1SVlad Yasevich 56200d5501c1SVlad Yasevich return skb; 56210d5501c1SVlad Yasevich 56220d5501c1SVlad Yasevich err_free: 56230d5501c1SVlad Yasevich kfree_skb(skb); 56240d5501c1SVlad Yasevich return NULL; 56250d5501c1SVlad Yasevich } 56260d5501c1SVlad Yasevich EXPORT_SYMBOL(skb_vlan_untag); 56272e4e4410SEric Dumazet 5628e2195121SJiri Pirko int skb_ensure_writable(struct sk_buff *skb, int write_len) 5629e2195121SJiri Pirko { 5630e2195121SJiri Pirko if (!pskb_may_pull(skb, write_len)) 5631e2195121SJiri Pirko return -ENOMEM; 5632e2195121SJiri Pirko 5633e2195121SJiri Pirko if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) 5634e2195121SJiri Pirko return 0; 5635e2195121SJiri Pirko 5636e2195121SJiri Pirko return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 5637e2195121SJiri Pirko } 5638e2195121SJiri Pirko EXPORT_SYMBOL(skb_ensure_writable); 5639e2195121SJiri Pirko 5640bfca4c52SShmulik Ladkani /* remove VLAN header from packet and update csum accordingly. 
5641bfca4c52SShmulik Ladkani * expects an skb with skb_vlan_tag_present() false, i.e. the VLAN tag still in the payload 5642bfca4c52SShmulik Ladkani */ 5643bfca4c52SShmulik Ladkani int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) 564493515d53SJiri Pirko { 564593515d53SJiri Pirko struct vlan_hdr *vhdr; 5646b6a79208SShmulik Ladkani int offset = skb->data - skb_mac_header(skb); 564793515d53SJiri Pirko int err; 564893515d53SJiri Pirko 5649b6a79208SShmulik Ladkani if (WARN_ONCE(offset, 5650b6a79208SShmulik Ladkani "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", 5651b6a79208SShmulik Ladkani offset)) { 5652b6a79208SShmulik Ladkani return -EINVAL; 5653b6a79208SShmulik Ladkani } 5654b6a79208SShmulik Ladkani 565593515d53SJiri Pirko err = skb_ensure_writable(skb, VLAN_ETH_HLEN); 565693515d53SJiri Pirko if (unlikely(err)) 5657b6a79208SShmulik Ladkani return err; 565893515d53SJiri Pirko 565993515d53SJiri Pirko skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 566093515d53SJiri Pirko 566193515d53SJiri Pirko vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); 566293515d53SJiri Pirko *vlan_tci = ntohs(vhdr->h_vlan_TCI); 566393515d53SJiri Pirko 566493515d53SJiri Pirko memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); 566593515d53SJiri Pirko __skb_pull(skb, VLAN_HLEN); 566693515d53SJiri Pirko 566793515d53SJiri Pirko vlan_set_encap_proto(skb, vhdr); 566893515d53SJiri Pirko skb->mac_header += VLAN_HLEN; 566993515d53SJiri Pirko 567093515d53SJiri Pirko if (skb_network_offset(skb) < ETH_HLEN) 567193515d53SJiri Pirko skb_set_network_header(skb, ETH_HLEN); 567293515d53SJiri Pirko 567393515d53SJiri Pirko skb_reset_mac_len(skb); 567493515d53SJiri Pirko 567593515d53SJiri Pirko return err; 567693515d53SJiri Pirko } 5677bfca4c52SShmulik Ladkani EXPORT_SYMBOL(__skb_vlan_pop); 567893515d53SJiri Pirko 5679b6a79208SShmulik Ladkani /* Pop a vlan tag either from hwaccel or from payload. 5680b6a79208SShmulik Ladkani * Expects skb->data at mac header. 5681b6a79208SShmulik Ladkani */ 568293515d53SJiri Pirko int skb_vlan_pop(struct sk_buff *skb) 568393515d53SJiri Pirko { 568493515d53SJiri Pirko u16 vlan_tci; 568593515d53SJiri Pirko __be16 vlan_proto; 568693515d53SJiri Pirko int err; 568793515d53SJiri Pirko 5688df8a39deSJiri Pirko if (likely(skb_vlan_tag_present(skb))) { 5689b1817524SMichał Mirosław __vlan_hwaccel_clear_tag(skb); 569093515d53SJiri Pirko } else { 5691ecf4ee41SShmulik Ladkani if (unlikely(!eth_type_vlan(skb->protocol))) 569293515d53SJiri Pirko return 0; 569393515d53SJiri Pirko 569493515d53SJiri Pirko err = __skb_vlan_pop(skb, &vlan_tci); 569593515d53SJiri Pirko if (err) 569693515d53SJiri Pirko return err; 569793515d53SJiri Pirko } 569893515d53SJiri Pirko /* move next vlan tag to hw accel tag */ 5699ecf4ee41SShmulik Ladkani if (likely(!eth_type_vlan(skb->protocol))) 570093515d53SJiri Pirko return 0; 570193515d53SJiri Pirko 570293515d53SJiri Pirko vlan_proto = skb->protocol; 570393515d53SJiri Pirko err = __skb_vlan_pop(skb, &vlan_tci); 570493515d53SJiri Pirko if (unlikely(err)) 570593515d53SJiri Pirko return err; 570693515d53SJiri Pirko 570793515d53SJiri Pirko __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 570893515d53SJiri Pirko return 0; 570993515d53SJiri Pirko } 571093515d53SJiri Pirko EXPORT_SYMBOL(skb_vlan_pop); 571193515d53SJiri Pirko 5712b6a79208SShmulik Ladkani /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present). 5713b6a79208SShmulik Ladkani * Expects skb->data at mac header.
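 *
 * A minimal usage sketch (hypothetical caller, error handling elided;
 * 100 is an arbitrary example VID):
 *
 *	err = skb_vlan_push(skb, htons(ETH_P_8021Q), 100);
 *	...
 *	err = skb_vlan_pop(skb);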
5714b6a79208SShmulik Ladkani */ 571593515d53SJiri Pirko int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) 571693515d53SJiri Pirko { 5717df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) { 5718b6a79208SShmulik Ladkani int offset = skb->data - skb_mac_header(skb); 571993515d53SJiri Pirko int err; 572093515d53SJiri Pirko 5721b6a79208SShmulik Ladkani if (WARN_ONCE(offset, 5722b6a79208SShmulik Ladkani "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", 5723b6a79208SShmulik Ladkani offset)) { 5724b6a79208SShmulik Ladkani return -EINVAL; 5725b6a79208SShmulik Ladkani } 5726b6a79208SShmulik Ladkani 572793515d53SJiri Pirko err = __vlan_insert_tag(skb, skb->vlan_proto, 5728df8a39deSJiri Pirko skb_vlan_tag_get(skb)); 5729b6a79208SShmulik Ladkani if (err) 573093515d53SJiri Pirko return err; 57319241e2dfSDaniel Borkmann 573293515d53SJiri Pirko skb->protocol = skb->vlan_proto; 573393515d53SJiri Pirko skb->mac_len += VLAN_HLEN; 573493515d53SJiri Pirko 57356b83d28aSDaniel Borkmann skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 573693515d53SJiri Pirko } 573793515d53SJiri Pirko __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 573893515d53SJiri Pirko return 0; 573993515d53SJiri Pirko } 574093515d53SJiri Pirko EXPORT_SYMBOL(skb_vlan_push); 574193515d53SJiri Pirko 574219fbcb36SGuillaume Nault /** 574319fbcb36SGuillaume Nault * skb_eth_pop() - Drop the Ethernet header at the head of a packet 574419fbcb36SGuillaume Nault * 574519fbcb36SGuillaume Nault * @skb: Socket buffer to modify 574619fbcb36SGuillaume Nault * 574719fbcb36SGuillaume Nault * Drop the Ethernet header of @skb. 574819fbcb36SGuillaume Nault * 574919fbcb36SGuillaume Nault * Expects that skb->data points to the mac header and that no VLAN tags are 575019fbcb36SGuillaume Nault * present. 575119fbcb36SGuillaume Nault * 575219fbcb36SGuillaume Nault * Returns 0 on success, -errno otherwise. 575319fbcb36SGuillaume Nault */ 575419fbcb36SGuillaume Nault int skb_eth_pop(struct sk_buff *skb) 575519fbcb36SGuillaume Nault { 575619fbcb36SGuillaume Nault if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || 575719fbcb36SGuillaume Nault skb_network_offset(skb) < ETH_HLEN) 575819fbcb36SGuillaume Nault return -EPROTO; 575919fbcb36SGuillaume Nault 576019fbcb36SGuillaume Nault skb_pull_rcsum(skb, ETH_HLEN); 576119fbcb36SGuillaume Nault skb_reset_mac_header(skb); 576219fbcb36SGuillaume Nault skb_reset_mac_len(skb); 576319fbcb36SGuillaume Nault 576419fbcb36SGuillaume Nault return 0; 576519fbcb36SGuillaume Nault } 576619fbcb36SGuillaume Nault EXPORT_SYMBOL(skb_eth_pop); 576719fbcb36SGuillaume Nault 576819fbcb36SGuillaume Nault /** 576919fbcb36SGuillaume Nault * skb_eth_push() - Add a new Ethernet header at the head of a packet 577019fbcb36SGuillaume Nault * 577119fbcb36SGuillaume Nault * @skb: Socket buffer to modify 577219fbcb36SGuillaume Nault * @dst: Destination MAC address of the new header 577319fbcb36SGuillaume Nault * @src: Source MAC address of the new header 577419fbcb36SGuillaume Nault * 577519fbcb36SGuillaume Nault * Prepend @skb with a new Ethernet header. 577619fbcb36SGuillaume Nault * 577719fbcb36SGuillaume Nault * Expects that skb->data points to the mac header, which must be empty. 577819fbcb36SGuillaume Nault * 577919fbcb36SGuillaume Nault * Returns 0 on success, -errno otherwise. 
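 *
 * A minimal usage sketch (dst_mac and src_mac are hypothetical arrays;
 * assumes the skb has no MAC header yet):
 *
 *	err = skb_eth_push(skb, dst_mac, src_mac);
 *	if (err < 0)
 *		goto drop;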
578019fbcb36SGuillaume Nault */ 578119fbcb36SGuillaume Nault int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, 578219fbcb36SGuillaume Nault const unsigned char *src) 578319fbcb36SGuillaume Nault { 578419fbcb36SGuillaume Nault struct ethhdr *eth; 578519fbcb36SGuillaume Nault int err; 578619fbcb36SGuillaume Nault 578719fbcb36SGuillaume Nault if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) 578819fbcb36SGuillaume Nault return -EPROTO; 578919fbcb36SGuillaume Nault 579019fbcb36SGuillaume Nault err = skb_cow_head(skb, sizeof(*eth)); 579119fbcb36SGuillaume Nault if (err < 0) 579219fbcb36SGuillaume Nault return err; 579319fbcb36SGuillaume Nault 579419fbcb36SGuillaume Nault skb_push(skb, sizeof(*eth)); 579519fbcb36SGuillaume Nault skb_reset_mac_header(skb); 579619fbcb36SGuillaume Nault skb_reset_mac_len(skb); 579719fbcb36SGuillaume Nault 579819fbcb36SGuillaume Nault eth = eth_hdr(skb); 579919fbcb36SGuillaume Nault ether_addr_copy(eth->h_dest, dst); 580019fbcb36SGuillaume Nault ether_addr_copy(eth->h_source, src); 580119fbcb36SGuillaume Nault eth->h_proto = skb->protocol; 580219fbcb36SGuillaume Nault 580319fbcb36SGuillaume Nault skb_postpush_rcsum(skb, eth, sizeof(*eth)); 580419fbcb36SGuillaume Nault 580519fbcb36SGuillaume Nault return 0; 580619fbcb36SGuillaume Nault } 580719fbcb36SGuillaume Nault EXPORT_SYMBOL(skb_eth_push); 580819fbcb36SGuillaume Nault 58098822e270SJohn Hurley /* Update the ethertype of hdr and the skb csum value if required. */ 58108822e270SJohn Hurley static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, 58118822e270SJohn Hurley __be16 ethertype) 58128822e270SJohn Hurley { 58138822e270SJohn Hurley if (skb->ip_summed == CHECKSUM_COMPLETE) { 58148822e270SJohn Hurley __be16 diff[] = { ~hdr->h_proto, ethertype }; 58158822e270SJohn Hurley 58168822e270SJohn Hurley skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 58178822e270SJohn Hurley } 58188822e270SJohn Hurley 58198822e270SJohn Hurley hdr->h_proto = ethertype; 58208822e270SJohn Hurley } 58218822e270SJohn Hurley 58228822e270SJohn Hurley /** 5823e7dbfed1SMartin Varghese * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of 5824e7dbfed1SMartin Varghese * the packet 58258822e270SJohn Hurley * 58268822e270SJohn Hurley * @skb: buffer 58278822e270SJohn Hurley * @mpls_lse: MPLS label stack entry to push 58288822e270SJohn Hurley * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) 5829fa4e0f88SDavide Caratti * @mac_len: length of the MAC header 5830e7dbfed1SMartin Varghese * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is 5831e7dbfed1SMartin Varghese * ethernet 58328822e270SJohn Hurley * 58338822e270SJohn Hurley * Expects skb->data at mac header. 58348822e270SJohn Hurley * 58358822e270SJohn Hurley * Returns 0 on success, -errno otherwise. 58368822e270SJohn Hurley */ 5837fa4e0f88SDavide Caratti int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, 5838d04ac224SMartin Varghese int mac_len, bool ethernet) 58398822e270SJohn Hurley { 58408822e270SJohn Hurley struct mpls_shim_hdr *lse; 58418822e270SJohn Hurley int err; 58428822e270SJohn Hurley 58438822e270SJohn Hurley if (unlikely(!eth_p_mpls(mpls_proto))) 58448822e270SJohn Hurley return -EINVAL; 58458822e270SJohn Hurley 58468822e270SJohn Hurley /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. 
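 * (both would claim skb->inner_protocol), so refuse to push an MPLS
 * header onto an already-encapsulated skb.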
*/ 58478822e270SJohn Hurley if (skb->encapsulation) 58488822e270SJohn Hurley return -EINVAL; 58498822e270SJohn Hurley 58508822e270SJohn Hurley err = skb_cow_head(skb, MPLS_HLEN); 58518822e270SJohn Hurley if (unlikely(err)) 58528822e270SJohn Hurley return err; 58538822e270SJohn Hurley 58548822e270SJohn Hurley if (!skb->inner_protocol) { 5855e7dbfed1SMartin Varghese skb_set_inner_network_header(skb, skb_network_offset(skb)); 58568822e270SJohn Hurley skb_set_inner_protocol(skb, skb->protocol); 58578822e270SJohn Hurley } 58588822e270SJohn Hurley 58598822e270SJohn Hurley skb_push(skb, MPLS_HLEN); 58608822e270SJohn Hurley memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), 5861fa4e0f88SDavide Caratti mac_len); 58628822e270SJohn Hurley skb_reset_mac_header(skb); 5863fa4e0f88SDavide Caratti skb_set_network_header(skb, mac_len); 5864e7dbfed1SMartin Varghese skb_reset_mac_len(skb); 58658822e270SJohn Hurley 58668822e270SJohn Hurley lse = mpls_hdr(skb); 58678822e270SJohn Hurley lse->label_stack_entry = mpls_lse; 58688822e270SJohn Hurley skb_postpush_rcsum(skb, lse, MPLS_HLEN); 58698822e270SJohn Hurley 58704296adc3SGuillaume Nault if (ethernet && mac_len >= ETH_HLEN) 58718822e270SJohn Hurley skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); 58728822e270SJohn Hurley skb->protocol = mpls_proto; 58738822e270SJohn Hurley 58748822e270SJohn Hurley return 0; 58758822e270SJohn Hurley } 58768822e270SJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_push); 58778822e270SJohn Hurley 58782e4e4410SEric Dumazet /** 5879ed246ceeSJohn Hurley * skb_mpls_pop() - pop the outermost MPLS header 5880ed246ceeSJohn Hurley * 5881ed246ceeSJohn Hurley * @skb: buffer 5882ed246ceeSJohn Hurley * @next_proto: ethertype of header after popped MPLS header 5883fa4e0f88SDavide Caratti * @mac_len: length of the MAC header 588476f99f98SMartin Varghese * @ethernet: flag to indicate if the packet is ethernet 5885ed246ceeSJohn Hurley * 5886ed246ceeSJohn Hurley * Expects skb->data at mac header. 5887ed246ceeSJohn Hurley * 5888ed246ceeSJohn Hurley * Returns 0 on success, -errno otherwise. 5889ed246ceeSJohn Hurley */ 5890040b5cfbSMartin Varghese int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, 5891040b5cfbSMartin Varghese bool ethernet) 5892ed246ceeSJohn Hurley { 5893ed246ceeSJohn Hurley int err; 5894ed246ceeSJohn Hurley 5895ed246ceeSJohn Hurley if (unlikely(!eth_p_mpls(skb->protocol))) 5896dedc5a08SDavide Caratti return 0; 5897ed246ceeSJohn Hurley 5898fa4e0f88SDavide Caratti err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); 5899ed246ceeSJohn Hurley if (unlikely(err)) 5900ed246ceeSJohn Hurley return err; 5901ed246ceeSJohn Hurley 5902ed246ceeSJohn Hurley skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); 5903ed246ceeSJohn Hurley memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), 5904fa4e0f88SDavide Caratti mac_len); 5905ed246ceeSJohn Hurley 5906ed246ceeSJohn Hurley __skb_pull(skb, MPLS_HLEN); 5907ed246ceeSJohn Hurley skb_reset_mac_header(skb); 5908fa4e0f88SDavide Caratti skb_set_network_header(skb, mac_len); 5909ed246ceeSJohn Hurley 59104296adc3SGuillaume Nault if (ethernet && mac_len >= ETH_HLEN) { 5911ed246ceeSJohn Hurley struct ethhdr *hdr; 5912ed246ceeSJohn Hurley 5913ed246ceeSJohn Hurley /* use mpls_hdr() to get ethertype to account for VLANs. 
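 * The ethertype field always sits immediately before the MPLS header,
 * even when VLAN tags are present, so derive the ethhdr position from
 * mpls_hdr() rather than from skb_mac_header().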
*/ 5914ed246ceeSJohn Hurley hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); 5915ed246ceeSJohn Hurley skb_mod_eth_type(skb, hdr, next_proto); 5916ed246ceeSJohn Hurley } 5917ed246ceeSJohn Hurley skb->protocol = next_proto; 5918ed246ceeSJohn Hurley 5919ed246ceeSJohn Hurley return 0; 5920ed246ceeSJohn Hurley } 5921ed246ceeSJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_pop); 5922ed246ceeSJohn Hurley 5923ed246ceeSJohn Hurley /** 5924d27cf5c5SJohn Hurley * skb_mpls_update_lse() - modify outermost MPLS header and update csum 5925d27cf5c5SJohn Hurley * 5926d27cf5c5SJohn Hurley * @skb: buffer 5927d27cf5c5SJohn Hurley * @mpls_lse: new MPLS label stack entry to update to 5928d27cf5c5SJohn Hurley * 5929d27cf5c5SJohn Hurley * Expects skb->data at mac header. 5930d27cf5c5SJohn Hurley * 5931d27cf5c5SJohn Hurley * Returns 0 on success, -errno otherwise. 5932d27cf5c5SJohn Hurley */ 5933d27cf5c5SJohn Hurley int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) 5934d27cf5c5SJohn Hurley { 5935d27cf5c5SJohn Hurley int err; 5936d27cf5c5SJohn Hurley 5937d27cf5c5SJohn Hurley if (unlikely(!eth_p_mpls(skb->protocol))) 5938d27cf5c5SJohn Hurley return -EINVAL; 5939d27cf5c5SJohn Hurley 5940d27cf5c5SJohn Hurley err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); 5941d27cf5c5SJohn Hurley if (unlikely(err)) 5942d27cf5c5SJohn Hurley return err; 5943d27cf5c5SJohn Hurley 5944d27cf5c5SJohn Hurley if (skb->ip_summed == CHECKSUM_COMPLETE) { 5945d27cf5c5SJohn Hurley __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; 5946d27cf5c5SJohn Hurley 5947d27cf5c5SJohn Hurley skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 5948d27cf5c5SJohn Hurley } 5949d27cf5c5SJohn Hurley 5950d27cf5c5SJohn Hurley mpls_hdr(skb)->label_stack_entry = mpls_lse; 5951d27cf5c5SJohn Hurley 5952d27cf5c5SJohn Hurley return 0; 5953d27cf5c5SJohn Hurley } 5954d27cf5c5SJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_update_lse); 5955d27cf5c5SJohn Hurley 5956d27cf5c5SJohn Hurley /** 59572a2ea508SJohn Hurley * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header 59582a2ea508SJohn Hurley * 59592a2ea508SJohn Hurley * @skb: buffer 59602a2ea508SJohn Hurley * 59612a2ea508SJohn Hurley * Expects skb->data at mac header. 59622a2ea508SJohn Hurley * 59632a2ea508SJohn Hurley * Returns 0 on success, -errno otherwise. 
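 *
 * A minimal usage sketch (hypothetical MPLS forwarding path):
 *
 *	err = skb_mpls_dec_ttl(skb);
 *	if (err)
 *		goto drop;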
59642a2ea508SJohn Hurley */ 59652a2ea508SJohn Hurley int skb_mpls_dec_ttl(struct sk_buff *skb) 59662a2ea508SJohn Hurley { 59672a2ea508SJohn Hurley u32 lse; 59682a2ea508SJohn Hurley u8 ttl; 59692a2ea508SJohn Hurley 59702a2ea508SJohn Hurley if (unlikely(!eth_p_mpls(skb->protocol))) 59712a2ea508SJohn Hurley return -EINVAL; 59722a2ea508SJohn Hurley 597313de4ed9SDavide Caratti if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) 597413de4ed9SDavide Caratti return -ENOMEM; 597513de4ed9SDavide Caratti 59762a2ea508SJohn Hurley lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); 59772a2ea508SJohn Hurley ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; 59782a2ea508SJohn Hurley if (!--ttl) 59792a2ea508SJohn Hurley return -EINVAL; 59802a2ea508SJohn Hurley 59812a2ea508SJohn Hurley lse &= ~MPLS_LS_TTL_MASK; 59822a2ea508SJohn Hurley lse |= ttl << MPLS_LS_TTL_SHIFT; 59832a2ea508SJohn Hurley 59842a2ea508SJohn Hurley return skb_mpls_update_lse(skb, cpu_to_be32(lse)); 59852a2ea508SJohn Hurley } 59862a2ea508SJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl); 59872a2ea508SJohn Hurley 59882a2ea508SJohn Hurley /** 59892e4e4410SEric Dumazet * alloc_skb_with_frags - allocate skb with page frags 59902e4e4410SEric Dumazet * 5991de3f0d0eSMasanari Iida * @header_len: size of linear part 5992de3f0d0eSMasanari Iida * @data_len: needed length in frags 5993de3f0d0eSMasanari Iida * @max_page_order: max page order desired. 5994de3f0d0eSMasanari Iida * @errcode: pointer to error code if any 5995de3f0d0eSMasanari Iida * @gfp_mask: allocation mask 59962e4e4410SEric Dumazet * 59972e4e4410SEric Dumazet * This can be used to allocate a paged skb, given a maximal order for frags. 59982e4e4410SEric Dumazet */ 59992e4e4410SEric Dumazet struct sk_buff *alloc_skb_with_frags(unsigned long header_len, 60002e4e4410SEric Dumazet unsigned long data_len, 60012e4e4410SEric Dumazet int max_page_order, 60022e4e4410SEric Dumazet int *errcode, 60032e4e4410SEric Dumazet gfp_t gfp_mask) 60042e4e4410SEric Dumazet { 60052e4e4410SEric Dumazet int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 60062e4e4410SEric Dumazet unsigned long chunk; 60072e4e4410SEric Dumazet struct sk_buff *skb; 60082e4e4410SEric Dumazet struct page *page; 60092e4e4410SEric Dumazet int i; 60102e4e4410SEric Dumazet 60112e4e4410SEric Dumazet *errcode = -EMSGSIZE; 60122e4e4410SEric Dumazet /* Note this test could be relaxed, if we succeed in allocating 60132e4e4410SEric Dumazet * high order pages...
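 * (npages is counted in PAGE_SIZE units, but each of the MAX_SKB_FRAGS
 * slots can hold a high-order page, so more data could fit in theory)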
60142e4e4410SEric Dumazet */ 60152e4e4410SEric Dumazet if (npages > MAX_SKB_FRAGS) 60162e4e4410SEric Dumazet return NULL; 60172e4e4410SEric Dumazet 60182e4e4410SEric Dumazet *errcode = -ENOBUFS; 6019f8c468e8SDavid Rientjes skb = alloc_skb(header_len, gfp_mask); 60202e4e4410SEric Dumazet if (!skb) 60212e4e4410SEric Dumazet return NULL; 60222e4e4410SEric Dumazet 60232e4e4410SEric Dumazet skb->truesize += npages << PAGE_SHIFT; 60242e4e4410SEric Dumazet 60252e4e4410SEric Dumazet for (i = 0; npages > 0; i++) { 60262e4e4410SEric Dumazet int order = max_page_order; 60272e4e4410SEric Dumazet 60282e4e4410SEric Dumazet while (order) { 60292e4e4410SEric Dumazet if (npages >= 1 << order) { 6030d0164adcSMel Gorman page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | 60312e4e4410SEric Dumazet __GFP_COMP | 6032d14b56f5SMichal Hocko __GFP_NOWARN, 60332e4e4410SEric Dumazet order); 60342e4e4410SEric Dumazet if (page) 60352e4e4410SEric Dumazet goto fill_page; 60362e4e4410SEric Dumazet /* Do not retry other high order allocations */ 60372e4e4410SEric Dumazet order = 1; 60382e4e4410SEric Dumazet max_page_order = 0; 60392e4e4410SEric Dumazet } 60402e4e4410SEric Dumazet order--; 60412e4e4410SEric Dumazet } 60422e4e4410SEric Dumazet page = alloc_page(gfp_mask); 60432e4e4410SEric Dumazet if (!page) 60442e4e4410SEric Dumazet goto failure; 60452e4e4410SEric Dumazet fill_page: 60462e4e4410SEric Dumazet chunk = min_t(unsigned long, data_len, 60472e4e4410SEric Dumazet PAGE_SIZE << order); 60482e4e4410SEric Dumazet skb_fill_page_desc(skb, i, page, 0, chunk); 60492e4e4410SEric Dumazet data_len -= chunk; 60502e4e4410SEric Dumazet npages -= 1 << order; 60512e4e4410SEric Dumazet } 60522e4e4410SEric Dumazet return skb; 60532e4e4410SEric Dumazet 60542e4e4410SEric Dumazet failure: 60552e4e4410SEric Dumazet kfree_skb(skb); 60562e4e4410SEric Dumazet return NULL; 60572e4e4410SEric Dumazet } 60582e4e4410SEric Dumazet EXPORT_SYMBOL(alloc_skb_with_frags); 60596fa01ccdSSowmini Varadhan 60606fa01ccdSSowmini Varadhan /* carve out the first off bytes from skb when off < headlen */ 60616fa01ccdSSowmini Varadhan static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, 60626fa01ccdSSowmini Varadhan const int headlen, gfp_t gfp_mask) 60636fa01ccdSSowmini Varadhan { 60646fa01ccdSSowmini Varadhan int i; 60656fa01ccdSSowmini Varadhan int size = skb_end_offset(skb); 60666fa01ccdSSowmini Varadhan int new_hlen = headlen - off; 60676fa01ccdSSowmini Varadhan u8 *data; 60686fa01ccdSSowmini Varadhan 60696fa01ccdSSowmini Varadhan size = SKB_DATA_ALIGN(size); 60706fa01ccdSSowmini Varadhan 60716fa01ccdSSowmini Varadhan if (skb_pfmemalloc(skb)) 60726fa01ccdSSowmini Varadhan gfp_mask |= __GFP_MEMALLOC; 60736fa01ccdSSowmini Varadhan data = kmalloc_reserve(size + 60746fa01ccdSSowmini Varadhan SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 60756fa01ccdSSowmini Varadhan gfp_mask, NUMA_NO_NODE, NULL); 60766fa01ccdSSowmini Varadhan if (!data) 60776fa01ccdSSowmini Varadhan return -ENOMEM; 60786fa01ccdSSowmini Varadhan 60796fa01ccdSSowmini Varadhan size = SKB_WITH_OVERHEAD(ksize(data)); 60806fa01ccdSSowmini Varadhan 60816fa01ccdSSowmini Varadhan /* Copy real data, and all frags */ 60826fa01ccdSSowmini Varadhan skb_copy_from_linear_data_offset(skb, off, data, new_hlen); 60836fa01ccdSSowmini Varadhan skb->len -= off; 60846fa01ccdSSowmini Varadhan 60856fa01ccdSSowmini Varadhan memcpy((struct skb_shared_info *)(data + size), 60866fa01ccdSSowmini Varadhan skb_shinfo(skb), 60876fa01ccdSSowmini Varadhan offsetof(struct skb_shared_info, 
60886fa01ccdSSowmini Varadhan frags[skb_shinfo(skb)->nr_frags])); 60896fa01ccdSSowmini Varadhan if (skb_cloned(skb)) { 60906fa01ccdSSowmini Varadhan /* drop the old head gracefully */ 60916fa01ccdSSowmini Varadhan if (skb_orphan_frags(skb, gfp_mask)) { 60926fa01ccdSSowmini Varadhan kfree(data); 60936fa01ccdSSowmini Varadhan return -ENOMEM; 60946fa01ccdSSowmini Varadhan } 60956fa01ccdSSowmini Varadhan for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 60966fa01ccdSSowmini Varadhan skb_frag_ref(skb, i); 60976fa01ccdSSowmini Varadhan if (skb_has_frag_list(skb)) 60986fa01ccdSSowmini Varadhan skb_clone_fraglist(skb); 60996fa01ccdSSowmini Varadhan skb_release_data(skb); 61006fa01ccdSSowmini Varadhan } else { 61016fa01ccdSSowmini Varadhan /* we can reuse the existing refcount - all we did was 61026fa01ccdSSowmini Varadhan * relocate values 61036fa01ccdSSowmini Varadhan */ 61046fa01ccdSSowmini Varadhan skb_free_head(skb); 61056fa01ccdSSowmini Varadhan } 61066fa01ccdSSowmini Varadhan 61076fa01ccdSSowmini Varadhan skb->head = data; 61086fa01ccdSSowmini Varadhan skb->data = data; 61096fa01ccdSSowmini Varadhan skb->head_frag = 0; 61106fa01ccdSSowmini Varadhan #ifdef NET_SKBUFF_DATA_USES_OFFSET 61116fa01ccdSSowmini Varadhan skb->end = size; 61126fa01ccdSSowmini Varadhan #else 61136fa01ccdSSowmini Varadhan skb->end = skb->head + size; 61146fa01ccdSSowmini Varadhan #endif 61156fa01ccdSSowmini Varadhan skb_set_tail_pointer(skb, skb_headlen(skb)); 61166fa01ccdSSowmini Varadhan skb_headers_offset_update(skb, 0); 61176fa01ccdSSowmini Varadhan skb->cloned = 0; 61186fa01ccdSSowmini Varadhan skb->hdr_len = 0; 61196fa01ccdSSowmini Varadhan skb->nohdr = 0; 61206fa01ccdSSowmini Varadhan atomic_set(&skb_shinfo(skb)->dataref, 1); 61216fa01ccdSSowmini Varadhan 61226fa01ccdSSowmini Varadhan return 0; 61236fa01ccdSSowmini Varadhan } 61246fa01ccdSSowmini Varadhan 61256fa01ccdSSowmini Varadhan static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp); 61266fa01ccdSSowmini Varadhan 61276fa01ccdSSowmini Varadhan /* carve out the first eat bytes from skb's frag_list. May recurse into 61286fa01ccdSSowmini Varadhan * pskb_carve() 61296fa01ccdSSowmini Varadhan */ 61306fa01ccdSSowmini Varadhan static int pskb_carve_frag_list(struct sk_buff *skb, 61316fa01ccdSSowmini Varadhan struct skb_shared_info *shinfo, int eat, 61326fa01ccdSSowmini Varadhan gfp_t gfp_mask) 61336fa01ccdSSowmini Varadhan { 61346fa01ccdSSowmini Varadhan struct sk_buff *list = shinfo->frag_list; 61356fa01ccdSSowmini Varadhan struct sk_buff *clone = NULL; 61366fa01ccdSSowmini Varadhan struct sk_buff *insp = NULL; 61376fa01ccdSSowmini Varadhan 61386fa01ccdSSowmini Varadhan do { 61396fa01ccdSSowmini Varadhan if (!list) { 61406fa01ccdSSowmini Varadhan pr_err("Not enough bytes to eat. Want %d\n", eat); 61416fa01ccdSSowmini Varadhan return -EFAULT; 61426fa01ccdSSowmini Varadhan } 61436fa01ccdSSowmini Varadhan if (list->len <= eat) { 61446fa01ccdSSowmini Varadhan /* Eaten as whole. */ 61456fa01ccdSSowmini Varadhan eat -= list->len; 61466fa01ccdSSowmini Varadhan list = list->next; 61476fa01ccdSSowmini Varadhan insp = list; 61486fa01ccdSSowmini Varadhan } else { 61496fa01ccdSSowmini Varadhan /* Eaten partially.
*/ 61506fa01ccdSSowmini Varadhan if (skb_shared(list)) { 61516fa01ccdSSowmini Varadhan clone = skb_clone(list, gfp_mask); 61526fa01ccdSSowmini Varadhan if (!clone) 61536fa01ccdSSowmini Varadhan return -ENOMEM; 61546fa01ccdSSowmini Varadhan insp = list->next; 61556fa01ccdSSowmini Varadhan list = clone; 61566fa01ccdSSowmini Varadhan } else { 61576fa01ccdSSowmini Varadhan /* This may be pulled without problems. */ 61586fa01ccdSSowmini Varadhan insp = list; 61596fa01ccdSSowmini Varadhan } 61606fa01ccdSSowmini Varadhan if (pskb_carve(list, eat, gfp_mask) < 0) { 61616fa01ccdSSowmini Varadhan kfree_skb(clone); 61626fa01ccdSSowmini Varadhan return -ENOMEM; 61636fa01ccdSSowmini Varadhan } 61646fa01ccdSSowmini Varadhan break; 61656fa01ccdSSowmini Varadhan } 61666fa01ccdSSowmini Varadhan } while (eat); 61676fa01ccdSSowmini Varadhan 61686fa01ccdSSowmini Varadhan /* Free pulled out fragments. */ 61696fa01ccdSSowmini Varadhan while ((list = shinfo->frag_list) != insp) { 61706fa01ccdSSowmini Varadhan shinfo->frag_list = list->next; 61716fa01ccdSSowmini Varadhan kfree_skb(list); 61726fa01ccdSSowmini Varadhan } 61736fa01ccdSSowmini Varadhan /* And insert new clone at head. */ 61746fa01ccdSSowmini Varadhan if (clone) { 61756fa01ccdSSowmini Varadhan clone->next = list; 61766fa01ccdSSowmini Varadhan shinfo->frag_list = clone; 61776fa01ccdSSowmini Varadhan } 61786fa01ccdSSowmini Varadhan return 0; 61796fa01ccdSSowmini Varadhan } 61806fa01ccdSSowmini Varadhan 61816fa01ccdSSowmini Varadhan /* carve off first len bytes from skb. Split line (off) is in the 61826fa01ccdSSowmini Varadhan * non-linear part of skb 61836fa01ccdSSowmini Varadhan */ 61846fa01ccdSSowmini Varadhan static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, 61856fa01ccdSSowmini Varadhan int pos, gfp_t gfp_mask) 61866fa01ccdSSowmini Varadhan { 61876fa01ccdSSowmini Varadhan int i, k = 0; 61886fa01ccdSSowmini Varadhan int size = skb_end_offset(skb); 61896fa01ccdSSowmini Varadhan u8 *data; 61906fa01ccdSSowmini Varadhan const int nfrags = skb_shinfo(skb)->nr_frags; 61916fa01ccdSSowmini Varadhan struct skb_shared_info *shinfo; 61926fa01ccdSSowmini Varadhan 61936fa01ccdSSowmini Varadhan size = SKB_DATA_ALIGN(size); 61946fa01ccdSSowmini Varadhan 61956fa01ccdSSowmini Varadhan if (skb_pfmemalloc(skb)) 61966fa01ccdSSowmini Varadhan gfp_mask |= __GFP_MEMALLOC; 61976fa01ccdSSowmini Varadhan data = kmalloc_reserve(size + 61986fa01ccdSSowmini Varadhan SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 61996fa01ccdSSowmini Varadhan gfp_mask, NUMA_NO_NODE, NULL); 62006fa01ccdSSowmini Varadhan if (!data) 62016fa01ccdSSowmini Varadhan return -ENOMEM; 62026fa01ccdSSowmini Varadhan 62036fa01ccdSSowmini Varadhan size = SKB_WITH_OVERHEAD(ksize(data)); 62046fa01ccdSSowmini Varadhan 62056fa01ccdSSowmini Varadhan memcpy((struct skb_shared_info *)(data + size), 6206e3ec1e8cSMiaohe Lin skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); 62076fa01ccdSSowmini Varadhan if (skb_orphan_frags(skb, gfp_mask)) { 62086fa01ccdSSowmini Varadhan kfree(data); 62096fa01ccdSSowmini Varadhan return -ENOMEM; 62106fa01ccdSSowmini Varadhan } 62116fa01ccdSSowmini Varadhan shinfo = (struct skb_shared_info *)(data + size); 62126fa01ccdSSowmini Varadhan for (i = 0; i < nfrags; i++) { 62136fa01ccdSSowmini Varadhan int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); 62146fa01ccdSSowmini Varadhan 62156fa01ccdSSowmini Varadhan if (pos + fsize > off) { 62166fa01ccdSSowmini Varadhan shinfo->frags[k] = skb_shinfo(skb)->frags[i]; 62176fa01ccdSSowmini Varadhan 
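			/* This frag survives the carve. If the split point
			 * falls inside it, trim its head below; only the
			 * first kept frag (frags[0]) can straddle the split.
			 */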
62186fa01ccdSSowmini Varadhan if (pos < off) { 62196fa01ccdSSowmini Varadhan /* Split frag. 62206fa01ccdSSowmini Varadhan * We have two variants in this case: 62216fa01ccdSSowmini Varadhan * 1. Move the whole frag to the second 62226fa01ccdSSowmini Varadhan * part, if it is possible. E.g. 62236fa01ccdSSowmini Varadhan * this approach is mandatory for TUX, 62246fa01ccdSSowmini Varadhan * where splitting is expensive. 62256fa01ccdSSowmini Varadhan * 2. Split accurately at the offset; this is what we do here. 62266fa01ccdSSowmini Varadhan */ 6227b54c9d5bSJonathan Lemon skb_frag_off_add(&shinfo->frags[0], off - pos); 62286fa01ccdSSowmini Varadhan skb_frag_size_sub(&shinfo->frags[0], off - pos); 62296fa01ccdSSowmini Varadhan } 62306fa01ccdSSowmini Varadhan skb_frag_ref(skb, i); 62316fa01ccdSSowmini Varadhan k++; 62326fa01ccdSSowmini Varadhan } 62336fa01ccdSSowmini Varadhan pos += fsize; 62346fa01ccdSSowmini Varadhan } 62356fa01ccdSSowmini Varadhan shinfo->nr_frags = k; 62366fa01ccdSSowmini Varadhan if (skb_has_frag_list(skb)) 62376fa01ccdSSowmini Varadhan skb_clone_fraglist(skb); 62386fa01ccdSSowmini Varadhan 62396fa01ccdSSowmini Varadhan /* split line is in frag list */ 6240eabe8618SMiaohe Lin if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { 6241eabe8618SMiaohe Lin /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ 6242eabe8618SMiaohe Lin if (skb_has_frag_list(skb)) 6243eabe8618SMiaohe Lin kfree_skb_list(skb_shinfo(skb)->frag_list); 6244eabe8618SMiaohe Lin kfree(data); 6245eabe8618SMiaohe Lin return -ENOMEM; 62466fa01ccdSSowmini Varadhan } 62476fa01ccdSSowmini Varadhan skb_release_data(skb); 62486fa01ccdSSowmini Varadhan 62496fa01ccdSSowmini Varadhan skb->head = data; 62506fa01ccdSSowmini Varadhan skb->head_frag = 0; 62516fa01ccdSSowmini Varadhan skb->data = data; 62526fa01ccdSSowmini Varadhan #ifdef NET_SKBUFF_DATA_USES_OFFSET 62536fa01ccdSSowmini Varadhan skb->end = size; 62546fa01ccdSSowmini Varadhan #else 62556fa01ccdSSowmini Varadhan skb->end = skb->head + size; 62566fa01ccdSSowmini Varadhan #endif 62576fa01ccdSSowmini Varadhan skb_reset_tail_pointer(skb); 62586fa01ccdSSowmini Varadhan skb_headers_offset_update(skb, 0); 62596fa01ccdSSowmini Varadhan skb->cloned = 0; 62606fa01ccdSSowmini Varadhan skb->hdr_len = 0; 62616fa01ccdSSowmini Varadhan skb->nohdr = 0; 62626fa01ccdSSowmini Varadhan skb->len -= off; 62636fa01ccdSSowmini Varadhan skb->data_len = skb->len; 62646fa01ccdSSowmini Varadhan atomic_set(&skb_shinfo(skb)->dataref, 1); 62656fa01ccdSSowmini Varadhan return 0; 62666fa01ccdSSowmini Varadhan } 62676fa01ccdSSowmini Varadhan 62686fa01ccdSSowmini Varadhan /* remove len bytes from the beginning of the skb */ 62696fa01ccdSSowmini Varadhan static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) 62706fa01ccdSSowmini Varadhan { 62716fa01ccdSSowmini Varadhan int headlen = skb_headlen(skb); 62726fa01ccdSSowmini Varadhan 62736fa01ccdSSowmini Varadhan if (len < headlen) 62746fa01ccdSSowmini Varadhan return pskb_carve_inside_header(skb, len, headlen, gfp); 62756fa01ccdSSowmini Varadhan else 62766fa01ccdSSowmini Varadhan return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); 62776fa01ccdSSowmini Varadhan } 62786fa01ccdSSowmini Varadhan 62796fa01ccdSSowmini Varadhan /* Extract to_copy bytes starting at off from skb, and return this in 62806fa01ccdSSowmini Varadhan * a new skb 62816fa01ccdSSowmini Varadhan */ 62826fa01ccdSSowmini Varadhan struct sk_buff *pskb_extract(struct sk_buff *skb, int off, 62836fa01ccdSSowmini Varadhan int to_copy, gfp_t gfp)
62846fa01ccdSSowmini Varadhan { 62856fa01ccdSSowmini Varadhan struct sk_buff *clone = skb_clone(skb, gfp); 62866fa01ccdSSowmini Varadhan 62876fa01ccdSSowmini Varadhan if (!clone) 62886fa01ccdSSowmini Varadhan return NULL; 62896fa01ccdSSowmini Varadhan 62906fa01ccdSSowmini Varadhan if (pskb_carve(clone, off, gfp) < 0 || 62916fa01ccdSSowmini Varadhan pskb_trim(clone, to_copy)) { 62926fa01ccdSSowmini Varadhan kfree_skb(clone); 62936fa01ccdSSowmini Varadhan return NULL; 62946fa01ccdSSowmini Varadhan } 62956fa01ccdSSowmini Varadhan return clone; 62966fa01ccdSSowmini Varadhan } 62976fa01ccdSSowmini Varadhan EXPORT_SYMBOL(pskb_extract); 6298c8c8b127SEric Dumazet 6299c8c8b127SEric Dumazet /** 6300c8c8b127SEric Dumazet * skb_condense - try to get rid of fragments/frag_list if possible 6301c8c8b127SEric Dumazet * @skb: buffer 6302c8c8b127SEric Dumazet * 6303c8c8b127SEric Dumazet * Can be used to save memory before skb is added to a busy queue. 6304c8c8b127SEric Dumazet * If the packet has bytes in frags and enough tail room in skb->head, 6305c8c8b127SEric Dumazet * pull all of them, so that we can free the frags right now and adjust 6306c8c8b127SEric Dumazet * truesize. 6307c8c8b127SEric Dumazet * Notes: 6308c8c8b127SEric Dumazet * We do not reallocate skb->head thus cannot fail. 6309c8c8b127SEric Dumazet * Caller must re-evaluate skb->truesize if needed. 6310c8c8b127SEric Dumazet */ 6311c8c8b127SEric Dumazet void skb_condense(struct sk_buff *skb) 6312c8c8b127SEric Dumazet { 63133174fed9SEric Dumazet if (skb->data_len) { 63143174fed9SEric Dumazet if (skb->data_len > skb->end - skb->tail || 6315c8c8b127SEric Dumazet skb_cloned(skb)) 6316c8c8b127SEric Dumazet return; 6317c8c8b127SEric Dumazet 6318c8c8b127SEric Dumazet /* Nice, we can free page frag(s) right now */ 6319c8c8b127SEric Dumazet __pskb_pull_tail(skb, skb->data_len); 63203174fed9SEric Dumazet } 63213174fed9SEric Dumazet /* At this point, skb->truesize might be overestimated, 63223174fed9SEric Dumazet * because skb had a fragment, and fragments do not tell 63233174fed9SEric Dumazet * their truesize. 63243174fed9SEric Dumazet * When we pulled its content into skb->head, the fragment 63253174fed9SEric Dumazet * was freed, but __pskb_pull_tail() could not possibly 63263174fed9SEric Dumazet * adjust skb->truesize, not knowing the frag truesize. 6327c8c8b127SEric Dumazet */ 6328c8c8b127SEric Dumazet skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 6329c8c8b127SEric Dumazet } 6330df5042f4SFlorian Westphal 6331df5042f4SFlorian Westphal #ifdef CONFIG_SKB_EXTENSIONS 6332df5042f4SFlorian Westphal static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id) 6333df5042f4SFlorian Westphal { 6334df5042f4SFlorian Westphal return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE); 6335df5042f4SFlorian Westphal } 6336df5042f4SFlorian Westphal 63378b69a803SPaolo Abeni /** 63388b69a803SPaolo Abeni * __skb_ext_alloc - allocate a new skb extensions storage 63398b69a803SPaolo Abeni * 63404930f483SFlorian Westphal * @flags: See kmalloc(). 63414930f483SFlorian Westphal * 63428b69a803SPaolo Abeni * Returns the newly allocated pointer. The pointer can later be attached to an 63438b69a803SPaolo Abeni * skb via __skb_ext_set(). 63448b69a803SPaolo Abeni * Note: caller must handle the skb_ext as opaque data.
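 *
 * A minimal usage sketch (preallocating outside atomic context, then
 * attaching later; SKB_EXT_SEC_PATH is just an example id):
 *
 *	struct skb_ext *ext = __skb_ext_alloc(GFP_KERNEL);
 *	void *id_ptr;
 *
 *	if (ext)
 *		id_ptr = __skb_ext_set(skb, SKB_EXT_SEC_PATH, ext);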
63458b69a803SPaolo Abeni */ 63464930f483SFlorian Westphal struct skb_ext *__skb_ext_alloc(gfp_t flags) 6347df5042f4SFlorian Westphal { 63484930f483SFlorian Westphal struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags); 6349df5042f4SFlorian Westphal 6350df5042f4SFlorian Westphal if (new) { 6351df5042f4SFlorian Westphal memset(new->offset, 0, sizeof(new->offset)); 6352df5042f4SFlorian Westphal refcount_set(&new->refcnt, 1); 6353df5042f4SFlorian Westphal } 6354df5042f4SFlorian Westphal 6355df5042f4SFlorian Westphal return new; 6356df5042f4SFlorian Westphal } 6357df5042f4SFlorian Westphal 63584165079bSFlorian Westphal static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old, 63594165079bSFlorian Westphal unsigned int old_active) 6360df5042f4SFlorian Westphal { 6361df5042f4SFlorian Westphal struct skb_ext *new; 6362df5042f4SFlorian Westphal 6363df5042f4SFlorian Westphal if (refcount_read(&old->refcnt) == 1) 6364df5042f4SFlorian Westphal return old; 6365df5042f4SFlorian Westphal 6366df5042f4SFlorian Westphal new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC); 6367df5042f4SFlorian Westphal if (!new) 6368df5042f4SFlorian Westphal return NULL; 6369df5042f4SFlorian Westphal 6370df5042f4SFlorian Westphal memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE); 6371df5042f4SFlorian Westphal refcount_set(&new->refcnt, 1); 6372df5042f4SFlorian Westphal 63734165079bSFlorian Westphal #ifdef CONFIG_XFRM 63744165079bSFlorian Westphal if (old_active & (1 << SKB_EXT_SEC_PATH)) { 63754165079bSFlorian Westphal struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH); 63764165079bSFlorian Westphal unsigned int i; 63774165079bSFlorian Westphal 63784165079bSFlorian Westphal for (i = 0; i < sp->len; i++) 63794165079bSFlorian Westphal xfrm_state_hold(sp->xvec[i]); 63804165079bSFlorian Westphal } 63814165079bSFlorian Westphal #endif 6382df5042f4SFlorian Westphal __skb_ext_put(old); 6383df5042f4SFlorian Westphal return new; 6384df5042f4SFlorian Westphal } 6385df5042f4SFlorian Westphal 6386df5042f4SFlorian Westphal /** 63878b69a803SPaolo Abeni * __skb_ext_set - attach the specified extension storage to this skb 63888b69a803SPaolo Abeni * @skb: buffer 63898b69a803SPaolo Abeni * @id: extension id 63908b69a803SPaolo Abeni * @ext: extension storage previously allocated via __skb_ext_alloc() 63918b69a803SPaolo Abeni * 63928b69a803SPaolo Abeni * Existing extensions, if any, are cleared. 63938b69a803SPaolo Abeni * 63948b69a803SPaolo Abeni * Returns the pointer to the extension. 
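 * Note: ownership of @ext passes to the skb; the caller's reference is
 * consumed, not duplicated.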
63958b69a803SPaolo Abeni */ 63968b69a803SPaolo Abeni void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, 63978b69a803SPaolo Abeni struct skb_ext *ext) 63988b69a803SPaolo Abeni { 63998b69a803SPaolo Abeni unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext); 64008b69a803SPaolo Abeni 64018b69a803SPaolo Abeni skb_ext_put(skb); 64028b69a803SPaolo Abeni newlen = newoff + skb_ext_type_len[id]; 64038b69a803SPaolo Abeni ext->chunks = newlen; 64048b69a803SPaolo Abeni ext->offset[id] = newoff; 64058b69a803SPaolo Abeni skb->extensions = ext; 64068b69a803SPaolo Abeni skb->active_extensions = 1 << id; 64078b69a803SPaolo Abeni return skb_ext_get_ptr(ext, id); 64088b69a803SPaolo Abeni } 64098b69a803SPaolo Abeni 64108b69a803SPaolo Abeni /** 6411df5042f4SFlorian Westphal * skb_ext_add - allocate space for given extension, COW if needed 6412df5042f4SFlorian Westphal * @skb: buffer 6413df5042f4SFlorian Westphal * @id: extension to allocate space for 6414df5042f4SFlorian Westphal * 6415df5042f4SFlorian Westphal * Allocates enough space for the given extension. 6416df5042f4SFlorian Westphal * If the extension is already present, a pointer to that extension 6417df5042f4SFlorian Westphal * is returned. 6418df5042f4SFlorian Westphal * 6419df5042f4SFlorian Westphal * If the skb was cloned, COW applies and the returned memory can be 6420df5042f4SFlorian Westphal * modified without changing the extension space of cloned buffers. 6421df5042f4SFlorian Westphal * 6422df5042f4SFlorian Westphal * Returns pointer to the extension or NULL on allocation failure. 6423df5042f4SFlorian Westphal */ 6424df5042f4SFlorian Westphal void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) 6425df5042f4SFlorian Westphal { 6426df5042f4SFlorian Westphal struct skb_ext *new, *old = NULL; 6427df5042f4SFlorian Westphal unsigned int newlen, newoff; 6428df5042f4SFlorian Westphal 6429df5042f4SFlorian Westphal if (skb->active_extensions) { 6430df5042f4SFlorian Westphal old = skb->extensions; 6431df5042f4SFlorian Westphal 64324165079bSFlorian Westphal new = skb_ext_maybe_cow(old, skb->active_extensions); 6433df5042f4SFlorian Westphal if (!new) 6434df5042f4SFlorian Westphal return NULL; 6435df5042f4SFlorian Westphal 6436682ec859SPaolo Abeni if (__skb_ext_exist(new, id)) 6437df5042f4SFlorian Westphal goto set_active; 6438df5042f4SFlorian Westphal 6439e94e50bdSPaolo Abeni newoff = new->chunks; 6440df5042f4SFlorian Westphal } else { 6441df5042f4SFlorian Westphal newoff = SKB_EXT_CHUNKSIZEOF(*new); 6442df5042f4SFlorian Westphal 64434930f483SFlorian Westphal new = __skb_ext_alloc(GFP_ATOMIC); 6444df5042f4SFlorian Westphal if (!new) 6445df5042f4SFlorian Westphal return NULL; 6446df5042f4SFlorian Westphal } 6447df5042f4SFlorian Westphal 6448df5042f4SFlorian Westphal newlen = newoff + skb_ext_type_len[id]; 6449df5042f4SFlorian Westphal new->chunks = newlen; 6450df5042f4SFlorian Westphal new->offset[id] = newoff; 6451df5042f4SFlorian Westphal set_active: 6452682ec859SPaolo Abeni skb->extensions = new; 6453df5042f4SFlorian Westphal skb->active_extensions |= 1 << id; 6454df5042f4SFlorian Westphal return skb_ext_get_ptr(new, id); 6455df5042f4SFlorian Westphal } 6456df5042f4SFlorian Westphal EXPORT_SYMBOL(skb_ext_add); 6457df5042f4SFlorian Westphal 64584165079bSFlorian Westphal #ifdef CONFIG_XFRM 64594165079bSFlorian Westphal static void skb_ext_put_sp(struct sec_path *sp) 64604165079bSFlorian Westphal { 64614165079bSFlorian Westphal unsigned int i; 64624165079bSFlorian Westphal 64634165079bSFlorian Westphal for (i = 0; i < sp->len; i++)
64644165079bSFlorian Westphal xfrm_state_put(sp->xvec[i]); 64654165079bSFlorian Westphal } 64664165079bSFlorian Westphal #endif 64674165079bSFlorian Westphal 6468df5042f4SFlorian Westphal void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) 6469df5042f4SFlorian Westphal { 6470df5042f4SFlorian Westphal struct skb_ext *ext = skb->extensions; 6471df5042f4SFlorian Westphal 6472df5042f4SFlorian Westphal skb->active_extensions &= ~(1 << id); 6473df5042f4SFlorian Westphal if (skb->active_extensions == 0) { 6474df5042f4SFlorian Westphal skb->extensions = NULL; 6475df5042f4SFlorian Westphal __skb_ext_put(ext); 64764165079bSFlorian Westphal #ifdef CONFIG_XFRM 64774165079bSFlorian Westphal } else if (id == SKB_EXT_SEC_PATH && 64784165079bSFlorian Westphal refcount_read(&ext->refcnt) == 1) { 64794165079bSFlorian Westphal struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH); 64804165079bSFlorian Westphal 64814165079bSFlorian Westphal skb_ext_put_sp(sp); 64824165079bSFlorian Westphal sp->len = 0; 64834165079bSFlorian Westphal #endif 6484df5042f4SFlorian Westphal } 6485df5042f4SFlorian Westphal } 6486df5042f4SFlorian Westphal EXPORT_SYMBOL(__skb_ext_del); 6487df5042f4SFlorian Westphal 6488df5042f4SFlorian Westphal void __skb_ext_put(struct skb_ext *ext) 6489df5042f4SFlorian Westphal { 6490df5042f4SFlorian Westphal /* If this is last clone, nothing can increment 6491df5042f4SFlorian Westphal * it after check passes. Avoids one atomic op. 6492df5042f4SFlorian Westphal */ 6493df5042f4SFlorian Westphal if (refcount_read(&ext->refcnt) == 1) 6494df5042f4SFlorian Westphal goto free_now; 6495df5042f4SFlorian Westphal 6496df5042f4SFlorian Westphal if (!refcount_dec_and_test(&ext->refcnt)) 6497df5042f4SFlorian Westphal return; 6498df5042f4SFlorian Westphal free_now: 64994165079bSFlorian Westphal #ifdef CONFIG_XFRM 65004165079bSFlorian Westphal if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH)) 65014165079bSFlorian Westphal skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH)); 65024165079bSFlorian Westphal #endif 65034165079bSFlorian Westphal 6504df5042f4SFlorian Westphal kmem_cache_free(skbuff_ext_cache, ext); 6505df5042f4SFlorian Westphal } 6506df5042f4SFlorian Westphal EXPORT_SYMBOL(__skb_ext_put); 6507df5042f4SFlorian Westphal #endif /* CONFIG_SKB_EXTENSIONS */ 6508