// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>

#include "datagram.h"

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

#define NAPI_SKB_CACHE_SIZE	64
#define NAPI_SKB_CACHE_BULK	16
#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)

struct napi_alloc_cache {
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
				unsigned int align_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
}

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	fragsz = SKB_DATA_ALIGN(fragsz);

	return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);

void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	struct page_frag_cache *nc;
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);
	if (in_irq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
	} else {
		local_bh_disable();
		data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(__netdev_alloc_frag_align);

static struct sk_buff *napi_skb_cache_get(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;
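
	/* When the per-CPU cache is empty, refill it with one bulk
	 * allocation from the skbuff_head_cache slab before handing
	 * out an skb.
	 */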
	if (unlikely(!nc->skb_count))
		nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
						      GFP_ATOMIC,
						      NAPI_SKB_CACHE_BULK,
						      nc->skb_cache);
	if (unlikely(!nc->skb_count))
		return NULL;

	skb = nc->skb_cache[--nc->skb_count];
	kasan_unpoison_object_data(skbuff_head_cache, skb);

	return skb;
}

/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	unsigned int size = frag_size ? : ksize(data);

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	skb_set_kcov_handle(skb, kcov_common_handle());
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes:
 *  Before IO, the driver allocates only the data buffer where the NIC will
 *  place the incoming frame.
 *  The driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 *  After IO, the driver calls build_skb() to allocate the sk_buff and
 *  populate it before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 *  (An illustrative driver-side sketch follows build_skb_around() below.)
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/* build_skb() is a wrapper over __build_skb() that specifically
 * takes care of skb->head_frag and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc().
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);

/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	__build_skb_around(skb, data, frag_size);

	if (frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);
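
/* Illustrative sketch (not part of this file): the driver-side RX flow that
 * the notes above __build_skb() describe, for a hypothetical driver. The
 * frag cache, "frame_len" and the NAPI/netdev variables are assumptions.
 *
 *	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + frame_len) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *buf = page_frag_alloc(&rx_frag_cache, truesize, GFP_ATOMIC);
 *
 *	// ... NIC DMAs the received frame to buf + NET_SKB_PAD ...
 *
 *	struct sk_buff *skb = build_skb(buf, truesize);
 *	if (unlikely(!skb)) {
 *		skb_free_frag(buf);
 *		return;
 *	}
 *	skb_reserve(skb, NET_SKB_PAD);
 *	skb_put(skb, frame_len);
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	napi_gro_receive(&napi, skb);
 */
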
/**
 * __napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Version of __build_skb() that uses NAPI percpu caches to obtain
 * skbuff_head instead of inplace allocation.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = napi_skb_cache_get();
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/**
 * napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Version of __napi_build_skb() that takes care of skb->head_frag
 * and skb->pfmemalloc when the data is a page or page fragment.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __napi_build_skb(data, frag_size);

	if (likely(skb) && frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}

	return skb;
}
EXPORT_SYMBOL(napi_build_skb);

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
			     bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;
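	/* SKB_ALLOC_FCLONE allocates the skb together with its future clone
	 * as one struct sk_buff_fclones, so a later skb_clone() needs no
	 * separate head allocation.
	 */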

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
		skb = napi_skb_cache_get();
	else
		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
	if (unlikely(!skb))
		return NULL;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (unlikely(!data))
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, 0);
	skb->pfmemalloc = pfmemalloc;

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}

	return skb;

nodata:
	kmem_cache_free(cache, skb);
	return NULL;
}
EXPORT_SYMBOL(__alloc_skb);
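
/* Illustrative sketch (an assumption, not taken from this file): typical use
 * of the alloc_skb() wrapper around __alloc_skb() when building a packet.
 * "hlen", "dlen" and "payload" are hypothetical header/payload variables.
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);			// headroom for headers
 *	skb_put_data(skb, payload, dlen);	// append payload at the tail
 *	skb_push(skb, hlen);			// then prepend the headers
 *	...
 *	kfree_skb(skb);				// on an error path
 */
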
/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_irq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
	} else {
		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
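
/* Illustrative sketch (an assumption, not taken from this file): refilling an
 * RX ring with the netdev_alloc_skb() wrapper, which calls
 * __netdev_alloc_skb() with GFP_ATOMIC. "rx_buf_len" and "dma_dev" are
 * hypothetical.
 *
 *	struct sk_buff *skb = netdev_alloc_skb(netdev, rx_buf_len);
 *	if (unlikely(!skb))
 *		return -ENOMEM;
 *	// NET_SKB_PAD headroom is already reserved by __netdev_alloc_skb()
 *	dma_addr_t dma = dma_map_single(dma_dev, skb->data, rx_buf_len,
 *					DMA_FROM_DEVICE);
 */
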
/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation.  By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
				  NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	nc = this_cpu_ptr(&napi_alloc_cache);
	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __napi_build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag)
		skb_free_frag(head);
	else
		kfree(head);
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	skb_zcopy_clear(skb, true);

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_free_head(skb);
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);
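
/* Illustrative sketch (an assumption, not taken from this file): choosing
 * between kfree_skb() above and consume_skb() further below in a hypothetical
 * transmit path, so drop monitoring only sees genuine drops.
 *
 *	if (unlikely(!netif_running(dev))) {
 *		kfree_skb(skb);		// error path: traced as a drop
 *		return NETDEV_TX_OK;
 *	}
 *	...
 *	consume_skb(skb);		// TX completion: normal end of life
 */
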
/* Dump skb information and contents.
 *
 * Must only be called from net_ratelimit()-ed paths.
 *
 * Dumps whole packets if full_pkt, only headers otherwise.
 */
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct net_device *dev = skb->dev;
	struct sock *sk = skb->sk;
	struct sk_buff *list_skb;
	bool has_mac, has_trans;
	int headroom, tailroom;
	int i, len, seg_len;

	if (full_pkt)
		len = skb->len;
	else
		len = min_t(int, skb->len, MAX_HEADER + 128);

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	has_mac = skb_mac_header_was_set(skb);
	has_trans = skb_transport_header_was_set(skb);

	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
	       "mac=(%d,%d) net=(%d,%d) trans=%d\n"
	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
	       "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
	       level, skb->len, headroom, skb_headlen(skb), tailroom,
	       has_mac ? skb->mac_header : -1,
	       has_mac ? skb_mac_header_len(skb) : -1,
	       skb->network_header,
	       has_trans ? skb_network_header_len(skb) : -1,
	       has_trans ? skb->transport_header : -1,
	       sh->tx_flags, sh->nr_frags,
	       sh->gso_size, sh->gso_type, sh->gso_segs,
	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
	       skb->csum_valid, skb->csum_level,
	       skb->hash, skb->sw_hash, skb->l4_hash,
	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);

	if (dev)
		printk("%sdev name=%s feat=0x%pNF\n",
		       level, dev->name, &dev->features);
	if (sk)
		printk("%ssk family=%hu type=%u proto=%u\n",
		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);

	if (full_pkt && headroom)
		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->head, headroom, false);

	seg_len = min_t(int, skb_headlen(skb), len);
	if (seg_len)
		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->data, seg_len, false);
	len -= seg_len;

	if (full_pkt && tailroom)
		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb_tail_pointer(skb), tailroom, false);

	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(frag, skb_frag_off(frag),
				      skb_frag_size(frag), p, p_off, p_len,
				      copied) {
			seg_len = min_t(int, p_len, len);
			vaddr = kmap_atomic(p);
			print_hex_dump(level, "skb frag:     ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, vaddr + p_off, seg_len, false);
			kunmap_atomic(vaddr);
			len -= seg_len;
			if (!len)
				break;
		}
	}

	if (full_pkt && skb_has_frag_list(skb)) {
		printk("skb fraglist:\n");
		skb_walk_frags(skb, list_skb)
			skb_dump(level, list_skb, true);
	}
}
EXPORT_SYMBOL(skb_dump);
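
/* Illustrative sketch (an assumption, not taken from this file): the
 * rate-limited debug use that the comment above skb_dump() requires.
 *
 *	if (net_ratelimit())
 *		skb_dump(KERN_ERR, skb, false);	// headers only
 */
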
/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	skb_zcopy_clear(skb, true);
}
EXPORT_SYMBOL(skb_tx_error);

#ifdef CONFIG_TRACEPOINTS
/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero
 *	Functions identically to kfree_skb, but kfree_skb assumes that the frame
 *	is being dropped after a failure and notes that
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
#endif

/**
 *	__consume_stateless_skb - free an skbuff, assuming it is stateless
 *	@skb: buffer to free
 *
 *	Alike consume_skb(), but this variant assumes that this is the last
 *	skb reference and all the head states have been already dropped
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb);
	skb_release_data(skb);
	kfree_skbmem(skb);
}

static void napi_skb_cache_put(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	u32 i;

	kasan_poison_object_data(skbuff_head_cache, skb);
	nc->skb_cache[nc->skb_count++] = skb;

	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
			kasan_unpoison_object_data(skbuff_head_cache,
						   nc->skb_cache[i]);

		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_HALF,
				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
		nc->skb_count = NAPI_SKB_CACHE_HALF;
	}
}

void __kfree_skb_defer(struct sk_buff *skb)
{
	skb_release_all(skb);
	napi_skb_cache_put(skb);
}

void napi_skb_free_stolen_head(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	skb_ext_put(skb);
	napi_skb_cache_put(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	/* Zero budget indicates a non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	lockdep_assert_in_softirq();

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb);

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	skb_release_all(skb);
	napi_skb_cache_put(skb);
}
EXPORT_SYMBOL(napi_consume_skb);

/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp		= old->tstamp;
	/* We do not copy old->sk */
	new->dev		= old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
	__skb_ext_copy(new, old);
	__nf_copy(new, old, false);

	/* Note : this field could be in headers_start/headers_end section
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif

}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->peeked = 0;
	C(pfmemalloc);
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	refcount_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
 * @first: first sk_buff of the msg
 */
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
{
	struct sk_buff *n;

	n = alloc_skb(0, GFP_ATOMIC);
	if (!n)
		return NULL;

	n->len = first->len;
	n->data_len = first->len;
	n->truesize = first->truesize;

	skb_shinfo(n)->frag_list = first;

	__copy_skb_header(n, first);
	n->destructor = NULL;

	return n;
1087da29e4b4SJakub Kicinski } 1088da29e4b4SJakub Kicinski EXPORT_SYMBOL_GPL(alloc_skb_for_msg); 1089da29e4b4SJakub Kicinski 1090da29e4b4SJakub Kicinski /** 1091e0053ec0SHerbert Xu * skb_morph - morph one skb into another 1092e0053ec0SHerbert Xu * @dst: the skb to receive the contents 1093e0053ec0SHerbert Xu * @src: the skb to supply the contents 1094e0053ec0SHerbert Xu * 1095e0053ec0SHerbert Xu * This is identical to skb_clone except that the target skb is 1096e0053ec0SHerbert Xu * supplied by the user. 1097e0053ec0SHerbert Xu * 1098e0053ec0SHerbert Xu * The target skb is returned upon exit. 1099e0053ec0SHerbert Xu */ 1100e0053ec0SHerbert Xu struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) 1101e0053ec0SHerbert Xu { 11022d4baff8SHerbert Xu skb_release_all(dst); 1103e0053ec0SHerbert Xu return __skb_clone(dst, src); 1104e0053ec0SHerbert Xu } 1105e0053ec0SHerbert Xu EXPORT_SYMBOL_GPL(skb_morph); 1106e0053ec0SHerbert Xu 11076f89dbceSSowmini Varadhan int mm_account_pinned_pages(struct mmpin *mmp, size_t size) 1108a91dbff5SWillem de Bruijn { 1109a91dbff5SWillem de Bruijn unsigned long max_pg, num_pg, new_pg, old_pg; 1110a91dbff5SWillem de Bruijn struct user_struct *user; 1111a91dbff5SWillem de Bruijn 1112a91dbff5SWillem de Bruijn if (capable(CAP_IPC_LOCK) || !size) 1113a91dbff5SWillem de Bruijn return 0; 1114a91dbff5SWillem de Bruijn 1115a91dbff5SWillem de Bruijn num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */ 1116a91dbff5SWillem de Bruijn max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; 1117a91dbff5SWillem de Bruijn user = mmp->user ? : current_user(); 1118a91dbff5SWillem de Bruijn 1119a91dbff5SWillem de Bruijn do { 1120a91dbff5SWillem de Bruijn old_pg = atomic_long_read(&user->locked_vm); 1121a91dbff5SWillem de Bruijn new_pg = old_pg + num_pg; 1122a91dbff5SWillem de Bruijn if (new_pg > max_pg) 1123a91dbff5SWillem de Bruijn return -ENOBUFS; 1124a91dbff5SWillem de Bruijn } while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) != 1125a91dbff5SWillem de Bruijn old_pg); 1126a91dbff5SWillem de Bruijn 1127a91dbff5SWillem de Bruijn if (!mmp->user) { 1128a91dbff5SWillem de Bruijn mmp->user = get_uid(user); 1129a91dbff5SWillem de Bruijn mmp->num_pg = num_pg; 1130a91dbff5SWillem de Bruijn } else { 1131a91dbff5SWillem de Bruijn mmp->num_pg += num_pg; 1132a91dbff5SWillem de Bruijn } 1133a91dbff5SWillem de Bruijn 1134a91dbff5SWillem de Bruijn return 0; 1135a91dbff5SWillem de Bruijn } 11366f89dbceSSowmini Varadhan EXPORT_SYMBOL_GPL(mm_account_pinned_pages); 1137a91dbff5SWillem de Bruijn 11386f89dbceSSowmini Varadhan void mm_unaccount_pinned_pages(struct mmpin *mmp) 1139a91dbff5SWillem de Bruijn { 1140a91dbff5SWillem de Bruijn if (mmp->user) { 1141a91dbff5SWillem de Bruijn atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); 1142a91dbff5SWillem de Bruijn free_uid(mmp->user); 1143a91dbff5SWillem de Bruijn } 1144a91dbff5SWillem de Bruijn } 11456f89dbceSSowmini Varadhan EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages); 1146a91dbff5SWillem de Bruijn 11478c793822SJonathan Lemon struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size) 114852267790SWillem de Bruijn { 114952267790SWillem de Bruijn struct ubuf_info *uarg; 115052267790SWillem de Bruijn struct sk_buff *skb; 115152267790SWillem de Bruijn 115252267790SWillem de Bruijn WARN_ON_ONCE(!in_task()); 115352267790SWillem de Bruijn 115452267790SWillem de Bruijn skb = sock_omalloc(sk, 0, GFP_KERNEL); 115552267790SWillem de Bruijn if (!skb) 115652267790SWillem de Bruijn return NULL; 115752267790SWillem de Bruijn 115852267790SWillem 
de Bruijn BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); 115952267790SWillem de Bruijn uarg = (void *)skb->cb; 1160a91dbff5SWillem de Bruijn uarg->mmp.user = NULL; 1161a91dbff5SWillem de Bruijn 1162a91dbff5SWillem de Bruijn if (mm_account_pinned_pages(&uarg->mmp, size)) { 1163a91dbff5SWillem de Bruijn kfree_skb(skb); 1164a91dbff5SWillem de Bruijn return NULL; 1165a91dbff5SWillem de Bruijn } 116652267790SWillem de Bruijn 11678c793822SJonathan Lemon uarg->callback = msg_zerocopy_callback; 11684ab6c99dSWillem de Bruijn uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; 11694ab6c99dSWillem de Bruijn uarg->len = 1; 11704ab6c99dSWillem de Bruijn uarg->bytelen = size; 117152267790SWillem de Bruijn uarg->zerocopy = 1; 117204c2d33eSJonathan Lemon uarg->flags = SKBFL_ZEROCOPY_FRAG; 1173c1d1b437SEric Dumazet refcount_set(&uarg->refcnt, 1); 117452267790SWillem de Bruijn sock_hold(sk); 117552267790SWillem de Bruijn 117652267790SWillem de Bruijn return uarg; 117752267790SWillem de Bruijn } 11788c793822SJonathan Lemon EXPORT_SYMBOL_GPL(msg_zerocopy_alloc); 117952267790SWillem de Bruijn 118052267790SWillem de Bruijn static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg) 118152267790SWillem de Bruijn { 118252267790SWillem de Bruijn return container_of((void *)uarg, struct sk_buff, cb); 118352267790SWillem de Bruijn } 118452267790SWillem de Bruijn 11858c793822SJonathan Lemon struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, 11864ab6c99dSWillem de Bruijn struct ubuf_info *uarg) 11874ab6c99dSWillem de Bruijn { 11884ab6c99dSWillem de Bruijn if (uarg) { 11894ab6c99dSWillem de Bruijn const u32 byte_limit = 1 << 19; /* limit to a few TSO */ 11904ab6c99dSWillem de Bruijn u32 bytelen, next; 11914ab6c99dSWillem de Bruijn 11924ab6c99dSWillem de Bruijn /* realloc only when socket is locked (TCP, UDP cork), 11934ab6c99dSWillem de Bruijn * so uarg->len and sk_zckey access is serialized 11944ab6c99dSWillem de Bruijn */ 11954ab6c99dSWillem de Bruijn if (!sock_owned_by_user(sk)) { 11964ab6c99dSWillem de Bruijn WARN_ON_ONCE(1); 11974ab6c99dSWillem de Bruijn return NULL; 11984ab6c99dSWillem de Bruijn } 11994ab6c99dSWillem de Bruijn 12004ab6c99dSWillem de Bruijn bytelen = uarg->bytelen + size; 12014ab6c99dSWillem de Bruijn if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) { 12024ab6c99dSWillem de Bruijn /* TCP can create new skb to attach new uarg */ 12034ab6c99dSWillem de Bruijn if (sk->sk_type == SOCK_STREAM) 12044ab6c99dSWillem de Bruijn goto new_alloc; 12054ab6c99dSWillem de Bruijn return NULL; 12064ab6c99dSWillem de Bruijn } 12074ab6c99dSWillem de Bruijn 12084ab6c99dSWillem de Bruijn next = (u32)atomic_read(&sk->sk_zckey); 12094ab6c99dSWillem de Bruijn if ((u32)(uarg->id + uarg->len) == next) { 1210a91dbff5SWillem de Bruijn if (mm_account_pinned_pages(&uarg->mmp, size)) 1211a91dbff5SWillem de Bruijn return NULL; 12124ab6c99dSWillem de Bruijn uarg->len++; 12134ab6c99dSWillem de Bruijn uarg->bytelen = bytelen; 12144ab6c99dSWillem de Bruijn atomic_set(&sk->sk_zckey, ++next); 1215100f6d8eSWillem de Bruijn 1216100f6d8eSWillem de Bruijn /* no extra ref when appending to datagram (MSG_MORE) */ 1217100f6d8eSWillem de Bruijn if (sk->sk_type == SOCK_STREAM) 12188e044917SJonathan Lemon net_zcopy_get(uarg); 1219100f6d8eSWillem de Bruijn 12204ab6c99dSWillem de Bruijn return uarg; 12214ab6c99dSWillem de Bruijn } 12224ab6c99dSWillem de Bruijn } 12234ab6c99dSWillem de Bruijn 12244ab6c99dSWillem de Bruijn new_alloc: 12258c793822SJonathan Lemon return msg_zerocopy_alloc(sk, size); 
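	/*
	 * (Editor's summary, not part of the original source.) The code above
	 * either coalesces the new bytes into the existing @uarg - only
	 * possible while the socket is locked and the request extends the
	 * still-pending notification id range within the byte/len limits -
	 * or falls through to new_alloc and attaches a fresh ubuf_info.
	 */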
12264ab6c99dSWillem de Bruijn } 12278c793822SJonathan Lemon EXPORT_SYMBOL_GPL(msg_zerocopy_realloc); 12284ab6c99dSWillem de Bruijn 12294ab6c99dSWillem de Bruijn static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) 12304ab6c99dSWillem de Bruijn { 12314ab6c99dSWillem de Bruijn struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); 12324ab6c99dSWillem de Bruijn u32 old_lo, old_hi; 12334ab6c99dSWillem de Bruijn u64 sum_len; 12344ab6c99dSWillem de Bruijn 12354ab6c99dSWillem de Bruijn old_lo = serr->ee.ee_info; 12364ab6c99dSWillem de Bruijn old_hi = serr->ee.ee_data; 12374ab6c99dSWillem de Bruijn sum_len = old_hi - old_lo + 1ULL + len; 12384ab6c99dSWillem de Bruijn 12394ab6c99dSWillem de Bruijn if (sum_len >= (1ULL << 32)) 12404ab6c99dSWillem de Bruijn return false; 12414ab6c99dSWillem de Bruijn 12424ab6c99dSWillem de Bruijn if (lo != old_hi + 1) 12434ab6c99dSWillem de Bruijn return false; 12444ab6c99dSWillem de Bruijn 12454ab6c99dSWillem de Bruijn serr->ee.ee_data += len; 12464ab6c99dSWillem de Bruijn return true; 12474ab6c99dSWillem de Bruijn } 12484ab6c99dSWillem de Bruijn 12498c793822SJonathan Lemon static void __msg_zerocopy_callback(struct ubuf_info *uarg) 125052267790SWillem de Bruijn { 12514ab6c99dSWillem de Bruijn struct sk_buff *tail, *skb = skb_from_uarg(uarg); 125252267790SWillem de Bruijn struct sock_exterr_skb *serr; 125352267790SWillem de Bruijn struct sock *sk = skb->sk; 12544ab6c99dSWillem de Bruijn struct sk_buff_head *q; 12554ab6c99dSWillem de Bruijn unsigned long flags; 12564ab6c99dSWillem de Bruijn u32 lo, hi; 12574ab6c99dSWillem de Bruijn u16 len; 125852267790SWillem de Bruijn 1259ccaffff1SWillem de Bruijn mm_unaccount_pinned_pages(&uarg->mmp); 1260ccaffff1SWillem de Bruijn 12614ab6c99dSWillem de Bruijn /* if !len, there was only 1 call, and it was aborted 12624ab6c99dSWillem de Bruijn * so do not queue a completion notification 12634ab6c99dSWillem de Bruijn */ 12644ab6c99dSWillem de Bruijn if (!uarg->len || sock_flag(sk, SOCK_DEAD)) 126552267790SWillem de Bruijn goto release; 126652267790SWillem de Bruijn 12674ab6c99dSWillem de Bruijn len = uarg->len; 12684ab6c99dSWillem de Bruijn lo = uarg->id; 12694ab6c99dSWillem de Bruijn hi = uarg->id + len - 1; 12704ab6c99dSWillem de Bruijn 127152267790SWillem de Bruijn serr = SKB_EXT_ERR(skb); 127252267790SWillem de Bruijn memset(serr, 0, sizeof(*serr)); 127352267790SWillem de Bruijn serr->ee.ee_errno = 0; 127452267790SWillem de Bruijn serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; 12754ab6c99dSWillem de Bruijn serr->ee.ee_data = hi; 12764ab6c99dSWillem de Bruijn serr->ee.ee_info = lo; 127775518851SJonathan Lemon if (!uarg->zerocopy) 127852267790SWillem de Bruijn serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; 127952267790SWillem de Bruijn 12804ab6c99dSWillem de Bruijn q = &sk->sk_error_queue; 12814ab6c99dSWillem de Bruijn spin_lock_irqsave(&q->lock, flags); 12824ab6c99dSWillem de Bruijn tail = skb_peek_tail(q); 12834ab6c99dSWillem de Bruijn if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || 12844ab6c99dSWillem de Bruijn !skb_zerocopy_notify_extend(tail, lo, len)) { 12854ab6c99dSWillem de Bruijn __skb_queue_tail(q, skb); 128652267790SWillem de Bruijn skb = NULL; 12874ab6c99dSWillem de Bruijn } 12884ab6c99dSWillem de Bruijn spin_unlock_irqrestore(&q->lock, flags); 128952267790SWillem de Bruijn 129052267790SWillem de Bruijn sk->sk_error_report(sk); 129152267790SWillem de Bruijn 129252267790SWillem de Bruijn release: 129352267790SWillem de Bruijn consume_skb(skb); 129452267790SWillem de Bruijn 
sock_put(sk); 129552267790SWillem de Bruijn } 129675518851SJonathan Lemon 12978c793822SJonathan Lemon void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, 129836177832SJonathan Lemon bool success) 129975518851SJonathan Lemon { 130075518851SJonathan Lemon uarg->zerocopy = uarg->zerocopy & success; 130175518851SJonathan Lemon 130275518851SJonathan Lemon if (refcount_dec_and_test(&uarg->refcnt)) 13038c793822SJonathan Lemon __msg_zerocopy_callback(uarg); 130475518851SJonathan Lemon } 13058c793822SJonathan Lemon EXPORT_SYMBOL_GPL(msg_zerocopy_callback); 130652267790SWillem de Bruijn 13078c793822SJonathan Lemon void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref) 130852267790SWillem de Bruijn { 130952267790SWillem de Bruijn struct sock *sk = skb_from_uarg(uarg)->sk; 131052267790SWillem de Bruijn 131152267790SWillem de Bruijn atomic_dec(&sk->sk_zckey); 13124ab6c99dSWillem de Bruijn uarg->len--; 131352267790SWillem de Bruijn 131452900d22SWillem de Bruijn if (have_uref) 13158c793822SJonathan Lemon msg_zerocopy_callback(NULL, uarg, true); 131652267790SWillem de Bruijn } 13178c793822SJonathan Lemon EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort); 131852267790SWillem de Bruijn 1319b5947e5dSWillem de Bruijn int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len) 1320b5947e5dSWillem de Bruijn { 1321b5947e5dSWillem de Bruijn return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len); 1322b5947e5dSWillem de Bruijn } 1323b5947e5dSWillem de Bruijn EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram); 1324b5947e5dSWillem de Bruijn 132552267790SWillem de Bruijn int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, 132652267790SWillem de Bruijn struct msghdr *msg, int len, 132752267790SWillem de Bruijn struct ubuf_info *uarg) 132852267790SWillem de Bruijn { 13294ab6c99dSWillem de Bruijn struct ubuf_info *orig_uarg = skb_zcopy(skb); 133052267790SWillem de Bruijn struct iov_iter orig_iter = msg->msg_iter; 133152267790SWillem de Bruijn int err, orig_len = skb->len; 133252267790SWillem de Bruijn 13334ab6c99dSWillem de Bruijn /* An skb can only point to one uarg. This edge case happens when 13344ab6c99dSWillem de Bruijn * TCP appends to an skb, but zerocopy_realloc triggered a new alloc. 13354ab6c99dSWillem de Bruijn */ 13364ab6c99dSWillem de Bruijn if (orig_uarg && uarg != orig_uarg) 13374ab6c99dSWillem de Bruijn return -EEXIST; 13384ab6c99dSWillem de Bruijn 133952267790SWillem de Bruijn err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len); 134052267790SWillem de Bruijn if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { 134154d43117SWillem de Bruijn struct sock *save_sk = skb->sk; 134254d43117SWillem de Bruijn 134352267790SWillem de Bruijn /* Streams do not free skb on error. Reset to prev state. 
*/ 134452267790SWillem de Bruijn msg->msg_iter = orig_iter; 134554d43117SWillem de Bruijn skb->sk = sk; 134652267790SWillem de Bruijn ___pskb_trim(skb, orig_len); 134754d43117SWillem de Bruijn skb->sk = save_sk; 134852267790SWillem de Bruijn return err; 134952267790SWillem de Bruijn } 135052267790SWillem de Bruijn 135152900d22SWillem de Bruijn skb_zcopy_set(skb, uarg, NULL); 135252267790SWillem de Bruijn return skb->len - orig_len; 135352267790SWillem de Bruijn } 135452267790SWillem de Bruijn EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream); 135552267790SWillem de Bruijn 13561f8b977aSWillem de Bruijn static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, 135752267790SWillem de Bruijn gfp_t gfp_mask) 135852267790SWillem de Bruijn { 135952267790SWillem de Bruijn if (skb_zcopy(orig)) { 136052267790SWillem de Bruijn if (skb_zcopy(nskb)) { 136152267790SWillem de Bruijn /* !gfp_mask callers are verified to !skb_zcopy(nskb) */ 136252267790SWillem de Bruijn if (!gfp_mask) { 136352267790SWillem de Bruijn WARN_ON_ONCE(1); 136452267790SWillem de Bruijn return -ENOMEM; 136552267790SWillem de Bruijn } 136652267790SWillem de Bruijn if (skb_uarg(nskb) == skb_uarg(orig)) 136752267790SWillem de Bruijn return 0; 136852267790SWillem de Bruijn if (skb_copy_ubufs(nskb, GFP_ATOMIC)) 136952267790SWillem de Bruijn return -EIO; 137052267790SWillem de Bruijn } 137152900d22SWillem de Bruijn skb_zcopy_set(nskb, skb_uarg(orig), NULL); 137252267790SWillem de Bruijn } 137352267790SWillem de Bruijn return 0; 137452267790SWillem de Bruijn } 137552267790SWillem de Bruijn 13762c53040fSBen Hutchings /** 13772c53040fSBen Hutchings * skb_copy_ubufs - copy userspace skb frags buffers to kernel 137848c83012SMichael S. Tsirkin * @skb: the skb to modify 137948c83012SMichael S. Tsirkin * @gfp_mask: allocation priority 138048c83012SMichael S. Tsirkin * 138106b4feb3SJonathan Lemon * This must be called on skb with SKBFL_ZEROCOPY_ENABLE. 138248c83012SMichael S. Tsirkin * It will copy all frags into kernel and drop the reference 138348c83012SMichael S. Tsirkin * to userspace pages. 138448c83012SMichael S. Tsirkin * 138548c83012SMichael S. Tsirkin * If this function is called from an interrupt gfp_mask() must be 138648c83012SMichael S. Tsirkin * %GFP_ATOMIC. 138748c83012SMichael S. Tsirkin * 138848c83012SMichael S. Tsirkin * Returns 0 on success or a negative error code on failure 138948c83012SMichael S. Tsirkin * to allocate kernel memory to copy to. 139048c83012SMichael S. Tsirkin */ 139148c83012SMichael S. 
Tsirkin int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) 1392a6686f2fSShirley Ma { 1393a6686f2fSShirley Ma int num_frags = skb_shinfo(skb)->nr_frags; 1394a6686f2fSShirley Ma struct page *page, *head = NULL; 13953ece7826SWillem de Bruijn int i, new_frags; 13963ece7826SWillem de Bruijn u32 d_off; 1397a6686f2fSShirley Ma 13983ece7826SWillem de Bruijn if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) 13993ece7826SWillem de Bruijn return -EINVAL; 14003ece7826SWillem de Bruijn 1401f72c4ac6SWillem de Bruijn if (!num_frags) 1402f72c4ac6SWillem de Bruijn goto release; 1403f72c4ac6SWillem de Bruijn 14043ece7826SWillem de Bruijn new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT; 14053ece7826SWillem de Bruijn for (i = 0; i < new_frags; i++) { 140602756ed4SKrishna Kumar page = alloc_page(gfp_mask); 1407a6686f2fSShirley Ma if (!page) { 1408a6686f2fSShirley Ma while (head) { 140940dadff2SSunghan Suh struct page *next = (struct page *)page_private(head); 1410a6686f2fSShirley Ma put_page(head); 1411a6686f2fSShirley Ma head = next; 1412a6686f2fSShirley Ma } 1413a6686f2fSShirley Ma return -ENOMEM; 1414a6686f2fSShirley Ma } 14153ece7826SWillem de Bruijn set_page_private(page, (unsigned long)head); 14163ece7826SWillem de Bruijn head = page; 14173ece7826SWillem de Bruijn } 14183ece7826SWillem de Bruijn 14193ece7826SWillem de Bruijn page = head; 14203ece7826SWillem de Bruijn d_off = 0; 14213ece7826SWillem de Bruijn for (i = 0; i < num_frags; i++) { 14223ece7826SWillem de Bruijn skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 14233ece7826SWillem de Bruijn u32 p_off, p_len, copied; 14243ece7826SWillem de Bruijn struct page *p; 14253ece7826SWillem de Bruijn u8 *vaddr; 1426c613c209SWillem de Bruijn 1427b54c9d5bSJonathan Lemon skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f), 1428c613c209SWillem de Bruijn p, p_off, p_len, copied) { 14293ece7826SWillem de Bruijn u32 copy, done = 0; 1430c613c209SWillem de Bruijn vaddr = kmap_atomic(p); 14313ece7826SWillem de Bruijn 14323ece7826SWillem de Bruijn while (done < p_len) { 14333ece7826SWillem de Bruijn if (d_off == PAGE_SIZE) { 14343ece7826SWillem de Bruijn d_off = 0; 14353ece7826SWillem de Bruijn page = (struct page *)page_private(page); 14363ece7826SWillem de Bruijn } 14373ece7826SWillem de Bruijn copy = min_t(u32, PAGE_SIZE - d_off, p_len - done); 14383ece7826SWillem de Bruijn memcpy(page_address(page) + d_off, 14393ece7826SWillem de Bruijn vaddr + p_off + done, copy); 14403ece7826SWillem de Bruijn done += copy; 14413ece7826SWillem de Bruijn d_off += copy; 14423ece7826SWillem de Bruijn } 144351c56b00SEric Dumazet kunmap_atomic(vaddr); 1444c613c209SWillem de Bruijn } 1445a6686f2fSShirley Ma } 1446a6686f2fSShirley Ma 1447a6686f2fSShirley Ma /* skb frags release userspace buffers */ 144802756ed4SKrishna Kumar for (i = 0; i < num_frags; i++) 1449a8605c60SIan Campbell skb_frag_unref(skb, i); 1450a6686f2fSShirley Ma 1451a6686f2fSShirley Ma /* skb frags point to kernel buffers */ 14523ece7826SWillem de Bruijn for (i = 0; i < new_frags - 1; i++) { 14533ece7826SWillem de Bruijn __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE); 145440dadff2SSunghan Suh head = (struct page *)page_private(head); 1455a6686f2fSShirley Ma } 14563ece7826SWillem de Bruijn __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off); 14573ece7826SWillem de Bruijn skb_shinfo(skb)->nr_frags = new_frags; 145848c83012SMichael S. 
Tsirkin 1459b90ddd56SWillem de Bruijn release: 14601f8b977aSWillem de Bruijn skb_zcopy_clear(skb, false); 1461a6686f2fSShirley Ma return 0; 1462a6686f2fSShirley Ma } 1463dcc0fb78SMichael S. Tsirkin EXPORT_SYMBOL_GPL(skb_copy_ubufs); 1464a6686f2fSShirley Ma 1465e0053ec0SHerbert Xu /** 1466e0053ec0SHerbert Xu * skb_clone - duplicate an sk_buff 1467e0053ec0SHerbert Xu * @skb: buffer to clone 1468e0053ec0SHerbert Xu * @gfp_mask: allocation priority 1469e0053ec0SHerbert Xu * 1470e0053ec0SHerbert Xu * Duplicate an &sk_buff. The new one is not owned by a socket. Both 1471e0053ec0SHerbert Xu * copies share the same packet data but not structure. The new 1472e0053ec0SHerbert Xu * buffer has a reference count of 1. If the allocation fails the 1473e0053ec0SHerbert Xu * function returns %NULL otherwise the new buffer is returned. 1474e0053ec0SHerbert Xu * 1475e0053ec0SHerbert Xu * If this function is called from an interrupt gfp_mask() must be 1476e0053ec0SHerbert Xu * %GFP_ATOMIC. 1477e0053ec0SHerbert Xu */ 1478e0053ec0SHerbert Xu 1479e0053ec0SHerbert Xu struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) 1480e0053ec0SHerbert Xu { 1481d0bf4a9eSEric Dumazet struct sk_buff_fclones *fclones = container_of(skb, 1482d0bf4a9eSEric Dumazet struct sk_buff_fclones, 1483d0bf4a9eSEric Dumazet skb1); 14846ffe75ebSEric Dumazet struct sk_buff *n; 1485e0053ec0SHerbert Xu 148670008aa5SMichael S. Tsirkin if (skb_orphan_frags(skb, gfp_mask)) 1487a6686f2fSShirley Ma return NULL; 1488a6686f2fSShirley Ma 1489e0053ec0SHerbert Xu if (skb->fclone == SKB_FCLONE_ORIG && 14902638595aSReshetova, Elena refcount_read(&fclones->fclone_ref) == 1) { 14916ffe75ebSEric Dumazet n = &fclones->skb2; 14922638595aSReshetova, Elena refcount_set(&fclones->fclone_ref, 2); 1493e0053ec0SHerbert Xu } else { 1494c93bdd0eSMel Gorman if (skb_pfmemalloc(skb)) 1495c93bdd0eSMel Gorman gfp_mask |= __GFP_MEMALLOC; 1496c93bdd0eSMel Gorman 1497e0053ec0SHerbert Xu n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); 1498e0053ec0SHerbert Xu if (!n) 1499e0053ec0SHerbert Xu return NULL; 1500fe55f6d5SVegard Nossum 1501e0053ec0SHerbert Xu n->fclone = SKB_FCLONE_UNAVAILABLE; 1502e0053ec0SHerbert Xu } 1503e0053ec0SHerbert Xu 1504e0053ec0SHerbert Xu return __skb_clone(n, skb); 15051da177e4SLinus Torvalds } 1506b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_clone); 15071da177e4SLinus Torvalds 1508b0768a86SToshiaki Makita void skb_headers_offset_update(struct sk_buff *skb, int off) 1509f5b17294SPravin B Shelar { 1510030737bcSEric Dumazet /* Only adjust this if it actually is csum_start rather than csum */ 1511030737bcSEric Dumazet if (skb->ip_summed == CHECKSUM_PARTIAL) 1512030737bcSEric Dumazet skb->csum_start += off; 1513f5b17294SPravin B Shelar /* {transport,network,mac}_header and tail are relative to skb->head */ 1514f5b17294SPravin B Shelar skb->transport_header += off; 1515f5b17294SPravin B Shelar skb->network_header += off; 1516f5b17294SPravin B Shelar if (skb_mac_header_was_set(skb)) 1517f5b17294SPravin B Shelar skb->mac_header += off; 1518f5b17294SPravin B Shelar skb->inner_transport_header += off; 1519f5b17294SPravin B Shelar skb->inner_network_header += off; 1520aefbd2b3SPravin B Shelar skb->inner_mac_header += off; 1521f5b17294SPravin B Shelar } 1522b0768a86SToshiaki Makita EXPORT_SYMBOL(skb_headers_offset_update); 1523f5b17294SPravin B Shelar 152408303c18SIlya Lesokhin void skb_copy_header(struct sk_buff *new, const struct sk_buff *old) 15251da177e4SLinus Torvalds { 1526dec18810SHerbert Xu __copy_skb_header(new, old); 1527dec18810SHerbert Xu 15287967168cSHerbert Xu skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 15297967168cSHerbert Xu skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 15307967168cSHerbert Xu skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 15311da177e4SLinus Torvalds } 153208303c18SIlya Lesokhin EXPORT_SYMBOL(skb_copy_header); 15331da177e4SLinus Torvalds 1534c93bdd0eSMel Gorman static inline int skb_alloc_rx_flag(const struct sk_buff *skb) 1535c93bdd0eSMel Gorman { 1536c93bdd0eSMel Gorman if (skb_pfmemalloc(skb)) 1537c93bdd0eSMel Gorman return SKB_ALLOC_RX; 1538c93bdd0eSMel Gorman return 0; 1539c93bdd0eSMel Gorman } 1540c93bdd0eSMel Gorman 15411da177e4SLinus Torvalds /** 15421da177e4SLinus Torvalds * skb_copy - create private copy of an sk_buff 15431da177e4SLinus Torvalds * @skb: buffer to copy 15441da177e4SLinus Torvalds * @gfp_mask: allocation priority 15451da177e4SLinus Torvalds * 15461da177e4SLinus Torvalds * Make a copy of both an &sk_buff and its data. This is used when the 15471da177e4SLinus Torvalds * caller wishes to modify the data and needs a private copy of the 15481da177e4SLinus Torvalds * data to alter. Returns %NULL on failure or the pointer to the buffer 15491da177e4SLinus Torvalds * on success. The returned buffer has a reference count of 1. 15501da177e4SLinus Torvalds * 15511da177e4SLinus Torvalds * As by-product this function converts non-linear &sk_buff to linear 15521da177e4SLinus Torvalds * one, so that &sk_buff becomes completely private and caller is allowed 15531da177e4SLinus Torvalds * to modify all the data of returned buffer. This means that this 15541da177e4SLinus Torvalds * function is not recommended for use in circumstances when only 15551da177e4SLinus Torvalds * header is going to be modified. Use pskb_copy() instead. 
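 *
 * Illustrative sketch (editor's addition, not part of the original
 * documentation; error handling abbreviated): a caller that needs to rewrite
 * payload bytes of a buffer it may not exclusively own could do
 *
 *	if (skb_shared(skb) || skb_cloned(skb)) {
 *		struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
 *
 *		if (!nskb)
 *			return -ENOMEM;
 *		consume_skb(skb);
 *		skb = nskb;
 *	}
 *
 * after which skb->head, skb->data and the paged data are private to the
 * caller.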
15561da177e4SLinus Torvalds */ 15571da177e4SLinus Torvalds 1558dd0fc66fSAl Viro struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 15591da177e4SLinus Torvalds { 15606602cebbSEric Dumazet int headerlen = skb_headroom(skb); 1561ec47ea82SAlexander Duyck unsigned int size = skb_end_offset(skb) + skb->data_len; 1562c93bdd0eSMel Gorman struct sk_buff *n = __alloc_skb(size, gfp_mask, 1563c93bdd0eSMel Gorman skb_alloc_rx_flag(skb), NUMA_NO_NODE); 15646602cebbSEric Dumazet 15651da177e4SLinus Torvalds if (!n) 15661da177e4SLinus Torvalds return NULL; 15671da177e4SLinus Torvalds 15681da177e4SLinus Torvalds /* Set the data pointer */ 15691da177e4SLinus Torvalds skb_reserve(n, headerlen); 15701da177e4SLinus Torvalds /* Set the tail pointer and length */ 15711da177e4SLinus Torvalds skb_put(n, skb->len); 15721da177e4SLinus Torvalds 15739f77fad3STim Hansen BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); 15741da177e4SLinus Torvalds 157508303c18SIlya Lesokhin skb_copy_header(n, skb); 15761da177e4SLinus Torvalds return n; 15771da177e4SLinus Torvalds } 1578b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy); 15791da177e4SLinus Torvalds 15801da177e4SLinus Torvalds /** 1581bad93e9dSOctavian Purdila * __pskb_copy_fclone - create copy of an sk_buff with private head. 15821da177e4SLinus Torvalds * @skb: buffer to copy 1583117632e6SEric Dumazet * @headroom: headroom of new skb 15841da177e4SLinus Torvalds * @gfp_mask: allocation priority 1585bad93e9dSOctavian Purdila * @fclone: if true allocate the copy of the skb from the fclone 1586bad93e9dSOctavian Purdila * cache instead of the head cache; it is recommended to set this 1587bad93e9dSOctavian Purdila * to true for the cases where the copy will likely be cloned 15881da177e4SLinus Torvalds * 15891da177e4SLinus Torvalds * Make a copy of both an &sk_buff and part of its data, located 15901da177e4SLinus Torvalds * in header. Fragmented data remain shared. This is used when 15911da177e4SLinus Torvalds * the caller wishes to modify only header of &sk_buff and needs 15921da177e4SLinus Torvalds * private copy of the header to alter. Returns %NULL on failure 15931da177e4SLinus Torvalds * or the pointer to the buffer on success. 15941da177e4SLinus Torvalds * The returned buffer has a reference count of 1. 15951da177e4SLinus Torvalds */ 15961da177e4SLinus Torvalds 1597bad93e9dSOctavian Purdila struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, 1598bad93e9dSOctavian Purdila gfp_t gfp_mask, bool fclone) 15991da177e4SLinus Torvalds { 1600117632e6SEric Dumazet unsigned int size = skb_headlen(skb) + headroom; 1601bad93e9dSOctavian Purdila int flags = skb_alloc_rx_flag(skb) | (fclone ? 
SKB_ALLOC_FCLONE : 0); 1602bad93e9dSOctavian Purdila struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); 16036602cebbSEric Dumazet 16041da177e4SLinus Torvalds if (!n) 16051da177e4SLinus Torvalds goto out; 16061da177e4SLinus Torvalds 16071da177e4SLinus Torvalds /* Set the data pointer */ 1608117632e6SEric Dumazet skb_reserve(n, headroom); 16091da177e4SLinus Torvalds /* Set the tail pointer and length */ 16101da177e4SLinus Torvalds skb_put(n, skb_headlen(skb)); 16111da177e4SLinus Torvalds /* Copy the bytes */ 1612d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data(skb, n->data, n->len); 16131da177e4SLinus Torvalds 161425f484a6SHerbert Xu n->truesize += skb->data_len; 16151da177e4SLinus Torvalds n->data_len = skb->data_len; 16161da177e4SLinus Torvalds n->len = skb->len; 16171da177e4SLinus Torvalds 16181da177e4SLinus Torvalds if (skb_shinfo(skb)->nr_frags) { 16191da177e4SLinus Torvalds int i; 16201da177e4SLinus Torvalds 16211f8b977aSWillem de Bruijn if (skb_orphan_frags(skb, gfp_mask) || 16221f8b977aSWillem de Bruijn skb_zerocopy_clone(n, skb, gfp_mask)) { 16231511022cSDan Carpenter kfree_skb(n); 16241511022cSDan Carpenter n = NULL; 1625a6686f2fSShirley Ma goto out; 1626a6686f2fSShirley Ma } 16271da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 16281da177e4SLinus Torvalds skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 1629ea2ab693SIan Campbell skb_frag_ref(skb, i); 16301da177e4SLinus Torvalds } 16311da177e4SLinus Torvalds skb_shinfo(n)->nr_frags = i; 16321da177e4SLinus Torvalds } 16331da177e4SLinus Torvalds 163421dc3301SDavid S. Miller if (skb_has_frag_list(skb)) { 16351da177e4SLinus Torvalds skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 16361da177e4SLinus Torvalds skb_clone_fraglist(n); 16371da177e4SLinus Torvalds } 16381da177e4SLinus Torvalds 163908303c18SIlya Lesokhin skb_copy_header(n, skb); 16401da177e4SLinus Torvalds out: 16411da177e4SLinus Torvalds return n; 16421da177e4SLinus Torvalds } 1643bad93e9dSOctavian Purdila EXPORT_SYMBOL(__pskb_copy_fclone); 16441da177e4SLinus Torvalds 16451da177e4SLinus Torvalds /** 16461da177e4SLinus Torvalds * pskb_expand_head - reallocate header of &sk_buff 16471da177e4SLinus Torvalds * @skb: buffer to reallocate 16481da177e4SLinus Torvalds * @nhead: room to add at head 16491da177e4SLinus Torvalds * @ntail: room to add at tail 16501da177e4SLinus Torvalds * @gfp_mask: allocation priority 16511da177e4SLinus Torvalds * 1652bc32383cSMathias Krause * Expands (or creates identical copy, if @nhead and @ntail are zero) 1653bc32383cSMathias Krause * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have 16541da177e4SLinus Torvalds * reference count of 1. Returns zero in the case of success or error, 16551da177e4SLinus Torvalds * if expansion failed. In the last case, &sk_buff is not changed. 16561da177e4SLinus Torvalds * 16571da177e4SLinus Torvalds * All the pointers pointing into skb header may change and must be 16581da177e4SLinus Torvalds * reloaded after call to this function. 
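 *
 * Example (editor's sketch, not part of the original documentation; "needed"
 * is a hypothetical headroom requirement): growing the headroom of a private,
 * unshared skb before pushing an extra header:
 *
 *	if (skb_headroom(skb) < needed &&
 *	    pskb_expand_head(skb, needed - skb_headroom(skb), 0, GFP_ATOMIC))
 *		goto drop;
 *
 * followed by skb_push(skb, needed), re-deriving any cached header pointers
 * afterwards. In practice most callers reach this function through helpers
 * such as skb_cow() or skb_cow_head(), which wrap this check.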
16591da177e4SLinus Torvalds */ 16601da177e4SLinus Torvalds 166186a76cafSVictor Fusco int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 1662dd0fc66fSAl Viro gfp_t gfp_mask) 16631da177e4SLinus Torvalds { 1664158f323bSEric Dumazet int i, osize = skb_end_offset(skb); 1665158f323bSEric Dumazet int size = osize + nhead + ntail; 16661da177e4SLinus Torvalds long off; 1667158f323bSEric Dumazet u8 *data; 16681da177e4SLinus Torvalds 16694edd87adSHerbert Xu BUG_ON(nhead < 0); 16704edd87adSHerbert Xu 16719f77fad3STim Hansen BUG_ON(skb_shared(skb)); 16721da177e4SLinus Torvalds 16731da177e4SLinus Torvalds size = SKB_DATA_ALIGN(size); 16741da177e4SLinus Torvalds 1675c93bdd0eSMel Gorman if (skb_pfmemalloc(skb)) 1676c93bdd0eSMel Gorman gfp_mask |= __GFP_MEMALLOC; 1677c93bdd0eSMel Gorman data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 1678c93bdd0eSMel Gorman gfp_mask, NUMA_NO_NODE, NULL); 16791da177e4SLinus Torvalds if (!data) 16801da177e4SLinus Torvalds goto nodata; 168187151b86SEric Dumazet size = SKB_WITH_OVERHEAD(ksize(data)); 16821da177e4SLinus Torvalds 16831da177e4SLinus Torvalds /* Copy only real data... and, alas, header. This should be 16846602cebbSEric Dumazet * optimized for the cases when header is void. 16856602cebbSEric Dumazet */ 16866602cebbSEric Dumazet memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); 16876602cebbSEric Dumazet 16886602cebbSEric Dumazet memcpy((struct skb_shared_info *)(data + size), 16896602cebbSEric Dumazet skb_shinfo(skb), 1690fed66381SEric Dumazet offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 16911da177e4SLinus Torvalds 16923e24591aSAlexander Duyck /* 16933e24591aSAlexander Duyck * if shinfo is shared we must drop the old head gracefully, but if it 16943e24591aSAlexander Duyck * is not we can just drop the old head and let the existing refcount 16953e24591aSAlexander Duyck * be since all we did is relocate the values 16963e24591aSAlexander Duyck */ 16973e24591aSAlexander Duyck if (skb_cloned(skb)) { 169870008aa5SMichael S. Tsirkin if (skb_orphan_frags(skb, gfp_mask)) 1699a6686f2fSShirley Ma goto nofrags; 17001f8b977aSWillem de Bruijn if (skb_zcopy(skb)) 1701c1d1b437SEric Dumazet refcount_inc(&skb_uarg(skb)->refcnt); 17021da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1703ea2ab693SIan Campbell skb_frag_ref(skb, i); 17041da177e4SLinus Torvalds 170521dc3301SDavid S. 
Miller if (skb_has_frag_list(skb)) 17061da177e4SLinus Torvalds skb_clone_fraglist(skb); 17071da177e4SLinus Torvalds 17081da177e4SLinus Torvalds skb_release_data(skb); 17093e24591aSAlexander Duyck } else { 17103e24591aSAlexander Duyck skb_free_head(skb); 17111fd63041SEric Dumazet } 17121da177e4SLinus Torvalds off = (data + nhead) - skb->head; 17131da177e4SLinus Torvalds 17141da177e4SLinus Torvalds skb->head = data; 1715d3836f21SEric Dumazet skb->head_frag = 0; 17161da177e4SLinus Torvalds skb->data += off; 17174305b541SArnaldo Carvalho de Melo #ifdef NET_SKBUFF_DATA_USES_OFFSET 17184305b541SArnaldo Carvalho de Melo skb->end = size; 171956eb8882SPatrick McHardy off = nhead; 17204305b541SArnaldo Carvalho de Melo #else 17214305b541SArnaldo Carvalho de Melo skb->end = skb->head + size; 172256eb8882SPatrick McHardy #endif 172327a884dcSArnaldo Carvalho de Melo skb->tail += off; 1724b41abb42SPeter Pan(潘卫平) skb_headers_offset_update(skb, nhead); 17251da177e4SLinus Torvalds skb->cloned = 0; 1726334a8132SPatrick McHardy skb->hdr_len = 0; 17271da177e4SLinus Torvalds skb->nohdr = 0; 17281da177e4SLinus Torvalds atomic_set(&skb_shinfo(skb)->dataref, 1); 1729158f323bSEric Dumazet 1730de8f3a83SDaniel Borkmann skb_metadata_clear(skb); 1731de8f3a83SDaniel Borkmann 1732158f323bSEric Dumazet /* It is not generally safe to change skb->truesize. 1733158f323bSEric Dumazet * For the moment, we really care of rx path, or 1734158f323bSEric Dumazet * when skb is orphaned (not attached to a socket). 1735158f323bSEric Dumazet */ 1736158f323bSEric Dumazet if (!skb->sk || skb->destructor == sock_edemux) 1737158f323bSEric Dumazet skb->truesize += size - osize; 1738158f323bSEric Dumazet 17391da177e4SLinus Torvalds return 0; 17401da177e4SLinus Torvalds 1741a6686f2fSShirley Ma nofrags: 1742a6686f2fSShirley Ma kfree(data); 17431da177e4SLinus Torvalds nodata: 17441da177e4SLinus Torvalds return -ENOMEM; 17451da177e4SLinus Torvalds } 1746b4ac530fSDavid S. Miller EXPORT_SYMBOL(pskb_expand_head); 17471da177e4SLinus Torvalds 17481da177e4SLinus Torvalds /* Make private copy of skb with writable head and some headroom */ 17491da177e4SLinus Torvalds 17501da177e4SLinus Torvalds struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) 17511da177e4SLinus Torvalds { 17521da177e4SLinus Torvalds struct sk_buff *skb2; 17531da177e4SLinus Torvalds int delta = headroom - skb_headroom(skb); 17541da177e4SLinus Torvalds 17551da177e4SLinus Torvalds if (delta <= 0) 17561da177e4SLinus Torvalds skb2 = pskb_copy(skb, GFP_ATOMIC); 17571da177e4SLinus Torvalds else { 17581da177e4SLinus Torvalds skb2 = skb_clone(skb, GFP_ATOMIC); 17591da177e4SLinus Torvalds if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, 17601da177e4SLinus Torvalds GFP_ATOMIC)) { 17611da177e4SLinus Torvalds kfree_skb(skb2); 17621da177e4SLinus Torvalds skb2 = NULL; 17631da177e4SLinus Torvalds } 17641da177e4SLinus Torvalds } 17651da177e4SLinus Torvalds return skb2; 17661da177e4SLinus Torvalds } 1767b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_realloc_headroom); 17681da177e4SLinus Torvalds 17691da177e4SLinus Torvalds /** 17701da177e4SLinus Torvalds * skb_copy_expand - copy and expand sk_buff 17711da177e4SLinus Torvalds * @skb: buffer to copy 17721da177e4SLinus Torvalds * @newheadroom: new free bytes at head 17731da177e4SLinus Torvalds * @newtailroom: new free bytes at tail 17741da177e4SLinus Torvalds * @gfp_mask: allocation priority 17751da177e4SLinus Torvalds * 17761da177e4SLinus Torvalds * Make a copy of both an &sk_buff and its data and while doing so 17771da177e4SLinus Torvalds * allocate additional space. 17781da177e4SLinus Torvalds * 17791da177e4SLinus Torvalds * This is used when the caller wishes to modify the data and needs a 17801da177e4SLinus Torvalds * private copy of the data to alter as well as more space for new fields. 17811da177e4SLinus Torvalds * Returns %NULL on failure or the pointer to the buffer 17821da177e4SLinus Torvalds * on success. The returned buffer has a reference count of 1. 17831da177e4SLinus Torvalds * 17841da177e4SLinus Torvalds * You must pass %GFP_ATOMIC as the allocation priority if this function 17851da177e4SLinus Torvalds * is called from an interrupt. 17861da177e4SLinus Torvalds */ 17871da177e4SLinus Torvalds struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 178886a76cafSVictor Fusco int newheadroom, int newtailroom, 1789dd0fc66fSAl Viro gfp_t gfp_mask) 17901da177e4SLinus Torvalds { 17911da177e4SLinus Torvalds /* 17921da177e4SLinus Torvalds * Allocate the copy buffer 17931da177e4SLinus Torvalds */ 1794c93bdd0eSMel Gorman struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, 1795c93bdd0eSMel Gorman gfp_mask, skb_alloc_rx_flag(skb), 1796c93bdd0eSMel Gorman NUMA_NO_NODE); 1797efd1e8d5SPatrick McHardy int oldheadroom = skb_headroom(skb); 17981da177e4SLinus Torvalds int head_copy_len, head_copy_off; 17991da177e4SLinus Torvalds 18001da177e4SLinus Torvalds if (!n) 18011da177e4SLinus Torvalds return NULL; 18021da177e4SLinus Torvalds 18031da177e4SLinus Torvalds skb_reserve(n, newheadroom); 18041da177e4SLinus Torvalds 18051da177e4SLinus Torvalds /* Set the tail pointer and length */ 18061da177e4SLinus Torvalds skb_put(n, skb->len); 18071da177e4SLinus Torvalds 1808efd1e8d5SPatrick McHardy head_copy_len = oldheadroom; 18091da177e4SLinus Torvalds head_copy_off = 0; 18101da177e4SLinus Torvalds if (newheadroom <= head_copy_len) 18111da177e4SLinus Torvalds head_copy_len = newheadroom; 18121da177e4SLinus Torvalds else 18131da177e4SLinus Torvalds head_copy_off = newheadroom - head_copy_len; 18141da177e4SLinus Torvalds 18151da177e4SLinus Torvalds /* Copy the linear header and data. */ 18169f77fad3STim Hansen BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 18179f77fad3STim Hansen skb->len + head_copy_len)); 18181da177e4SLinus Torvalds 181908303c18SIlya Lesokhin skb_copy_header(n, skb); 18201da177e4SLinus Torvalds 1821030737bcSEric Dumazet skb_headers_offset_update(n, newheadroom - oldheadroom); 1822efd1e8d5SPatrick McHardy 18231da177e4SLinus Torvalds return n; 18241da177e4SLinus Torvalds } 1825b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_copy_expand); 18261da177e4SLinus Torvalds 18271da177e4SLinus Torvalds /** 1828cd0a137aSFlorian Fainelli * __skb_pad - zero pad the tail of an skb 18291da177e4SLinus Torvalds * @skb: buffer to pad 18301da177e4SLinus Torvalds * @pad: space to pad 1831cd0a137aSFlorian Fainelli * @free_on_error: free buffer on error 18321da177e4SLinus Torvalds * 18331da177e4SLinus Torvalds * Ensure that a buffer is followed by a padding area that is zero 18341da177e4SLinus Torvalds * filled. Used by network drivers which may DMA or transfer data 18351da177e4SLinus Torvalds * beyond the buffer end onto the wire. 18361da177e4SLinus Torvalds * 1837cd0a137aSFlorian Fainelli * May return error in out of memory cases. The skb is freed on error 1838cd0a137aSFlorian Fainelli * if @free_on_error is true. 18391da177e4SLinus Torvalds */ 18401da177e4SLinus Torvalds 1841cd0a137aSFlorian Fainelli int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) 18421da177e4SLinus Torvalds { 18435b057c6bSHerbert Xu int err; 18445b057c6bSHerbert Xu int ntail; 18451da177e4SLinus Torvalds 18461da177e4SLinus Torvalds /* If the skbuff is non linear tailroom is always zero.. */ 18475b057c6bSHerbert Xu if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 18481da177e4SLinus Torvalds memset(skb->data+skb->len, 0, pad); 18495b057c6bSHerbert Xu return 0; 18501da177e4SLinus Torvalds } 18511da177e4SLinus Torvalds 18524305b541SArnaldo Carvalho de Melo ntail = skb->data_len + pad - (skb->end - skb->tail); 18535b057c6bSHerbert Xu if (likely(skb_cloned(skb) || ntail > 0)) { 18545b057c6bSHerbert Xu err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 18555b057c6bSHerbert Xu if (unlikely(err)) 18565b057c6bSHerbert Xu goto free_skb; 18575b057c6bSHerbert Xu } 18585b057c6bSHerbert Xu 18595b057c6bSHerbert Xu /* FIXME: The use of this function with non-linear skb's really needs 18605b057c6bSHerbert Xu * to be audited. 18615b057c6bSHerbert Xu */ 18625b057c6bSHerbert Xu err = skb_linearize(skb); 18635b057c6bSHerbert Xu if (unlikely(err)) 18645b057c6bSHerbert Xu goto free_skb; 18655b057c6bSHerbert Xu 18665b057c6bSHerbert Xu memset(skb->data + skb->len, 0, pad); 18675b057c6bSHerbert Xu return 0; 18685b057c6bSHerbert Xu 18695b057c6bSHerbert Xu free_skb: 1870cd0a137aSFlorian Fainelli if (free_on_error) 18711da177e4SLinus Torvalds kfree_skb(skb); 18725b057c6bSHerbert Xu return err; 18731da177e4SLinus Torvalds } 1874cd0a137aSFlorian Fainelli EXPORT_SYMBOL(__skb_pad); 18751da177e4SLinus Torvalds 18760dde3e16SIlpo Järvinen /** 18770c7ddf36SMathias Krause * pskb_put - add data to the tail of a potentially fragmented buffer 18780c7ddf36SMathias Krause * @skb: start of the buffer to use 18790c7ddf36SMathias Krause * @tail: tail fragment of the buffer to use 18800c7ddf36SMathias Krause * @len: amount of data to add 18810c7ddf36SMathias Krause * 18820c7ddf36SMathias Krause * This function extends the used data area of the potentially 18830c7ddf36SMathias Krause * fragmented buffer. @tail must be the last fragment of @skb -- or 18840c7ddf36SMathias Krause * @skb itself. If this would exceed the total buffer size the kernel 18850c7ddf36SMathias Krause * will panic. A pointer to the first byte of the extra data is 18860c7ddf36SMathias Krause * returned. 
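 *
 * Example (editor's sketch, not part of the original documentation; "padlen"
 * is a hypothetical trailer length): the usual pairing with skb_cow_data(),
 * roughly how IPsec ESP output zero-pads a packet trailer:
 *
 *	struct sk_buff *trailer;
 *
 *	if (skb_cow_data(skb, padlen, &trailer) < 0)
 *		goto err;
 *	memset(pskb_put(skb, trailer, padlen), 0, padlen);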
18870c7ddf36SMathias Krause */ 18880c7ddf36SMathias Krause 18894df864c1SJohannes Berg void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) 18900c7ddf36SMathias Krause { 18910c7ddf36SMathias Krause if (tail != skb) { 18920c7ddf36SMathias Krause skb->data_len += len; 18930c7ddf36SMathias Krause skb->len += len; 18940c7ddf36SMathias Krause } 18950c7ddf36SMathias Krause return skb_put(tail, len); 18960c7ddf36SMathias Krause } 18970c7ddf36SMathias Krause EXPORT_SYMBOL_GPL(pskb_put); 18980c7ddf36SMathias Krause 18990c7ddf36SMathias Krause /** 19000dde3e16SIlpo Järvinen * skb_put - add data to a buffer 19010dde3e16SIlpo Järvinen * @skb: buffer to use 19020dde3e16SIlpo Järvinen * @len: amount of data to add 19030dde3e16SIlpo Järvinen * 19040dde3e16SIlpo Järvinen * This function extends the used data area of the buffer. If this would 19050dde3e16SIlpo Järvinen * exceed the total buffer size the kernel will panic. A pointer to the 19060dde3e16SIlpo Järvinen * first byte of the extra data is returned. 19070dde3e16SIlpo Järvinen */ 19084df864c1SJohannes Berg void *skb_put(struct sk_buff *skb, unsigned int len) 19090dde3e16SIlpo Järvinen { 19104df864c1SJohannes Berg void *tmp = skb_tail_pointer(skb); 19110dde3e16SIlpo Järvinen SKB_LINEAR_ASSERT(skb); 19120dde3e16SIlpo Järvinen skb->tail += len; 19130dde3e16SIlpo Järvinen skb->len += len; 19140dde3e16SIlpo Järvinen if (unlikely(skb->tail > skb->end)) 19150dde3e16SIlpo Järvinen skb_over_panic(skb, len, __builtin_return_address(0)); 19160dde3e16SIlpo Järvinen return tmp; 19170dde3e16SIlpo Järvinen } 19180dde3e16SIlpo Järvinen EXPORT_SYMBOL(skb_put); 19190dde3e16SIlpo Järvinen 19206be8ac2fSIlpo Järvinen /** 1921c2aa270aSIlpo Järvinen * skb_push - add data to the start of a buffer 1922c2aa270aSIlpo Järvinen * @skb: buffer to use 1923c2aa270aSIlpo Järvinen * @len: amount of data to add 1924c2aa270aSIlpo Järvinen * 1925c2aa270aSIlpo Järvinen * This function extends the used data area of the buffer at the buffer 1926c2aa270aSIlpo Järvinen * start. If this would exceed the total buffer headroom the kernel will 1927c2aa270aSIlpo Järvinen * panic. A pointer to the first byte of the extra data is returned. 1928c2aa270aSIlpo Järvinen */ 1929d58ff351SJohannes Berg void *skb_push(struct sk_buff *skb, unsigned int len) 1930c2aa270aSIlpo Järvinen { 1931c2aa270aSIlpo Järvinen skb->data -= len; 1932c2aa270aSIlpo Järvinen skb->len += len; 1933c2aa270aSIlpo Järvinen if (unlikely(skb->data < skb->head)) 1934c2aa270aSIlpo Järvinen skb_under_panic(skb, len, __builtin_return_address(0)); 1935c2aa270aSIlpo Järvinen return skb->data; 1936c2aa270aSIlpo Järvinen } 1937c2aa270aSIlpo Järvinen EXPORT_SYMBOL(skb_push); 1938c2aa270aSIlpo Järvinen 1939c2aa270aSIlpo Järvinen /** 19406be8ac2fSIlpo Järvinen * skb_pull - remove data from the start of a buffer 19416be8ac2fSIlpo Järvinen * @skb: buffer to use 19426be8ac2fSIlpo Järvinen * @len: amount of data to remove 19436be8ac2fSIlpo Järvinen * 19446be8ac2fSIlpo Järvinen * This function removes data from the start of a buffer, returning 19456be8ac2fSIlpo Järvinen * the memory to the headroom. A pointer to the next data in the buffer 19466be8ac2fSIlpo Järvinen * is returned. Once the data has been pulled future pushes will overwrite 19476be8ac2fSIlpo Järvinen * the old data. 19486be8ac2fSIlpo Järvinen */ 1949af72868bSJohannes Berg void *skb_pull(struct sk_buff *skb, unsigned int len) 19506be8ac2fSIlpo Järvinen { 195147d29646SDavid S. 
Miller return skb_pull_inline(skb, len); 19526be8ac2fSIlpo Järvinen } 19536be8ac2fSIlpo Järvinen EXPORT_SYMBOL(skb_pull); 19546be8ac2fSIlpo Järvinen 1955419ae74eSIlpo Järvinen /** 1956419ae74eSIlpo Järvinen * skb_trim - remove end from a buffer 1957419ae74eSIlpo Järvinen * @skb: buffer to alter 1958419ae74eSIlpo Järvinen * @len: new length 1959419ae74eSIlpo Järvinen * 1960419ae74eSIlpo Järvinen * Cut the length of a buffer down by removing data from the tail. If 1961419ae74eSIlpo Järvinen * the buffer is already under the length specified it is not modified. 1962419ae74eSIlpo Järvinen * The skb must be linear. 1963419ae74eSIlpo Järvinen */ 1964419ae74eSIlpo Järvinen void skb_trim(struct sk_buff *skb, unsigned int len) 1965419ae74eSIlpo Järvinen { 1966419ae74eSIlpo Järvinen if (skb->len > len) 1967419ae74eSIlpo Järvinen __skb_trim(skb, len); 1968419ae74eSIlpo Järvinen } 1969419ae74eSIlpo Järvinen EXPORT_SYMBOL(skb_trim); 1970419ae74eSIlpo Järvinen 19713cc0e873SHerbert Xu /* Trims skb to length len. It can change skb pointers. 19721da177e4SLinus Torvalds */ 19731da177e4SLinus Torvalds 19743cc0e873SHerbert Xu int ___pskb_trim(struct sk_buff *skb, unsigned int len) 19751da177e4SLinus Torvalds { 197627b437c8SHerbert Xu struct sk_buff **fragp; 197727b437c8SHerbert Xu struct sk_buff *frag; 19781da177e4SLinus Torvalds int offset = skb_headlen(skb); 19791da177e4SLinus Torvalds int nfrags = skb_shinfo(skb)->nr_frags; 19801da177e4SLinus Torvalds int i; 198127b437c8SHerbert Xu int err; 198227b437c8SHerbert Xu 198327b437c8SHerbert Xu if (skb_cloned(skb) && 198427b437c8SHerbert Xu unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 198527b437c8SHerbert Xu return err; 19861da177e4SLinus Torvalds 1987f4d26fb3SHerbert Xu i = 0; 1988f4d26fb3SHerbert Xu if (offset >= len) 1989f4d26fb3SHerbert Xu goto drop_pages; 1990f4d26fb3SHerbert Xu 1991f4d26fb3SHerbert Xu for (; i < nfrags; i++) { 19929e903e08SEric Dumazet int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 199327b437c8SHerbert Xu 199427b437c8SHerbert Xu if (end < len) { 19951da177e4SLinus Torvalds offset = end; 199627b437c8SHerbert Xu continue; 19971da177e4SLinus Torvalds } 19981da177e4SLinus Torvalds 19999e903e08SEric Dumazet skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 200027b437c8SHerbert Xu 2001f4d26fb3SHerbert Xu drop_pages: 200227b437c8SHerbert Xu skb_shinfo(skb)->nr_frags = i; 200327b437c8SHerbert Xu 200427b437c8SHerbert Xu for (; i < nfrags; i++) 2005ea2ab693SIan Campbell skb_frag_unref(skb, i); 200627b437c8SHerbert Xu 200721dc3301SDavid S. 
Miller if (skb_has_frag_list(skb)) 200827b437c8SHerbert Xu skb_drop_fraglist(skb); 2009f4d26fb3SHerbert Xu goto done; 201027b437c8SHerbert Xu } 201127b437c8SHerbert Xu 201227b437c8SHerbert Xu for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 201327b437c8SHerbert Xu fragp = &frag->next) { 201427b437c8SHerbert Xu int end = offset + frag->len; 201527b437c8SHerbert Xu 201627b437c8SHerbert Xu if (skb_shared(frag)) { 201727b437c8SHerbert Xu struct sk_buff *nfrag; 201827b437c8SHerbert Xu 201927b437c8SHerbert Xu nfrag = skb_clone(frag, GFP_ATOMIC); 202027b437c8SHerbert Xu if (unlikely(!nfrag)) 202127b437c8SHerbert Xu return -ENOMEM; 202227b437c8SHerbert Xu 202327b437c8SHerbert Xu nfrag->next = frag->next; 202485bb2a60SEric Dumazet consume_skb(frag); 202527b437c8SHerbert Xu frag = nfrag; 202627b437c8SHerbert Xu *fragp = frag; 202727b437c8SHerbert Xu } 202827b437c8SHerbert Xu 202927b437c8SHerbert Xu if (end < len) { 203027b437c8SHerbert Xu offset = end; 203127b437c8SHerbert Xu continue; 203227b437c8SHerbert Xu } 203327b437c8SHerbert Xu 203427b437c8SHerbert Xu if (end > len && 203527b437c8SHerbert Xu unlikely((err = pskb_trim(frag, len - offset)))) 203627b437c8SHerbert Xu return err; 203727b437c8SHerbert Xu 203827b437c8SHerbert Xu if (frag->next) 203927b437c8SHerbert Xu skb_drop_list(&frag->next); 204027b437c8SHerbert Xu break; 204127b437c8SHerbert Xu } 204227b437c8SHerbert Xu 2043f4d26fb3SHerbert Xu done: 204427b437c8SHerbert Xu if (len > skb_headlen(skb)) { 20451da177e4SLinus Torvalds skb->data_len -= skb->len - len; 20461da177e4SLinus Torvalds skb->len = len; 20471da177e4SLinus Torvalds } else { 20481da177e4SLinus Torvalds skb->len = len; 20491da177e4SLinus Torvalds skb->data_len = 0; 205027a884dcSArnaldo Carvalho de Melo skb_set_tail_pointer(skb, len); 20511da177e4SLinus Torvalds } 20521da177e4SLinus Torvalds 2053c21b48ccSEric Dumazet if (!skb->sk || skb->destructor == sock_edemux) 2054c21b48ccSEric Dumazet skb_condense(skb); 20551da177e4SLinus Torvalds return 0; 20561da177e4SLinus Torvalds } 2057b4ac530fSDavid S. Miller EXPORT_SYMBOL(___pskb_trim); 20581da177e4SLinus Torvalds 205988078d98SEric Dumazet /* Note : use pskb_trim_rcsum() instead of calling this directly 206088078d98SEric Dumazet */ 206188078d98SEric Dumazet int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) 206288078d98SEric Dumazet { 206388078d98SEric Dumazet if (skb->ip_summed == CHECKSUM_COMPLETE) { 206488078d98SEric Dumazet int delta = skb->len - len; 206588078d98SEric Dumazet 2066d55bef50SDimitris Michailidis skb->csum = csum_block_sub(skb->csum, 2067d55bef50SDimitris Michailidis skb_checksum(skb, len, delta, 0), 2068d55bef50SDimitris Michailidis len); 206954970a2fSVasily Averin } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 207054970a2fSVasily Averin int hdlen = (len > skb_headlen(skb)) ? 
skb_headlen(skb) : len; 207154970a2fSVasily Averin int offset = skb_checksum_start_offset(skb) + skb->csum_offset; 207254970a2fSVasily Averin 207354970a2fSVasily Averin if (offset + sizeof(__sum16) > hdlen) 207454970a2fSVasily Averin return -EINVAL; 207588078d98SEric Dumazet } 207688078d98SEric Dumazet return __pskb_trim(skb, len); 207788078d98SEric Dumazet } 207888078d98SEric Dumazet EXPORT_SYMBOL(pskb_trim_rcsum_slow); 207988078d98SEric Dumazet 20801da177e4SLinus Torvalds /** 20811da177e4SLinus Torvalds * __pskb_pull_tail - advance tail of skb header 20821da177e4SLinus Torvalds * @skb: buffer to reallocate 20831da177e4SLinus Torvalds * @delta: number of bytes to advance tail 20841da177e4SLinus Torvalds * 20851da177e4SLinus Torvalds * The function makes a sense only on a fragmented &sk_buff, 20861da177e4SLinus Torvalds * it expands header moving its tail forward and copying necessary 20871da177e4SLinus Torvalds * data from fragmented part. 20881da177e4SLinus Torvalds * 20891da177e4SLinus Torvalds * &sk_buff MUST have reference count of 1. 20901da177e4SLinus Torvalds * 20911da177e4SLinus Torvalds * Returns %NULL (and &sk_buff does not change) if pull failed 20921da177e4SLinus Torvalds * or value of new tail of skb in the case of success. 20931da177e4SLinus Torvalds * 20941da177e4SLinus Torvalds * All the pointers pointing into skb header may change and must be 20951da177e4SLinus Torvalds * reloaded after call to this function. 20961da177e4SLinus Torvalds */ 20971da177e4SLinus Torvalds 20981da177e4SLinus Torvalds /* Moves tail of skb head forward, copying data from fragmented part, 20991da177e4SLinus Torvalds * when it is necessary. 21001da177e4SLinus Torvalds * 1. It may fail due to malloc failure. 21011da177e4SLinus Torvalds * 2. It may change skb pointers. 21021da177e4SLinus Torvalds * 21031da177e4SLinus Torvalds * It is pretty complicated. Luckily, it is called only in exceptional cases. 21041da177e4SLinus Torvalds */ 2105af72868bSJohannes Berg void *__pskb_pull_tail(struct sk_buff *skb, int delta) 21061da177e4SLinus Torvalds { 21071da177e4SLinus Torvalds /* If skb has not enough free space at tail, get new one 21081da177e4SLinus Torvalds * plus 128 bytes for future expansions. If we have enough 21091da177e4SLinus Torvalds * room at tail, reallocate without expansion only if skb is cloned. 21101da177e4SLinus Torvalds */ 21114305b541SArnaldo Carvalho de Melo int i, k, eat = (skb->tail + delta) - skb->end; 21121da177e4SLinus Torvalds 21131da177e4SLinus Torvalds if (eat > 0 || skb_cloned(skb)) { 21141da177e4SLinus Torvalds if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 21151da177e4SLinus Torvalds GFP_ATOMIC)) 21161da177e4SLinus Torvalds return NULL; 21171da177e4SLinus Torvalds } 21181da177e4SLinus Torvalds 21199f77fad3STim Hansen BUG_ON(skb_copy_bits(skb, skb_headlen(skb), 21209f77fad3STim Hansen skb_tail_pointer(skb), delta)); 21211da177e4SLinus Torvalds 21221da177e4SLinus Torvalds /* Optimization: no fragments, no reasons to preestimate 21231da177e4SLinus Torvalds * size of pulled pages. Superb. 21241da177e4SLinus Torvalds */ 212521dc3301SDavid S. Miller if (!skb_has_frag_list(skb)) 21261da177e4SLinus Torvalds goto pull_pages; 21271da177e4SLinus Torvalds 21281da177e4SLinus Torvalds /* Estimate size of pulled pages. 
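	 * (Editor's note, not part of the original comment: the loop below only
	 * walks the page frags; if they already cover @delta bytes we jump to
	 * pull_pages and the frag_list is left untouched.)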
*/ 21291da177e4SLinus Torvalds eat = delta; 21301da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 21319e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 21329e903e08SEric Dumazet 21339e903e08SEric Dumazet if (size >= eat) 21341da177e4SLinus Torvalds goto pull_pages; 21359e903e08SEric Dumazet eat -= size; 21361da177e4SLinus Torvalds } 21371da177e4SLinus Torvalds 21381da177e4SLinus Torvalds /* If we need update frag list, we are in troubles. 213909001b03SWenhua Shi * Certainly, it is possible to add an offset to skb data, 21401da177e4SLinus Torvalds * but taking into account that pulling is expected to 21411da177e4SLinus Torvalds * be very rare operation, it is worth to fight against 21421da177e4SLinus Torvalds * further bloating skb head and crucify ourselves here instead. 21431da177e4SLinus Torvalds * Pure masohism, indeed. 8)8) 21441da177e4SLinus Torvalds */ 21451da177e4SLinus Torvalds if (eat) { 21461da177e4SLinus Torvalds struct sk_buff *list = skb_shinfo(skb)->frag_list; 21471da177e4SLinus Torvalds struct sk_buff *clone = NULL; 21481da177e4SLinus Torvalds struct sk_buff *insp = NULL; 21491da177e4SLinus Torvalds 21501da177e4SLinus Torvalds do { 21511da177e4SLinus Torvalds if (list->len <= eat) { 21521da177e4SLinus Torvalds /* Eaten as whole. */ 21531da177e4SLinus Torvalds eat -= list->len; 21541da177e4SLinus Torvalds list = list->next; 21551da177e4SLinus Torvalds insp = list; 21561da177e4SLinus Torvalds } else { 21571da177e4SLinus Torvalds /* Eaten partially. */ 21581da177e4SLinus Torvalds 21591da177e4SLinus Torvalds if (skb_shared(list)) { 21601da177e4SLinus Torvalds /* Sucks! We need to fork list. :-( */ 21611da177e4SLinus Torvalds clone = skb_clone(list, GFP_ATOMIC); 21621da177e4SLinus Torvalds if (!clone) 21631da177e4SLinus Torvalds return NULL; 21641da177e4SLinus Torvalds insp = list->next; 21651da177e4SLinus Torvalds list = clone; 21661da177e4SLinus Torvalds } else { 21671da177e4SLinus Torvalds /* This may be pulled without 21681da177e4SLinus Torvalds * problems. */ 21691da177e4SLinus Torvalds insp = list; 21701da177e4SLinus Torvalds } 21711da177e4SLinus Torvalds if (!pskb_pull(list, eat)) { 21721da177e4SLinus Torvalds kfree_skb(clone); 21731da177e4SLinus Torvalds return NULL; 21741da177e4SLinus Torvalds } 21751da177e4SLinus Torvalds break; 21761da177e4SLinus Torvalds } 21771da177e4SLinus Torvalds } while (eat); 21781da177e4SLinus Torvalds 21791da177e4SLinus Torvalds /* Free pulled out fragments. */ 21801da177e4SLinus Torvalds while ((list = skb_shinfo(skb)->frag_list) != insp) { 21811da177e4SLinus Torvalds skb_shinfo(skb)->frag_list = list->next; 21821da177e4SLinus Torvalds kfree_skb(list); 21831da177e4SLinus Torvalds } 21841da177e4SLinus Torvalds /* And insert new clone at head. */ 21851da177e4SLinus Torvalds if (clone) { 21861da177e4SLinus Torvalds clone->next = list; 21871da177e4SLinus Torvalds skb_shinfo(skb)->frag_list = clone; 21881da177e4SLinus Torvalds } 21891da177e4SLinus Torvalds } 21901da177e4SLinus Torvalds /* Success! Now we may commit changes to skb data. 
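	 * (Editor's note, not part of the original comment: "commit" below means
	 * dropping the frags that were consumed completely, trimming the first
	 * partially consumed frag, then advancing skb->tail and shrinking
	 * skb->data_len by @delta.)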
*/ 21911da177e4SLinus Torvalds 21921da177e4SLinus Torvalds pull_pages: 21931da177e4SLinus Torvalds eat = delta; 21941da177e4SLinus Torvalds k = 0; 21951da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 21969e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 21979e903e08SEric Dumazet 21989e903e08SEric Dumazet if (size <= eat) { 2199ea2ab693SIan Campbell skb_frag_unref(skb, i); 22009e903e08SEric Dumazet eat -= size; 22011da177e4SLinus Torvalds } else { 2202b54c9d5bSJonathan Lemon skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; 2203b54c9d5bSJonathan Lemon 2204b54c9d5bSJonathan Lemon *frag = skb_shinfo(skb)->frags[i]; 22051da177e4SLinus Torvalds if (eat) { 2206b54c9d5bSJonathan Lemon skb_frag_off_add(frag, eat); 2207b54c9d5bSJonathan Lemon skb_frag_size_sub(frag, eat); 22083ccc6c6fSlinzhang if (!i) 22093ccc6c6fSlinzhang goto end; 22101da177e4SLinus Torvalds eat = 0; 22111da177e4SLinus Torvalds } 22121da177e4SLinus Torvalds k++; 22131da177e4SLinus Torvalds } 22141da177e4SLinus Torvalds } 22151da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = k; 22161da177e4SLinus Torvalds 22173ccc6c6fSlinzhang end: 22181da177e4SLinus Torvalds skb->tail += delta; 22191da177e4SLinus Torvalds skb->data_len -= delta; 22201da177e4SLinus Torvalds 22211f8b977aSWillem de Bruijn if (!skb->data_len) 22221f8b977aSWillem de Bruijn skb_zcopy_clear(skb, false); 22231f8b977aSWillem de Bruijn 222427a884dcSArnaldo Carvalho de Melo return skb_tail_pointer(skb); 22251da177e4SLinus Torvalds } 2226b4ac530fSDavid S. Miller EXPORT_SYMBOL(__pskb_pull_tail); 22271da177e4SLinus Torvalds 222822019b17SEric Dumazet /** 222922019b17SEric Dumazet * skb_copy_bits - copy bits from skb to kernel buffer 223022019b17SEric Dumazet * @skb: source skb 223122019b17SEric Dumazet * @offset: offset in source 223222019b17SEric Dumazet * @to: destination buffer 223322019b17SEric Dumazet * @len: number of bytes to copy 223422019b17SEric Dumazet * 223522019b17SEric Dumazet * Copy the specified number of bytes from the source skb to the 223622019b17SEric Dumazet * destination buffer. 223722019b17SEric Dumazet * 223822019b17SEric Dumazet * CAUTION ! : 223922019b17SEric Dumazet * If its prototype is ever changed, 224022019b17SEric Dumazet * check arch/{*}/net/{*}.S files, 224122019b17SEric Dumazet * since it is called from BPF assembly code. 224222019b17SEric Dumazet */ 22431da177e4SLinus Torvalds int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 22441da177e4SLinus Torvalds { 22451a028e50SDavid S. Miller int start = skb_headlen(skb); 2246fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 2247fbb398a8SDavid S. Miller int i, copy; 22481da177e4SLinus Torvalds 22491da177e4SLinus Torvalds if (offset > (int)skb->len - len) 22501da177e4SLinus Torvalds goto fault; 22511da177e4SLinus Torvalds 22521da177e4SLinus Torvalds /* Copy header. */ 22531a028e50SDavid S. Miller if ((copy = start - offset) > 0) { 22541da177e4SLinus Torvalds if (copy > len) 22551da177e4SLinus Torvalds copy = len; 2256d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, offset, to, copy); 22571da177e4SLinus Torvalds if ((len -= copy) == 0) 22581da177e4SLinus Torvalds return 0; 22591da177e4SLinus Torvalds offset += copy; 22601da177e4SLinus Torvalds to += copy; 22611da177e4SLinus Torvalds } 22621da177e4SLinus Torvalds 22631da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 22641a028e50SDavid S. 
Miller int end; 226551c56b00SEric Dumazet skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 22661da177e4SLinus Torvalds 2267547b792cSIlpo Järvinen WARN_ON(start > offset + len); 22681a028e50SDavid S. Miller 226951c56b00SEric Dumazet end = start + skb_frag_size(f); 22701da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 2271c613c209SWillem de Bruijn u32 p_off, p_len, copied; 2272c613c209SWillem de Bruijn struct page *p; 22731da177e4SLinus Torvalds u8 *vaddr; 22741da177e4SLinus Torvalds 22751da177e4SLinus Torvalds if (copy > len) 22761da177e4SLinus Torvalds copy = len; 22771da177e4SLinus Torvalds 2278c613c209SWillem de Bruijn skb_frag_foreach_page(f, 2279b54c9d5bSJonathan Lemon skb_frag_off(f) + offset - start, 2280c613c209SWillem de Bruijn copy, p, p_off, p_len, copied) { 2281c613c209SWillem de Bruijn vaddr = kmap_atomic(p); 2282c613c209SWillem de Bruijn memcpy(to + copied, vaddr + p_off, p_len); 228351c56b00SEric Dumazet kunmap_atomic(vaddr); 2284c613c209SWillem de Bruijn } 22851da177e4SLinus Torvalds 22861da177e4SLinus Torvalds if ((len -= copy) == 0) 22871da177e4SLinus Torvalds return 0; 22881da177e4SLinus Torvalds offset += copy; 22891da177e4SLinus Torvalds to += copy; 22901da177e4SLinus Torvalds } 22911a028e50SDavid S. Miller start = end; 22921da177e4SLinus Torvalds } 22931da177e4SLinus Torvalds 2294fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 22951a028e50SDavid S. Miller int end; 22961da177e4SLinus Torvalds 2297547b792cSIlpo Järvinen WARN_ON(start > offset + len); 22981a028e50SDavid S. Miller 2299fbb398a8SDavid S. Miller end = start + frag_iter->len; 23001da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 23011da177e4SLinus Torvalds if (copy > len) 23021da177e4SLinus Torvalds copy = len; 2303fbb398a8SDavid S. Miller if (skb_copy_bits(frag_iter, offset - start, to, copy)) 23041da177e4SLinus Torvalds goto fault; 23051da177e4SLinus Torvalds if ((len -= copy) == 0) 23061da177e4SLinus Torvalds return 0; 23071da177e4SLinus Torvalds offset += copy; 23081da177e4SLinus Torvalds to += copy; 23091da177e4SLinus Torvalds } 23101a028e50SDavid S. Miller start = end; 23111da177e4SLinus Torvalds } 2312a6686f2fSShirley Ma 23131da177e4SLinus Torvalds if (!len) 23141da177e4SLinus Torvalds return 0; 23151da177e4SLinus Torvalds 23161da177e4SLinus Torvalds fault: 23171da177e4SLinus Torvalds return -EFAULT; 23181da177e4SLinus Torvalds } 2319b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_bits); 23201da177e4SLinus Torvalds 23219c55e01cSJens Axboe /* 23229c55e01cSJens Axboe * Callback from splice_to_pipe(), if we need to release some pages 23239c55e01cSJens Axboe * at the end of the spd in case we error'ed out in filling the pipe. 23249c55e01cSJens Axboe */ 23259c55e01cSJens Axboe static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) 23269c55e01cSJens Axboe { 23278b9d3728SJarek Poplawski put_page(spd->pages[i]); 23288b9d3728SJarek Poplawski } 23299c55e01cSJens Axboe 2330a108d5f3SDavid S. 
Miller static struct page *linear_to_page(struct page *page, unsigned int *len, 23314fb66994SJarek Poplawski unsigned int *offset, 233218aafc62SEric Dumazet struct sock *sk) 23338b9d3728SJarek Poplawski { 23345640f768SEric Dumazet struct page_frag *pfrag = sk_page_frag(sk); 23358b9d3728SJarek Poplawski 23365640f768SEric Dumazet if (!sk_page_frag_refill(sk, pfrag)) 23378b9d3728SJarek Poplawski return NULL; 23384fb66994SJarek Poplawski 23395640f768SEric Dumazet *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); 23404fb66994SJarek Poplawski 23415640f768SEric Dumazet memcpy(page_address(pfrag->page) + pfrag->offset, 23425640f768SEric Dumazet page_address(page) + *offset, *len); 23435640f768SEric Dumazet *offset = pfrag->offset; 23445640f768SEric Dumazet pfrag->offset += *len; 23454fb66994SJarek Poplawski 23465640f768SEric Dumazet return pfrag->page; 23479c55e01cSJens Axboe } 23489c55e01cSJens Axboe 234941c73a0dSEric Dumazet static bool spd_can_coalesce(const struct splice_pipe_desc *spd, 235041c73a0dSEric Dumazet struct page *page, 235141c73a0dSEric Dumazet unsigned int offset) 235241c73a0dSEric Dumazet { 235341c73a0dSEric Dumazet return spd->nr_pages && 235441c73a0dSEric Dumazet spd->pages[spd->nr_pages - 1] == page && 235541c73a0dSEric Dumazet (spd->partial[spd->nr_pages - 1].offset + 235641c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len == offset); 235741c73a0dSEric Dumazet } 235841c73a0dSEric Dumazet 23599c55e01cSJens Axboe /* 23609c55e01cSJens Axboe * Fill page/offset/length into spd, if it can hold more pages. 23619c55e01cSJens Axboe */ 2362a108d5f3SDavid S. Miller static bool spd_fill_page(struct splice_pipe_desc *spd, 236335f3d14dSJens Axboe struct pipe_inode_info *pipe, struct page *page, 23644fb66994SJarek Poplawski unsigned int *len, unsigned int offset, 236518aafc62SEric Dumazet bool linear, 23667a67e56fSJarek Poplawski struct sock *sk) 23679c55e01cSJens Axboe { 236841c73a0dSEric Dumazet if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) 2369a108d5f3SDavid S. Miller return true; 23709c55e01cSJens Axboe 23718b9d3728SJarek Poplawski if (linear) { 237218aafc62SEric Dumazet page = linear_to_page(page, len, &offset, sk); 23738b9d3728SJarek Poplawski if (!page) 2374a108d5f3SDavid S. Miller return true; 237541c73a0dSEric Dumazet } 237641c73a0dSEric Dumazet if (spd_can_coalesce(spd, page, offset)) { 237741c73a0dSEric Dumazet spd->partial[spd->nr_pages - 1].len += *len; 2378a108d5f3SDavid S. Miller return false; 237941c73a0dSEric Dumazet } 23808b9d3728SJarek Poplawski get_page(page); 23819c55e01cSJens Axboe spd->pages[spd->nr_pages] = page; 23824fb66994SJarek Poplawski spd->partial[spd->nr_pages].len = *len; 23839c55e01cSJens Axboe spd->partial[spd->nr_pages].offset = offset; 23849c55e01cSJens Axboe spd->nr_pages++; 23858b9d3728SJarek Poplawski 2386a108d5f3SDavid S. Miller return false; 23879c55e01cSJens Axboe } 23889c55e01cSJens Axboe 2389a108d5f3SDavid S. Miller static bool __splice_segment(struct page *page, unsigned int poff, 23902870c43dSOctavian Purdila unsigned int plen, unsigned int *off, 239118aafc62SEric Dumazet unsigned int *len, 2392d7ccf7c0SEric Dumazet struct splice_pipe_desc *spd, bool linear, 239335f3d14dSJens Axboe struct sock *sk, 239435f3d14dSJens Axboe struct pipe_inode_info *pipe) 23959c55e01cSJens Axboe { 23962870c43dSOctavian Purdila if (!*len) 2397a108d5f3SDavid S. 
Miller return true; 23989c55e01cSJens Axboe 23992870c43dSOctavian Purdila /* skip this segment if already processed */ 24002870c43dSOctavian Purdila if (*off >= plen) { 24012870c43dSOctavian Purdila *off -= plen; 2402a108d5f3SDavid S. Miller return false; 24032870c43dSOctavian Purdila } 24042870c43dSOctavian Purdila 24052870c43dSOctavian Purdila /* ignore any bits we already processed */ 24069ca1b22dSEric Dumazet poff += *off; 24079ca1b22dSEric Dumazet plen -= *off; 24082870c43dSOctavian Purdila *off = 0; 24092870c43dSOctavian Purdila 241018aafc62SEric Dumazet do { 241118aafc62SEric Dumazet unsigned int flen = min(*len, plen); 24122870c43dSOctavian Purdila 241318aafc62SEric Dumazet if (spd_fill_page(spd, pipe, page, &flen, poff, 241418aafc62SEric Dumazet linear, sk)) 2415a108d5f3SDavid S. Miller return true; 241618aafc62SEric Dumazet poff += flen; 241718aafc62SEric Dumazet plen -= flen; 24182870c43dSOctavian Purdila *len -= flen; 241918aafc62SEric Dumazet } while (*len && plen); 24202870c43dSOctavian Purdila 2421a108d5f3SDavid S. Miller return false; 2422db43a282SOctavian Purdila } 24239c55e01cSJens Axboe 24249c55e01cSJens Axboe /* 2425a108d5f3SDavid S. Miller * Map linear and fragment data from the skb to spd. It reports true if the 24262870c43dSOctavian Purdila * pipe is full or if we already spliced the requested length. 24279c55e01cSJens Axboe */ 2428a108d5f3SDavid S. Miller static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 242935f3d14dSJens Axboe unsigned int *offset, unsigned int *len, 243035f3d14dSJens Axboe struct splice_pipe_desc *spd, struct sock *sk) 24312870c43dSOctavian Purdila { 24322870c43dSOctavian Purdila int seg; 2433fa9835e5STom Herbert struct sk_buff *iter; 24349c55e01cSJens Axboe 24351d0c0b32SEric Dumazet /* map the linear part : 24362996d31fSAlexander Duyck * If skb->head_frag is set, this 'linear' part is backed by a 24372996d31fSAlexander Duyck * fragment, and if the head is not shared with any clones then 24382996d31fSAlexander Duyck * we can avoid a copy since we own the head portion of this page. 24399c55e01cSJens Axboe */ 24402870c43dSOctavian Purdila if (__splice_segment(virt_to_page(skb->data), 24412870c43dSOctavian Purdila (unsigned long) skb->data & (PAGE_SIZE - 1), 24422870c43dSOctavian Purdila skb_headlen(skb), 244318aafc62SEric Dumazet offset, len, spd, 24443a7c1ee4SAlexander Duyck skb_head_is_locked(skb), 24451d0c0b32SEric Dumazet sk, pipe)) 2446a108d5f3SDavid S. Miller return true; 24479c55e01cSJens Axboe 24489c55e01cSJens Axboe /* 24499c55e01cSJens Axboe * then map the fragments 24509c55e01cSJens Axboe */ 24519c55e01cSJens Axboe for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { 24529c55e01cSJens Axboe const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 24539c55e01cSJens Axboe 2454ea2ab693SIan Campbell if (__splice_segment(skb_frag_page(f), 2455b54c9d5bSJonathan Lemon skb_frag_off(f), skb_frag_size(f), 245618aafc62SEric Dumazet offset, len, spd, false, sk, pipe)) 2457a108d5f3SDavid S. Miller return true; 24589c55e01cSJens Axboe } 24599c55e01cSJens Axboe 2460fa9835e5STom Herbert skb_walk_frags(skb, iter) { 2461fa9835e5STom Herbert if (*offset >= iter->len) { 2462fa9835e5STom Herbert *offset -= iter->len; 2463fa9835e5STom Herbert continue; 2464fa9835e5STom Herbert } 2465fa9835e5STom Herbert /* __skb_splice_bits() only fails if the output has no room 2466fa9835e5STom Herbert * left, so no point in going over the frag_list for the error 2467fa9835e5STom Herbert * case. 
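 *
 * A rough sketch of how the exported skb_splice_bits() below is meant to
 * be driven (the caller shown is illustrative, not the exact TCP code):
 *
 *	spliced = skb_splice_bits(skb, skb->sk, offset, pipe,
 *				  want, splice_flags);
 *
 * where a protocol's splice_read() implementation walks its receive data
 * and feeds each skb to skb_splice_bits() until the pipe is full or the
 * requested length has been spliced.
 *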
2468fa9835e5STom Herbert */ 2469fa9835e5STom Herbert if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) 2470fa9835e5STom Herbert return true; 2471fa9835e5STom Herbert } 2472fa9835e5STom Herbert 2473a108d5f3SDavid S. Miller return false; 24749c55e01cSJens Axboe } 24759c55e01cSJens Axboe 24769c55e01cSJens Axboe /* 24779c55e01cSJens Axboe * Map data from the skb to a pipe. Should handle both the linear part, 2478fa9835e5STom Herbert * the fragments, and the frag list. 24799c55e01cSJens Axboe */ 2480a60e3cc7SHannes Frederic Sowa int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, 24819c55e01cSJens Axboe struct pipe_inode_info *pipe, unsigned int tlen, 248225869262SAl Viro unsigned int flags) 24839c55e01cSJens Axboe { 248441c73a0dSEric Dumazet struct partial_page partial[MAX_SKB_FRAGS]; 248541c73a0dSEric Dumazet struct page *pages[MAX_SKB_FRAGS]; 24869c55e01cSJens Axboe struct splice_pipe_desc spd = { 24879c55e01cSJens Axboe .pages = pages, 24889c55e01cSJens Axboe .partial = partial, 2489047fe360SEric Dumazet .nr_pages_max = MAX_SKB_FRAGS, 249028a625cbSMiklos Szeredi .ops = &nosteal_pipe_buf_ops, 24919c55e01cSJens Axboe .spd_release = sock_spd_release, 24929c55e01cSJens Axboe }; 249335f3d14dSJens Axboe int ret = 0; 249435f3d14dSJens Axboe 2495fa9835e5STom Herbert __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); 24969c55e01cSJens Axboe 2497a60e3cc7SHannes Frederic Sowa if (spd.nr_pages) 249825869262SAl Viro ret = splice_to_pipe(pipe, &spd); 24999c55e01cSJens Axboe 250035f3d14dSJens Axboe return ret; 25019c55e01cSJens Axboe } 25022b514574SHannes Frederic Sowa EXPORT_SYMBOL_GPL(skb_splice_bits); 25039c55e01cSJens Axboe 250420bf50deSTom Herbert /* Send skb data on a socket. Socket must be locked. */ 250520bf50deSTom Herbert int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, 250620bf50deSTom Herbert int len) 250720bf50deSTom Herbert { 250820bf50deSTom Herbert unsigned int orig_len = len; 250920bf50deSTom Herbert struct sk_buff *head = skb; 251020bf50deSTom Herbert unsigned short fragidx; 251120bf50deSTom Herbert int slen, ret; 251220bf50deSTom Herbert 251320bf50deSTom Herbert do_frag_list: 251420bf50deSTom Herbert 251520bf50deSTom Herbert /* Deal with head data */ 251620bf50deSTom Herbert while (offset < skb_headlen(skb) && len) { 251720bf50deSTom Herbert struct kvec kv; 251820bf50deSTom Herbert struct msghdr msg; 251920bf50deSTom Herbert 252020bf50deSTom Herbert slen = min_t(int, len, skb_headlen(skb) - offset); 252120bf50deSTom Herbert kv.iov_base = skb->data + offset; 2522db5980d8SJohn Fastabend kv.iov_len = slen; 252320bf50deSTom Herbert memset(&msg, 0, sizeof(msg)); 2524bd95e678SJohn Fastabend msg.msg_flags = MSG_DONTWAIT; 252520bf50deSTom Herbert 252620bf50deSTom Herbert ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen); 252720bf50deSTom Herbert if (ret <= 0) 252820bf50deSTom Herbert goto error; 252920bf50deSTom Herbert 253020bf50deSTom Herbert offset += ret; 253120bf50deSTom Herbert len -= ret; 253220bf50deSTom Herbert } 253320bf50deSTom Herbert 253420bf50deSTom Herbert /* All the data was skb head? 
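 *
 * (Usage note for skb_send_sock_locked() as a whole: the caller must
 * already hold the socket lock.  A minimal, illustrative sketch:
 *
 *	lock_sock(sk);
 *	ret = skb_send_sock_locked(sk, skb, 0, skb->len);
 *	release_sock(sk);
 *
 * ret is the number of bytes actually sent, or a negative error if
 * nothing could be sent at all.)
 *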
*/ 253520bf50deSTom Herbert if (!len) 253620bf50deSTom Herbert goto out; 253720bf50deSTom Herbert 253820bf50deSTom Herbert /* Make offset relative to start of frags */ 253920bf50deSTom Herbert offset -= skb_headlen(skb); 254020bf50deSTom Herbert 254120bf50deSTom Herbert /* Find where we are in frag list */ 254220bf50deSTom Herbert for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 254320bf50deSTom Herbert skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 254420bf50deSTom Herbert 2545d8e18a51SMatthew Wilcox (Oracle) if (offset < skb_frag_size(frag)) 254620bf50deSTom Herbert break; 254720bf50deSTom Herbert 2548d8e18a51SMatthew Wilcox (Oracle) offset -= skb_frag_size(frag); 254920bf50deSTom Herbert } 255020bf50deSTom Herbert 255120bf50deSTom Herbert for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { 255220bf50deSTom Herbert skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; 255320bf50deSTom Herbert 2554d8e18a51SMatthew Wilcox (Oracle) slen = min_t(size_t, len, skb_frag_size(frag) - offset); 255520bf50deSTom Herbert 255620bf50deSTom Herbert while (slen) { 2557d8e18a51SMatthew Wilcox (Oracle) ret = kernel_sendpage_locked(sk, skb_frag_page(frag), 2558b54c9d5bSJonathan Lemon skb_frag_off(frag) + offset, 255920bf50deSTom Herbert slen, MSG_DONTWAIT); 256020bf50deSTom Herbert if (ret <= 0) 256120bf50deSTom Herbert goto error; 256220bf50deSTom Herbert 256320bf50deSTom Herbert len -= ret; 256420bf50deSTom Herbert offset += ret; 256520bf50deSTom Herbert slen -= ret; 256620bf50deSTom Herbert } 256720bf50deSTom Herbert 256820bf50deSTom Herbert offset = 0; 256920bf50deSTom Herbert } 257020bf50deSTom Herbert 257120bf50deSTom Herbert if (len) { 257220bf50deSTom Herbert /* Process any frag lists */ 257320bf50deSTom Herbert 257420bf50deSTom Herbert if (skb == head) { 257520bf50deSTom Herbert if (skb_has_frag_list(skb)) { 257620bf50deSTom Herbert skb = skb_shinfo(skb)->frag_list; 257720bf50deSTom Herbert goto do_frag_list; 257820bf50deSTom Herbert } 257920bf50deSTom Herbert } else if (skb->next) { 258020bf50deSTom Herbert skb = skb->next; 258120bf50deSTom Herbert goto do_frag_list; 258220bf50deSTom Herbert } 258320bf50deSTom Herbert } 258420bf50deSTom Herbert 258520bf50deSTom Herbert out: 258620bf50deSTom Herbert return orig_len - len; 258720bf50deSTom Herbert 258820bf50deSTom Herbert error: 258920bf50deSTom Herbert return orig_len == len ? ret : orig_len - len; 259020bf50deSTom Herbert } 259120bf50deSTom Herbert EXPORT_SYMBOL_GPL(skb_send_sock_locked); 259220bf50deSTom Herbert 2593357b40a1SHerbert Xu /** 2594357b40a1SHerbert Xu * skb_store_bits - store bits from kernel buffer to skb 2595357b40a1SHerbert Xu * @skb: destination buffer 2596357b40a1SHerbert Xu * @offset: offset in destination 2597357b40a1SHerbert Xu * @from: source buffer 2598357b40a1SHerbert Xu * @len: number of bytes to copy 2599357b40a1SHerbert Xu * 2600357b40a1SHerbert Xu * Copy the specified number of bytes from the source buffer to the 2601357b40a1SHerbert Xu * destination skb. This function handles all the messy bits of 2602357b40a1SHerbert Xu * traversing fragment lists and such. 2603357b40a1SHerbert Xu */ 2604357b40a1SHerbert Xu 26050c6fcc8aSStephen Hemminger int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) 2606357b40a1SHerbert Xu { 26071a028e50SDavid S. Miller int start = skb_headlen(skb); 2608fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 2609fbb398a8SDavid S. 
Miller int i, copy; 2610357b40a1SHerbert Xu 2611357b40a1SHerbert Xu if (offset > (int)skb->len - len) 2612357b40a1SHerbert Xu goto fault; 2613357b40a1SHerbert Xu 26141a028e50SDavid S. Miller if ((copy = start - offset) > 0) { 2615357b40a1SHerbert Xu if (copy > len) 2616357b40a1SHerbert Xu copy = len; 261727d7ff46SArnaldo Carvalho de Melo skb_copy_to_linear_data_offset(skb, offset, from, copy); 2618357b40a1SHerbert Xu if ((len -= copy) == 0) 2619357b40a1SHerbert Xu return 0; 2620357b40a1SHerbert Xu offset += copy; 2621357b40a1SHerbert Xu from += copy; 2622357b40a1SHerbert Xu } 2623357b40a1SHerbert Xu 2624357b40a1SHerbert Xu for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2625357b40a1SHerbert Xu skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 26261a028e50SDavid S. Miller int end; 2627357b40a1SHerbert Xu 2628547b792cSIlpo Järvinen WARN_ON(start > offset + len); 26291a028e50SDavid S. Miller 26309e903e08SEric Dumazet end = start + skb_frag_size(frag); 2631357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 2632c613c209SWillem de Bruijn u32 p_off, p_len, copied; 2633c613c209SWillem de Bruijn struct page *p; 2634357b40a1SHerbert Xu u8 *vaddr; 2635357b40a1SHerbert Xu 2636357b40a1SHerbert Xu if (copy > len) 2637357b40a1SHerbert Xu copy = len; 2638357b40a1SHerbert Xu 2639c613c209SWillem de Bruijn skb_frag_foreach_page(frag, 2640b54c9d5bSJonathan Lemon skb_frag_off(frag) + offset - start, 2641c613c209SWillem de Bruijn copy, p, p_off, p_len, copied) { 2642c613c209SWillem de Bruijn vaddr = kmap_atomic(p); 2643c613c209SWillem de Bruijn memcpy(vaddr + p_off, from + copied, p_len); 264451c56b00SEric Dumazet kunmap_atomic(vaddr); 2645c613c209SWillem de Bruijn } 2646357b40a1SHerbert Xu 2647357b40a1SHerbert Xu if ((len -= copy) == 0) 2648357b40a1SHerbert Xu return 0; 2649357b40a1SHerbert Xu offset += copy; 2650357b40a1SHerbert Xu from += copy; 2651357b40a1SHerbert Xu } 26521a028e50SDavid S. Miller start = end; 2653357b40a1SHerbert Xu } 2654357b40a1SHerbert Xu 2655fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 26561a028e50SDavid S. Miller int end; 2657357b40a1SHerbert Xu 2658547b792cSIlpo Järvinen WARN_ON(start > offset + len); 26591a028e50SDavid S. Miller 2660fbb398a8SDavid S. Miller end = start + frag_iter->len; 2661357b40a1SHerbert Xu if ((copy = end - offset) > 0) { 2662357b40a1SHerbert Xu if (copy > len) 2663357b40a1SHerbert Xu copy = len; 2664fbb398a8SDavid S. Miller if (skb_store_bits(frag_iter, offset - start, 26651a028e50SDavid S. Miller from, copy)) 2666357b40a1SHerbert Xu goto fault; 2667357b40a1SHerbert Xu if ((len -= copy) == 0) 2668357b40a1SHerbert Xu return 0; 2669357b40a1SHerbert Xu offset += copy; 2670357b40a1SHerbert Xu from += copy; 2671357b40a1SHerbert Xu } 26721a028e50SDavid S. Miller start = end; 2673357b40a1SHerbert Xu } 2674357b40a1SHerbert Xu if (!len) 2675357b40a1SHerbert Xu return 0; 2676357b40a1SHerbert Xu 2677357b40a1SHerbert Xu fault: 2678357b40a1SHerbert Xu return -EFAULT; 2679357b40a1SHerbert Xu } 2680357b40a1SHerbert Xu EXPORT_SYMBOL(skb_store_bits); 2681357b40a1SHerbert Xu 26821da177e4SLinus Torvalds /* Checksum skb data. */ 26832817a336SDaniel Borkmann __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, 26842817a336SDaniel Borkmann __wsum csum, const struct skb_checksum_ops *ops) 26851da177e4SLinus Torvalds { 26861a028e50SDavid S. Miller int start = skb_headlen(skb); 26871a028e50SDavid S. Miller int i, copy = start - offset; 2688fbb398a8SDavid S. 
Miller struct sk_buff *frag_iter; 26891da177e4SLinus Torvalds int pos = 0; 26901da177e4SLinus Torvalds 26911da177e4SLinus Torvalds /* Checksum header. */ 26921da177e4SLinus Torvalds if (copy > 0) { 26931da177e4SLinus Torvalds if (copy > len) 26941da177e4SLinus Torvalds copy = len; 26952544af03SMatteo Croce csum = INDIRECT_CALL_1(ops->update, csum_partial_ext, 26962544af03SMatteo Croce skb->data + offset, copy, csum); 26971da177e4SLinus Torvalds if ((len -= copy) == 0) 26981da177e4SLinus Torvalds return csum; 26991da177e4SLinus Torvalds offset += copy; 27001da177e4SLinus Torvalds pos = copy; 27011da177e4SLinus Torvalds } 27021da177e4SLinus Torvalds 27031da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 27041a028e50SDavid S. Miller int end; 270551c56b00SEric Dumazet skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 27061da177e4SLinus Torvalds 2707547b792cSIlpo Järvinen WARN_ON(start > offset + len); 27081a028e50SDavid S. Miller 270951c56b00SEric Dumazet end = start + skb_frag_size(frag); 27101da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 2711c613c209SWillem de Bruijn u32 p_off, p_len, copied; 2712c613c209SWillem de Bruijn struct page *p; 271344bb9363SAl Viro __wsum csum2; 27141da177e4SLinus Torvalds u8 *vaddr; 27151da177e4SLinus Torvalds 27161da177e4SLinus Torvalds if (copy > len) 27171da177e4SLinus Torvalds copy = len; 2718c613c209SWillem de Bruijn 2719c613c209SWillem de Bruijn skb_frag_foreach_page(frag, 2720b54c9d5bSJonathan Lemon skb_frag_off(frag) + offset - start, 2721c613c209SWillem de Bruijn copy, p, p_off, p_len, copied) { 2722c613c209SWillem de Bruijn vaddr = kmap_atomic(p); 27232544af03SMatteo Croce csum2 = INDIRECT_CALL_1(ops->update, 27242544af03SMatteo Croce csum_partial_ext, 27252544af03SMatteo Croce vaddr + p_off, p_len, 0); 272651c56b00SEric Dumazet kunmap_atomic(vaddr); 27272544af03SMatteo Croce csum = INDIRECT_CALL_1(ops->combine, 27282544af03SMatteo Croce csum_block_add_ext, csum, 27292544af03SMatteo Croce csum2, pos, p_len); 2730c613c209SWillem de Bruijn pos += p_len; 2731c613c209SWillem de Bruijn } 2732c613c209SWillem de Bruijn 27331da177e4SLinus Torvalds if (!(len -= copy)) 27341da177e4SLinus Torvalds return csum; 27351da177e4SLinus Torvalds offset += copy; 27361da177e4SLinus Torvalds } 27371a028e50SDavid S. Miller start = end; 27381da177e4SLinus Torvalds } 27391da177e4SLinus Torvalds 2740fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 27411a028e50SDavid S. Miller int end; 27421da177e4SLinus Torvalds 2743547b792cSIlpo Järvinen WARN_ON(start > offset + len); 27441a028e50SDavid S. Miller 2745fbb398a8SDavid S. Miller end = start + frag_iter->len; 27461da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 27475f92a738SAl Viro __wsum csum2; 27481da177e4SLinus Torvalds if (copy > len) 27491da177e4SLinus Torvalds copy = len; 27502817a336SDaniel Borkmann csum2 = __skb_checksum(frag_iter, offset - start, 27512817a336SDaniel Borkmann copy, 0, ops); 27522544af03SMatteo Croce csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext, 27532544af03SMatteo Croce csum, csum2, pos, copy); 27541da177e4SLinus Torvalds if ((len -= copy) == 0) 27551da177e4SLinus Torvalds return csum; 27561da177e4SLinus Torvalds offset += copy; 27571da177e4SLinus Torvalds pos += copy; 27581da177e4SLinus Torvalds } 27591a028e50SDavid S. 
Miller start = end; 27601da177e4SLinus Torvalds } 276109a62660SKris Katterjohn BUG_ON(len); 27621da177e4SLinus Torvalds 27631da177e4SLinus Torvalds return csum; 27641da177e4SLinus Torvalds } 27652817a336SDaniel Borkmann EXPORT_SYMBOL(__skb_checksum); 27662817a336SDaniel Borkmann 27672817a336SDaniel Borkmann __wsum skb_checksum(const struct sk_buff *skb, int offset, 27682817a336SDaniel Borkmann int len, __wsum csum) 27692817a336SDaniel Borkmann { 27702817a336SDaniel Borkmann const struct skb_checksum_ops ops = { 2771cea80ea8SDaniel Borkmann .update = csum_partial_ext, 27722817a336SDaniel Borkmann .combine = csum_block_add_ext, 27732817a336SDaniel Borkmann }; 27742817a336SDaniel Borkmann 27752817a336SDaniel Borkmann return __skb_checksum(skb, offset, len, csum, &ops); 27762817a336SDaniel Borkmann } 2777b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_checksum); 27781da177e4SLinus Torvalds 27791da177e4SLinus Torvalds /* Both of above in one bottle. */ 27801da177e4SLinus Torvalds 278181d77662SAl Viro __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 27828d5930dfSAl Viro u8 *to, int len) 27831da177e4SLinus Torvalds { 27841a028e50SDavid S. Miller int start = skb_headlen(skb); 27851a028e50SDavid S. Miller int i, copy = start - offset; 2786fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 27871da177e4SLinus Torvalds int pos = 0; 27888d5930dfSAl Viro __wsum csum = 0; 27891da177e4SLinus Torvalds 27901da177e4SLinus Torvalds /* Copy header. */ 27911da177e4SLinus Torvalds if (copy > 0) { 27921da177e4SLinus Torvalds if (copy > len) 27931da177e4SLinus Torvalds copy = len; 27941da177e4SLinus Torvalds csum = csum_partial_copy_nocheck(skb->data + offset, to, 2795cc44c17bSAl Viro copy); 27961da177e4SLinus Torvalds if ((len -= copy) == 0) 27971da177e4SLinus Torvalds return csum; 27981da177e4SLinus Torvalds offset += copy; 27991da177e4SLinus Torvalds to += copy; 28001da177e4SLinus Torvalds pos = copy; 28011da177e4SLinus Torvalds } 28021da177e4SLinus Torvalds 28031da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 28041a028e50SDavid S. Miller int end; 28051da177e4SLinus Torvalds 2806547b792cSIlpo Järvinen WARN_ON(start > offset + len); 28071a028e50SDavid S. 
Miller 28089e903e08SEric Dumazet end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 28091da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 2810c613c209SWillem de Bruijn skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2811c613c209SWillem de Bruijn u32 p_off, p_len, copied; 2812c613c209SWillem de Bruijn struct page *p; 28135084205fSAl Viro __wsum csum2; 28141da177e4SLinus Torvalds u8 *vaddr; 28151da177e4SLinus Torvalds 28161da177e4SLinus Torvalds if (copy > len) 28171da177e4SLinus Torvalds copy = len; 2818c613c209SWillem de Bruijn 2819c613c209SWillem de Bruijn skb_frag_foreach_page(frag, 2820b54c9d5bSJonathan Lemon skb_frag_off(frag) + offset - start, 2821c613c209SWillem de Bruijn copy, p, p_off, p_len, copied) { 2822c613c209SWillem de Bruijn vaddr = kmap_atomic(p); 2823c613c209SWillem de Bruijn csum2 = csum_partial_copy_nocheck(vaddr + p_off, 2824c613c209SWillem de Bruijn to + copied, 2825cc44c17bSAl Viro p_len); 282651c56b00SEric Dumazet kunmap_atomic(vaddr); 28271da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 2828c613c209SWillem de Bruijn pos += p_len; 2829c613c209SWillem de Bruijn } 2830c613c209SWillem de Bruijn 28311da177e4SLinus Torvalds if (!(len -= copy)) 28321da177e4SLinus Torvalds return csum; 28331da177e4SLinus Torvalds offset += copy; 28341da177e4SLinus Torvalds to += copy; 28351da177e4SLinus Torvalds } 28361a028e50SDavid S. Miller start = end; 28371da177e4SLinus Torvalds } 28381da177e4SLinus Torvalds 2839fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 284081d77662SAl Viro __wsum csum2; 28411a028e50SDavid S. Miller int end; 28421da177e4SLinus Torvalds 2843547b792cSIlpo Järvinen WARN_ON(start > offset + len); 28441a028e50SDavid S. Miller 2845fbb398a8SDavid S. Miller end = start + frag_iter->len; 28461da177e4SLinus Torvalds if ((copy = end - offset) > 0) { 28471da177e4SLinus Torvalds if (copy > len) 28481da177e4SLinus Torvalds copy = len; 2849fbb398a8SDavid S. Miller csum2 = skb_copy_and_csum_bits(frag_iter, 28501a028e50SDavid S. Miller offset - start, 28518d5930dfSAl Viro to, copy); 28521da177e4SLinus Torvalds csum = csum_block_add(csum, csum2, pos); 28531da177e4SLinus Torvalds if ((len -= copy) == 0) 28541da177e4SLinus Torvalds return csum; 28551da177e4SLinus Torvalds offset += copy; 28561da177e4SLinus Torvalds to += copy; 28571da177e4SLinus Torvalds pos += copy; 28581da177e4SLinus Torvalds } 28591a028e50SDavid S. Miller start = end; 28601da177e4SLinus Torvalds } 286109a62660SKris Katterjohn BUG_ON(len); 28621da177e4SLinus Torvalds return csum; 28631da177e4SLinus Torvalds } 2864b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_bits); 28651da177e4SLinus Torvalds 286649f8e832SCong Wang __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) 286749f8e832SCong Wang { 286849f8e832SCong Wang __sum16 sum; 286949f8e832SCong Wang 287049f8e832SCong Wang sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); 287114641931SCong Wang /* See comments in __skb_checksum_complete(). 
*/ 287249f8e832SCong Wang if (likely(!sum)) { 287349f8e832SCong Wang if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 287449f8e832SCong Wang !skb->csum_complete_sw) 28757fe50ac8SCong Wang netdev_rx_csum_fault(skb->dev, skb); 287649f8e832SCong Wang } 287749f8e832SCong Wang if (!skb_shared(skb)) 287849f8e832SCong Wang skb->csum_valid = !sum; 287949f8e832SCong Wang return sum; 288049f8e832SCong Wang } 288149f8e832SCong Wang EXPORT_SYMBOL(__skb_checksum_complete_head); 288249f8e832SCong Wang 288314641931SCong Wang /* This function assumes skb->csum already holds pseudo header's checksum, 288414641931SCong Wang * which has been changed from the hardware checksum, for example, by 288514641931SCong Wang * __skb_checksum_validate_complete(). And, the original skb->csum must 288614641931SCong Wang * have been validated unsuccessfully for CHECKSUM_COMPLETE case. 288714641931SCong Wang * 288814641931SCong Wang * It returns non-zero if the recomputed checksum is still invalid, otherwise 288914641931SCong Wang * zero. The new checksum is stored back into skb->csum unless the skb is 289014641931SCong Wang * shared. 289114641931SCong Wang */ 289249f8e832SCong Wang __sum16 __skb_checksum_complete(struct sk_buff *skb) 289349f8e832SCong Wang { 289449f8e832SCong Wang __wsum csum; 289549f8e832SCong Wang __sum16 sum; 289649f8e832SCong Wang 289749f8e832SCong Wang csum = skb_checksum(skb, 0, skb->len, 0); 289849f8e832SCong Wang 289949f8e832SCong Wang sum = csum_fold(csum_add(skb->csum, csum)); 290014641931SCong Wang /* This check is inverted, because we already knew the hardware 290114641931SCong Wang * checksum is invalid before calling this function. So, if the 290214641931SCong Wang * re-computed checksum is valid instead, then we have a mismatch 290314641931SCong Wang * between the original skb->csum and skb_checksum(). This means either 290414641931SCong Wang * the original hardware checksum is incorrect or we screw up skb->csum 290514641931SCong Wang * when moving skb->data around. 
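 *
 * Protocols normally reach this path through the skb_checksum_complete()
 * wrapper rather than calling it directly; a minimal sketch of a typical
 * receive-side caller:
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 *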
290614641931SCong Wang */ 290749f8e832SCong Wang if (likely(!sum)) { 290849f8e832SCong Wang if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && 290949f8e832SCong Wang !skb->csum_complete_sw) 29107fe50ac8SCong Wang netdev_rx_csum_fault(skb->dev, skb); 291149f8e832SCong Wang } 291249f8e832SCong Wang 291349f8e832SCong Wang if (!skb_shared(skb)) { 291449f8e832SCong Wang /* Save full packet checksum */ 291549f8e832SCong Wang skb->csum = csum; 291649f8e832SCong Wang skb->ip_summed = CHECKSUM_COMPLETE; 291749f8e832SCong Wang skb->csum_complete_sw = 1; 291849f8e832SCong Wang skb->csum_valid = !sum; 291949f8e832SCong Wang } 292049f8e832SCong Wang 292149f8e832SCong Wang return sum; 292249f8e832SCong Wang } 292349f8e832SCong Wang EXPORT_SYMBOL(__skb_checksum_complete); 292449f8e832SCong Wang 29259617813dSDavide Caratti static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum) 29269617813dSDavide Caratti { 29279617813dSDavide Caratti net_warn_ratelimited( 29289617813dSDavide Caratti "%s: attempt to compute crc32c without libcrc32c.ko\n", 29299617813dSDavide Caratti __func__); 29309617813dSDavide Caratti return 0; 29319617813dSDavide Caratti } 29329617813dSDavide Caratti 29339617813dSDavide Caratti static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2, 29349617813dSDavide Caratti int offset, int len) 29359617813dSDavide Caratti { 29369617813dSDavide Caratti net_warn_ratelimited( 29379617813dSDavide Caratti "%s: attempt to compute crc32c without libcrc32c.ko\n", 29389617813dSDavide Caratti __func__); 29399617813dSDavide Caratti return 0; 29409617813dSDavide Caratti } 29419617813dSDavide Caratti 29429617813dSDavide Caratti static const struct skb_checksum_ops default_crc32c_ops = { 29439617813dSDavide Caratti .update = warn_crc32c_csum_update, 29449617813dSDavide Caratti .combine = warn_crc32c_csum_combine, 29459617813dSDavide Caratti }; 29469617813dSDavide Caratti 29479617813dSDavide Caratti const struct skb_checksum_ops *crc32c_csum_stub __read_mostly = 29489617813dSDavide Caratti &default_crc32c_ops; 29499617813dSDavide Caratti EXPORT_SYMBOL(crc32c_csum_stub); 29509617813dSDavide Caratti 2951af2806f8SThomas Graf /** 2952af2806f8SThomas Graf * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() 2953af2806f8SThomas Graf * @from: source buffer 2954af2806f8SThomas Graf * 2955af2806f8SThomas Graf * Calculates the amount of linear headroom needed in the 'to' skb passed 2956af2806f8SThomas Graf * into skb_zerocopy(). 
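 *
 * A minimal sketch of the intended pairing with skb_zerocopy(); the
 * allocation shown is illustrative only:
 *
 *	hlen = skb_zerocopy_headlen(from);
 *	to = alloc_skb(hlen, GFP_ATOMIC);
 *	if (!to || skb_zerocopy(to, from, from->len, hlen))
 *		goto err;
 *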
2957af2806f8SThomas Graf */ 2958af2806f8SThomas Graf unsigned int 2959af2806f8SThomas Graf skb_zerocopy_headlen(const struct sk_buff *from) 2960af2806f8SThomas Graf { 2961af2806f8SThomas Graf unsigned int hlen = 0; 2962af2806f8SThomas Graf 2963af2806f8SThomas Graf if (!from->head_frag || 2964af2806f8SThomas Graf skb_headlen(from) < L1_CACHE_BYTES || 2965af2806f8SThomas Graf skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 2966af2806f8SThomas Graf hlen = skb_headlen(from); 2967af2806f8SThomas Graf 2968af2806f8SThomas Graf if (skb_has_frag_list(from)) 2969af2806f8SThomas Graf hlen = from->len; 2970af2806f8SThomas Graf 2971af2806f8SThomas Graf return hlen; 2972af2806f8SThomas Graf } 2973af2806f8SThomas Graf EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); 2974af2806f8SThomas Graf 2975af2806f8SThomas Graf /** 2976af2806f8SThomas Graf * skb_zerocopy - Zero copy skb to skb 2977af2806f8SThomas Graf * @to: destination buffer 29787fceb4deSMasanari Iida * @from: source buffer 2979af2806f8SThomas Graf * @len: number of bytes to copy from source buffer 2980af2806f8SThomas Graf * @hlen: size of linear headroom in destination buffer 2981af2806f8SThomas Graf * 2982af2806f8SThomas Graf * Copies up to `len` bytes from `from` to `to` by creating references 2983af2806f8SThomas Graf * to the frags in the source buffer. 2984af2806f8SThomas Graf * 2985af2806f8SThomas Graf * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 2986af2806f8SThomas Graf * headroom in the `to` buffer. 298736d5fe6aSZoltan Kiss * 298836d5fe6aSZoltan Kiss * Return value: 298936d5fe6aSZoltan Kiss * 0: everything is OK 299036d5fe6aSZoltan Kiss * -ENOMEM: couldn't orphan frags of @from due to lack of memory 299136d5fe6aSZoltan Kiss * -EFAULT: skb_copy_bits() found some problem with skb geometry 2992af2806f8SThomas Graf */ 299336d5fe6aSZoltan Kiss int 299436d5fe6aSZoltan Kiss skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) 2995af2806f8SThomas Graf { 2996af2806f8SThomas Graf int i, j = 0; 2997af2806f8SThomas Graf int plen = 0; /* length of skb->head fragment */ 299836d5fe6aSZoltan Kiss int ret; 2999af2806f8SThomas Graf struct page *page; 3000af2806f8SThomas Graf unsigned int offset; 3001af2806f8SThomas Graf 3002af2806f8SThomas Graf BUG_ON(!from->head_frag && !hlen); 3003af2806f8SThomas Graf 3004af2806f8SThomas Graf /* dont bother with small payloads */ 300536d5fe6aSZoltan Kiss if (len <= skb_tailroom(to)) 300636d5fe6aSZoltan Kiss return skb_copy_bits(from, 0, skb_put(to, len), len); 3007af2806f8SThomas Graf 3008af2806f8SThomas Graf if (hlen) { 300936d5fe6aSZoltan Kiss ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 301036d5fe6aSZoltan Kiss if (unlikely(ret)) 301136d5fe6aSZoltan Kiss return ret; 3012af2806f8SThomas Graf len -= hlen; 3013af2806f8SThomas Graf } else { 3014af2806f8SThomas Graf plen = min_t(int, skb_headlen(from), len); 3015af2806f8SThomas Graf if (plen) { 3016af2806f8SThomas Graf page = virt_to_head_page(from->head); 3017af2806f8SThomas Graf offset = from->data - (unsigned char *)page_address(page); 3018af2806f8SThomas Graf __skb_fill_page_desc(to, 0, page, offset, plen); 3019af2806f8SThomas Graf get_page(page); 3020af2806f8SThomas Graf j = 1; 3021af2806f8SThomas Graf len -= plen; 3022af2806f8SThomas Graf } 3023af2806f8SThomas Graf } 3024af2806f8SThomas Graf 3025af2806f8SThomas Graf to->truesize += len + plen; 3026af2806f8SThomas Graf to->len += len + plen; 3027af2806f8SThomas Graf to->data_len += len + plen; 3028af2806f8SThomas Graf 302936d5fe6aSZoltan Kiss if (unlikely(skb_orphan_frags(from, 
GFP_ATOMIC))) { 303036d5fe6aSZoltan Kiss skb_tx_error(from); 303136d5fe6aSZoltan Kiss return -ENOMEM; 303236d5fe6aSZoltan Kiss } 30331f8b977aSWillem de Bruijn skb_zerocopy_clone(to, from, GFP_ATOMIC); 303436d5fe6aSZoltan Kiss 3035af2806f8SThomas Graf for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 3036d8e18a51SMatthew Wilcox (Oracle) int size; 3037d8e18a51SMatthew Wilcox (Oracle) 3038af2806f8SThomas Graf if (!len) 3039af2806f8SThomas Graf break; 3040af2806f8SThomas Graf skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; 3041d8e18a51SMatthew Wilcox (Oracle) size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]), 3042d8e18a51SMatthew Wilcox (Oracle) len); 3043d8e18a51SMatthew Wilcox (Oracle) skb_frag_size_set(&skb_shinfo(to)->frags[j], size); 3044d8e18a51SMatthew Wilcox (Oracle) len -= size; 3045af2806f8SThomas Graf skb_frag_ref(to, j); 3046af2806f8SThomas Graf j++; 3047af2806f8SThomas Graf } 3048af2806f8SThomas Graf skb_shinfo(to)->nr_frags = j; 304936d5fe6aSZoltan Kiss 305036d5fe6aSZoltan Kiss return 0; 3051af2806f8SThomas Graf } 3052af2806f8SThomas Graf EXPORT_SYMBOL_GPL(skb_zerocopy); 3053af2806f8SThomas Graf 30541da177e4SLinus Torvalds void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 30551da177e4SLinus Torvalds { 3056d3bc23e7SAl Viro __wsum csum; 30571da177e4SLinus Torvalds long csstart; 30581da177e4SLinus Torvalds 305984fa7933SPatrick McHardy if (skb->ip_summed == CHECKSUM_PARTIAL) 306055508d60SMichał Mirosław csstart = skb_checksum_start_offset(skb); 30611da177e4SLinus Torvalds else 30621da177e4SLinus Torvalds csstart = skb_headlen(skb); 30631da177e4SLinus Torvalds 306409a62660SKris Katterjohn BUG_ON(csstart > skb_headlen(skb)); 30651da177e4SLinus Torvalds 3066d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data(skb, to, csstart); 30671da177e4SLinus Torvalds 30681da177e4SLinus Torvalds csum = 0; 30691da177e4SLinus Torvalds if (csstart != skb->len) 30701da177e4SLinus Torvalds csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 30718d5930dfSAl Viro skb->len - csstart); 30721da177e4SLinus Torvalds 307384fa7933SPatrick McHardy if (skb->ip_summed == CHECKSUM_PARTIAL) { 3074ff1dcadbSAl Viro long csstuff = csstart + skb->csum_offset; 30751da177e4SLinus Torvalds 3076d3bc23e7SAl Viro *((__sum16 *)(to + csstuff)) = csum_fold(csum); 30771da177e4SLinus Torvalds } 30781da177e4SLinus Torvalds } 3079b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_copy_and_csum_dev); 30801da177e4SLinus Torvalds 30811da177e4SLinus Torvalds /** 30821da177e4SLinus Torvalds * skb_dequeue - remove from the head of the queue 30831da177e4SLinus Torvalds * @list: list to dequeue from 30841da177e4SLinus Torvalds * 30851da177e4SLinus Torvalds * Remove the head of the list. The list lock is taken so the function 30861da177e4SLinus Torvalds * may be used safely with other locking list functions. The head item is 30871da177e4SLinus Torvalds * returned or %NULL if the list is empty. 30881da177e4SLinus Torvalds */ 30891da177e4SLinus Torvalds 30901da177e4SLinus Torvalds struct sk_buff *skb_dequeue(struct sk_buff_head *list) 30911da177e4SLinus Torvalds { 30921da177e4SLinus Torvalds unsigned long flags; 30931da177e4SLinus Torvalds struct sk_buff *result; 30941da177e4SLinus Torvalds 30951da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 30961da177e4SLinus Torvalds result = __skb_dequeue(list); 30971da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 30981da177e4SLinus Torvalds return result; 30991da177e4SLinus Torvalds } 3100b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_dequeue); 31011da177e4SLinus Torvalds 31021da177e4SLinus Torvalds /** 31031da177e4SLinus Torvalds * skb_dequeue_tail - remove from the tail of the queue 31041da177e4SLinus Torvalds * @list: list to dequeue from 31051da177e4SLinus Torvalds * 31061da177e4SLinus Torvalds * Remove the tail of the list. The list lock is taken so the function 31071da177e4SLinus Torvalds * may be used safely with other locking list functions. The tail item is 31081da177e4SLinus Torvalds * returned or %NULL if the list is empty. 31091da177e4SLinus Torvalds */ 31101da177e4SLinus Torvalds struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 31111da177e4SLinus Torvalds { 31121da177e4SLinus Torvalds unsigned long flags; 31131da177e4SLinus Torvalds struct sk_buff *result; 31141da177e4SLinus Torvalds 31151da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 31161da177e4SLinus Torvalds result = __skb_dequeue_tail(list); 31171da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 31181da177e4SLinus Torvalds return result; 31191da177e4SLinus Torvalds } 3120b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_dequeue_tail); 31211da177e4SLinus Torvalds 31221da177e4SLinus Torvalds /** 31231da177e4SLinus Torvalds * skb_queue_purge - empty a list 31241da177e4SLinus Torvalds * @list: list to empty 31251da177e4SLinus Torvalds * 31261da177e4SLinus Torvalds * Delete all buffers on an &sk_buff list. Each buffer is removed from 31271da177e4SLinus Torvalds * the list and one reference dropped. This function takes the list 31281da177e4SLinus Torvalds * lock and is atomic with respect to other list locking functions. 31291da177e4SLinus Torvalds */ 31301da177e4SLinus Torvalds void skb_queue_purge(struct sk_buff_head *list) 31311da177e4SLinus Torvalds { 31321da177e4SLinus Torvalds struct sk_buff *skb; 31331da177e4SLinus Torvalds while ((skb = skb_dequeue(list)) != NULL) 31341da177e4SLinus Torvalds kfree_skb(skb); 31351da177e4SLinus Torvalds } 3136b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_purge); 31371da177e4SLinus Torvalds 31381da177e4SLinus Torvalds /** 31399f5afeaeSYaogong Wang * skb_rbtree_purge - empty a skb rbtree 31409f5afeaeSYaogong Wang * @root: root of the rbtree to empty 3141385114deSPeter Oskolkov * Return value: the sum of truesizes of all purged skbs. 31429f5afeaeSYaogong Wang * 31439f5afeaeSYaogong Wang * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from 31449f5afeaeSYaogong Wang * the list and one reference dropped. This function does not take 31459f5afeaeSYaogong Wang * any lock. Synchronization should be handled by the caller (e.g., TCP 31469f5afeaeSYaogong Wang * out-of-order queue is protected by the socket lock). 
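 *
 * For example, TCP drops its entire out-of-order queue this way
 * (illustrative; tp is the tcp_sock owning the rbtree):
 *
 *	skb_rbtree_purge(&tp->out_of_order_queue);
 *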
31479f5afeaeSYaogong Wang */
3148385114deSPeter Oskolkov unsigned int skb_rbtree_purge(struct rb_root *root)
31499f5afeaeSYaogong Wang {
31507c90584cSEric Dumazet struct rb_node *p = rb_first(root);
3151385114deSPeter Oskolkov unsigned int sum = 0;
31529f5afeaeSYaogong Wang
31537c90584cSEric Dumazet while (p) {
31547c90584cSEric Dumazet struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
31557c90584cSEric Dumazet
31567c90584cSEric Dumazet p = rb_next(p);
31577c90584cSEric Dumazet rb_erase(&skb->rbnode, root);
3158385114deSPeter Oskolkov sum += skb->truesize;
31599f5afeaeSYaogong Wang kfree_skb(skb);
31607c90584cSEric Dumazet }
3161385114deSPeter Oskolkov return sum;
31629f5afeaeSYaogong Wang }
31639f5afeaeSYaogong Wang
31649f5afeaeSYaogong Wang /**
31651da177e4SLinus Torvalds * skb_queue_head - queue a buffer at the list head
31661da177e4SLinus Torvalds * @list: list to use
31671da177e4SLinus Torvalds * @newsk: buffer to queue
31681da177e4SLinus Torvalds *
31691da177e4SLinus Torvalds * Queue a buffer at the start of the list. This function takes the
31701da177e4SLinus Torvalds * list lock and can be used with other locking &sk_buff functions
31711da177e4SLinus Torvalds * safely.
31721da177e4SLinus Torvalds *
31731da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time.
31741da177e4SLinus Torvalds */
31751da177e4SLinus Torvalds void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
31761da177e4SLinus Torvalds {
31771da177e4SLinus Torvalds unsigned long flags;
31781da177e4SLinus Torvalds
31791da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags);
31801da177e4SLinus Torvalds __skb_queue_head(list, newsk);
31811da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags);
31821da177e4SLinus Torvalds }
3183b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_head);
31841da177e4SLinus Torvalds
31851da177e4SLinus Torvalds /**
31861da177e4SLinus Torvalds * skb_queue_tail - queue a buffer at the list tail
31871da177e4SLinus Torvalds * @list: list to use
31881da177e4SLinus Torvalds * @newsk: buffer to queue
31891da177e4SLinus Torvalds *
31901da177e4SLinus Torvalds * Queue a buffer at the tail of the list. This function takes the
31911da177e4SLinus Torvalds * list lock and can be used with other locking &sk_buff functions
31921da177e4SLinus Torvalds * safely.
31931da177e4SLinus Torvalds *
31941da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time.
31951da177e4SLinus Torvalds */
31961da177e4SLinus Torvalds void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
31971da177e4SLinus Torvalds {
31981da177e4SLinus Torvalds unsigned long flags;
31991da177e4SLinus Torvalds
32001da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags);
32011da177e4SLinus Torvalds __skb_queue_tail(list, newsk);
32021da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags);
32031da177e4SLinus Torvalds }
3204b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_queue_tail);
32058728b834SDavid S. Miller
32061da177e4SLinus Torvalds /**
32071da177e4SLinus Torvalds * skb_unlink - remove a buffer from a list
32081da177e4SLinus Torvalds * @skb: buffer to remove
32098728b834SDavid S. Miller * @list: list to use
32101da177e4SLinus Torvalds *
32118728b834SDavid S. Miller * Remove a packet from a list. The list locks are taken and this
32128728b834SDavid S. Miller * function is atomic with respect to other list locking calls.
32131da177e4SLinus Torvalds *
32148728b834SDavid S. Miller * You must know what list the SKB is on.
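 *
 * A minimal sketch of how these locked list helpers fit together; the
 * receive queue is used purely as an example:
 *
 *	skb_queue_tail(&sk->sk_receive_queue, skb);	(producer side)
 *	skb = skb_dequeue(&sk->sk_receive_queue);	(consumer side)
 *
 * skb_unlink() covers the remaining case: removing one particular buffer
 * from somewhere in the middle of a list it is known to sit on.
 *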
32151da177e4SLinus Torvalds */ 32168728b834SDavid S. Miller void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 32171da177e4SLinus Torvalds { 32181da177e4SLinus Torvalds unsigned long flags; 32191da177e4SLinus Torvalds 32201da177e4SLinus Torvalds spin_lock_irqsave(&list->lock, flags); 32218728b834SDavid S. Miller __skb_unlink(skb, list); 32221da177e4SLinus Torvalds spin_unlock_irqrestore(&list->lock, flags); 32231da177e4SLinus Torvalds } 3224b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_unlink); 32251da177e4SLinus Torvalds 32261da177e4SLinus Torvalds /** 32271da177e4SLinus Torvalds * skb_append - append a buffer 32281da177e4SLinus Torvalds * @old: buffer to insert after 32291da177e4SLinus Torvalds * @newsk: buffer to insert 32308728b834SDavid S. Miller * @list: list to use 32311da177e4SLinus Torvalds * 32321da177e4SLinus Torvalds * Place a packet after a given packet in a list. The list locks are taken 32331da177e4SLinus Torvalds * and this function is atomic with respect to other list locked calls. 32341da177e4SLinus Torvalds * A buffer cannot be placed on two lists at the same time. 32351da177e4SLinus Torvalds */ 32368728b834SDavid S. Miller void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 32371da177e4SLinus Torvalds { 32381da177e4SLinus Torvalds unsigned long flags; 32391da177e4SLinus Torvalds 32408728b834SDavid S. Miller spin_lock_irqsave(&list->lock, flags); 32417de6c033SGerrit Renker __skb_queue_after(list, old, newsk); 32428728b834SDavid S. Miller spin_unlock_irqrestore(&list->lock, flags); 32431da177e4SLinus Torvalds } 3244b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_append); 32451da177e4SLinus Torvalds 32461da177e4SLinus Torvalds static inline void skb_split_inside_header(struct sk_buff *skb, 32471da177e4SLinus Torvalds struct sk_buff* skb1, 32481da177e4SLinus Torvalds const u32 len, const int pos) 32491da177e4SLinus Torvalds { 32501da177e4SLinus Torvalds int i; 32511da177e4SLinus Torvalds 3252d626f62bSArnaldo Carvalho de Melo skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), 3253d626f62bSArnaldo Carvalho de Melo pos - len); 32541da177e4SLinus Torvalds /* And move data appendix as is. 
*/
32551da177e4SLinus Torvalds for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
32561da177e4SLinus Torvalds skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
32571da177e4SLinus Torvalds
32581da177e4SLinus Torvalds skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
32591da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = 0;
32601da177e4SLinus Torvalds skb1->data_len = skb->data_len;
32611da177e4SLinus Torvalds skb1->len += skb1->data_len;
32621da177e4SLinus Torvalds skb->data_len = 0;
32631da177e4SLinus Torvalds skb->len = len;
326427a884dcSArnaldo Carvalho de Melo skb_set_tail_pointer(skb, len);
32651da177e4SLinus Torvalds }
32661da177e4SLinus Torvalds
32671da177e4SLinus Torvalds static inline void skb_split_no_header(struct sk_buff *skb,
32681da177e4SLinus Torvalds struct sk_buff* skb1,
32691da177e4SLinus Torvalds const u32 len, int pos)
32701da177e4SLinus Torvalds {
32711da177e4SLinus Torvalds int i, k = 0;
32721da177e4SLinus Torvalds const int nfrags = skb_shinfo(skb)->nr_frags;
32731da177e4SLinus Torvalds
32741da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags = 0;
32751da177e4SLinus Torvalds skb1->len = skb1->data_len = skb->len - len;
32761da177e4SLinus Torvalds skb->len = len;
32771da177e4SLinus Torvalds skb->data_len = len - pos;
32781da177e4SLinus Torvalds
32791da177e4SLinus Torvalds for (i = 0; i < nfrags; i++) {
32809e903e08SEric Dumazet int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
32811da177e4SLinus Torvalds
32821da177e4SLinus Torvalds if (pos + size > len) {
32831da177e4SLinus Torvalds skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
32841da177e4SLinus Torvalds
32851da177e4SLinus Torvalds if (pos < len) {
32861da177e4SLinus Torvalds /* Split frag.
32871da177e4SLinus Torvalds * We have two variants in this case:
32881da177e4SLinus Torvalds * 1. Move the whole frag to the second
32891da177e4SLinus Torvalds * part, if it is possible. F.e.
32901da177e4SLinus Torvalds * this approach is mandatory for TUX,
32911da177e4SLinus Torvalds * where splitting is expensive.
32921da177e4SLinus Torvalds * 2. Split accurately. This is what we do here.
32931da177e4SLinus Torvalds */
3294ea2ab693SIan Campbell skb_frag_ref(skb, i);
3295b54c9d5bSJonathan Lemon skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
32969e903e08SEric Dumazet skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
32979e903e08SEric Dumazet skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
32981da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags++;
32991da177e4SLinus Torvalds }
33001da177e4SLinus Torvalds k++;
33011da177e4SLinus Torvalds } else
33021da177e4SLinus Torvalds skb_shinfo(skb)->nr_frags++;
33031da177e4SLinus Torvalds pos += size;
33041da177e4SLinus Torvalds }
33051da177e4SLinus Torvalds skb_shinfo(skb1)->nr_frags = k;
33061da177e4SLinus Torvalds }
33071da177e4SLinus Torvalds
33081da177e4SLinus Torvalds /**
33091da177e4SLinus Torvalds * skb_split - Split fragmented skb into two parts at length len.
33101da177e4SLinus Torvalds * @skb: the buffer to split
33111da177e4SLinus Torvalds * @skb1: the buffer to receive the second part
33121da177e4SLinus Torvalds * @len: new length for skb
33131da177e4SLinus Torvalds */
33141da177e4SLinus Torvalds void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
33151da177e4SLinus Torvalds {
33161da177e4SLinus Torvalds int pos = skb_headlen(skb);
33171da177e4SLinus Torvalds
331806b4feb3SJonathan Lemon skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
33191f8b977aSWillem de Bruijn skb_zerocopy_clone(skb1, skb, 0);
33201da177e4SLinus Torvalds if (len < pos) /* Split line is inside header. */
33211da177e4SLinus Torvalds skb_split_inside_header(skb, skb1, len, pos);
33221da177e4SLinus Torvalds else /* Second chunk has no header, nothing to copy. */
33231da177e4SLinus Torvalds skb_split_no_header(skb, skb1, len, pos);
33241da177e4SLinus Torvalds }
3325b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_split);
33261da177e4SLinus Torvalds
33279f782db3SIlpo Järvinen /* Shifting from/to a cloned skb is a no-go.
33289f782db3SIlpo Järvinen *
33299f782db3SIlpo Järvinen * Caller cannot keep skb_shinfo related pointers past calling here!
33309f782db3SIlpo Järvinen */
3331832d11c5SIlpo Järvinen static int skb_prepare_for_shift(struct sk_buff *skb)
3332832d11c5SIlpo Järvinen {
3333097b9146SMarco Elver int ret = 0;
3334097b9146SMarco Elver
3335097b9146SMarco Elver if (skb_cloned(skb)) {
3336097b9146SMarco Elver /* Save and restore truesize: pskb_expand_head() may reallocate
3337097b9146SMarco Elver * memory where ksize(kmalloc(S)) != ksize(kmalloc(S)), but we
3338097b9146SMarco Elver * cannot change truesize at this point.
3339097b9146SMarco Elver */
3340097b9146SMarco Elver unsigned int save_truesize = skb->truesize;
3341097b9146SMarco Elver
3342097b9146SMarco Elver ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3343097b9146SMarco Elver skb->truesize = save_truesize;
3344097b9146SMarco Elver }
3345097b9146SMarco Elver return ret;
3346832d11c5SIlpo Järvinen }
3347832d11c5SIlpo Järvinen
3348832d11c5SIlpo Järvinen /**
3349832d11c5SIlpo Järvinen * skb_shift - Shifts paged data partially from one skb to another
3350832d11c5SIlpo Järvinen * @tgt: buffer into which tail data gets added
3351832d11c5SIlpo Järvinen * @skb: buffer from which the paged data comes
3352832d11c5SIlpo Järvinen * @shiftlen: shift up to this many bytes
3353832d11c5SIlpo Järvinen *
3354832d11c5SIlpo Järvinen * Attempts to shift up to shiftlen worth of bytes, which may be less than
335520e994a0SFeng King * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
3356832d11c5SIlpo Järvinen * It's up to the caller to free skb if everything was shifted.
3357832d11c5SIlpo Järvinen *
3358832d11c5SIlpo Järvinen * If @tgt runs out of frags, the whole operation is aborted.
3359832d11c5SIlpo Järvinen *
3360832d11c5SIlpo Järvinen * The skb cannot include anything other than paged data, while tgt is allowed
3361832d11c5SIlpo Järvinen * to have non-paged data as well.
3362832d11c5SIlpo Järvinen *
3363832d11c5SIlpo Järvinen * TODO: full sized shift could be optimized but that would need a
3364832d11c5SIlpo Järvinen * specialized skb free'er to handle frags without up-to-date nr_frags.
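 *
 * A rough usage sketch, modelled on how TCP coalesces SACKed data in its
 * retransmit queue (names are illustrative):
 *
 *	shifted = skb_shift(prev, skb, len);
 *	if (shifted == skb->len)
 *		kfree_skb(skb);
 *
 * where the freed skb must first be unlinked from whatever queue it sat
 * on; a return of 0 means nothing could be moved and both buffers are
 * left untouched.
 *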
3365832d11c5SIlpo Järvinen */ 3366832d11c5SIlpo Järvinen int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) 3367832d11c5SIlpo Järvinen { 3368832d11c5SIlpo Järvinen int from, to, merge, todo; 3369d8e18a51SMatthew Wilcox (Oracle) skb_frag_t *fragfrom, *fragto; 3370832d11c5SIlpo Järvinen 3371832d11c5SIlpo Järvinen BUG_ON(shiftlen > skb->len); 3372f8071cdeSEric Dumazet 3373f8071cdeSEric Dumazet if (skb_headlen(skb)) 3374f8071cdeSEric Dumazet return 0; 33751f8b977aSWillem de Bruijn if (skb_zcopy(tgt) || skb_zcopy(skb)) 33761f8b977aSWillem de Bruijn return 0; 3377832d11c5SIlpo Järvinen 3378832d11c5SIlpo Järvinen todo = shiftlen; 3379832d11c5SIlpo Järvinen from = 0; 3380832d11c5SIlpo Järvinen to = skb_shinfo(tgt)->nr_frags; 3381832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 3382832d11c5SIlpo Järvinen 3383832d11c5SIlpo Järvinen /* Actual merge is delayed until the point when we know we can 3384832d11c5SIlpo Järvinen * commit all, so that we don't have to undo partial changes 3385832d11c5SIlpo Järvinen */ 3386832d11c5SIlpo Järvinen if (!to || 3387ea2ab693SIan Campbell !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), 3388b54c9d5bSJonathan Lemon skb_frag_off(fragfrom))) { 3389832d11c5SIlpo Järvinen merge = -1; 3390832d11c5SIlpo Järvinen } else { 3391832d11c5SIlpo Järvinen merge = to - 1; 3392832d11c5SIlpo Järvinen 33939e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 3394832d11c5SIlpo Järvinen if (todo < 0) { 3395832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || 3396832d11c5SIlpo Järvinen skb_prepare_for_shift(tgt)) 3397832d11c5SIlpo Järvinen return 0; 3398832d11c5SIlpo Järvinen 33999f782db3SIlpo Järvinen /* All previous frag pointers might be stale! */ 34009f782db3SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 3401832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 3402832d11c5SIlpo Järvinen 34039e903e08SEric Dumazet skb_frag_size_add(fragto, shiftlen); 34049e903e08SEric Dumazet skb_frag_size_sub(fragfrom, shiftlen); 3405b54c9d5bSJonathan Lemon skb_frag_off_add(fragfrom, shiftlen); 3406832d11c5SIlpo Järvinen 3407832d11c5SIlpo Järvinen goto onlymerged; 3408832d11c5SIlpo Järvinen } 3409832d11c5SIlpo Järvinen 3410832d11c5SIlpo Järvinen from++; 3411832d11c5SIlpo Järvinen } 3412832d11c5SIlpo Järvinen 3413832d11c5SIlpo Järvinen /* Skip full, not-fitting skb to avoid expensive operations */ 3414832d11c5SIlpo Järvinen if ((shiftlen == skb->len) && 3415832d11c5SIlpo Järvinen (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) 3416832d11c5SIlpo Järvinen return 0; 3417832d11c5SIlpo Järvinen 3418832d11c5SIlpo Järvinen if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) 3419832d11c5SIlpo Järvinen return 0; 3420832d11c5SIlpo Järvinen 3421832d11c5SIlpo Järvinen while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { 3422832d11c5SIlpo Järvinen if (to == MAX_SKB_FRAGS) 3423832d11c5SIlpo Järvinen return 0; 3424832d11c5SIlpo Järvinen 3425832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[from]; 3426832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[to]; 3427832d11c5SIlpo Järvinen 34289e903e08SEric Dumazet if (todo >= skb_frag_size(fragfrom)) { 3429832d11c5SIlpo Järvinen *fragto = *fragfrom; 34309e903e08SEric Dumazet todo -= skb_frag_size(fragfrom); 3431832d11c5SIlpo Järvinen from++; 3432832d11c5SIlpo Järvinen to++; 3433832d11c5SIlpo Järvinen 3434832d11c5SIlpo Järvinen } else { 3435ea2ab693SIan Campbell __skb_frag_ref(fragfrom); 3436b54c9d5bSJonathan Lemon skb_frag_page_copy(fragto, fragfrom); 3437b54c9d5bSJonathan 
Lemon skb_frag_off_copy(fragto, fragfrom); 34389e903e08SEric Dumazet skb_frag_size_set(fragto, todo); 3439832d11c5SIlpo Järvinen 3440b54c9d5bSJonathan Lemon skb_frag_off_add(fragfrom, todo); 34419e903e08SEric Dumazet skb_frag_size_sub(fragfrom, todo); 3442832d11c5SIlpo Järvinen todo = 0; 3443832d11c5SIlpo Järvinen 3444832d11c5SIlpo Järvinen to++; 3445832d11c5SIlpo Järvinen break; 3446832d11c5SIlpo Järvinen } 3447832d11c5SIlpo Järvinen } 3448832d11c5SIlpo Järvinen 3449832d11c5SIlpo Järvinen /* Ready to "commit" this state change to tgt */ 3450832d11c5SIlpo Järvinen skb_shinfo(tgt)->nr_frags = to; 3451832d11c5SIlpo Järvinen 3452832d11c5SIlpo Järvinen if (merge >= 0) { 3453832d11c5SIlpo Järvinen fragfrom = &skb_shinfo(skb)->frags[0]; 3454832d11c5SIlpo Järvinen fragto = &skb_shinfo(tgt)->frags[merge]; 3455832d11c5SIlpo Järvinen 34569e903e08SEric Dumazet skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 3457ea2ab693SIan Campbell __skb_frag_unref(fragfrom); 3458832d11c5SIlpo Järvinen } 3459832d11c5SIlpo Järvinen 3460832d11c5SIlpo Järvinen /* Reposition in the original skb */ 3461832d11c5SIlpo Järvinen to = 0; 3462832d11c5SIlpo Järvinen while (from < skb_shinfo(skb)->nr_frags) 3463832d11c5SIlpo Järvinen skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; 3464832d11c5SIlpo Järvinen skb_shinfo(skb)->nr_frags = to; 3465832d11c5SIlpo Järvinen 3466832d11c5SIlpo Järvinen BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); 3467832d11c5SIlpo Järvinen 3468832d11c5SIlpo Järvinen onlymerged: 3469832d11c5SIlpo Järvinen /* Most likely the tgt won't ever need its checksum anymore, skb on 3470832d11c5SIlpo Järvinen * the other hand might need it if it needs to be resent 3471832d11c5SIlpo Järvinen */ 3472832d11c5SIlpo Järvinen tgt->ip_summed = CHECKSUM_PARTIAL; 3473832d11c5SIlpo Järvinen skb->ip_summed = CHECKSUM_PARTIAL; 3474832d11c5SIlpo Järvinen 3475832d11c5SIlpo Järvinen /* Yak, is it really working this way? Some helper please? */ 3476832d11c5SIlpo Järvinen skb->len -= shiftlen; 3477832d11c5SIlpo Järvinen skb->data_len -= shiftlen; 3478832d11c5SIlpo Järvinen skb->truesize -= shiftlen; 3479832d11c5SIlpo Järvinen tgt->len += shiftlen; 3480832d11c5SIlpo Järvinen tgt->data_len += shiftlen; 3481832d11c5SIlpo Järvinen tgt->truesize += shiftlen; 3482832d11c5SIlpo Järvinen 3483832d11c5SIlpo Järvinen return shiftlen; 3484832d11c5SIlpo Järvinen } 3485832d11c5SIlpo Järvinen 3486677e90edSThomas Graf /** 3487677e90edSThomas Graf * skb_prepare_seq_read - Prepare a sequential read of skb data 3488677e90edSThomas Graf * @skb: the buffer to read 3489677e90edSThomas Graf * @from: lower offset of data to be read 3490677e90edSThomas Graf * @to: upper offset of data to be read 3491677e90edSThomas Graf * @st: state variable 3492677e90edSThomas Graf * 3493677e90edSThomas Graf * Initializes the specified state variable. Must be called before 3494677e90edSThomas Graf * invoking skb_seq_read() for the first time. 3495677e90edSThomas Graf */ 3496677e90edSThomas Graf void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 3497677e90edSThomas Graf unsigned int to, struct skb_seq_state *st) 3498677e90edSThomas Graf { 3499677e90edSThomas Graf st->lower_offset = from; 3500677e90edSThomas Graf st->upper_offset = to; 3501677e90edSThomas Graf st->root_skb = st->cur_skb = skb; 3502677e90edSThomas Graf st->frag_idx = st->stepped_offset = 0; 3503677e90edSThomas Graf st->frag_data = NULL; 350497550f6fSWillem de Bruijn st->frag_off = 0; 3505677e90edSThomas Graf } 3506b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_prepare_seq_read); 3507677e90edSThomas Graf 3508677e90edSThomas Graf /** 3509677e90edSThomas Graf * skb_seq_read - Sequentially read skb data 3510677e90edSThomas Graf * @consumed: number of bytes consumed by the caller so far 3511677e90edSThomas Graf * @data: destination pointer for data to be returned 3512677e90edSThomas Graf * @st: state variable 3513677e90edSThomas Graf * 3514bc32383cSMathias Krause * Reads a block of skb data at @consumed relative to the 3515677e90edSThomas Graf * lower offset specified to skb_prepare_seq_read(). Assigns 3516bc32383cSMathias Krause * the head of the data block to @data and returns the length 3517677e90edSThomas Graf * of the block or 0 if the end of the skb data or the upper 3518677e90edSThomas Graf * offset has been reached. 3519677e90edSThomas Graf * 3520677e90edSThomas Graf * The caller is not required to consume all of the data 3521bc32383cSMathias Krause * returned, i.e. @consumed is typically set to the number 3522677e90edSThomas Graf * of bytes already consumed and the next call to 3523677e90edSThomas Graf * skb_seq_read() will return the remaining part of the block. 3524677e90edSThomas Graf * 352525985edcSLucas De Marchi * Note 1: The size of each block of data returned can be arbitrary, 3526e793c0f7SMasanari Iida * this limitation is the cost for zerocopy sequential 3527677e90edSThomas Graf * reads of potentially non linear data. 3528677e90edSThomas Graf * 3529bc2cda1eSRandy Dunlap * Note 2: Fragment lists within fragments are not implemented 3530677e90edSThomas Graf * at the moment, state->root_skb could be replaced with 3531677e90edSThomas Graf * a stack for this purpose. 3532677e90edSThomas Graf */ 3533677e90edSThomas Graf unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 3534677e90edSThomas Graf struct skb_seq_state *st) 3535677e90edSThomas Graf { 3536677e90edSThomas Graf unsigned int block_limit, abs_offset = consumed + st->lower_offset; 3537677e90edSThomas Graf skb_frag_t *frag; 3538677e90edSThomas Graf 3539aeb193eaSWedson Almeida Filho if (unlikely(abs_offset >= st->upper_offset)) { 3540aeb193eaSWedson Almeida Filho if (st->frag_data) { 3541aeb193eaSWedson Almeida Filho kunmap_atomic(st->frag_data); 3542aeb193eaSWedson Almeida Filho st->frag_data = NULL; 3543aeb193eaSWedson Almeida Filho } 3544677e90edSThomas Graf return 0; 3545aeb193eaSWedson Almeida Filho } 3546677e90edSThomas Graf 3547677e90edSThomas Graf next_skb: 354895e3b24cSHerbert Xu block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; 3549677e90edSThomas Graf 3550995b3379SThomas Chenault if (abs_offset < block_limit && !st->frag_data) { 355195e3b24cSHerbert Xu *data = st->cur_skb->data + (abs_offset - st->stepped_offset); 3552677e90edSThomas Graf return block_limit - abs_offset; 3553677e90edSThomas Graf } 3554677e90edSThomas Graf 3555677e90edSThomas Graf if (st->frag_idx == 0 && !st->frag_data) 3556677e90edSThomas Graf st->stepped_offset += skb_headlen(st->cur_skb); 3557677e90edSThomas Graf 3558677e90edSThomas Graf while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 355997550f6fSWillem de Bruijn unsigned int pg_idx, pg_off, pg_sz; 3560677e90edSThomas Graf 356197550f6fSWillem de Bruijn frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 356297550f6fSWillem de Bruijn 356397550f6fSWillem de Bruijn pg_idx = 0; 356497550f6fSWillem de Bruijn pg_off = skb_frag_off(frag); 356597550f6fSWillem de Bruijn pg_sz = skb_frag_size(frag); 356697550f6fSWillem de Bruijn 356797550f6fSWillem de Bruijn if 
(skb_frag_must_loop(skb_frag_page(frag))) { 356897550f6fSWillem de Bruijn pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT; 356997550f6fSWillem de Bruijn pg_off = offset_in_page(pg_off + st->frag_off); 357097550f6fSWillem de Bruijn pg_sz = min_t(unsigned int, pg_sz - st->frag_off, 357197550f6fSWillem de Bruijn PAGE_SIZE - pg_off); 357297550f6fSWillem de Bruijn } 357397550f6fSWillem de Bruijn 357497550f6fSWillem de Bruijn block_limit = pg_sz + st->stepped_offset; 3575677e90edSThomas Graf if (abs_offset < block_limit) { 3576677e90edSThomas Graf if (!st->frag_data) 357797550f6fSWillem de Bruijn st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx); 3578677e90edSThomas Graf 357997550f6fSWillem de Bruijn *data = (u8 *)st->frag_data + pg_off + 3580677e90edSThomas Graf (abs_offset - st->stepped_offset); 3581677e90edSThomas Graf 3582677e90edSThomas Graf return block_limit - abs_offset; 3583677e90edSThomas Graf } 3584677e90edSThomas Graf 3585677e90edSThomas Graf if (st->frag_data) { 358651c56b00SEric Dumazet kunmap_atomic(st->frag_data); 3587677e90edSThomas Graf st->frag_data = NULL; 3588677e90edSThomas Graf } 3589677e90edSThomas Graf 359097550f6fSWillem de Bruijn st->stepped_offset += pg_sz; 359197550f6fSWillem de Bruijn st->frag_off += pg_sz; 359297550f6fSWillem de Bruijn if (st->frag_off == skb_frag_size(frag)) { 359397550f6fSWillem de Bruijn st->frag_off = 0; 3594677e90edSThomas Graf st->frag_idx++; 359597550f6fSWillem de Bruijn } 3596677e90edSThomas Graf } 3597677e90edSThomas Graf 35985b5a60daSOlaf Kirch if (st->frag_data) { 359951c56b00SEric Dumazet kunmap_atomic(st->frag_data); 36005b5a60daSOlaf Kirch st->frag_data = NULL; 36015b5a60daSOlaf Kirch } 36025b5a60daSOlaf Kirch 360321dc3301SDavid S. Miller if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { 3604677e90edSThomas Graf st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 360595e3b24cSHerbert Xu st->frag_idx = 0; 3606677e90edSThomas Graf goto next_skb; 360771b3346dSShyam Iyer } else if (st->cur_skb->next) { 360871b3346dSShyam Iyer st->cur_skb = st->cur_skb->next; 360971b3346dSShyam Iyer st->frag_idx = 0; 3610677e90edSThomas Graf goto next_skb; 3611677e90edSThomas Graf } 3612677e90edSThomas Graf 3613677e90edSThomas Graf return 0; 3614677e90edSThomas Graf } 3615b4ac530fSDavid S. Miller EXPORT_SYMBOL(skb_seq_read); 3616677e90edSThomas Graf 3617677e90edSThomas Graf /** 3618677e90edSThomas Graf * skb_abort_seq_read - Abort a sequential read of skb data 3619677e90edSThomas Graf * @st: state variable 3620677e90edSThomas Graf * 3621677e90edSThomas Graf * Must be called if skb_seq_read() was not called until it 3622677e90edSThomas Graf * returned 0. 3623677e90edSThomas Graf */ 3624677e90edSThomas Graf void skb_abort_seq_read(struct skb_seq_state *st) 3625677e90edSThomas Graf { 3626677e90edSThomas Graf if (st->frag_data) 362751c56b00SEric Dumazet kunmap_atomic(st->frag_data); 3628677e90edSThomas Graf } 3629b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_abort_seq_read); 3630677e90edSThomas Graf 36313fc7e8a6SThomas Graf #define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 36323fc7e8a6SThomas Graf 36333fc7e8a6SThomas Graf static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 36343fc7e8a6SThomas Graf struct ts_config *conf, 36353fc7e8a6SThomas Graf struct ts_state *state) 36363fc7e8a6SThomas Graf { 36373fc7e8a6SThomas Graf return skb_seq_read(offset, text, TS_SKB_CB(state)); 36383fc7e8a6SThomas Graf } 36393fc7e8a6SThomas Graf 36403fc7e8a6SThomas Graf static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 36413fc7e8a6SThomas Graf { 36423fc7e8a6SThomas Graf skb_abort_seq_read(TS_SKB_CB(state)); 36433fc7e8a6SThomas Graf } 36443fc7e8a6SThomas Graf 36453fc7e8a6SThomas Graf /** 36463fc7e8a6SThomas Graf * skb_find_text - Find a text pattern in skb data 36473fc7e8a6SThomas Graf * @skb: the buffer to look in 36483fc7e8a6SThomas Graf * @from: search offset 36493fc7e8a6SThomas Graf * @to: search limit 36503fc7e8a6SThomas Graf * @config: textsearch configuration 36513fc7e8a6SThomas Graf * 36523fc7e8a6SThomas Graf * Finds a pattern in the skb data according to the specified 36533fc7e8a6SThomas Graf * textsearch configuration. Use textsearch_next() to retrieve 36543fc7e8a6SThomas Graf * subsequent occurrences of the pattern. Returns the offset 36553fc7e8a6SThomas Graf * to the first occurrence or UINT_MAX if no match was found. 36563fc7e8a6SThomas Graf */ 36573fc7e8a6SThomas Graf unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 3658059a2440SBojan Prtvar unsigned int to, struct ts_config *config) 36593fc7e8a6SThomas Graf { 3660059a2440SBojan Prtvar struct ts_state state; 3661f72b948dSPhil Oester unsigned int ret; 3662f72b948dSPhil Oester 36633fc7e8a6SThomas Graf config->get_next_block = skb_ts_get_next_block; 36643fc7e8a6SThomas Graf config->finish = skb_ts_finish; 36653fc7e8a6SThomas Graf 3666059a2440SBojan Prtvar skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 36673fc7e8a6SThomas Graf 3668059a2440SBojan Prtvar ret = textsearch_find(config, &state); 3669f72b948dSPhil Oester return (ret <= to - from ? ret : UINT_MAX); 36703fc7e8a6SThomas Graf } 3671b4ac530fSDavid S. 
Miller EXPORT_SYMBOL(skb_find_text); 36723fc7e8a6SThomas Graf 3673be12a1feSHannes Frederic Sowa int skb_append_pagefrags(struct sk_buff *skb, struct page *page, 3674be12a1feSHannes Frederic Sowa int offset, size_t size) 3675be12a1feSHannes Frederic Sowa { 3676be12a1feSHannes Frederic Sowa int i = skb_shinfo(skb)->nr_frags; 3677be12a1feSHannes Frederic Sowa 3678be12a1feSHannes Frederic Sowa if (skb_can_coalesce(skb, i, page, offset)) { 3679be12a1feSHannes Frederic Sowa skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); 3680be12a1feSHannes Frederic Sowa } else if (i < MAX_SKB_FRAGS) { 3681be12a1feSHannes Frederic Sowa get_page(page); 3682be12a1feSHannes Frederic Sowa skb_fill_page_desc(skb, i, page, offset, size); 3683be12a1feSHannes Frederic Sowa } else { 3684be12a1feSHannes Frederic Sowa return -EMSGSIZE; 3685be12a1feSHannes Frederic Sowa } 3686be12a1feSHannes Frederic Sowa 3687be12a1feSHannes Frederic Sowa return 0; 3688be12a1feSHannes Frederic Sowa } 3689be12a1feSHannes Frederic Sowa EXPORT_SYMBOL_GPL(skb_append_pagefrags); 3690be12a1feSHannes Frederic Sowa 3691cbb042f9SHerbert Xu /** 3692cbb042f9SHerbert Xu * skb_pull_rcsum - pull skb and update receive checksum 3693cbb042f9SHerbert Xu * @skb: buffer to update 3694cbb042f9SHerbert Xu * @len: length of data pulled 3695cbb042f9SHerbert Xu * 3696cbb042f9SHerbert Xu * This function performs an skb_pull on the packet and updates 3697fee54fa5SUrs Thuermann * the CHECKSUM_COMPLETE checksum. It should be used on 369884fa7933SPatrick McHardy * receive path processing instead of skb_pull unless you know 369984fa7933SPatrick McHardy * that the checksum difference is zero (e.g., a valid IP header) 370084fa7933SPatrick McHardy * or you are setting ip_summed to CHECKSUM_NONE. 3701cbb042f9SHerbert Xu */ 3702af72868bSJohannes Berg void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 3703cbb042f9SHerbert Xu { 370431b33dfbSPravin B Shelar unsigned char *data = skb->data; 370531b33dfbSPravin B Shelar 3706cbb042f9SHerbert Xu BUG_ON(len > skb->len); 370731b33dfbSPravin B Shelar __skb_pull(skb, len); 370831b33dfbSPravin B Shelar skb_postpull_rcsum(skb, data, len); 370931b33dfbSPravin B Shelar return skb->data; 3710cbb042f9SHerbert Xu } 3711f94691acSArnaldo Carvalho de Melo EXPORT_SYMBOL_GPL(skb_pull_rcsum); 3712f94691acSArnaldo Carvalho de Melo 371313acc94eSYonghong Song static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) 371413acc94eSYonghong Song { 371513acc94eSYonghong Song skb_frag_t head_frag; 371613acc94eSYonghong Song struct page *page; 371713acc94eSYonghong Song 371813acc94eSYonghong Song page = virt_to_head_page(frag_skb->head); 3719d8e18a51SMatthew Wilcox (Oracle) __skb_frag_set_page(&head_frag, page); 3720b54c9d5bSJonathan Lemon skb_frag_off_set(&head_frag, frag_skb->data - 3721b54c9d5bSJonathan Lemon (unsigned char *)page_address(page)); 3722d8e18a51SMatthew Wilcox (Oracle) skb_frag_size_set(&head_frag, skb_headlen(frag_skb)); 372313acc94eSYonghong Song return head_frag; 372413acc94eSYonghong Song } 372513acc94eSYonghong Song 37263a1296a3SSteffen Klassert struct sk_buff *skb_segment_list(struct sk_buff *skb, 37273a1296a3SSteffen Klassert netdev_features_t features, 37283a1296a3SSteffen Klassert unsigned int offset) 37293a1296a3SSteffen Klassert { 37303a1296a3SSteffen Klassert struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; 37313a1296a3SSteffen Klassert unsigned int tnl_hlen = skb_tnl_header_len(skb); 37323a1296a3SSteffen Klassert unsigned int delta_truesize = 0; 37333a1296a3SSteffen Klassert unsigned 
int delta_len = 0; 37343a1296a3SSteffen Klassert struct sk_buff *tail = NULL; 373553475c5dSDongseok Yi struct sk_buff *nskb, *tmp; 373653475c5dSDongseok Yi int err; 37373a1296a3SSteffen Klassert 37383a1296a3SSteffen Klassert skb_push(skb, -skb_network_offset(skb) + offset); 37393a1296a3SSteffen Klassert 37403a1296a3SSteffen Klassert skb_shinfo(skb)->frag_list = NULL; 37413a1296a3SSteffen Klassert 37423a1296a3SSteffen Klassert do { 37433a1296a3SSteffen Klassert nskb = list_skb; 37443a1296a3SSteffen Klassert list_skb = list_skb->next; 37453a1296a3SSteffen Klassert 374653475c5dSDongseok Yi err = 0; 374753475c5dSDongseok Yi if (skb_shared(nskb)) { 374853475c5dSDongseok Yi tmp = skb_clone(nskb, GFP_ATOMIC); 374953475c5dSDongseok Yi if (tmp) { 375053475c5dSDongseok Yi consume_skb(nskb); 375153475c5dSDongseok Yi nskb = tmp; 375253475c5dSDongseok Yi err = skb_unclone(nskb, GFP_ATOMIC); 375353475c5dSDongseok Yi } else { 375453475c5dSDongseok Yi err = -ENOMEM; 375553475c5dSDongseok Yi } 375653475c5dSDongseok Yi } 375753475c5dSDongseok Yi 37583a1296a3SSteffen Klassert if (!tail) 37593a1296a3SSteffen Klassert skb->next = nskb; 37603a1296a3SSteffen Klassert else 37613a1296a3SSteffen Klassert tail->next = nskb; 37623a1296a3SSteffen Klassert 376353475c5dSDongseok Yi if (unlikely(err)) { 376453475c5dSDongseok Yi nskb->next = list_skb; 376553475c5dSDongseok Yi goto err_linearize; 376653475c5dSDongseok Yi } 376753475c5dSDongseok Yi 37683a1296a3SSteffen Klassert tail = nskb; 37693a1296a3SSteffen Klassert 37703a1296a3SSteffen Klassert delta_len += nskb->len; 37713a1296a3SSteffen Klassert delta_truesize += nskb->truesize; 37723a1296a3SSteffen Klassert 37733a1296a3SSteffen Klassert skb_push(nskb, -skb_network_offset(nskb) + offset); 37743a1296a3SSteffen Klassert 3775cf673ed0SFlorian Westphal skb_release_head_state(nskb); 37763a1296a3SSteffen Klassert __copy_skb_header(nskb, skb); 37773a1296a3SSteffen Klassert 37783a1296a3SSteffen Klassert skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); 37793a1296a3SSteffen Klassert skb_copy_from_linear_data_offset(skb, -tnl_hlen, 37803a1296a3SSteffen Klassert nskb->data - tnl_hlen, 37813a1296a3SSteffen Klassert offset + tnl_hlen); 37823a1296a3SSteffen Klassert 37833a1296a3SSteffen Klassert if (skb_needs_linearize(nskb, features) && 37843a1296a3SSteffen Klassert __skb_linearize(nskb)) 37853a1296a3SSteffen Klassert goto err_linearize; 37863a1296a3SSteffen Klassert 37873a1296a3SSteffen Klassert } while (list_skb); 37883a1296a3SSteffen Klassert 37893a1296a3SSteffen Klassert skb->truesize = skb->truesize - delta_truesize; 37903a1296a3SSteffen Klassert skb->data_len = skb->data_len - delta_len; 37913a1296a3SSteffen Klassert skb->len = skb->len - delta_len; 37923a1296a3SSteffen Klassert 37933a1296a3SSteffen Klassert skb_gso_reset(skb); 37943a1296a3SSteffen Klassert 37953a1296a3SSteffen Klassert skb->prev = tail; 37963a1296a3SSteffen Klassert 37973a1296a3SSteffen Klassert if (skb_needs_linearize(skb, features) && 37983a1296a3SSteffen Klassert __skb_linearize(skb)) 37993a1296a3SSteffen Klassert goto err_linearize; 38003a1296a3SSteffen Klassert 38013a1296a3SSteffen Klassert skb_get(skb); 38023a1296a3SSteffen Klassert 38033a1296a3SSteffen Klassert return skb; 38043a1296a3SSteffen Klassert 38053a1296a3SSteffen Klassert err_linearize: 38063a1296a3SSteffen Klassert kfree_skb_list(skb->next); 38073a1296a3SSteffen Klassert skb->next = NULL; 38083a1296a3SSteffen Klassert return ERR_PTR(-ENOMEM); 38093a1296a3SSteffen Klassert } 38103a1296a3SSteffen Klassert 
EXPORT_SYMBOL_GPL(skb_segment_list); 38113a1296a3SSteffen Klassert 38123a1296a3SSteffen Klassert int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb) 38133a1296a3SSteffen Klassert { 38143a1296a3SSteffen Klassert if (unlikely(p->len + skb->len >= 65536)) 38153a1296a3SSteffen Klassert return -E2BIG; 38163a1296a3SSteffen Klassert 38173a1296a3SSteffen Klassert if (NAPI_GRO_CB(p)->last == p) 38183a1296a3SSteffen Klassert skb_shinfo(p)->frag_list = skb; 38193a1296a3SSteffen Klassert else 38203a1296a3SSteffen Klassert NAPI_GRO_CB(p)->last->next = skb; 38213a1296a3SSteffen Klassert 38223a1296a3SSteffen Klassert skb_pull(skb, skb_gro_offset(skb)); 38233a1296a3SSteffen Klassert 38243a1296a3SSteffen Klassert NAPI_GRO_CB(p)->last = skb; 38253a1296a3SSteffen Klassert NAPI_GRO_CB(p)->count++; 38263a1296a3SSteffen Klassert p->data_len += skb->len; 38273a1296a3SSteffen Klassert p->truesize += skb->truesize; 38283a1296a3SSteffen Klassert p->len += skb->len; 38293a1296a3SSteffen Klassert 38303a1296a3SSteffen Klassert NAPI_GRO_CB(skb)->same_flow = 1; 38313a1296a3SSteffen Klassert 38323a1296a3SSteffen Klassert return 0; 38333a1296a3SSteffen Klassert } 38343a1296a3SSteffen Klassert 3835f4c50d99SHerbert Xu /** 3836f4c50d99SHerbert Xu * skb_segment - Perform protocol segmentation on skb. 3837df5771ffSMichael S. Tsirkin * @head_skb: buffer to segment 3838576a30ebSHerbert Xu * @features: features for the output path (see dev->features) 3839f4c50d99SHerbert Xu * 3840f4c50d99SHerbert Xu * This function performs segmentation on the given skb. It returns 38414c821d75SBen Hutchings * a pointer to the first in a list of new skbs for the segments. 38424c821d75SBen Hutchings * In case of error it returns ERR_PTR(err). 3843f4c50d99SHerbert Xu */ 3844df5771ffSMichael S. Tsirkin struct sk_buff *skb_segment(struct sk_buff *head_skb, 3845df5771ffSMichael S. Tsirkin netdev_features_t features) 3846f4c50d99SHerbert Xu { 3847f4c50d99SHerbert Xu struct sk_buff *segs = NULL; 3848f4c50d99SHerbert Xu struct sk_buff *tail = NULL; 38491a4cedafSMichael S. Tsirkin struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; 3850df5771ffSMichael S. Tsirkin skb_frag_t *frag = skb_shinfo(head_skb)->frags; 3851df5771ffSMichael S. Tsirkin unsigned int mss = skb_shinfo(head_skb)->gso_size; 3852df5771ffSMichael S. Tsirkin unsigned int doffset = head_skb->data - skb_mac_header(head_skb); 38531fd819ecSMichael S. Tsirkin struct sk_buff *frag_skb = head_skb; 3854f4c50d99SHerbert Xu unsigned int offset = doffset; 3855df5771ffSMichael S. Tsirkin unsigned int tnl_hlen = skb_tnl_header_len(head_skb); 3856802ab55aSAlexander Duyck unsigned int partial_segs = 0; 3857f4c50d99SHerbert Xu unsigned int headroom; 3858802ab55aSAlexander Duyck unsigned int len = head_skb->len; 3859ec5f0615SPravin B Shelar __be16 proto; 386036c98382SAlexander Duyck bool csum, sg; 3861df5771ffSMichael S. Tsirkin int nfrags = skb_shinfo(head_skb)->nr_frags; 3862f4c50d99SHerbert Xu int err = -ENOMEM; 3863f4c50d99SHerbert Xu int i = 0; 3864f4c50d99SHerbert Xu int pos; 3865f4c50d99SHerbert Xu 38663dcbdb13SShmulik Ladkani if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) && 38673dcbdb13SShmulik Ladkani (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) { 38683dcbdb13SShmulik Ladkani /* gso_size is untrusted, and we have a frag_list with a linear 38693dcbdb13SShmulik Ladkani * non head_frag head. 
38703dcbdb13SShmulik Ladkani * 38713dcbdb13SShmulik Ladkani * (we assume checking the first list_skb member suffices; 38723dcbdb13SShmulik Ladkani * i.e if either of the list_skb members have non head_frag 38733dcbdb13SShmulik Ladkani * head, then the first one has too). 38743dcbdb13SShmulik Ladkani * 38753dcbdb13SShmulik Ladkani * If head_skb's headlen does not fit requested gso_size, it 38763dcbdb13SShmulik Ladkani * means that the frag_list members do NOT terminate on exact 38773dcbdb13SShmulik Ladkani * gso_size boundaries. Hence we cannot perform skb_frag_t page 38783dcbdb13SShmulik Ladkani * sharing. Therefore we must fallback to copying the frag_list 38793dcbdb13SShmulik Ladkani * skbs; we do so by disabling SG. 38803dcbdb13SShmulik Ladkani */ 38813dcbdb13SShmulik Ladkani if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) 38823dcbdb13SShmulik Ladkani features &= ~NETIF_F_SG; 38833dcbdb13SShmulik Ladkani } 38843dcbdb13SShmulik Ladkani 38855882a07cSWei-Chun Chao __skb_push(head_skb, doffset); 38862f631133SMiaohe Lin proto = skb_network_protocol(head_skb, NULL); 3887ec5f0615SPravin B Shelar if (unlikely(!proto)) 3888ec5f0615SPravin B Shelar return ERR_PTR(-EINVAL); 3889ec5f0615SPravin B Shelar 389036c98382SAlexander Duyck sg = !!(features & NETIF_F_SG); 3891f245d079SAlexander Duyck csum = !!can_checksum_protocol(features, proto); 38927e2b10c1STom Herbert 389307b26c94SSteffen Klassert if (sg && csum && (mss != GSO_BY_FRAGS)) { 389407b26c94SSteffen Klassert if (!(features & NETIF_F_GSO_PARTIAL)) { 389507b26c94SSteffen Klassert struct sk_buff *iter; 389643170c4eSIlan Tayari unsigned int frag_len; 389707b26c94SSteffen Klassert 389807b26c94SSteffen Klassert if (!list_skb || 389907b26c94SSteffen Klassert !net_gso_ok(features, skb_shinfo(head_skb)->gso_type)) 390007b26c94SSteffen Klassert goto normal; 390107b26c94SSteffen Klassert 390243170c4eSIlan Tayari /* If we get here then all the required 390343170c4eSIlan Tayari * GSO features except frag_list are supported. 390443170c4eSIlan Tayari * Try to split the SKB to multiple GSO SKBs 390543170c4eSIlan Tayari * with no frag_list. 390643170c4eSIlan Tayari * Currently we can do that only when the buffers don't 390743170c4eSIlan Tayari * have a linear part and all the buffers except 390843170c4eSIlan Tayari * the last are of the same length. 390907b26c94SSteffen Klassert */ 391043170c4eSIlan Tayari frag_len = list_skb->len; 391107b26c94SSteffen Klassert skb_walk_frags(head_skb, iter) { 391243170c4eSIlan Tayari if (frag_len != iter->len && iter->next) 391343170c4eSIlan Tayari goto normal; 3914eaffadbbSIlan Tayari if (skb_headlen(iter) && !iter->head_frag) 391507b26c94SSteffen Klassert goto normal; 391607b26c94SSteffen Klassert 391707b26c94SSteffen Klassert len -= iter->len; 391807b26c94SSteffen Klassert } 391943170c4eSIlan Tayari 392043170c4eSIlan Tayari if (len != frag_len) 392143170c4eSIlan Tayari goto normal; 392207b26c94SSteffen Klassert } 392307b26c94SSteffen Klassert 3924802ab55aSAlexander Duyck /* GSO partial only requires that we trim off any excess that 3925802ab55aSAlexander Duyck * doesn't fit into an MSS sized block, so take care of that 3926802ab55aSAlexander Duyck * now. 
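 *
 * Worked example of the computation below (numbers purely
 * illustrative): with len = 65000 and mss = 1448, partial_segs
 * becomes 65000 / 1448 = 44 and the effective mss for the main loop
 * becomes 44 * 1448 = 63712, so each generated super-segment carries
 * up to 44 wire-sized segments; gso_size is restored and gso_segs set
 * to partial_segs on the resulting skbs once segmentation is done.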
3927802ab55aSAlexander Duyck */ 3928802ab55aSAlexander Duyck partial_segs = len / mss; 3929d7fb5a80SAlexander Duyck if (partial_segs > 1) 3930802ab55aSAlexander Duyck mss *= partial_segs; 3931d7fb5a80SAlexander Duyck else 3932d7fb5a80SAlexander Duyck partial_segs = 0; 3933802ab55aSAlexander Duyck } 3934802ab55aSAlexander Duyck 393507b26c94SSteffen Klassert normal: 3936df5771ffSMichael S. Tsirkin headroom = skb_headroom(head_skb); 3937df5771ffSMichael S. Tsirkin pos = skb_headlen(head_skb); 3938f4c50d99SHerbert Xu 3939f4c50d99SHerbert Xu do { 3940f4c50d99SHerbert Xu struct sk_buff *nskb; 39418cb19905SMichael S. Tsirkin skb_frag_t *nskb_frag; 3942c8884eddSHerbert Xu int hsize; 3943f4c50d99SHerbert Xu int size; 3944f4c50d99SHerbert Xu 39453953c46cSMarcelo Ricardo Leitner if (unlikely(mss == GSO_BY_FRAGS)) { 39463953c46cSMarcelo Ricardo Leitner len = list_skb->len; 39473953c46cSMarcelo Ricardo Leitner } else { 3948df5771ffSMichael S. Tsirkin len = head_skb->len - offset; 3949f4c50d99SHerbert Xu if (len > mss) 3950f4c50d99SHerbert Xu len = mss; 39513953c46cSMarcelo Ricardo Leitner } 3952f4c50d99SHerbert Xu 3953df5771ffSMichael S. Tsirkin hsize = skb_headlen(head_skb) - offset; 3954f4c50d99SHerbert Xu 3955dbd50f23SXin Long if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) && 39561a4cedafSMichael S. Tsirkin (skb_headlen(list_skb) == len || sg)) { 39571a4cedafSMichael S. Tsirkin BUG_ON(skb_headlen(list_skb) > len); 395889319d38SHerbert Xu 39599d8506ccSHerbert Xu i = 0; 39601a4cedafSMichael S. Tsirkin nfrags = skb_shinfo(list_skb)->nr_frags; 39611a4cedafSMichael S. Tsirkin frag = skb_shinfo(list_skb)->frags; 39621fd819ecSMichael S. Tsirkin frag_skb = list_skb; 39631a4cedafSMichael S. Tsirkin pos += skb_headlen(list_skb); 39649d8506ccSHerbert Xu 39659d8506ccSHerbert Xu while (pos < offset + len) { 39669d8506ccSHerbert Xu BUG_ON(i >= nfrags); 39679d8506ccSHerbert Xu 39684e1beba1SMichael S. Tsirkin size = skb_frag_size(frag); 39699d8506ccSHerbert Xu if (pos + size > offset + len) 39709d8506ccSHerbert Xu break; 39719d8506ccSHerbert Xu 39729d8506ccSHerbert Xu i++; 39739d8506ccSHerbert Xu pos += size; 39744e1beba1SMichael S. Tsirkin frag++; 39759d8506ccSHerbert Xu } 39769d8506ccSHerbert Xu 39771a4cedafSMichael S. Tsirkin nskb = skb_clone(list_skb, GFP_ATOMIC); 39781a4cedafSMichael S. Tsirkin list_skb = list_skb->next; 397989319d38SHerbert Xu 3980f4c50d99SHerbert Xu if (unlikely(!nskb)) 3981f4c50d99SHerbert Xu goto err; 3982f4c50d99SHerbert Xu 39839d8506ccSHerbert Xu if (unlikely(pskb_trim(nskb, len))) { 39849d8506ccSHerbert Xu kfree_skb(nskb); 39859d8506ccSHerbert Xu goto err; 39869d8506ccSHerbert Xu } 39879d8506ccSHerbert Xu 3988ec47ea82SAlexander Duyck hsize = skb_end_offset(nskb); 398989319d38SHerbert Xu if (skb_cow_head(nskb, doffset + headroom)) { 399089319d38SHerbert Xu kfree_skb(nskb); 399189319d38SHerbert Xu goto err; 399289319d38SHerbert Xu } 399389319d38SHerbert Xu 3994ec47ea82SAlexander Duyck nskb->truesize += skb_end_offset(nskb) - hsize; 399589319d38SHerbert Xu skb_release_head_state(nskb); 399689319d38SHerbert Xu __skb_push(nskb, doffset); 399789319d38SHerbert Xu } else { 399800b229f7SPaolo Abeni if (hsize < 0) 399900b229f7SPaolo Abeni hsize = 0; 4000dbd50f23SXin Long if (hsize > len || !sg) 4001dbd50f23SXin Long hsize = len; 4002dbd50f23SXin Long 4003c93bdd0eSMel Gorman nskb = __alloc_skb(hsize + doffset + headroom, 4004df5771ffSMichael S. 
Tsirkin GFP_ATOMIC, skb_alloc_rx_flag(head_skb), 4005c93bdd0eSMel Gorman NUMA_NO_NODE); 400689319d38SHerbert Xu 400789319d38SHerbert Xu if (unlikely(!nskb)) 400889319d38SHerbert Xu goto err; 400989319d38SHerbert Xu 401089319d38SHerbert Xu skb_reserve(nskb, headroom); 401189319d38SHerbert Xu __skb_put(nskb, doffset); 401289319d38SHerbert Xu } 401389319d38SHerbert Xu 4014f4c50d99SHerbert Xu if (segs) 4015f4c50d99SHerbert Xu tail->next = nskb; 4016f4c50d99SHerbert Xu else 4017f4c50d99SHerbert Xu segs = nskb; 4018f4c50d99SHerbert Xu tail = nskb; 4019f4c50d99SHerbert Xu 4020df5771ffSMichael S. Tsirkin __copy_skb_header(nskb, head_skb); 4021f4c50d99SHerbert Xu 4022030737bcSEric Dumazet skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); 4023fcdfe3a7SVlad Yasevich skb_reset_mac_len(nskb); 402468c33163SPravin B Shelar 4025df5771ffSMichael S. Tsirkin skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, 402668c33163SPravin B Shelar nskb->data - tnl_hlen, 402768c33163SPravin B Shelar doffset + tnl_hlen); 402889319d38SHerbert Xu 40299d8506ccSHerbert Xu if (nskb->len == len + doffset) 40301cdbcb79SSimon Horman goto perform_csum_check; 403189319d38SHerbert Xu 40327fbeffedSAlexander Duyck if (!sg) { 40331454c9faSYadu Kishore if (!csum) { 40347fbeffedSAlexander Duyck if (!nskb->remcsum_offload) 40356f85a124SHerbert Xu nskb->ip_summed = CHECKSUM_NONE; 403676443456SAlexander Duyck SKB_GSO_CB(nskb)->csum = 403776443456SAlexander Duyck skb_copy_and_csum_bits(head_skb, offset, 40381454c9faSYadu Kishore skb_put(nskb, 40391454c9faSYadu Kishore len), 40408d5930dfSAl Viro len); 40417e2b10c1STom Herbert SKB_GSO_CB(nskb)->csum_start = 4042de843723STom Herbert skb_headroom(nskb) + doffset; 40431454c9faSYadu Kishore } else { 40441454c9faSYadu Kishore skb_copy_bits(head_skb, offset, 40451454c9faSYadu Kishore skb_put(nskb, len), 40461454c9faSYadu Kishore len); 40471454c9faSYadu Kishore } 4048f4c50d99SHerbert Xu continue; 4049f4c50d99SHerbert Xu } 4050f4c50d99SHerbert Xu 40518cb19905SMichael S. Tsirkin nskb_frag = skb_shinfo(nskb)->frags; 4052f4c50d99SHerbert Xu 4053df5771ffSMichael S. Tsirkin skb_copy_from_linear_data_offset(head_skb, offset, 4054d626f62bSArnaldo Carvalho de Melo skb_put(nskb, hsize), hsize); 4055f4c50d99SHerbert Xu 405606b4feb3SJonathan Lemon skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags & 405706b4feb3SJonathan Lemon SKBFL_SHARED_FRAG; 4058cef401deSEric Dumazet 4059bf5c25d6SWillem de Bruijn if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || 4060bf5c25d6SWillem de Bruijn skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) 4061bf5c25d6SWillem de Bruijn goto err; 4062bf5c25d6SWillem de Bruijn 40639d8506ccSHerbert Xu while (pos < offset + len) { 40649d8506ccSHerbert Xu if (i >= nfrags) { 40659d8506ccSHerbert Xu i = 0; 40661a4cedafSMichael S. Tsirkin nfrags = skb_shinfo(list_skb)->nr_frags; 40671a4cedafSMichael S. Tsirkin frag = skb_shinfo(list_skb)->frags; 40681fd819ecSMichael S. Tsirkin frag_skb = list_skb; 406913acc94eSYonghong Song if (!skb_headlen(list_skb)) { 40709d8506ccSHerbert Xu BUG_ON(!nfrags); 407113acc94eSYonghong Song } else { 407213acc94eSYonghong Song BUG_ON(!list_skb->head_frag); 40739d8506ccSHerbert Xu 407413acc94eSYonghong Song /* to make room for head_frag. 
*/ 407513acc94eSYonghong Song i--; 407613acc94eSYonghong Song frag--; 407713acc94eSYonghong Song } 4078bf5c25d6SWillem de Bruijn if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || 4079bf5c25d6SWillem de Bruijn skb_zerocopy_clone(nskb, frag_skb, 4080bf5c25d6SWillem de Bruijn GFP_ATOMIC)) 4081bf5c25d6SWillem de Bruijn goto err; 4082bf5c25d6SWillem de Bruijn 40831a4cedafSMichael S. Tsirkin list_skb = list_skb->next; 40849d8506ccSHerbert Xu } 40859d8506ccSHerbert Xu 40869d8506ccSHerbert Xu if (unlikely(skb_shinfo(nskb)->nr_frags >= 40879d8506ccSHerbert Xu MAX_SKB_FRAGS)) { 40889d8506ccSHerbert Xu net_warn_ratelimited( 40899d8506ccSHerbert Xu "skb_segment: too many frags: %u %u\n", 40909d8506ccSHerbert Xu pos, mss); 4091ff907a11SEric Dumazet err = -EINVAL; 40929d8506ccSHerbert Xu goto err; 40939d8506ccSHerbert Xu } 40949d8506ccSHerbert Xu 409513acc94eSYonghong Song *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; 40968cb19905SMichael S. Tsirkin __skb_frag_ref(nskb_frag); 40978cb19905SMichael S. Tsirkin size = skb_frag_size(nskb_frag); 4098f4c50d99SHerbert Xu 4099f4c50d99SHerbert Xu if (pos < offset) { 4100b54c9d5bSJonathan Lemon skb_frag_off_add(nskb_frag, offset - pos); 41018cb19905SMichael S. Tsirkin skb_frag_size_sub(nskb_frag, offset - pos); 4102f4c50d99SHerbert Xu } 4103f4c50d99SHerbert Xu 410489319d38SHerbert Xu skb_shinfo(nskb)->nr_frags++; 4105f4c50d99SHerbert Xu 4106f4c50d99SHerbert Xu if (pos + size <= offset + len) { 4107f4c50d99SHerbert Xu i++; 41084e1beba1SMichael S. Tsirkin frag++; 4109f4c50d99SHerbert Xu pos += size; 4110f4c50d99SHerbert Xu } else { 41118cb19905SMichael S. Tsirkin skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); 411289319d38SHerbert Xu goto skip_fraglist; 4113f4c50d99SHerbert Xu } 4114f4c50d99SHerbert Xu 41158cb19905SMichael S. Tsirkin nskb_frag++; 4116f4c50d99SHerbert Xu } 4117f4c50d99SHerbert Xu 411889319d38SHerbert Xu skip_fraglist: 4119f4c50d99SHerbert Xu nskb->data_len = len - hsize; 4120f4c50d99SHerbert Xu nskb->len += nskb->data_len; 4121f4c50d99SHerbert Xu nskb->truesize += nskb->data_len; 4122ec5f0615SPravin B Shelar 41231cdbcb79SSimon Horman perform_csum_check: 41247fbeffedSAlexander Duyck if (!csum) { 4125ff907a11SEric Dumazet if (skb_has_shared_frag(nskb) && 4126ff907a11SEric Dumazet __skb_linearize(nskb)) 4127ddff00d4SAlexander Duyck goto err; 4128ff907a11SEric Dumazet 41297fbeffedSAlexander Duyck if (!nskb->remcsum_offload) 4130ec5f0615SPravin B Shelar nskb->ip_summed = CHECKSUM_NONE; 413176443456SAlexander Duyck SKB_GSO_CB(nskb)->csum = 413276443456SAlexander Duyck skb_checksum(nskb, doffset, 413376443456SAlexander Duyck nskb->len - doffset, 0); 41347e2b10c1STom Herbert SKB_GSO_CB(nskb)->csum_start = 41357e2b10c1STom Herbert skb_headroom(nskb) + doffset; 4136ec5f0615SPravin B Shelar } 4137df5771ffSMichael S. Tsirkin } while ((offset += len) < head_skb->len); 4138f4c50d99SHerbert Xu 4139bec3cfdcSEric Dumazet /* Some callers want to get the end of the list. 4140bec3cfdcSEric Dumazet * Put it in segs->prev to avoid walking the list. 
4141bec3cfdcSEric Dumazet * (see validate_xmit_skb_list() for example) 4142bec3cfdcSEric Dumazet */ 4143bec3cfdcSEric Dumazet segs->prev = tail; 4144432c856fSToshiaki Makita 4145802ab55aSAlexander Duyck if (partial_segs) { 414607b26c94SSteffen Klassert struct sk_buff *iter; 4147802ab55aSAlexander Duyck int type = skb_shinfo(head_skb)->gso_type; 414807b26c94SSteffen Klassert unsigned short gso_size = skb_shinfo(head_skb)->gso_size; 4149802ab55aSAlexander Duyck 4150802ab55aSAlexander Duyck /* Update type to add partial and then remove dodgy if set */ 415107b26c94SSteffen Klassert type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL; 4152802ab55aSAlexander Duyck type &= ~SKB_GSO_DODGY; 4153802ab55aSAlexander Duyck 4154802ab55aSAlexander Duyck /* Update GSO info and prepare to start updating headers on 4155802ab55aSAlexander Duyck * our way back down the stack of protocols. 4156802ab55aSAlexander Duyck */ 415707b26c94SSteffen Klassert for (iter = segs; iter; iter = iter->next) { 415807b26c94SSteffen Klassert skb_shinfo(iter)->gso_size = gso_size; 415907b26c94SSteffen Klassert skb_shinfo(iter)->gso_segs = partial_segs; 416007b26c94SSteffen Klassert skb_shinfo(iter)->gso_type = type; 416107b26c94SSteffen Klassert SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset; 416207b26c94SSteffen Klassert } 416307b26c94SSteffen Klassert 416407b26c94SSteffen Klassert if (tail->len - doffset <= gso_size) 416507b26c94SSteffen Klassert skb_shinfo(tail)->gso_size = 0; 416607b26c94SSteffen Klassert else if (tail != segs) 416707b26c94SSteffen Klassert skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size); 4168802ab55aSAlexander Duyck } 4169802ab55aSAlexander Duyck 4170432c856fSToshiaki Makita /* Following permits correct backpressure, for protocols 4171432c856fSToshiaki Makita * using skb_set_owner_w(). 4172432c856fSToshiaki Makita * Idea is to tranfert ownership from head_skb to last segment. 
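 * The last segment is normally the last one to be freed, so parking
 * the original destructor, truesize and socket reference there keeps
 * the send-buffer charge (and hence the backpressure) in place until
 * every segment has actually been consumed.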
4173432c856fSToshiaki Makita */ 4174432c856fSToshiaki Makita if (head_skb->destructor == sock_wfree) { 4175432c856fSToshiaki Makita swap(tail->truesize, head_skb->truesize); 4176432c856fSToshiaki Makita swap(tail->destructor, head_skb->destructor); 4177432c856fSToshiaki Makita swap(tail->sk, head_skb->sk); 4178432c856fSToshiaki Makita } 4179f4c50d99SHerbert Xu return segs; 4180f4c50d99SHerbert Xu 4181f4c50d99SHerbert Xu err: 4182289dccbeSEric Dumazet kfree_skb_list(segs); 4183f4c50d99SHerbert Xu return ERR_PTR(err); 4184f4c50d99SHerbert Xu } 4185f4c50d99SHerbert Xu EXPORT_SYMBOL_GPL(skb_segment); 4186f4c50d99SHerbert Xu 4187d4546c25SDavid Miller int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) 418871d93b39SHerbert Xu { 41898a29111cSEric Dumazet struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); 419067147ba9SHerbert Xu unsigned int offset = skb_gro_offset(skb); 419167147ba9SHerbert Xu unsigned int headlen = skb_headlen(skb); 41928a29111cSEric Dumazet unsigned int len = skb_gro_len(skb); 4193715dc1f3SEric Dumazet unsigned int delta_truesize; 4194d4546c25SDavid Miller struct sk_buff *lp; 419571d93b39SHerbert Xu 41960ab03f35SSteffen Klassert if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) 419771d93b39SHerbert Xu return -E2BIG; 419871d93b39SHerbert Xu 419929e98242SEric Dumazet lp = NAPI_GRO_CB(p)->last; 42008a29111cSEric Dumazet pinfo = skb_shinfo(lp); 42018a29111cSEric Dumazet 42028a29111cSEric Dumazet if (headlen <= offset) { 420342da6994SHerbert Xu skb_frag_t *frag; 420466e92fcfSHerbert Xu skb_frag_t *frag2; 42059aaa156cSHerbert Xu int i = skbinfo->nr_frags; 42069aaa156cSHerbert Xu int nr_frags = pinfo->nr_frags + i; 420742da6994SHerbert Xu 420866e92fcfSHerbert Xu if (nr_frags > MAX_SKB_FRAGS) 42098a29111cSEric Dumazet goto merge; 421081705ad1SHerbert Xu 42118a29111cSEric Dumazet offset -= headlen; 42129aaa156cSHerbert Xu pinfo->nr_frags = nr_frags; 42139aaa156cSHerbert Xu skbinfo->nr_frags = 0; 4214f5572068SHerbert Xu 42159aaa156cSHerbert Xu frag = pinfo->frags + nr_frags; 42169aaa156cSHerbert Xu frag2 = skbinfo->frags + i; 421766e92fcfSHerbert Xu do { 421866e92fcfSHerbert Xu *--frag = *--frag2; 421966e92fcfSHerbert Xu } while (--i); 422066e92fcfSHerbert Xu 4221b54c9d5bSJonathan Lemon skb_frag_off_add(frag, offset); 42229e903e08SEric Dumazet skb_frag_size_sub(frag, offset); 422366e92fcfSHerbert Xu 4224715dc1f3SEric Dumazet /* all fragments truesize : remove (head size + sk_buff) */ 4225ec47ea82SAlexander Duyck delta_truesize = skb->truesize - 4226ec47ea82SAlexander Duyck SKB_TRUESIZE(skb_end_offset(skb)); 4227715dc1f3SEric Dumazet 4228f5572068SHerbert Xu skb->truesize -= skb->data_len; 4229f5572068SHerbert Xu skb->len -= skb->data_len; 4230f5572068SHerbert Xu skb->data_len = 0; 4231f5572068SHerbert Xu 4232715dc1f3SEric Dumazet NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; 42335d38a079SHerbert Xu goto done; 4234d7e8883cSEric Dumazet } else if (skb->head_frag) { 4235d7e8883cSEric Dumazet int nr_frags = pinfo->nr_frags; 4236d7e8883cSEric Dumazet skb_frag_t *frag = pinfo->frags + nr_frags; 4237d7e8883cSEric Dumazet struct page *page = virt_to_head_page(skb->head); 4238d7e8883cSEric Dumazet unsigned int first_size = headlen - offset; 4239d7e8883cSEric Dumazet unsigned int first_offset; 4240d7e8883cSEric Dumazet 4241d7e8883cSEric Dumazet if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) 42428a29111cSEric Dumazet goto merge; 4243d7e8883cSEric Dumazet 4244d7e8883cSEric Dumazet first_offset = skb->data - 4245d7e8883cSEric Dumazet (unsigned char *)page_address(page) + 
4246d7e8883cSEric Dumazet offset; 4247d7e8883cSEric Dumazet 4248d7e8883cSEric Dumazet pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; 4249d7e8883cSEric Dumazet 4250d8e18a51SMatthew Wilcox (Oracle) __skb_frag_set_page(frag, page); 4251b54c9d5bSJonathan Lemon skb_frag_off_set(frag, first_offset); 4252d7e8883cSEric Dumazet skb_frag_size_set(frag, first_size); 4253d7e8883cSEric Dumazet 4254d7e8883cSEric Dumazet memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); 4255d7e8883cSEric Dumazet /* We dont need to clear skbinfo->nr_frags here */ 4256d7e8883cSEric Dumazet 4257715dc1f3SEric Dumazet delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 4258d7e8883cSEric Dumazet NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; 4259d7e8883cSEric Dumazet goto done; 42608a29111cSEric Dumazet } 426171d93b39SHerbert Xu 426271d93b39SHerbert Xu merge: 4263715dc1f3SEric Dumazet delta_truesize = skb->truesize; 426467147ba9SHerbert Xu if (offset > headlen) { 4265d1dc7abfSMichal Schmidt unsigned int eat = offset - headlen; 4266d1dc7abfSMichal Schmidt 4267b54c9d5bSJonathan Lemon skb_frag_off_add(&skbinfo->frags[0], eat); 42689e903e08SEric Dumazet skb_frag_size_sub(&skbinfo->frags[0], eat); 4269d1dc7abfSMichal Schmidt skb->data_len -= eat; 4270d1dc7abfSMichal Schmidt skb->len -= eat; 427167147ba9SHerbert Xu offset = headlen; 427256035022SHerbert Xu } 427356035022SHerbert Xu 427467147ba9SHerbert Xu __skb_pull(skb, offset); 427556035022SHerbert Xu 427629e98242SEric Dumazet if (NAPI_GRO_CB(p)->last == p) 42778a29111cSEric Dumazet skb_shinfo(p)->frag_list = skb; 42788a29111cSEric Dumazet else 4279c3c7c254SEric Dumazet NAPI_GRO_CB(p)->last->next = skb; 4280c3c7c254SEric Dumazet NAPI_GRO_CB(p)->last = skb; 4281f4a775d1SEric Dumazet __skb_header_release(skb); 42828a29111cSEric Dumazet lp = p; 428371d93b39SHerbert Xu 42845d38a079SHerbert Xu done: 42855d38a079SHerbert Xu NAPI_GRO_CB(p)->count++; 428637fe4732SHerbert Xu p->data_len += len; 4287715dc1f3SEric Dumazet p->truesize += delta_truesize; 428837fe4732SHerbert Xu p->len += len; 42898a29111cSEric Dumazet if (lp != p) { 42908a29111cSEric Dumazet lp->data_len += len; 42918a29111cSEric Dumazet lp->truesize += delta_truesize; 42928a29111cSEric Dumazet lp->len += len; 42938a29111cSEric Dumazet } 429471d93b39SHerbert Xu NAPI_GRO_CB(skb)->same_flow = 1; 429571d93b39SHerbert Xu return 0; 429671d93b39SHerbert Xu } 429771d93b39SHerbert Xu 4298df5042f4SFlorian Westphal #ifdef CONFIG_SKB_EXTENSIONS 4299df5042f4SFlorian Westphal #define SKB_EXT_ALIGN_VALUE 8 4300df5042f4SFlorian Westphal #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE) 4301df5042f4SFlorian Westphal 4302df5042f4SFlorian Westphal static const u8 skb_ext_type_len[] = { 4303df5042f4SFlorian Westphal #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4304df5042f4SFlorian Westphal [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info), 4305df5042f4SFlorian Westphal #endif 43064165079bSFlorian Westphal #ifdef CONFIG_XFRM 43074165079bSFlorian Westphal [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), 43084165079bSFlorian Westphal #endif 430995a7233cSPaul Blakey #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 431095a7233cSPaul Blakey [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), 431195a7233cSPaul Blakey #endif 43123ee17bc7SMat Martineau #if IS_ENABLED(CONFIG_MPTCP) 43133ee17bc7SMat Martineau [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext), 43143ee17bc7SMat Martineau #endif 4315df5042f4SFlorian Westphal }; 4316df5042f4SFlorian 
Westphal 4317df5042f4SFlorian Westphal static __always_inline unsigned int skb_ext_total_length(void) 4318df5042f4SFlorian Westphal { 4319df5042f4SFlorian Westphal return SKB_EXT_CHUNKSIZEOF(struct skb_ext) + 4320df5042f4SFlorian Westphal #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) 4321df5042f4SFlorian Westphal skb_ext_type_len[SKB_EXT_BRIDGE_NF] + 4322df5042f4SFlorian Westphal #endif 43234165079bSFlorian Westphal #ifdef CONFIG_XFRM 43244165079bSFlorian Westphal skb_ext_type_len[SKB_EXT_SEC_PATH] + 43254165079bSFlorian Westphal #endif 432695a7233cSPaul Blakey #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 432795a7233cSPaul Blakey skb_ext_type_len[TC_SKB_EXT] + 432895a7233cSPaul Blakey #endif 43293ee17bc7SMat Martineau #if IS_ENABLED(CONFIG_MPTCP) 43303ee17bc7SMat Martineau skb_ext_type_len[SKB_EXT_MPTCP] + 43313ee17bc7SMat Martineau #endif 4332df5042f4SFlorian Westphal 0; 4333df5042f4SFlorian Westphal } 4334df5042f4SFlorian Westphal 4335df5042f4SFlorian Westphal static void skb_extensions_init(void) 4336df5042f4SFlorian Westphal { 4337df5042f4SFlorian Westphal BUILD_BUG_ON(SKB_EXT_NUM >= 8); 4338df5042f4SFlorian Westphal BUILD_BUG_ON(skb_ext_total_length() > 255); 4339df5042f4SFlorian Westphal 4340df5042f4SFlorian Westphal skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache", 4341df5042f4SFlorian Westphal SKB_EXT_ALIGN_VALUE * skb_ext_total_length(), 4342df5042f4SFlorian Westphal 0, 4343df5042f4SFlorian Westphal SLAB_HWCACHE_ALIGN|SLAB_PANIC, 4344df5042f4SFlorian Westphal NULL); 4345df5042f4SFlorian Westphal } 4346df5042f4SFlorian Westphal #else 4347df5042f4SFlorian Westphal static void skb_extensions_init(void) {} 4348df5042f4SFlorian Westphal #endif 4349df5042f4SFlorian Westphal 43501da177e4SLinus Torvalds void __init skb_init(void) 43511da177e4SLinus Torvalds { 435279a8a642SKees Cook skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache", 43531da177e4SLinus Torvalds sizeof(struct sk_buff), 43541da177e4SLinus Torvalds 0, 4355e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 435679a8a642SKees Cook offsetof(struct sk_buff, cb), 435779a8a642SKees Cook sizeof_field(struct sk_buff, cb), 435820c2df83SPaul Mundt NULL); 4359d179cd12SDavid S. Miller skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 4360d0bf4a9eSEric Dumazet sizeof(struct sk_buff_fclones), 4361d179cd12SDavid S. Miller 0, 4362e5d679f3SAlexey Dobriyan SLAB_HWCACHE_ALIGN|SLAB_PANIC, 436320c2df83SPaul Mundt NULL); 4364df5042f4SFlorian Westphal skb_extensions_init(); 43651da177e4SLinus Torvalds } 43661da177e4SLinus Torvalds 436751c739d1SDavid S. Miller static int 436848a1df65SJason A. Donenfeld __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, 436948a1df65SJason A. Donenfeld unsigned int recursion_level) 4370716ea3a7SDavid Howells { 43711a028e50SDavid S. Miller int start = skb_headlen(skb); 43721a028e50SDavid S. Miller int i, copy = start - offset; 4373fbb398a8SDavid S. Miller struct sk_buff *frag_iter; 4374716ea3a7SDavid Howells int elt = 0; 4375716ea3a7SDavid Howells 437648a1df65SJason A. Donenfeld if (unlikely(recursion_level >= 24)) 437748a1df65SJason A. Donenfeld return -EMSGSIZE; 437848a1df65SJason A. 
Donenfeld 4379716ea3a7SDavid Howells if (copy > 0) { 4380716ea3a7SDavid Howells if (copy > len) 4381716ea3a7SDavid Howells copy = len; 4382642f1490SJens Axboe sg_set_buf(sg, skb->data + offset, copy); 4383716ea3a7SDavid Howells elt++; 4384716ea3a7SDavid Howells if ((len -= copy) == 0) 4385716ea3a7SDavid Howells return elt; 4386716ea3a7SDavid Howells offset += copy; 4387716ea3a7SDavid Howells } 4388716ea3a7SDavid Howells 4389716ea3a7SDavid Howells for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 43901a028e50SDavid S. Miller int end; 4391716ea3a7SDavid Howells 4392547b792cSIlpo Järvinen WARN_ON(start > offset + len); 43931a028e50SDavid S. Miller 43949e903e08SEric Dumazet end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 4395716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 4396716ea3a7SDavid Howells skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 439748a1df65SJason A. Donenfeld if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 439848a1df65SJason A. Donenfeld return -EMSGSIZE; 4399716ea3a7SDavid Howells 4400716ea3a7SDavid Howells if (copy > len) 4401716ea3a7SDavid Howells copy = len; 4402ea2ab693SIan Campbell sg_set_page(&sg[elt], skb_frag_page(frag), copy, 4403b54c9d5bSJonathan Lemon skb_frag_off(frag) + offset - start); 4404716ea3a7SDavid Howells elt++; 4405716ea3a7SDavid Howells if (!(len -= copy)) 4406716ea3a7SDavid Howells return elt; 4407716ea3a7SDavid Howells offset += copy; 4408716ea3a7SDavid Howells } 44091a028e50SDavid S. Miller start = end; 4410716ea3a7SDavid Howells } 4411716ea3a7SDavid Howells 4412fbb398a8SDavid S. Miller skb_walk_frags(skb, frag_iter) { 441348a1df65SJason A. Donenfeld int end, ret; 4414716ea3a7SDavid Howells 4415547b792cSIlpo Järvinen WARN_ON(start > offset + len); 44161a028e50SDavid S. Miller 4417fbb398a8SDavid S. Miller end = start + frag_iter->len; 4418716ea3a7SDavid Howells if ((copy = end - offset) > 0) { 441948a1df65SJason A. Donenfeld if (unlikely(elt && sg_is_last(&sg[elt - 1]))) 442048a1df65SJason A. Donenfeld return -EMSGSIZE; 442148a1df65SJason A. Donenfeld 4422716ea3a7SDavid Howells if (copy > len) 4423716ea3a7SDavid Howells copy = len; 442448a1df65SJason A. Donenfeld ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, 442548a1df65SJason A. Donenfeld copy, recursion_level + 1); 442648a1df65SJason A. Donenfeld if (unlikely(ret < 0)) 442748a1df65SJason A. Donenfeld return ret; 442848a1df65SJason A. Donenfeld elt += ret; 4429716ea3a7SDavid Howells if ((len -= copy) == 0) 4430716ea3a7SDavid Howells return elt; 4431716ea3a7SDavid Howells offset += copy; 4432716ea3a7SDavid Howells } 44331a028e50SDavid S. Miller start = end; 4434716ea3a7SDavid Howells } 4435716ea3a7SDavid Howells BUG_ON(len); 4436716ea3a7SDavid Howells return elt; 4437716ea3a7SDavid Howells } 4438716ea3a7SDavid Howells 443948a1df65SJason A. Donenfeld /** 444048a1df65SJason A. Donenfeld * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 444148a1df65SJason A. Donenfeld * @skb: Socket buffer containing the buffers to be mapped 444248a1df65SJason A. Donenfeld * @sg: The scatter-gather list to map into 444348a1df65SJason A. Donenfeld * @offset: The offset into the buffer's contents to start mapping 444448a1df65SJason A. Donenfeld * @len: Length of buffer space to be mapped 444548a1df65SJason A. Donenfeld * 444648a1df65SJason A. Donenfeld * Fill the specified scatter-gather list with mappings/pointers into a 444748a1df65SJason A. Donenfeld * region of the buffer space attached to a socket buffer. Returns either 444848a1df65SJason A. 
Donenfeld * the number of scatterlist items used, or -EMSGSIZE if the contents
444948a1df65SJason A. Donenfeld * could not fit.
445048a1df65SJason A. Donenfeld */
445148a1df65SJason A. Donenfeld int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
445248a1df65SJason A. Donenfeld {
445348a1df65SJason A. Donenfeld int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
445448a1df65SJason A. Donenfeld
445548a1df65SJason A. Donenfeld if (nsg <= 0)
445648a1df65SJason A. Donenfeld return nsg;
445748a1df65SJason A. Donenfeld
445848a1df65SJason A. Donenfeld sg_mark_end(&sg[nsg - 1]);
445948a1df65SJason A. Donenfeld
446048a1df65SJason A. Donenfeld return nsg;
446148a1df65SJason A. Donenfeld }
446248a1df65SJason A. Donenfeld EXPORT_SYMBOL_GPL(skb_to_sgvec);
446348a1df65SJason A. Donenfeld
446425a91d8dSFan Du /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the given
446525a91d8dSFan Du * sglist without marking the sg entry that contains the last skb data as the end.
446625a91d8dSFan Du * So the caller can manipulate the sg list at will when adding new data after
446725a91d8dSFan Du * the first call, without calling sg_unmark_end to extend the sg list.
446825a91d8dSFan Du *
446925a91d8dSFan Du * Scenario for using skb_to_sgvec_nomark:
447025a91d8dSFan Du * 1. sg_init_table
447125a91d8dSFan Du * 2. skb_to_sgvec_nomark(payload1)
447225a91d8dSFan Du * 3. skb_to_sgvec_nomark(payload2)
447325a91d8dSFan Du *
447425a91d8dSFan Du * This is equivalent to:
447525a91d8dSFan Du * 1. sg_init_table
447625a91d8dSFan Du * 2. skb_to_sgvec(payload1)
447725a91d8dSFan Du * 3. sg_unmark_end
447825a91d8dSFan Du * 4. skb_to_sgvec(payload2)
447925a91d8dSFan Du *
448025a91d8dSFan Du * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
448125a91d8dSFan Du * is preferable.
448225a91d8dSFan Du */
448325a91d8dSFan Du int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
448425a91d8dSFan Du int offset, int len)
448525a91d8dSFan Du {
448648a1df65SJason A. Donenfeld return __skb_to_sgvec(skb, sg, offset, len, 0);
448725a91d8dSFan Du }
448825a91d8dSFan Du EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
448925a91d8dSFan Du
449051c739d1SDavid S. Miller
449151c739d1SDavid S. Miller
4492716ea3a7SDavid Howells /**
4493716ea3a7SDavid Howells * skb_cow_data - Check that a socket buffer's data buffers are writable
4494716ea3a7SDavid Howells * @skb: The socket buffer to check.
4495716ea3a7SDavid Howells * @tailbits: Amount of trailing space to be added
4496716ea3a7SDavid Howells * @trailer: Returned pointer to the skb where the @tailbits space begins
4497716ea3a7SDavid Howells *
4498716ea3a7SDavid Howells * Make sure that the data buffers attached to a socket buffer are
4499716ea3a7SDavid Howells * writable. If they are not, private copies are made of the data buffers
4500716ea3a7SDavid Howells * and the socket buffer is set to use these instead.
4501716ea3a7SDavid Howells *
4502716ea3a7SDavid Howells * If @tailbits is given, make sure that there is space to write @tailbits
4503716ea3a7SDavid Howells * bytes of data beyond the current end of the socket buffer. @trailer will be
4504716ea3a7SDavid Howells * set to point to the skb in which this space begins.
4505716ea3a7SDavid Howells *
4506716ea3a7SDavid Howells * The number of scatterlist elements required to completely map the
4507716ea3a7SDavid Howells * COW'd and extended socket buffer will be returned.
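 *
 * Illustrative sketch of the usual pairing with skb_to_sgvec() (as in
 * IPsec-style callers; names and error handling below are the
 * caller's own, not part of this file):
 *
 *	nsg = skb_cow_data(skb, tailbits, &trailer);
 *	if (nsg < 0)
 *		return nsg;
 *	sg_init_table(sg, nsg);
 *	err = skb_to_sgvec(skb, sg, 0, skb->len);
 *	if (err < 0)
 *		return err;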
4508716ea3a7SDavid Howells */ 4509716ea3a7SDavid Howells int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 4510716ea3a7SDavid Howells { 4511716ea3a7SDavid Howells int copyflag; 4512716ea3a7SDavid Howells int elt; 4513716ea3a7SDavid Howells struct sk_buff *skb1, **skb_p; 4514716ea3a7SDavid Howells 4515716ea3a7SDavid Howells /* If skb is cloned or its head is paged, reallocate 4516716ea3a7SDavid Howells * head pulling out all the pages (pages are considered not writable 4517716ea3a7SDavid Howells * at the moment even if they are anonymous). 4518716ea3a7SDavid Howells */ 4519716ea3a7SDavid Howells if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 4520c15fc199SMiaohe Lin !__pskb_pull_tail(skb, __skb_pagelen(skb))) 4521716ea3a7SDavid Howells return -ENOMEM; 4522716ea3a7SDavid Howells 4523716ea3a7SDavid Howells /* Easy case. Most of packets will go this way. */ 452421dc3301SDavid S. Miller if (!skb_has_frag_list(skb)) { 4525716ea3a7SDavid Howells /* A little of trouble, not enough of space for trailer. 4526716ea3a7SDavid Howells * This should not happen, when stack is tuned to generate 4527716ea3a7SDavid Howells * good frames. OK, on miss we reallocate and reserve even more 4528716ea3a7SDavid Howells * space, 128 bytes is fair. */ 4529716ea3a7SDavid Howells 4530716ea3a7SDavid Howells if (skb_tailroom(skb) < tailbits && 4531716ea3a7SDavid Howells pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 4532716ea3a7SDavid Howells return -ENOMEM; 4533716ea3a7SDavid Howells 4534716ea3a7SDavid Howells /* Voila! */ 4535716ea3a7SDavid Howells *trailer = skb; 4536716ea3a7SDavid Howells return 1; 4537716ea3a7SDavid Howells } 4538716ea3a7SDavid Howells 4539716ea3a7SDavid Howells /* Misery. We are in troubles, going to mincer fragments... */ 4540716ea3a7SDavid Howells 4541716ea3a7SDavid Howells elt = 1; 4542716ea3a7SDavid Howells skb_p = &skb_shinfo(skb)->frag_list; 4543716ea3a7SDavid Howells copyflag = 0; 4544716ea3a7SDavid Howells 4545716ea3a7SDavid Howells while ((skb1 = *skb_p) != NULL) { 4546716ea3a7SDavid Howells int ntail = 0; 4547716ea3a7SDavid Howells 4548716ea3a7SDavid Howells /* The fragment is partially pulled by someone, 4549716ea3a7SDavid Howells * this can happen on input. Copy it and everything 4550716ea3a7SDavid Howells * after it. */ 4551716ea3a7SDavid Howells 4552716ea3a7SDavid Howells if (skb_shared(skb1)) 4553716ea3a7SDavid Howells copyflag = 1; 4554716ea3a7SDavid Howells 4555716ea3a7SDavid Howells /* If the skb is the last, worry about trailer. */ 4556716ea3a7SDavid Howells 4557716ea3a7SDavid Howells if (skb1->next == NULL && tailbits) { 4558716ea3a7SDavid Howells if (skb_shinfo(skb1)->nr_frags || 455921dc3301SDavid S. Miller skb_has_frag_list(skb1) || 4560716ea3a7SDavid Howells skb_tailroom(skb1) < tailbits) 4561716ea3a7SDavid Howells ntail = tailbits + 128; 4562716ea3a7SDavid Howells } 4563716ea3a7SDavid Howells 4564716ea3a7SDavid Howells if (copyflag || 4565716ea3a7SDavid Howells skb_cloned(skb1) || 4566716ea3a7SDavid Howells ntail || 4567716ea3a7SDavid Howells skb_shinfo(skb1)->nr_frags || 456821dc3301SDavid S. Miller skb_has_frag_list(skb1)) { 4569716ea3a7SDavid Howells struct sk_buff *skb2; 4570716ea3a7SDavid Howells 4571716ea3a7SDavid Howells /* Fuck, we are miserable poor guys... 
*/ 4572716ea3a7SDavid Howells if (ntail == 0) 4573716ea3a7SDavid Howells skb2 = skb_copy(skb1, GFP_ATOMIC); 4574716ea3a7SDavid Howells else 4575716ea3a7SDavid Howells skb2 = skb_copy_expand(skb1, 4576716ea3a7SDavid Howells skb_headroom(skb1), 4577716ea3a7SDavid Howells ntail, 4578716ea3a7SDavid Howells GFP_ATOMIC); 4579716ea3a7SDavid Howells if (unlikely(skb2 == NULL)) 4580716ea3a7SDavid Howells return -ENOMEM; 4581716ea3a7SDavid Howells 4582716ea3a7SDavid Howells if (skb1->sk) 4583716ea3a7SDavid Howells skb_set_owner_w(skb2, skb1->sk); 4584716ea3a7SDavid Howells 4585716ea3a7SDavid Howells /* Looking around. Are we still alive? 4586716ea3a7SDavid Howells * OK, link new skb, drop old one */ 4587716ea3a7SDavid Howells 4588716ea3a7SDavid Howells skb2->next = skb1->next; 4589716ea3a7SDavid Howells *skb_p = skb2; 4590716ea3a7SDavid Howells kfree_skb(skb1); 4591716ea3a7SDavid Howells skb1 = skb2; 4592716ea3a7SDavid Howells } 4593716ea3a7SDavid Howells elt++; 4594716ea3a7SDavid Howells *trailer = skb1; 4595716ea3a7SDavid Howells skb_p = &skb1->next; 4596716ea3a7SDavid Howells } 4597716ea3a7SDavid Howells 4598716ea3a7SDavid Howells return elt; 4599716ea3a7SDavid Howells } 4600b4ac530fSDavid S. Miller EXPORT_SYMBOL_GPL(skb_cow_data); 4601716ea3a7SDavid Howells 4602b1faf566SEric Dumazet static void sock_rmem_free(struct sk_buff *skb) 4603b1faf566SEric Dumazet { 4604b1faf566SEric Dumazet struct sock *sk = skb->sk; 4605b1faf566SEric Dumazet 4606b1faf566SEric Dumazet atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 4607b1faf566SEric Dumazet } 4608b1faf566SEric Dumazet 46098605330aSSoheil Hassas Yeganeh static void skb_set_err_queue(struct sk_buff *skb) 46108605330aSSoheil Hassas Yeganeh { 46118605330aSSoheil Hassas Yeganeh /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. 46128605330aSSoheil Hassas Yeganeh * So, it is safe to (mis)use it to mark skbs on the error queue. 
46138605330aSSoheil Hassas Yeganeh */ 46148605330aSSoheil Hassas Yeganeh skb->pkt_type = PACKET_OUTGOING; 46158605330aSSoheil Hassas Yeganeh BUILD_BUG_ON(PACKET_OUTGOING == 0); 46168605330aSSoheil Hassas Yeganeh } 46178605330aSSoheil Hassas Yeganeh 4618b1faf566SEric Dumazet /* 4619b1faf566SEric Dumazet * Note: We dont mem charge error packets (no sk_forward_alloc changes) 4620b1faf566SEric Dumazet */ 4621b1faf566SEric Dumazet int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 4622b1faf566SEric Dumazet { 4623b1faf566SEric Dumazet if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 4624ebb3b78dSEric Dumazet (unsigned int)READ_ONCE(sk->sk_rcvbuf)) 4625b1faf566SEric Dumazet return -ENOMEM; 4626b1faf566SEric Dumazet 4627b1faf566SEric Dumazet skb_orphan(skb); 4628b1faf566SEric Dumazet skb->sk = sk; 4629b1faf566SEric Dumazet skb->destructor = sock_rmem_free; 4630b1faf566SEric Dumazet atomic_add(skb->truesize, &sk->sk_rmem_alloc); 46318605330aSSoheil Hassas Yeganeh skb_set_err_queue(skb); 4632b1faf566SEric Dumazet 4633abb57ea4SEric Dumazet /* before exiting rcu section, make sure dst is refcounted */ 4634abb57ea4SEric Dumazet skb_dst_force(skb); 4635abb57ea4SEric Dumazet 4636b1faf566SEric Dumazet skb_queue_tail(&sk->sk_error_queue, skb); 4637b1faf566SEric Dumazet if (!sock_flag(sk, SOCK_DEAD)) 46386e5d58fdSVinicius Costa Gomes sk->sk_error_report(sk); 4639b1faf566SEric Dumazet return 0; 4640b1faf566SEric Dumazet } 4641b1faf566SEric Dumazet EXPORT_SYMBOL(sock_queue_err_skb); 4642b1faf566SEric Dumazet 464383a1a1a7SSoheil Hassas Yeganeh static bool is_icmp_err_skb(const struct sk_buff *skb) 464483a1a1a7SSoheil Hassas Yeganeh { 464583a1a1a7SSoheil Hassas Yeganeh return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || 464683a1a1a7SSoheil Hassas Yeganeh SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); 464783a1a1a7SSoheil Hassas Yeganeh } 464883a1a1a7SSoheil Hassas Yeganeh 4649364a9e93SWillem de Bruijn struct sk_buff *sock_dequeue_err_skb(struct sock *sk) 4650364a9e93SWillem de Bruijn { 4651364a9e93SWillem de Bruijn struct sk_buff_head *q = &sk->sk_error_queue; 465283a1a1a7SSoheil Hassas Yeganeh struct sk_buff *skb, *skb_next = NULL; 465383a1a1a7SSoheil Hassas Yeganeh bool icmp_next = false; 4654997d5c3fSEric Dumazet unsigned long flags; 4655364a9e93SWillem de Bruijn 4656997d5c3fSEric Dumazet spin_lock_irqsave(&q->lock, flags); 4657364a9e93SWillem de Bruijn skb = __skb_dequeue(q); 465838b25793SSoheil Hassas Yeganeh if (skb && (skb_next = skb_peek(q))) { 465983a1a1a7SSoheil Hassas Yeganeh icmp_next = is_icmp_err_skb(skb_next); 466038b25793SSoheil Hassas Yeganeh if (icmp_next) 4661985f7337SWillem de Bruijn sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno; 466238b25793SSoheil Hassas Yeganeh } 4663997d5c3fSEric Dumazet spin_unlock_irqrestore(&q->lock, flags); 4664364a9e93SWillem de Bruijn 466583a1a1a7SSoheil Hassas Yeganeh if (is_icmp_err_skb(skb) && !icmp_next) 466683a1a1a7SSoheil Hassas Yeganeh sk->sk_err = 0; 466783a1a1a7SSoheil Hassas Yeganeh 466883a1a1a7SSoheil Hassas Yeganeh if (skb_next) 4669364a9e93SWillem de Bruijn sk->sk_error_report(sk); 4670364a9e93SWillem de Bruijn 4671364a9e93SWillem de Bruijn return skb; 4672364a9e93SWillem de Bruijn } 4673364a9e93SWillem de Bruijn EXPORT_SYMBOL(sock_dequeue_err_skb); 4674364a9e93SWillem de Bruijn 4675cab41c47SAlexander Duyck /** 4676cab41c47SAlexander Duyck * skb_clone_sk - create clone of skb, and take reference to socket 4677cab41c47SAlexander Duyck * @skb: the skb to clone 4678cab41c47SAlexander Duyck * 
4679cab41c47SAlexander Duyck * This function creates a clone of a buffer that holds a reference on 4680cab41c47SAlexander Duyck * sk_refcnt. Buffers created via this function are meant to be 4681cab41c47SAlexander Duyck * returned using sock_queue_err_skb, or freed via kfree_skb. 4682cab41c47SAlexander Duyck * 4683cab41c47SAlexander Duyck * When passing buffers allocated with this function to sock_queue_err_skb 4684cab41c47SAlexander Duyck * it is necessary to wrap the call with sock_hold/sock_put in order to 4685cab41c47SAlexander Duyck * prevent the socket from being released prior to being enqueued on 4686cab41c47SAlexander Duyck * the sk_error_queue. 4687cab41c47SAlexander Duyck */ 468862bccb8cSAlexander Duyck struct sk_buff *skb_clone_sk(struct sk_buff *skb) 468962bccb8cSAlexander Duyck { 469062bccb8cSAlexander Duyck struct sock *sk = skb->sk; 469162bccb8cSAlexander Duyck struct sk_buff *clone; 469262bccb8cSAlexander Duyck 469341c6d650SReshetova, Elena if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) 469462bccb8cSAlexander Duyck return NULL; 469562bccb8cSAlexander Duyck 469662bccb8cSAlexander Duyck clone = skb_clone(skb, GFP_ATOMIC); 469762bccb8cSAlexander Duyck if (!clone) { 469862bccb8cSAlexander Duyck sock_put(sk); 469962bccb8cSAlexander Duyck return NULL; 470062bccb8cSAlexander Duyck } 470162bccb8cSAlexander Duyck 470262bccb8cSAlexander Duyck clone->sk = sk; 470362bccb8cSAlexander Duyck clone->destructor = sock_efree; 470462bccb8cSAlexander Duyck 470562bccb8cSAlexander Duyck return clone; 470662bccb8cSAlexander Duyck } 470762bccb8cSAlexander Duyck EXPORT_SYMBOL(skb_clone_sk); 470862bccb8cSAlexander Duyck 470937846ef0SAlexander Duyck static void __skb_complete_tx_timestamp(struct sk_buff *skb, 471037846ef0SAlexander Duyck struct sock *sk, 47114ef1b286SSoheil Hassas Yeganeh int tstype, 47124ef1b286SSoheil Hassas Yeganeh bool opt_stats) 4713ac45f602SPatrick Ohly { 4714ac45f602SPatrick Ohly struct sock_exterr_skb *serr; 4715ac45f602SPatrick Ohly int err; 4716ac45f602SPatrick Ohly 47174ef1b286SSoheil Hassas Yeganeh BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); 47184ef1b286SSoheil Hassas Yeganeh 4719ac45f602SPatrick Ohly serr = SKB_EXT_ERR(skb); 4720ac45f602SPatrick Ohly memset(serr, 0, sizeof(*serr)); 4721ac45f602SPatrick Ohly serr->ee.ee_errno = ENOMSG; 4722ac45f602SPatrick Ohly serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 4723e7fd2885SWillem de Bruijn serr->ee.ee_info = tstype; 47244ef1b286SSoheil Hassas Yeganeh serr->opt_stats = opt_stats; 47251862d620SWillem de Bruijn serr->header.h4.iif = skb->dev ?
skb->dev->ifindex : 0; 47264ed2d765SWillem de Bruijn if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { 472709c2d251SWillem de Bruijn serr->ee.ee_data = skb_shinfo(skb)->tskey; 4728ac5cc977SWANG Cong if (sk->sk_protocol == IPPROTO_TCP && 4729ac5cc977SWANG Cong sk->sk_type == SOCK_STREAM) 47304ed2d765SWillem de Bruijn serr->ee.ee_data -= sk->sk_tskey; 47314ed2d765SWillem de Bruijn } 473229030374SEric Dumazet 4733ac45f602SPatrick Ohly err = sock_queue_err_skb(sk, skb); 473429030374SEric Dumazet 4735ac45f602SPatrick Ohly if (err) 4736ac45f602SPatrick Ohly kfree_skb(skb); 4737ac45f602SPatrick Ohly } 473837846ef0SAlexander Duyck 4739b245be1fSWillem de Bruijn static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly) 4740b245be1fSWillem de Bruijn { 4741b245be1fSWillem de Bruijn bool ret; 4742b245be1fSWillem de Bruijn 4743b245be1fSWillem de Bruijn if (likely(sysctl_tstamp_allow_data || tsonly)) 4744b245be1fSWillem de Bruijn return true; 4745b245be1fSWillem de Bruijn 4746b245be1fSWillem de Bruijn read_lock_bh(&sk->sk_callback_lock); 4747b245be1fSWillem de Bruijn ret = sk->sk_socket && sk->sk_socket->file && 4748b245be1fSWillem de Bruijn file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW); 4749b245be1fSWillem de Bruijn read_unlock_bh(&sk->sk_callback_lock); 4750b245be1fSWillem de Bruijn return ret; 4751b245be1fSWillem de Bruijn } 4752b245be1fSWillem de Bruijn 475337846ef0SAlexander Duyck void skb_complete_tx_timestamp(struct sk_buff *skb, 475437846ef0SAlexander Duyck struct skb_shared_hwtstamps *hwtstamps) 475537846ef0SAlexander Duyck { 475637846ef0SAlexander Duyck struct sock *sk = skb->sk; 475737846ef0SAlexander Duyck 4758b245be1fSWillem de Bruijn if (!skb_may_tx_timestamp(sk, false)) 475935b99dffSWillem de Bruijn goto err; 4760b245be1fSWillem de Bruijn 47619ac25fc0SEric Dumazet /* Take a reference to prevent skb_orphan() from freeing the socket, 47629ac25fc0SEric Dumazet * but only if the socket refcount is not zero. 
47639ac25fc0SEric Dumazet */ 476441c6d650SReshetova, Elena if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 476537846ef0SAlexander Duyck *skb_hwtstamps(skb) = *hwtstamps; 47664ef1b286SSoheil Hassas Yeganeh __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); 476737846ef0SAlexander Duyck sock_put(sk); 476835b99dffSWillem de Bruijn return; 476937846ef0SAlexander Duyck } 477035b99dffSWillem de Bruijn 477135b99dffSWillem de Bruijn err: 477235b99dffSWillem de Bruijn kfree_skb(skb); 47739ac25fc0SEric Dumazet } 477437846ef0SAlexander Duyck EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); 477537846ef0SAlexander Duyck 477637846ef0SAlexander Duyck void __skb_tstamp_tx(struct sk_buff *orig_skb, 4777e7ed11eeSYousuk Seung const struct sk_buff *ack_skb, 477837846ef0SAlexander Duyck struct skb_shared_hwtstamps *hwtstamps, 477937846ef0SAlexander Duyck struct sock *sk, int tstype) 478037846ef0SAlexander Duyck { 478137846ef0SAlexander Duyck struct sk_buff *skb; 47824ef1b286SSoheil Hassas Yeganeh bool tsonly, opt_stats = false; 478337846ef0SAlexander Duyck 47843a8dd971SWillem de Bruijn if (!sk) 47853a8dd971SWillem de Bruijn return; 47863a8dd971SWillem de Bruijn 4787b50a5c70SMiroslav Lichvar if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && 4788b50a5c70SMiroslav Lichvar skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) 4789b50a5c70SMiroslav Lichvar return; 4790b50a5c70SMiroslav Lichvar 47913a8dd971SWillem de Bruijn tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY; 47923a8dd971SWillem de Bruijn if (!skb_may_tx_timestamp(sk, tsonly)) 479337846ef0SAlexander Duyck return; 479437846ef0SAlexander Duyck 47951c885808SFrancis Yan if (tsonly) { 47961c885808SFrancis Yan #ifdef CONFIG_INET 47971c885808SFrancis Yan if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && 47981c885808SFrancis Yan sk->sk_protocol == IPPROTO_TCP && 47994ef1b286SSoheil Hassas Yeganeh sk->sk_type == SOCK_STREAM) { 4800e7ed11eeSYousuk Seung skb = tcp_get_timestamping_opt_stats(sk, orig_skb, 4801e7ed11eeSYousuk Seung ack_skb); 48024ef1b286SSoheil Hassas Yeganeh opt_stats = true; 48034ef1b286SSoheil Hassas Yeganeh } else 48041c885808SFrancis Yan #endif 48051c885808SFrancis Yan skb = alloc_skb(0, GFP_ATOMIC); 48061c885808SFrancis Yan } else { 480737846ef0SAlexander Duyck skb = skb_clone(orig_skb, GFP_ATOMIC); 48081c885808SFrancis Yan } 480937846ef0SAlexander Duyck if (!skb) 481037846ef0SAlexander Duyck return; 481137846ef0SAlexander Duyck 481249ca0d8bSWillem de Bruijn if (tsonly) { 4813fff88030SWillem de Bruijn skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & 4814fff88030SWillem de Bruijn SKBTX_ANY_TSTAMP; 481549ca0d8bSWillem de Bruijn skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; 481649ca0d8bSWillem de Bruijn } 481749ca0d8bSWillem de Bruijn 481849ca0d8bSWillem de Bruijn if (hwtstamps) 481949ca0d8bSWillem de Bruijn *skb_hwtstamps(skb) = *hwtstamps; 482049ca0d8bSWillem de Bruijn else 482149ca0d8bSWillem de Bruijn skb->tstamp = ktime_get_real(); 482249ca0d8bSWillem de Bruijn 48234ef1b286SSoheil Hassas Yeganeh __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); 482437846ef0SAlexander Duyck } 4825e7fd2885SWillem de Bruijn EXPORT_SYMBOL_GPL(__skb_tstamp_tx); 4826e7fd2885SWillem de Bruijn 4827e7fd2885SWillem de Bruijn void skb_tstamp_tx(struct sk_buff *orig_skb, 4828e7fd2885SWillem de Bruijn struct skb_shared_hwtstamps *hwtstamps) 4829e7fd2885SWillem de Bruijn { 4830e7ed11eeSYousuk Seung return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk, 4831e7fd2885SWillem de Bruijn SCM_TSTAMP_SND); 
4832e7fd2885SWillem de Bruijn } 4833ac45f602SPatrick Ohly EXPORT_SYMBOL_GPL(skb_tstamp_tx); 4834ac45f602SPatrick Ohly 48356e3e939fSJohannes Berg void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) 48366e3e939fSJohannes Berg { 48376e3e939fSJohannes Berg struct sock *sk = skb->sk; 48386e3e939fSJohannes Berg struct sock_exterr_skb *serr; 4839dd4f1072SEric Dumazet int err = 1; 48406e3e939fSJohannes Berg 48416e3e939fSJohannes Berg skb->wifi_acked_valid = 1; 48426e3e939fSJohannes Berg skb->wifi_acked = acked; 48436e3e939fSJohannes Berg 48446e3e939fSJohannes Berg serr = SKB_EXT_ERR(skb); 48456e3e939fSJohannes Berg memset(serr, 0, sizeof(*serr)); 48466e3e939fSJohannes Berg serr->ee.ee_errno = ENOMSG; 48476e3e939fSJohannes Berg serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; 48486e3e939fSJohannes Berg 4849dd4f1072SEric Dumazet /* Take a reference to prevent skb_orphan() from freeing the socket, 4850dd4f1072SEric Dumazet * but only if the socket refcount is not zero. 4851dd4f1072SEric Dumazet */ 485241c6d650SReshetova, Elena if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) { 48536e3e939fSJohannes Berg err = sock_queue_err_skb(sk, skb); 4854dd4f1072SEric Dumazet sock_put(sk); 4855dd4f1072SEric Dumazet } 48566e3e939fSJohannes Berg if (err) 48576e3e939fSJohannes Berg kfree_skb(skb); 48586e3e939fSJohannes Berg } 48596e3e939fSJohannes Berg EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); 48606e3e939fSJohannes Berg 4861f35d9d8aSRusty Russell /** 4862f35d9d8aSRusty Russell * skb_partial_csum_set - set up and verify partial csum values for packet 4863f35d9d8aSRusty Russell * @skb: the skb to set 4864f35d9d8aSRusty Russell * @start: the number of bytes after skb->data to start checksumming. 4865f35d9d8aSRusty Russell * @off: the offset from start to place the checksum. 4866f35d9d8aSRusty Russell * 4867f35d9d8aSRusty Russell * For untrusted partially-checksummed packets, we need to make sure the values 4868f35d9d8aSRusty Russell * for skb->csum_start and skb->csum_offset are valid so we don't oops. 4869f35d9d8aSRusty Russell * 4870f35d9d8aSRusty Russell * This function checks and sets those values and skb->ip_summed: if this 4871f35d9d8aSRusty Russell * returns false you should drop the packet. 4872f35d9d8aSRusty Russell */ 4873f35d9d8aSRusty Russell bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) 4874f35d9d8aSRusty Russell { 487552b5d6f5SEric Dumazet u32 csum_end = (u32)start + (u32)off + sizeof(__sum16); 487652b5d6f5SEric Dumazet u32 csum_start = skb_headroom(skb) + (u32)start; 487752b5d6f5SEric Dumazet 487852b5d6f5SEric Dumazet if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) { 487952b5d6f5SEric Dumazet net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n", 488052b5d6f5SEric Dumazet start, off, skb_headroom(skb), skb_headlen(skb)); 4881f35d9d8aSRusty Russell return false; 4882f35d9d8aSRusty Russell } 4883f35d9d8aSRusty Russell skb->ip_summed = CHECKSUM_PARTIAL; 488452b5d6f5SEric Dumazet skb->csum_start = csum_start; 4885f35d9d8aSRusty Russell skb->csum_offset = off; 4886e5d5decaSJason Wang skb_set_transport_header(skb, start); 4887f35d9d8aSRusty Russell return true; 4888f35d9d8aSRusty Russell } 4889b4ac530fSDavid S. 
Miller EXPORT_SYMBOL_GPL(skb_partial_csum_set); 4890f35d9d8aSRusty Russell 4891ed1f50c3SPaul Durrant static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, 4892ed1f50c3SPaul Durrant unsigned int max) 4893ed1f50c3SPaul Durrant { 4894ed1f50c3SPaul Durrant if (skb_headlen(skb) >= len) 4895ed1f50c3SPaul Durrant return 0; 4896ed1f50c3SPaul Durrant 4897ed1f50c3SPaul Durrant /* If we need to pullup then pullup to the max, so we 4898ed1f50c3SPaul Durrant * won't need to do it again. 4899ed1f50c3SPaul Durrant */ 4900ed1f50c3SPaul Durrant if (max > skb->len) 4901ed1f50c3SPaul Durrant max = skb->len; 4902ed1f50c3SPaul Durrant 4903ed1f50c3SPaul Durrant if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) 4904ed1f50c3SPaul Durrant return -ENOMEM; 4905ed1f50c3SPaul Durrant 4906ed1f50c3SPaul Durrant if (skb_headlen(skb) < len) 4907ed1f50c3SPaul Durrant return -EPROTO; 4908ed1f50c3SPaul Durrant 4909ed1f50c3SPaul Durrant return 0; 4910ed1f50c3SPaul Durrant } 4911ed1f50c3SPaul Durrant 4912f9708b43SJan Beulich #define MAX_TCP_HDR_LEN (15 * 4) 4913f9708b43SJan Beulich 4914f9708b43SJan Beulich static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, 4915f9708b43SJan Beulich typeof(IPPROTO_IP) proto, 4916f9708b43SJan Beulich unsigned int off) 4917f9708b43SJan Beulich { 4918f9708b43SJan Beulich int err; 4919f9708b43SJan Beulich 4920161d1792SKees Cook switch (proto) { 4921f9708b43SJan Beulich case IPPROTO_TCP: 4922f9708b43SJan Beulich err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), 4923f9708b43SJan Beulich off + MAX_TCP_HDR_LEN); 4924f9708b43SJan Beulich if (!err && !skb_partial_csum_set(skb, off, 4925f9708b43SJan Beulich offsetof(struct tcphdr, 4926f9708b43SJan Beulich check))) 4927f9708b43SJan Beulich err = -EPROTO; 4928f9708b43SJan Beulich return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; 4929f9708b43SJan Beulich 4930f9708b43SJan Beulich case IPPROTO_UDP: 4931f9708b43SJan Beulich err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), 4932f9708b43SJan Beulich off + sizeof(struct udphdr)); 4933f9708b43SJan Beulich if (!err && !skb_partial_csum_set(skb, off, 4934f9708b43SJan Beulich offsetof(struct udphdr, 4935f9708b43SJan Beulich check))) 4936f9708b43SJan Beulich err = -EPROTO; 4937f9708b43SJan Beulich return err ? ERR_PTR(err) : &udp_hdr(skb)->check; 4938f9708b43SJan Beulich } 4939f9708b43SJan Beulich 4940f9708b43SJan Beulich return ERR_PTR(-EPROTO); 4941f9708b43SJan Beulich } 4942f9708b43SJan Beulich 4943ed1f50c3SPaul Durrant /* This value should be large enough to cover a tagged ethernet header plus 4944ed1f50c3SPaul Durrant * maximally sized IP and TCP or UDP headers. 
4945ed1f50c3SPaul Durrant */ 4946ed1f50c3SPaul Durrant #define MAX_IP_HDR_LEN 128 4947ed1f50c3SPaul Durrant 4948f9708b43SJan Beulich static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) 4949ed1f50c3SPaul Durrant { 4950ed1f50c3SPaul Durrant unsigned int off; 4951ed1f50c3SPaul Durrant bool fragment; 4952f9708b43SJan Beulich __sum16 *csum; 4953ed1f50c3SPaul Durrant int err; 4954ed1f50c3SPaul Durrant 4955ed1f50c3SPaul Durrant fragment = false; 4956ed1f50c3SPaul Durrant 4957ed1f50c3SPaul Durrant err = skb_maybe_pull_tail(skb, 4958ed1f50c3SPaul Durrant sizeof(struct iphdr), 4959ed1f50c3SPaul Durrant MAX_IP_HDR_LEN); 4960ed1f50c3SPaul Durrant if (err < 0) 4961ed1f50c3SPaul Durrant goto out; 4962ed1f50c3SPaul Durrant 496311f920d2SMiaohe Lin if (ip_is_fragment(ip_hdr(skb))) 4964ed1f50c3SPaul Durrant fragment = true; 4965ed1f50c3SPaul Durrant 4966ed1f50c3SPaul Durrant off = ip_hdrlen(skb); 4967ed1f50c3SPaul Durrant 4968ed1f50c3SPaul Durrant err = -EPROTO; 4969ed1f50c3SPaul Durrant 4970ed1f50c3SPaul Durrant if (fragment) 4971ed1f50c3SPaul Durrant goto out; 4972ed1f50c3SPaul Durrant 4973f9708b43SJan Beulich csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); 4974f9708b43SJan Beulich if (IS_ERR(csum)) 4975f9708b43SJan Beulich return PTR_ERR(csum); 4976ed1f50c3SPaul Durrant 4977ed1f50c3SPaul Durrant if (recalculate) 4978f9708b43SJan Beulich *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 4979ed1f50c3SPaul Durrant ip_hdr(skb)->daddr, 4980ed1f50c3SPaul Durrant skb->len - off, 4981f9708b43SJan Beulich ip_hdr(skb)->protocol, 0); 4982ed1f50c3SPaul Durrant err = 0; 4983ed1f50c3SPaul Durrant 4984ed1f50c3SPaul Durrant out: 4985ed1f50c3SPaul Durrant return err; 4986ed1f50c3SPaul Durrant } 4987ed1f50c3SPaul Durrant 4988ed1f50c3SPaul Durrant /* This value should be large enough to cover a tagged ethernet header plus 4989ed1f50c3SPaul Durrant * an IPv6 header, all options, and a maximal TCP or UDP header. 
4990ed1f50c3SPaul Durrant */ 4991ed1f50c3SPaul Durrant #define MAX_IPV6_HDR_LEN 256 4992ed1f50c3SPaul Durrant 4993ed1f50c3SPaul Durrant #define OPT_HDR(type, skb, off) \ 4994ed1f50c3SPaul Durrant (type *)(skb_network_header(skb) + (off)) 4995ed1f50c3SPaul Durrant 4996ed1f50c3SPaul Durrant static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) 4997ed1f50c3SPaul Durrant { 4998ed1f50c3SPaul Durrant int err; 4999ed1f50c3SPaul Durrant u8 nexthdr; 5000ed1f50c3SPaul Durrant unsigned int off; 5001ed1f50c3SPaul Durrant unsigned int len; 5002ed1f50c3SPaul Durrant bool fragment; 5003ed1f50c3SPaul Durrant bool done; 5004f9708b43SJan Beulich __sum16 *csum; 5005ed1f50c3SPaul Durrant 5006ed1f50c3SPaul Durrant fragment = false; 5007ed1f50c3SPaul Durrant done = false; 5008ed1f50c3SPaul Durrant 5009ed1f50c3SPaul Durrant off = sizeof(struct ipv6hdr); 5010ed1f50c3SPaul Durrant 5011ed1f50c3SPaul Durrant err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); 5012ed1f50c3SPaul Durrant if (err < 0) 5013ed1f50c3SPaul Durrant goto out; 5014ed1f50c3SPaul Durrant 5015ed1f50c3SPaul Durrant nexthdr = ipv6_hdr(skb)->nexthdr; 5016ed1f50c3SPaul Durrant 5017ed1f50c3SPaul Durrant len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); 5018ed1f50c3SPaul Durrant while (off <= len && !done) { 5019ed1f50c3SPaul Durrant switch (nexthdr) { 5020ed1f50c3SPaul Durrant case IPPROTO_DSTOPTS: 5021ed1f50c3SPaul Durrant case IPPROTO_HOPOPTS: 5022ed1f50c3SPaul Durrant case IPPROTO_ROUTING: { 5023ed1f50c3SPaul Durrant struct ipv6_opt_hdr *hp; 5024ed1f50c3SPaul Durrant 5025ed1f50c3SPaul Durrant err = skb_maybe_pull_tail(skb, 5026ed1f50c3SPaul Durrant off + 5027ed1f50c3SPaul Durrant sizeof(struct ipv6_opt_hdr), 5028ed1f50c3SPaul Durrant MAX_IPV6_HDR_LEN); 5029ed1f50c3SPaul Durrant if (err < 0) 5030ed1f50c3SPaul Durrant goto out; 5031ed1f50c3SPaul Durrant 5032ed1f50c3SPaul Durrant hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); 5033ed1f50c3SPaul Durrant nexthdr = hp->nexthdr; 5034ed1f50c3SPaul Durrant off += ipv6_optlen(hp); 5035ed1f50c3SPaul Durrant break; 5036ed1f50c3SPaul Durrant } 5037ed1f50c3SPaul Durrant case IPPROTO_AH: { 5038ed1f50c3SPaul Durrant struct ip_auth_hdr *hp; 5039ed1f50c3SPaul Durrant 5040ed1f50c3SPaul Durrant err = skb_maybe_pull_tail(skb, 5041ed1f50c3SPaul Durrant off + 5042ed1f50c3SPaul Durrant sizeof(struct ip_auth_hdr), 5043ed1f50c3SPaul Durrant MAX_IPV6_HDR_LEN); 5044ed1f50c3SPaul Durrant if (err < 0) 5045ed1f50c3SPaul Durrant goto out; 5046ed1f50c3SPaul Durrant 5047ed1f50c3SPaul Durrant hp = OPT_HDR(struct ip_auth_hdr, skb, off); 5048ed1f50c3SPaul Durrant nexthdr = hp->nexthdr; 5049ed1f50c3SPaul Durrant off += ipv6_authlen(hp); 5050ed1f50c3SPaul Durrant break; 5051ed1f50c3SPaul Durrant } 5052ed1f50c3SPaul Durrant case IPPROTO_FRAGMENT: { 5053ed1f50c3SPaul Durrant struct frag_hdr *hp; 5054ed1f50c3SPaul Durrant 5055ed1f50c3SPaul Durrant err = skb_maybe_pull_tail(skb, 5056ed1f50c3SPaul Durrant off + 5057ed1f50c3SPaul Durrant sizeof(struct frag_hdr), 5058ed1f50c3SPaul Durrant MAX_IPV6_HDR_LEN); 5059ed1f50c3SPaul Durrant if (err < 0) 5060ed1f50c3SPaul Durrant goto out; 5061ed1f50c3SPaul Durrant 5062ed1f50c3SPaul Durrant hp = OPT_HDR(struct frag_hdr, skb, off); 5063ed1f50c3SPaul Durrant 5064ed1f50c3SPaul Durrant if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) 5065ed1f50c3SPaul Durrant fragment = true; 5066ed1f50c3SPaul Durrant 5067ed1f50c3SPaul Durrant nexthdr = hp->nexthdr; 5068ed1f50c3SPaul Durrant off += sizeof(struct frag_hdr); 5069ed1f50c3SPaul Durrant break; 5070ed1f50c3SPaul Durrant } 
5071ed1f50c3SPaul Durrant default: 5072ed1f50c3SPaul Durrant done = true; 5073ed1f50c3SPaul Durrant break; 5074ed1f50c3SPaul Durrant } 5075ed1f50c3SPaul Durrant } 5076ed1f50c3SPaul Durrant 5077ed1f50c3SPaul Durrant err = -EPROTO; 5078ed1f50c3SPaul Durrant 5079ed1f50c3SPaul Durrant if (!done || fragment) 5080ed1f50c3SPaul Durrant goto out; 5081ed1f50c3SPaul Durrant 5082f9708b43SJan Beulich csum = skb_checksum_setup_ip(skb, nexthdr, off); 5083f9708b43SJan Beulich if (IS_ERR(csum)) 5084f9708b43SJan Beulich return PTR_ERR(csum); 5085ed1f50c3SPaul Durrant 5086ed1f50c3SPaul Durrant if (recalculate) 5087f9708b43SJan Beulich *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5088ed1f50c3SPaul Durrant &ipv6_hdr(skb)->daddr, 5089f9708b43SJan Beulich skb->len - off, nexthdr, 0); 5090ed1f50c3SPaul Durrant err = 0; 5091ed1f50c3SPaul Durrant 5092ed1f50c3SPaul Durrant out: 5093ed1f50c3SPaul Durrant return err; 5094ed1f50c3SPaul Durrant } 5095ed1f50c3SPaul Durrant 5096ed1f50c3SPaul Durrant /** 5097ed1f50c3SPaul Durrant * skb_checksum_setup - set up partial checksum offset 5098ed1f50c3SPaul Durrant * @skb: the skb to set up 5099ed1f50c3SPaul Durrant * @recalculate: if true the pseudo-header checksum will be recalculated 5100ed1f50c3SPaul Durrant */ 5101ed1f50c3SPaul Durrant int skb_checksum_setup(struct sk_buff *skb, bool recalculate) 5102ed1f50c3SPaul Durrant { 5103ed1f50c3SPaul Durrant int err; 5104ed1f50c3SPaul Durrant 5105ed1f50c3SPaul Durrant switch (skb->protocol) { 5106ed1f50c3SPaul Durrant case htons(ETH_P_IP): 5107f9708b43SJan Beulich err = skb_checksum_setup_ipv4(skb, recalculate); 5108ed1f50c3SPaul Durrant break; 5109ed1f50c3SPaul Durrant 5110ed1f50c3SPaul Durrant case htons(ETH_P_IPV6): 5111ed1f50c3SPaul Durrant err = skb_checksum_setup_ipv6(skb, recalculate); 5112ed1f50c3SPaul Durrant break; 5113ed1f50c3SPaul Durrant 5114ed1f50c3SPaul Durrant default: 5115ed1f50c3SPaul Durrant err = -EPROTO; 5116ed1f50c3SPaul Durrant break; 5117ed1f50c3SPaul Durrant } 5118ed1f50c3SPaul Durrant 5119ed1f50c3SPaul Durrant return err; 5120ed1f50c3SPaul Durrant } 5121ed1f50c3SPaul Durrant EXPORT_SYMBOL(skb_checksum_setup); 5122ed1f50c3SPaul Durrant 51239afd85c9SLinus Lüssing /** 51249afd85c9SLinus Lüssing * skb_checksum_maybe_trim - maybe trims the given skb 51259afd85c9SLinus Lüssing * @skb: the skb to check 51269afd85c9SLinus Lüssing * @transport_len: the data length beyond the network header 51279afd85c9SLinus Lüssing * 51289afd85c9SLinus Lüssing * Checks whether the given skb has data beyond the given transport length. 51299afd85c9SLinus Lüssing * If so, returns a cloned skb trimmed to this transport length. 51309afd85c9SLinus Lüssing * Otherwise returns the provided skb. Returns NULL in error cases 51319afd85c9SLinus Lüssing * (e.g. transport_len exceeds skb length or out-of-memory). 51329afd85c9SLinus Lüssing * 5133a516993fSLinus Lüssing * Caller needs to set the skb transport header and free any returned skb if it 5134a516993fSLinus Lüssing * differs from the provided skb. 
51359afd85c9SLinus Lüssing */ 51369afd85c9SLinus Lüssing static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, 51379afd85c9SLinus Lüssing unsigned int transport_len) 51389afd85c9SLinus Lüssing { 51399afd85c9SLinus Lüssing struct sk_buff *skb_chk; 51409afd85c9SLinus Lüssing unsigned int len = skb_transport_offset(skb) + transport_len; 51419afd85c9SLinus Lüssing int ret; 51429afd85c9SLinus Lüssing 5143a516993fSLinus Lüssing if (skb->len < len) 51449afd85c9SLinus Lüssing return NULL; 5145a516993fSLinus Lüssing else if (skb->len == len) 51469afd85c9SLinus Lüssing return skb; 51479afd85c9SLinus Lüssing 51489afd85c9SLinus Lüssing skb_chk = skb_clone(skb, GFP_ATOMIC); 51499afd85c9SLinus Lüssing if (!skb_chk) 51509afd85c9SLinus Lüssing return NULL; 51519afd85c9SLinus Lüssing 51529afd85c9SLinus Lüssing ret = pskb_trim_rcsum(skb_chk, len); 51539afd85c9SLinus Lüssing if (ret) { 51549afd85c9SLinus Lüssing kfree_skb(skb_chk); 51559afd85c9SLinus Lüssing return NULL; 51569afd85c9SLinus Lüssing } 51579afd85c9SLinus Lüssing 51589afd85c9SLinus Lüssing return skb_chk; 51599afd85c9SLinus Lüssing } 51609afd85c9SLinus Lüssing 51619afd85c9SLinus Lüssing /** 51629afd85c9SLinus Lüssing * skb_checksum_trimmed - validate checksum of an skb 51639afd85c9SLinus Lüssing * @skb: the skb to check 51649afd85c9SLinus Lüssing * @transport_len: the data length beyond the network header 51659afd85c9SLinus Lüssing * @skb_chkf: checksum function to use 51669afd85c9SLinus Lüssing * 51679afd85c9SLinus Lüssing * Applies the given checksum function skb_chkf to the provided skb. 51689afd85c9SLinus Lüssing * Returns a checked and maybe trimmed skb. Returns NULL on error. 51699afd85c9SLinus Lüssing * 51709afd85c9SLinus Lüssing * If the skb has data beyond the given transport length, then a 51719afd85c9SLinus Lüssing * trimmed & cloned skb is checked and returned. 51729afd85c9SLinus Lüssing * 5173a516993fSLinus Lüssing * Caller needs to set the skb transport header and free any returned skb if it 5174a516993fSLinus Lüssing * differs from the provided skb. 
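 *
 * Illustrative sketch (my_csum_check is a hypothetical callback that returns
 * 0 when the checksum is valid):
 *
 *	skb_set_transport_header(skb, offset);
 *	skb_chk = skb_checksum_trimmed(skb, transport_len, my_csum_check);
 *	if (!skb_chk)
 *		return -EINVAL;
 *	...
 *	if (skb_chk != skb)
 *		kfree_skb(skb_chk);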
51759afd85c9SLinus Lüssing */ 51769afd85c9SLinus Lüssing struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, 51779afd85c9SLinus Lüssing unsigned int transport_len, 51789afd85c9SLinus Lüssing __sum16(*skb_chkf)(struct sk_buff *skb)) 51799afd85c9SLinus Lüssing { 51809afd85c9SLinus Lüssing struct sk_buff *skb_chk; 51819afd85c9SLinus Lüssing unsigned int offset = skb_transport_offset(skb); 5182fcba67c9SLinus Lüssing __sum16 ret; 51839afd85c9SLinus Lüssing 51849afd85c9SLinus Lüssing skb_chk = skb_checksum_maybe_trim(skb, transport_len); 51859afd85c9SLinus Lüssing if (!skb_chk) 5186a516993fSLinus Lüssing goto err; 51879afd85c9SLinus Lüssing 5188a516993fSLinus Lüssing if (!pskb_may_pull(skb_chk, offset)) 5189a516993fSLinus Lüssing goto err; 51909afd85c9SLinus Lüssing 51919b368814SLinus Lüssing skb_pull_rcsum(skb_chk, offset); 51929afd85c9SLinus Lüssing ret = skb_chkf(skb_chk); 51939b368814SLinus Lüssing skb_push_rcsum(skb_chk, offset); 51949afd85c9SLinus Lüssing 5195a516993fSLinus Lüssing if (ret) 5196a516993fSLinus Lüssing goto err; 51979afd85c9SLinus Lüssing 51989afd85c9SLinus Lüssing return skb_chk; 5199a516993fSLinus Lüssing 5200a516993fSLinus Lüssing err: 5201a516993fSLinus Lüssing if (skb_chk && skb_chk != skb) 5202a516993fSLinus Lüssing kfree_skb(skb_chk); 5203a516993fSLinus Lüssing 5204a516993fSLinus Lüssing return NULL; 5205a516993fSLinus Lüssing 52069afd85c9SLinus Lüssing } 52079afd85c9SLinus Lüssing EXPORT_SYMBOL(skb_checksum_trimmed); 52089afd85c9SLinus Lüssing 52094497b076SBen Hutchings void __skb_warn_lro_forwarding(const struct sk_buff *skb) 52104497b076SBen Hutchings { 5211e87cc472SJoe Perches net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", 5212e87cc472SJoe Perches skb->dev->name); 52134497b076SBen Hutchings } 52144497b076SBen Hutchings EXPORT_SYMBOL(__skb_warn_lro_forwarding); 5215bad43ca8SEric Dumazet 5216bad43ca8SEric Dumazet void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 5217bad43ca8SEric Dumazet { 52183d861f66SEric Dumazet if (head_stolen) { 52193d861f66SEric Dumazet skb_release_head_state(skb); 5220bad43ca8SEric Dumazet kmem_cache_free(skbuff_head_cache, skb); 52213d861f66SEric Dumazet } else { 5222bad43ca8SEric Dumazet __kfree_skb(skb); 5223bad43ca8SEric Dumazet } 52243d861f66SEric Dumazet } 5225bad43ca8SEric Dumazet EXPORT_SYMBOL(kfree_skb_partial); 5226bad43ca8SEric Dumazet 5227bad43ca8SEric Dumazet /** 5228bad43ca8SEric Dumazet * skb_try_coalesce - try to merge skb to prior one 5229bad43ca8SEric Dumazet * @to: prior buffer 5230bad43ca8SEric Dumazet * @from: buffer to add 5231bad43ca8SEric Dumazet * @fragstolen: pointer to boolean 5232c6c4b97cSRandy Dunlap * @delta_truesize: how much more was allocated than was requested 5233bad43ca8SEric Dumazet */ 5234bad43ca8SEric Dumazet bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 5235bad43ca8SEric Dumazet bool *fragstolen, int *delta_truesize) 5236bad43ca8SEric Dumazet { 5237c818fa9eSEric Dumazet struct skb_shared_info *to_shinfo, *from_shinfo; 5238bad43ca8SEric Dumazet int i, delta, len = from->len; 5239bad43ca8SEric Dumazet 5240bad43ca8SEric Dumazet *fragstolen = false; 5241bad43ca8SEric Dumazet 5242bad43ca8SEric Dumazet if (skb_cloned(to)) 5243bad43ca8SEric Dumazet return false; 5244bad43ca8SEric Dumazet 5245bad43ca8SEric Dumazet if (len <= skb_tailroom(to)) { 5246e93a0435SEric Dumazet if (len) 5247bad43ca8SEric Dumazet BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 5248bad43ca8SEric Dumazet *delta_truesize = 0; 5249bad43ca8SEric Dumazet 
return true; 5250bad43ca8SEric Dumazet } 5251bad43ca8SEric Dumazet 5252c818fa9eSEric Dumazet to_shinfo = skb_shinfo(to); 5253c818fa9eSEric Dumazet from_shinfo = skb_shinfo(from); 5254c818fa9eSEric Dumazet if (to_shinfo->frag_list || from_shinfo->frag_list) 5255bad43ca8SEric Dumazet return false; 52561f8b977aSWillem de Bruijn if (skb_zcopy(to) || skb_zcopy(from)) 52571f8b977aSWillem de Bruijn return false; 5258bad43ca8SEric Dumazet 5259bad43ca8SEric Dumazet if (skb_headlen(from) != 0) { 5260bad43ca8SEric Dumazet struct page *page; 5261bad43ca8SEric Dumazet unsigned int offset; 5262bad43ca8SEric Dumazet 5263c818fa9eSEric Dumazet if (to_shinfo->nr_frags + 5264c818fa9eSEric Dumazet from_shinfo->nr_frags >= MAX_SKB_FRAGS) 5265bad43ca8SEric Dumazet return false; 5266bad43ca8SEric Dumazet 5267bad43ca8SEric Dumazet if (skb_head_is_locked(from)) 5268bad43ca8SEric Dumazet return false; 5269bad43ca8SEric Dumazet 5270bad43ca8SEric Dumazet delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 5271bad43ca8SEric Dumazet 5272bad43ca8SEric Dumazet page = virt_to_head_page(from->head); 5273bad43ca8SEric Dumazet offset = from->data - (unsigned char *)page_address(page); 5274bad43ca8SEric Dumazet 5275c818fa9eSEric Dumazet skb_fill_page_desc(to, to_shinfo->nr_frags, 5276bad43ca8SEric Dumazet page, offset, skb_headlen(from)); 5277bad43ca8SEric Dumazet *fragstolen = true; 5278bad43ca8SEric Dumazet } else { 5279c818fa9eSEric Dumazet if (to_shinfo->nr_frags + 5280c818fa9eSEric Dumazet from_shinfo->nr_frags > MAX_SKB_FRAGS) 5281bad43ca8SEric Dumazet return false; 5282bad43ca8SEric Dumazet 5283f4b549a5SWeiping Pan delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); 5284bad43ca8SEric Dumazet } 5285bad43ca8SEric Dumazet 5286bad43ca8SEric Dumazet WARN_ON_ONCE(delta < len); 5287bad43ca8SEric Dumazet 5288c818fa9eSEric Dumazet memcpy(to_shinfo->frags + to_shinfo->nr_frags, 5289c818fa9eSEric Dumazet from_shinfo->frags, 5290c818fa9eSEric Dumazet from_shinfo->nr_frags * sizeof(skb_frag_t)); 5291c818fa9eSEric Dumazet to_shinfo->nr_frags += from_shinfo->nr_frags; 5292bad43ca8SEric Dumazet 5293bad43ca8SEric Dumazet if (!skb_cloned(from)) 5294c818fa9eSEric Dumazet from_shinfo->nr_frags = 0; 5295bad43ca8SEric Dumazet 52968ea853fdSLi RongQing /* if the skb is not cloned this does nothing 52978ea853fdSLi RongQing * since we set nr_frags to 0. 52988ea853fdSLi RongQing */ 5299c818fa9eSEric Dumazet for (i = 0; i < from_shinfo->nr_frags; i++) 5300c818fa9eSEric Dumazet __skb_frag_ref(&from_shinfo->frags[i]); 5301bad43ca8SEric Dumazet 5302bad43ca8SEric Dumazet to->truesize += delta; 5303bad43ca8SEric Dumazet to->len += len; 5304bad43ca8SEric Dumazet to->data_len += len; 5305bad43ca8SEric Dumazet 5306bad43ca8SEric Dumazet *delta_truesize = delta; 5307bad43ca8SEric Dumazet return true; 5308bad43ca8SEric Dumazet } 5309bad43ca8SEric Dumazet EXPORT_SYMBOL(skb_try_coalesce); 5310621e84d6SNicolas Dichtel 5311621e84d6SNicolas Dichtel /** 53128b27f277SNicolas Dichtel * skb_scrub_packet - scrub an skb 5313621e84d6SNicolas Dichtel * 5314621e84d6SNicolas Dichtel * @skb: buffer to clean 53158b27f277SNicolas Dichtel * @xnet: packet is crossing netns 5316621e84d6SNicolas Dichtel * 53178b27f277SNicolas Dichtel * skb_scrub_packet can be used after encapsulating or decapsulating a packet 53188b27f277SNicolas Dichtel * into/from a tunnel. Some information has to be cleared during these 53198b27f277SNicolas Dichtel * operations.
53208b27f277SNicolas Dichtel * skb_scrub_packet can also be used to clean a skb before injecting it in 53218b27f277SNicolas Dichtel * another namespace (@xnet == true). We have to clear all information in the 53228b27f277SNicolas Dichtel * skb that could impact namespace isolation. 5323621e84d6SNicolas Dichtel */ 53248b27f277SNicolas Dichtel void skb_scrub_packet(struct sk_buff *skb, bool xnet) 5325621e84d6SNicolas Dichtel { 5326621e84d6SNicolas Dichtel skb->pkt_type = PACKET_HOST; 5327621e84d6SNicolas Dichtel skb->skb_iif = 0; 532860ff7467SWANG Cong skb->ignore_df = 0; 5329621e84d6SNicolas Dichtel skb_dst_drop(skb); 5330174e2381SFlorian Westphal skb_ext_reset(skb); 5331895b5c9fSFlorian Westphal nf_reset_ct(skb); 5332621e84d6SNicolas Dichtel nf_reset_trace(skb); 5333213dd74aSHerbert Xu 53346f9a5069SPetr Machata #ifdef CONFIG_NET_SWITCHDEV 53356f9a5069SPetr Machata skb->offload_fwd_mark = 0; 5336875e8939SIdo Schimmel skb->offload_l3_fwd_mark = 0; 53376f9a5069SPetr Machata #endif 53386f9a5069SPetr Machata 5339213dd74aSHerbert Xu if (!xnet) 5340213dd74aSHerbert Xu return; 5341213dd74aSHerbert Xu 53422b5ec1a5SYe Yin ipvs_reset(skb); 5343213dd74aSHerbert Xu skb->mark = 0; 5344c47d8c2fSJesus Sanchez-Palencia skb->tstamp = 0; 5345621e84d6SNicolas Dichtel } 5346621e84d6SNicolas Dichtel EXPORT_SYMBOL_GPL(skb_scrub_packet); 5347de960aa9SFlorian Westphal 5348de960aa9SFlorian Westphal /** 5349de960aa9SFlorian Westphal * skb_gso_transport_seglen - Return length of individual segments of a gso packet 5350de960aa9SFlorian Westphal * 5351de960aa9SFlorian Westphal * @skb: GSO skb 5352de960aa9SFlorian Westphal * 5353de960aa9SFlorian Westphal * skb_gso_transport_seglen is used to determine the real size of the 5354de960aa9SFlorian Westphal * individual segments, including Layer4 headers (TCP/UDP). 5355de960aa9SFlorian Westphal * 5356de960aa9SFlorian Westphal * The MAC/L2 or network (IP, IPv6) headers are not accounted for. 5357de960aa9SFlorian Westphal */ 5358a4a77718SDaniel Axtens static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) 5359de960aa9SFlorian Westphal { 5360de960aa9SFlorian Westphal const struct skb_shared_info *shinfo = skb_shinfo(skb); 5361f993bc25SFlorian Westphal unsigned int thlen = 0; 5362f993bc25SFlorian Westphal 5363f993bc25SFlorian Westphal if (skb->encapsulation) { 5364f993bc25SFlorian Westphal thlen = skb_inner_transport_header(skb) - 5365f993bc25SFlorian Westphal skb_transport_header(skb); 5366de960aa9SFlorian Westphal 5367de960aa9SFlorian Westphal if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 5368f993bc25SFlorian Westphal thlen += inner_tcp_hdrlen(skb); 5369f993bc25SFlorian Westphal } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { 5370f993bc25SFlorian Westphal thlen = tcp_hdrlen(skb); 53711dd27cdeSDaniel Axtens } else if (unlikely(skb_is_gso_sctp(skb))) { 537290017accSMarcelo Ricardo Leitner thlen = sizeof(struct sctphdr); 5373ee80d1ebSWillem de Bruijn } else if (shinfo->gso_type & SKB_GSO_UDP_L4) { 5374ee80d1ebSWillem de Bruijn thlen = sizeof(struct udphdr); 5375f993bc25SFlorian Westphal } 53766d39d589SFlorian Westphal /* UFO sets gso_size to the size of the fragmentation 53776d39d589SFlorian Westphal * payload, i.e. the size of the L4 (UDP) header is already 53786d39d589SFlorian Westphal * accounted for. 
53796d39d589SFlorian Westphal */ 5380f993bc25SFlorian Westphal return thlen + shinfo->gso_size; 5381de960aa9SFlorian Westphal } 5382a4a77718SDaniel Axtens 5383a4a77718SDaniel Axtens /** 5384a4a77718SDaniel Axtens * skb_gso_network_seglen - Return length of individual segments of a gso packet 5385a4a77718SDaniel Axtens * 5386a4a77718SDaniel Axtens * @skb: GSO skb 5387a4a77718SDaniel Axtens * 5388a4a77718SDaniel Axtens * skb_gso_network_seglen is used to determine the real size of the 5389a4a77718SDaniel Axtens * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). 5390a4a77718SDaniel Axtens * 5391a4a77718SDaniel Axtens * The MAC/L2 header is not accounted for. 5392a4a77718SDaniel Axtens */ 5393a4a77718SDaniel Axtens static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) 5394a4a77718SDaniel Axtens { 5395a4a77718SDaniel Axtens unsigned int hdr_len = skb_transport_header(skb) - 5396a4a77718SDaniel Axtens skb_network_header(skb); 5397a4a77718SDaniel Axtens 5398a4a77718SDaniel Axtens return hdr_len + skb_gso_transport_seglen(skb); 5399a4a77718SDaniel Axtens } 5400a4a77718SDaniel Axtens 5401a4a77718SDaniel Axtens /** 5402a4a77718SDaniel Axtens * skb_gso_mac_seglen - Return length of individual segments of a gso packet 5403a4a77718SDaniel Axtens * 5404a4a77718SDaniel Axtens * @skb: GSO skb 5405a4a77718SDaniel Axtens * 5406a4a77718SDaniel Axtens * skb_gso_mac_seglen is used to determine the real size of the 5407a4a77718SDaniel Axtens * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 5408a4a77718SDaniel Axtens * headers (TCP/UDP). 5409a4a77718SDaniel Axtens */ 5410a4a77718SDaniel Axtens static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) 5411a4a77718SDaniel Axtens { 5412a4a77718SDaniel Axtens unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 5413a4a77718SDaniel Axtens 5414a4a77718SDaniel Axtens return hdr_len + skb_gso_transport_seglen(skb); 5415a4a77718SDaniel Axtens } 54160d5501c1SVlad Yasevich 5417ae7ef81eSMarcelo Ricardo Leitner /** 54182b16f048SDaniel Axtens * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS 54192b16f048SDaniel Axtens * 54202b16f048SDaniel Axtens * There are a couple of instances where we have a GSO skb, and we 54212b16f048SDaniel Axtens * want to determine what size it would be after it is segmented. 54222b16f048SDaniel Axtens * 54232b16f048SDaniel Axtens * We might want to check: 54242b16f048SDaniel Axtens * - L3+L4+payload size (e.g. IP forwarding) 54252b16f048SDaniel Axtens * - L2+L3+L4+payload size (e.g. sanity check before passing to driver) 54262b16f048SDaniel Axtens * 54272b16f048SDaniel Axtens * This is a helper to do that correctly considering GSO_BY_FRAGS. 54282b16f048SDaniel Axtens * 542949682bfaSMathieu Malaterre * @skb: GSO skb 543049682bfaSMathieu Malaterre * 54312b16f048SDaniel Axtens * @seg_len: The segmented length (from skb_gso_*_seglen). In the 54322b16f048SDaniel Axtens * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS]. 54332b16f048SDaniel Axtens * 54342b16f048SDaniel Axtens * @max_len: The maximum permissible length. 54352b16f048SDaniel Axtens * 54362b16f048SDaniel Axtens * Returns true if the segmented length <= max length. 
54372b16f048SDaniel Axtens */ 54382b16f048SDaniel Axtens static inline bool skb_gso_size_check(const struct sk_buff *skb, 54392b16f048SDaniel Axtens unsigned int seg_len, 54402b16f048SDaniel Axtens unsigned int max_len) { 54412b16f048SDaniel Axtens const struct skb_shared_info *shinfo = skb_shinfo(skb); 54422b16f048SDaniel Axtens const struct sk_buff *iter; 54432b16f048SDaniel Axtens 54442b16f048SDaniel Axtens if (shinfo->gso_size != GSO_BY_FRAGS) 54452b16f048SDaniel Axtens return seg_len <= max_len; 54462b16f048SDaniel Axtens 54472b16f048SDaniel Axtens /* Undo this so we can re-use header sizes */ 54482b16f048SDaniel Axtens seg_len -= GSO_BY_FRAGS; 54492b16f048SDaniel Axtens 54502b16f048SDaniel Axtens skb_walk_frags(skb, iter) { 54512b16f048SDaniel Axtens if (seg_len + skb_headlen(iter) > max_len) 54522b16f048SDaniel Axtens return false; 54532b16f048SDaniel Axtens } 54542b16f048SDaniel Axtens 54552b16f048SDaniel Axtens return true; 54562b16f048SDaniel Axtens } 54572b16f048SDaniel Axtens 54582b16f048SDaniel Axtens /** 5459779b7931SDaniel Axtens * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU? 5460ae7ef81eSMarcelo Ricardo Leitner * 5461ae7ef81eSMarcelo Ricardo Leitner * @skb: GSO skb 546276f21b99SDavid S. Miller * @mtu: MTU to validate against 5463ae7ef81eSMarcelo Ricardo Leitner * 5464779b7931SDaniel Axtens * skb_gso_validate_network_len validates if a given skb will fit a 5465779b7931SDaniel Axtens * wanted MTU once split. It considers L3 headers, L4 headers, and the 5466779b7931SDaniel Axtens * payload. 5467ae7ef81eSMarcelo Ricardo Leitner */ 5468779b7931SDaniel Axtens bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) 5469ae7ef81eSMarcelo Ricardo Leitner { 54702b16f048SDaniel Axtens return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); 5471ae7ef81eSMarcelo Ricardo Leitner } 5472779b7931SDaniel Axtens EXPORT_SYMBOL_GPL(skb_gso_validate_network_len); 5473ae7ef81eSMarcelo Ricardo Leitner 54742b16f048SDaniel Axtens /** 54752b16f048SDaniel Axtens * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? 54762b16f048SDaniel Axtens * 54772b16f048SDaniel Axtens * @skb: GSO skb 54782b16f048SDaniel Axtens * @len: length to validate against 54792b16f048SDaniel Axtens * 54802b16f048SDaniel Axtens * skb_gso_validate_mac_len validates if a given skb will fit a wanted 54812b16f048SDaniel Axtens * length once split, including L2, L3 and L4 headers and the payload. 
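 *
 * Illustrative sketch (dev_frame_max is a hypothetical per-device limit): a
 * sanity check before handing a GSO skb to hardware might look like:
 *
 *	if (skb_is_gso(skb) && !skb_gso_validate_mac_len(skb, dev_frame_max))
 *		goto drop;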
54822b16f048SDaniel Axtens */ 54832b16f048SDaniel Axtens bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len) 54842b16f048SDaniel Axtens { 54852b16f048SDaniel Axtens return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len); 54862b16f048SDaniel Axtens } 54872b16f048SDaniel Axtens EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len); 54882b16f048SDaniel Axtens 54890d5501c1SVlad Yasevich static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) 54900d5501c1SVlad Yasevich { 5491d85e8be2SYuya Kusakabe int mac_len, meta_len; 5492d85e8be2SYuya Kusakabe void *meta; 54934bbb3e0eSToshiaki Makita 54940d5501c1SVlad Yasevich if (skb_cow(skb, skb_headroom(skb)) < 0) { 54950d5501c1SVlad Yasevich kfree_skb(skb); 54960d5501c1SVlad Yasevich return NULL; 54970d5501c1SVlad Yasevich } 54980d5501c1SVlad Yasevich 54994bbb3e0eSToshiaki Makita mac_len = skb->data - skb_mac_header(skb); 5500ae474573SToshiaki Makita if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) { 55014bbb3e0eSToshiaki Makita memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), 55024bbb3e0eSToshiaki Makita mac_len - VLAN_HLEN - ETH_TLEN); 5503ae474573SToshiaki Makita } 5504d85e8be2SYuya Kusakabe 5505d85e8be2SYuya Kusakabe meta_len = skb_metadata_len(skb); 5506d85e8be2SYuya Kusakabe if (meta_len) { 5507d85e8be2SYuya Kusakabe meta = skb_metadata_end(skb) - meta_len; 5508d85e8be2SYuya Kusakabe memmove(meta + VLAN_HLEN, meta, meta_len); 5509d85e8be2SYuya Kusakabe } 5510d85e8be2SYuya Kusakabe 55110d5501c1SVlad Yasevich skb->mac_header += VLAN_HLEN; 55120d5501c1SVlad Yasevich return skb; 55130d5501c1SVlad Yasevich } 55140d5501c1SVlad Yasevich 55150d5501c1SVlad Yasevich struct sk_buff *skb_vlan_untag(struct sk_buff *skb) 55160d5501c1SVlad Yasevich { 55170d5501c1SVlad Yasevich struct vlan_hdr *vhdr; 55180d5501c1SVlad Yasevich u16 vlan_tci; 55190d5501c1SVlad Yasevich 5520df8a39deSJiri Pirko if (unlikely(skb_vlan_tag_present(skb))) { 55210d5501c1SVlad Yasevich /* vlan_tci is already set-up so leave this for another time */ 55220d5501c1SVlad Yasevich return skb; 55230d5501c1SVlad Yasevich } 55240d5501c1SVlad Yasevich 55250d5501c1SVlad Yasevich skb = skb_share_check(skb, GFP_ATOMIC); 55260d5501c1SVlad Yasevich if (unlikely(!skb)) 55270d5501c1SVlad Yasevich goto err_free; 552855eff0ebSMiaohe Lin /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). 
*/ 552955eff0ebSMiaohe Lin if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) 55300d5501c1SVlad Yasevich goto err_free; 55310d5501c1SVlad Yasevich 55320d5501c1SVlad Yasevich vhdr = (struct vlan_hdr *)skb->data; 55330d5501c1SVlad Yasevich vlan_tci = ntohs(vhdr->h_vlan_TCI); 55340d5501c1SVlad Yasevich __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); 55350d5501c1SVlad Yasevich 55360d5501c1SVlad Yasevich skb_pull_rcsum(skb, VLAN_HLEN); 55370d5501c1SVlad Yasevich vlan_set_encap_proto(skb, vhdr); 55380d5501c1SVlad Yasevich 55390d5501c1SVlad Yasevich skb = skb_reorder_vlan_header(skb); 55400d5501c1SVlad Yasevich if (unlikely(!skb)) 55410d5501c1SVlad Yasevich goto err_free; 55420d5501c1SVlad Yasevich 55430d5501c1SVlad Yasevich skb_reset_network_header(skb); 55448be33ecfSAlexander Lobakin if (!skb_transport_header_was_set(skb)) 55450d5501c1SVlad Yasevich skb_reset_transport_header(skb); 55460d5501c1SVlad Yasevich skb_reset_mac_len(skb); 55470d5501c1SVlad Yasevich 55480d5501c1SVlad Yasevich return skb; 55490d5501c1SVlad Yasevich 55500d5501c1SVlad Yasevich err_free: 55510d5501c1SVlad Yasevich kfree_skb(skb); 55520d5501c1SVlad Yasevich return NULL; 55530d5501c1SVlad Yasevich } 55540d5501c1SVlad Yasevich EXPORT_SYMBOL(skb_vlan_untag); 55552e4e4410SEric Dumazet 5556e2195121SJiri Pirko int skb_ensure_writable(struct sk_buff *skb, int write_len) 5557e2195121SJiri Pirko { 5558e2195121SJiri Pirko if (!pskb_may_pull(skb, write_len)) 5559e2195121SJiri Pirko return -ENOMEM; 5560e2195121SJiri Pirko 5561e2195121SJiri Pirko if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) 5562e2195121SJiri Pirko return 0; 5563e2195121SJiri Pirko 5564e2195121SJiri Pirko return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 5565e2195121SJiri Pirko } 5566e2195121SJiri Pirko EXPORT_SYMBOL(skb_ensure_writable); 5567e2195121SJiri Pirko 5568bfca4c52SShmulik Ladkani /* remove VLAN header from packet and update csum accordingly. 
5569bfca4c52SShmulik Ladkani * expects a non skb_vlan_tag_present skb with a vlan tag payload 5570bfca4c52SShmulik Ladkani */ 5571bfca4c52SShmulik Ladkani int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) 557293515d53SJiri Pirko { 557393515d53SJiri Pirko struct vlan_hdr *vhdr; 5574b6a79208SShmulik Ladkani int offset = skb->data - skb_mac_header(skb); 557593515d53SJiri Pirko int err; 557693515d53SJiri Pirko 5577b6a79208SShmulik Ladkani if (WARN_ONCE(offset, 5578b6a79208SShmulik Ladkani "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n", 5579b6a79208SShmulik Ladkani offset)) { 5580b6a79208SShmulik Ladkani return -EINVAL; 5581b6a79208SShmulik Ladkani } 5582b6a79208SShmulik Ladkani 558393515d53SJiri Pirko err = skb_ensure_writable(skb, VLAN_ETH_HLEN); 558493515d53SJiri Pirko if (unlikely(err)) 5585b6a79208SShmulik Ladkani return err; 558693515d53SJiri Pirko 558793515d53SJiri Pirko skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 558893515d53SJiri Pirko 558993515d53SJiri Pirko vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); 559093515d53SJiri Pirko *vlan_tci = ntohs(vhdr->h_vlan_TCI); 559193515d53SJiri Pirko 559293515d53SJiri Pirko memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); 559393515d53SJiri Pirko __skb_pull(skb, VLAN_HLEN); 559493515d53SJiri Pirko 559593515d53SJiri Pirko vlan_set_encap_proto(skb, vhdr); 559693515d53SJiri Pirko skb->mac_header += VLAN_HLEN; 559793515d53SJiri Pirko 559893515d53SJiri Pirko if (skb_network_offset(skb) < ETH_HLEN) 559993515d53SJiri Pirko skb_set_network_header(skb, ETH_HLEN); 560093515d53SJiri Pirko 560193515d53SJiri Pirko skb_reset_mac_len(skb); 560293515d53SJiri Pirko 560393515d53SJiri Pirko return err; 560493515d53SJiri Pirko } 5605bfca4c52SShmulik Ladkani EXPORT_SYMBOL(__skb_vlan_pop); 560693515d53SJiri Pirko 5607b6a79208SShmulik Ladkani /* Pop a vlan tag either from hwaccel or from payload. 5608b6a79208SShmulik Ladkani * Expects skb->data at mac header. 5609b6a79208SShmulik Ladkani */ 561093515d53SJiri Pirko int skb_vlan_pop(struct sk_buff *skb) 561193515d53SJiri Pirko { 561293515d53SJiri Pirko u16 vlan_tci; 561393515d53SJiri Pirko __be16 vlan_proto; 561493515d53SJiri Pirko int err; 561593515d53SJiri Pirko 5616df8a39deSJiri Pirko if (likely(skb_vlan_tag_present(skb))) { 5617b1817524SMichał Mirosław __vlan_hwaccel_clear_tag(skb); 561893515d53SJiri Pirko } else { 5619ecf4ee41SShmulik Ladkani if (unlikely(!eth_type_vlan(skb->protocol))) 562093515d53SJiri Pirko return 0; 562193515d53SJiri Pirko 562293515d53SJiri Pirko err = __skb_vlan_pop(skb, &vlan_tci); 562393515d53SJiri Pirko if (err) 562493515d53SJiri Pirko return err; 562593515d53SJiri Pirko } 562693515d53SJiri Pirko /* move next vlan tag to hw accel tag */ 5627ecf4ee41SShmulik Ladkani if (likely(!eth_type_vlan(skb->protocol))) 562893515d53SJiri Pirko return 0; 562993515d53SJiri Pirko 563093515d53SJiri Pirko vlan_proto = skb->protocol; 563193515d53SJiri Pirko err = __skb_vlan_pop(skb, &vlan_tci); 563293515d53SJiri Pirko if (unlikely(err)) 563393515d53SJiri Pirko return err; 563493515d53SJiri Pirko 563593515d53SJiri Pirko __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 563693515d53SJiri Pirko return 0; 563793515d53SJiri Pirko } 563893515d53SJiri Pirko EXPORT_SYMBOL(skb_vlan_pop); 563993515d53SJiri Pirko 5640b6a79208SShmulik Ladkani /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present). 5641b6a79208SShmulik Ladkani * Expects skb->data at mac header. 
5642b6a79208SShmulik Ladkani */ 564393515d53SJiri Pirko int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) 564493515d53SJiri Pirko { 5645df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) { 5646b6a79208SShmulik Ladkani int offset = skb->data - skb_mac_header(skb); 564793515d53SJiri Pirko int err; 564893515d53SJiri Pirko 5649b6a79208SShmulik Ladkani if (WARN_ONCE(offset, 5650b6a79208SShmulik Ladkani "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n", 5651b6a79208SShmulik Ladkani offset)) { 5652b6a79208SShmulik Ladkani return -EINVAL; 5653b6a79208SShmulik Ladkani } 5654b6a79208SShmulik Ladkani 565593515d53SJiri Pirko err = __vlan_insert_tag(skb, skb->vlan_proto, 5656df8a39deSJiri Pirko skb_vlan_tag_get(skb)); 5657b6a79208SShmulik Ladkani if (err) 565893515d53SJiri Pirko return err; 56599241e2dfSDaniel Borkmann 566093515d53SJiri Pirko skb->protocol = skb->vlan_proto; 566193515d53SJiri Pirko skb->mac_len += VLAN_HLEN; 566293515d53SJiri Pirko 56636b83d28aSDaniel Borkmann skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); 566493515d53SJiri Pirko } 566593515d53SJiri Pirko __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 566693515d53SJiri Pirko return 0; 566793515d53SJiri Pirko } 566893515d53SJiri Pirko EXPORT_SYMBOL(skb_vlan_push); 566993515d53SJiri Pirko 567019fbcb36SGuillaume Nault /** 567119fbcb36SGuillaume Nault * skb_eth_pop() - Drop the Ethernet header at the head of a packet 567219fbcb36SGuillaume Nault * 567319fbcb36SGuillaume Nault * @skb: Socket buffer to modify 567419fbcb36SGuillaume Nault * 567519fbcb36SGuillaume Nault * Drop the Ethernet header of @skb. 567619fbcb36SGuillaume Nault * 567719fbcb36SGuillaume Nault * Expects that skb->data points to the mac header and that no VLAN tags are 567819fbcb36SGuillaume Nault * present. 567919fbcb36SGuillaume Nault * 568019fbcb36SGuillaume Nault * Returns 0 on success, -errno otherwise. 568119fbcb36SGuillaume Nault */ 568219fbcb36SGuillaume Nault int skb_eth_pop(struct sk_buff *skb) 568319fbcb36SGuillaume Nault { 568419fbcb36SGuillaume Nault if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || 568519fbcb36SGuillaume Nault skb_network_offset(skb) < ETH_HLEN) 568619fbcb36SGuillaume Nault return -EPROTO; 568719fbcb36SGuillaume Nault 568819fbcb36SGuillaume Nault skb_pull_rcsum(skb, ETH_HLEN); 568919fbcb36SGuillaume Nault skb_reset_mac_header(skb); 569019fbcb36SGuillaume Nault skb_reset_mac_len(skb); 569119fbcb36SGuillaume Nault 569219fbcb36SGuillaume Nault return 0; 569319fbcb36SGuillaume Nault } 569419fbcb36SGuillaume Nault EXPORT_SYMBOL(skb_eth_pop); 569519fbcb36SGuillaume Nault 569619fbcb36SGuillaume Nault /** 569719fbcb36SGuillaume Nault * skb_eth_push() - Add a new Ethernet header at the head of a packet 569819fbcb36SGuillaume Nault * 569919fbcb36SGuillaume Nault * @skb: Socket buffer to modify 570019fbcb36SGuillaume Nault * @dst: Destination MAC address of the new header 570119fbcb36SGuillaume Nault * @src: Source MAC address of the new header 570219fbcb36SGuillaume Nault * 570319fbcb36SGuillaume Nault * Prepend @skb with a new Ethernet header. 570419fbcb36SGuillaume Nault * 570519fbcb36SGuillaume Nault * Expects that skb->data points to the mac header, which must be empty. 570619fbcb36SGuillaume Nault * 570719fbcb36SGuillaume Nault * Returns 0 on success, -errno otherwise. 
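 *
 * Illustrative sketch (not a caller in this file): paired with skb_eth_pop()
 * above, an L2 rewrite path might do
 *
 *	err = skb_eth_pop(skb);
 *	if (!err)
 *		err = skb_eth_push(skb, new_dst, new_src);
 *
 * where new_dst and new_src are hypothetical caller-provided MAC addresses.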
570819fbcb36SGuillaume Nault */ 570919fbcb36SGuillaume Nault int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, 571019fbcb36SGuillaume Nault const unsigned char *src) 571119fbcb36SGuillaume Nault { 571219fbcb36SGuillaume Nault struct ethhdr *eth; 571319fbcb36SGuillaume Nault int err; 571419fbcb36SGuillaume Nault 571519fbcb36SGuillaume Nault if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) 571619fbcb36SGuillaume Nault return -EPROTO; 571719fbcb36SGuillaume Nault 571819fbcb36SGuillaume Nault err = skb_cow_head(skb, sizeof(*eth)); 571919fbcb36SGuillaume Nault if (err < 0) 572019fbcb36SGuillaume Nault return err; 572119fbcb36SGuillaume Nault 572219fbcb36SGuillaume Nault skb_push(skb, sizeof(*eth)); 572319fbcb36SGuillaume Nault skb_reset_mac_header(skb); 572419fbcb36SGuillaume Nault skb_reset_mac_len(skb); 572519fbcb36SGuillaume Nault 572619fbcb36SGuillaume Nault eth = eth_hdr(skb); 572719fbcb36SGuillaume Nault ether_addr_copy(eth->h_dest, dst); 572819fbcb36SGuillaume Nault ether_addr_copy(eth->h_source, src); 572919fbcb36SGuillaume Nault eth->h_proto = skb->protocol; 573019fbcb36SGuillaume Nault 573119fbcb36SGuillaume Nault skb_postpush_rcsum(skb, eth, sizeof(*eth)); 573219fbcb36SGuillaume Nault 573319fbcb36SGuillaume Nault return 0; 573419fbcb36SGuillaume Nault } 573519fbcb36SGuillaume Nault EXPORT_SYMBOL(skb_eth_push); 573619fbcb36SGuillaume Nault 57378822e270SJohn Hurley /* Update the ethertype of hdr and the skb csum value if required. */ 57388822e270SJohn Hurley static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, 57398822e270SJohn Hurley __be16 ethertype) 57408822e270SJohn Hurley { 57418822e270SJohn Hurley if (skb->ip_summed == CHECKSUM_COMPLETE) { 57428822e270SJohn Hurley __be16 diff[] = { ~hdr->h_proto, ethertype }; 57438822e270SJohn Hurley 57448822e270SJohn Hurley skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 57458822e270SJohn Hurley } 57468822e270SJohn Hurley 57478822e270SJohn Hurley hdr->h_proto = ethertype; 57488822e270SJohn Hurley } 57498822e270SJohn Hurley 57508822e270SJohn Hurley /** 5751e7dbfed1SMartin Varghese * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of 5752e7dbfed1SMartin Varghese * the packet 57538822e270SJohn Hurley * 57548822e270SJohn Hurley * @skb: buffer 57558822e270SJohn Hurley * @mpls_lse: MPLS label stack entry to push 57568822e270SJohn Hurley * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848) 5757fa4e0f88SDavide Caratti * @mac_len: length of the MAC header 5758e7dbfed1SMartin Varghese * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is 5759e7dbfed1SMartin Varghese * ethernet 57608822e270SJohn Hurley * 57618822e270SJohn Hurley * Expects skb->data at mac header. 57628822e270SJohn Hurley * 57638822e270SJohn Hurley * Returns 0 on success, -errno otherwise. 57648822e270SJohn Hurley */ 5765fa4e0f88SDavide Caratti int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, 5766d04ac224SMartin Varghese int mac_len, bool ethernet) 57678822e270SJohn Hurley { 57688822e270SJohn Hurley struct mpls_shim_hdr *lse; 57698822e270SJohn Hurley int err; 57708822e270SJohn Hurley 57718822e270SJohn Hurley if (unlikely(!eth_p_mpls(mpls_proto))) 57728822e270SJohn Hurley return -EINVAL; 57738822e270SJohn Hurley 57748822e270SJohn Hurley /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. 
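 * An skb already marked for tunnel encapsulation is therefore rejected below.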
*/ 57758822e270SJohn Hurley if (skb->encapsulation) 57768822e270SJohn Hurley return -EINVAL; 57778822e270SJohn Hurley 57788822e270SJohn Hurley err = skb_cow_head(skb, MPLS_HLEN); 57798822e270SJohn Hurley if (unlikely(err)) 57808822e270SJohn Hurley return err; 57818822e270SJohn Hurley 57828822e270SJohn Hurley if (!skb->inner_protocol) { 5783e7dbfed1SMartin Varghese skb_set_inner_network_header(skb, skb_network_offset(skb)); 57848822e270SJohn Hurley skb_set_inner_protocol(skb, skb->protocol); 57858822e270SJohn Hurley } 57868822e270SJohn Hurley 57878822e270SJohn Hurley skb_push(skb, MPLS_HLEN); 57888822e270SJohn Hurley memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), 5789fa4e0f88SDavide Caratti mac_len); 57908822e270SJohn Hurley skb_reset_mac_header(skb); 5791fa4e0f88SDavide Caratti skb_set_network_header(skb, mac_len); 5792e7dbfed1SMartin Varghese skb_reset_mac_len(skb); 57938822e270SJohn Hurley 57948822e270SJohn Hurley lse = mpls_hdr(skb); 57958822e270SJohn Hurley lse->label_stack_entry = mpls_lse; 57968822e270SJohn Hurley skb_postpush_rcsum(skb, lse, MPLS_HLEN); 57978822e270SJohn Hurley 57984296adc3SGuillaume Nault if (ethernet && mac_len >= ETH_HLEN) 57998822e270SJohn Hurley skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); 58008822e270SJohn Hurley skb->protocol = mpls_proto; 58018822e270SJohn Hurley 58028822e270SJohn Hurley return 0; 58038822e270SJohn Hurley } 58048822e270SJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_push); 58058822e270SJohn Hurley 58062e4e4410SEric Dumazet /** 5807ed246ceeSJohn Hurley * skb_mpls_pop() - pop the outermost MPLS header 5808ed246ceeSJohn Hurley * 5809ed246ceeSJohn Hurley * @skb: buffer 5810ed246ceeSJohn Hurley * @next_proto: ethertype of header after popped MPLS header 5811fa4e0f88SDavide Caratti * @mac_len: length of the MAC header 581276f99f98SMartin Varghese * @ethernet: flag to indicate if the packet is ethernet 5813ed246ceeSJohn Hurley * 5814ed246ceeSJohn Hurley * Expects skb->data at mac header. 5815ed246ceeSJohn Hurley * 5816ed246ceeSJohn Hurley * Returns 0 on success, -errno otherwise. 5817ed246ceeSJohn Hurley */ 5818040b5cfbSMartin Varghese int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, 5819040b5cfbSMartin Varghese bool ethernet) 5820ed246ceeSJohn Hurley { 5821ed246ceeSJohn Hurley int err; 5822ed246ceeSJohn Hurley 5823ed246ceeSJohn Hurley if (unlikely(!eth_p_mpls(skb->protocol))) 5824dedc5a08SDavide Caratti return 0; 5825ed246ceeSJohn Hurley 5826fa4e0f88SDavide Caratti err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); 5827ed246ceeSJohn Hurley if (unlikely(err)) 5828ed246ceeSJohn Hurley return err; 5829ed246ceeSJohn Hurley 5830ed246ceeSJohn Hurley skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); 5831ed246ceeSJohn Hurley memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), 5832fa4e0f88SDavide Caratti mac_len); 5833ed246ceeSJohn Hurley 5834ed246ceeSJohn Hurley __skb_pull(skb, MPLS_HLEN); 5835ed246ceeSJohn Hurley skb_reset_mac_header(skb); 5836fa4e0f88SDavide Caratti skb_set_network_header(skb, mac_len); 5837ed246ceeSJohn Hurley 58384296adc3SGuillaume Nault if (ethernet && mac_len >= ETH_HLEN) { 5839ed246ceeSJohn Hurley struct ethhdr *hdr; 5840ed246ceeSJohn Hurley 5841ed246ceeSJohn Hurley /* use mpls_hdr() to get ethertype to account for VLANs. 
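 * This places h_proto on the two bytes immediately preceding the MPLS header,
 * which is the ethertype that must be rewritten even when VLAN tags sit
 * between the MAC addresses and the MPLS header.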
*/ 5842ed246ceeSJohn Hurley hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); 5843ed246ceeSJohn Hurley skb_mod_eth_type(skb, hdr, next_proto); 5844ed246ceeSJohn Hurley } 5845ed246ceeSJohn Hurley skb->protocol = next_proto; 5846ed246ceeSJohn Hurley 5847ed246ceeSJohn Hurley return 0; 5848ed246ceeSJohn Hurley } 5849ed246ceeSJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_pop); 5850ed246ceeSJohn Hurley 5851ed246ceeSJohn Hurley /** 5852d27cf5c5SJohn Hurley * skb_mpls_update_lse() - modify outermost MPLS header and update csum 5853d27cf5c5SJohn Hurley * 5854d27cf5c5SJohn Hurley * @skb: buffer 5855d27cf5c5SJohn Hurley * @mpls_lse: new MPLS label stack entry to update to 5856d27cf5c5SJohn Hurley * 5857d27cf5c5SJohn Hurley * Expects skb->data at mac header. 5858d27cf5c5SJohn Hurley * 5859d27cf5c5SJohn Hurley * Returns 0 on success, -errno otherwise. 5860d27cf5c5SJohn Hurley */ 5861d27cf5c5SJohn Hurley int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) 5862d27cf5c5SJohn Hurley { 5863d27cf5c5SJohn Hurley int err; 5864d27cf5c5SJohn Hurley 5865d27cf5c5SJohn Hurley if (unlikely(!eth_p_mpls(skb->protocol))) 5866d27cf5c5SJohn Hurley return -EINVAL; 5867d27cf5c5SJohn Hurley 5868d27cf5c5SJohn Hurley err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); 5869d27cf5c5SJohn Hurley if (unlikely(err)) 5870d27cf5c5SJohn Hurley return err; 5871d27cf5c5SJohn Hurley 5872d27cf5c5SJohn Hurley if (skb->ip_summed == CHECKSUM_COMPLETE) { 5873d27cf5c5SJohn Hurley __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; 5874d27cf5c5SJohn Hurley 5875d27cf5c5SJohn Hurley skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); 5876d27cf5c5SJohn Hurley } 5877d27cf5c5SJohn Hurley 5878d27cf5c5SJohn Hurley mpls_hdr(skb)->label_stack_entry = mpls_lse; 5879d27cf5c5SJohn Hurley 5880d27cf5c5SJohn Hurley return 0; 5881d27cf5c5SJohn Hurley } 5882d27cf5c5SJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_update_lse); 5883d27cf5c5SJohn Hurley 5884d27cf5c5SJohn Hurley /** 58852a2ea508SJohn Hurley * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header 58862a2ea508SJohn Hurley * 58872a2ea508SJohn Hurley * @skb: buffer 58882a2ea508SJohn Hurley * 58892a2ea508SJohn Hurley * Expects skb->data at mac header. 58902a2ea508SJohn Hurley * 58912a2ea508SJohn Hurley * Returns 0 on success, -errno otherwise. 
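 *
 * Illustrative sketch (not a caller in this file): an MPLS forwarding path
 * might combine the helpers above as
 *
 *	if (skb_mpls_dec_ttl(skb))
 *		goto drop;
 *	if (last_label)
 *		err = skb_mpls_pop(skb, htons(ETH_P_IP), skb->mac_len, true);
 *
 * where drop and last_label are hypothetical caller-side names.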
58922a2ea508SJohn Hurley */ 58932a2ea508SJohn Hurley int skb_mpls_dec_ttl(struct sk_buff *skb) 58942a2ea508SJohn Hurley { 58952a2ea508SJohn Hurley u32 lse; 58962a2ea508SJohn Hurley u8 ttl; 58972a2ea508SJohn Hurley 58982a2ea508SJohn Hurley if (unlikely(!eth_p_mpls(skb->protocol))) 58992a2ea508SJohn Hurley return -EINVAL; 59002a2ea508SJohn Hurley 590113de4ed9SDavide Caratti if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) 590213de4ed9SDavide Caratti return -ENOMEM; 590313de4ed9SDavide Caratti 59042a2ea508SJohn Hurley lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); 59052a2ea508SJohn Hurley ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT; 59062a2ea508SJohn Hurley if (!--ttl) 59072a2ea508SJohn Hurley return -EINVAL; 59082a2ea508SJohn Hurley 59092a2ea508SJohn Hurley lse &= ~MPLS_LS_TTL_MASK; 59102a2ea508SJohn Hurley lse |= ttl << MPLS_LS_TTL_SHIFT; 59112a2ea508SJohn Hurley 59122a2ea508SJohn Hurley return skb_mpls_update_lse(skb, cpu_to_be32(lse)); 59132a2ea508SJohn Hurley } 59142a2ea508SJohn Hurley EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl); 59152a2ea508SJohn Hurley 59162a2ea508SJohn Hurley /** 59172e4e4410SEric Dumazet * alloc_skb_with_frags - allocate skb with page frags 59182e4e4410SEric Dumazet * 5919de3f0d0eSMasanari Iida * @header_len: size of linear part 5920de3f0d0eSMasanari Iida * @data_len: needed length in frags 5921de3f0d0eSMasanari Iida * @max_page_order: max page order desired. 5922de3f0d0eSMasanari Iida * @errcode: pointer to error code if any 5923de3f0d0eSMasanari Iida * @gfp_mask: allocation mask 59242e4e4410SEric Dumazet * 59252e4e4410SEric Dumazet * This can be used to allocate a paged skb, given a maximal order for frags. 59262e4e4410SEric Dumazet */ 59272e4e4410SEric Dumazet struct sk_buff *alloc_skb_with_frags(unsigned long header_len, 59282e4e4410SEric Dumazet unsigned long data_len, 59292e4e4410SEric Dumazet int max_page_order, 59302e4e4410SEric Dumazet int *errcode, 59312e4e4410SEric Dumazet gfp_t gfp_mask) 59322e4e4410SEric Dumazet { 59332e4e4410SEric Dumazet int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 59342e4e4410SEric Dumazet unsigned long chunk; 59352e4e4410SEric Dumazet struct sk_buff *skb; 59362e4e4410SEric Dumazet struct page *page; 59372e4e4410SEric Dumazet int i; 59382e4e4410SEric Dumazet 59392e4e4410SEric Dumazet *errcode = -EMSGSIZE; 59402e4e4410SEric Dumazet /* Note this test could be relaxed, if we succeed to allocate 59412e4e4410SEric Dumazet * high order pages... 
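 * (npages is computed in PAGE_SIZE units; a successful high order allocation
 * covers several such units with a single frag, so fewer frags would do.)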
59422e4e4410SEric Dumazet */ 59432e4e4410SEric Dumazet if (npages > MAX_SKB_FRAGS) 59442e4e4410SEric Dumazet return NULL; 59452e4e4410SEric Dumazet 59462e4e4410SEric Dumazet *errcode = -ENOBUFS; 5947f8c468e8SDavid Rientjes skb = alloc_skb(header_len, gfp_mask); 59482e4e4410SEric Dumazet if (!skb) 59492e4e4410SEric Dumazet return NULL; 59502e4e4410SEric Dumazet 59512e4e4410SEric Dumazet skb->truesize += npages << PAGE_SHIFT; 59522e4e4410SEric Dumazet 59532e4e4410SEric Dumazet for (i = 0; npages > 0; i++) { 59542e4e4410SEric Dumazet int order = max_page_order; 59552e4e4410SEric Dumazet 59562e4e4410SEric Dumazet while (order) { 59572e4e4410SEric Dumazet if (npages >= 1 << order) { 5958d0164adcSMel Gorman page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) | 59592e4e4410SEric Dumazet __GFP_COMP | 5960d14b56f5SMichal Hocko __GFP_NOWARN, 59612e4e4410SEric Dumazet order); 59622e4e4410SEric Dumazet if (page) 59632e4e4410SEric Dumazet goto fill_page; 59642e4e4410SEric Dumazet /* Do not retry other high order allocations */ 59652e4e4410SEric Dumazet order = 1; 59662e4e4410SEric Dumazet max_page_order = 0; 59672e4e4410SEric Dumazet } 59682e4e4410SEric Dumazet order--; 59692e4e4410SEric Dumazet } 59702e4e4410SEric Dumazet page = alloc_page(gfp_mask); 59712e4e4410SEric Dumazet if (!page) 59722e4e4410SEric Dumazet goto failure; 59732e4e4410SEric Dumazet fill_page: 59742e4e4410SEric Dumazet chunk = min_t(unsigned long, data_len, 59752e4e4410SEric Dumazet PAGE_SIZE << order); 59762e4e4410SEric Dumazet skb_fill_page_desc(skb, i, page, 0, chunk); 59772e4e4410SEric Dumazet data_len -= chunk; 59782e4e4410SEric Dumazet npages -= 1 << order; 59792e4e4410SEric Dumazet } 59802e4e4410SEric Dumazet return skb; 59812e4e4410SEric Dumazet 59822e4e4410SEric Dumazet failure: 59832e4e4410SEric Dumazet kfree_skb(skb); 59842e4e4410SEric Dumazet return NULL; 59852e4e4410SEric Dumazet } 59862e4e4410SEric Dumazet EXPORT_SYMBOL(alloc_skb_with_frags); 59876fa01ccdSSowmini Varadhan 59886fa01ccdSSowmini Varadhan /* carve out the first off bytes from skb when off < headlen */ 59896fa01ccdSSowmini Varadhan static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, 59906fa01ccdSSowmini Varadhan const int headlen, gfp_t gfp_mask) 59916fa01ccdSSowmini Varadhan { 59926fa01ccdSSowmini Varadhan int i; 59936fa01ccdSSowmini Varadhan int size = skb_end_offset(skb); 59946fa01ccdSSowmini Varadhan int new_hlen = headlen - off; 59956fa01ccdSSowmini Varadhan u8 *data; 59966fa01ccdSSowmini Varadhan 59976fa01ccdSSowmini Varadhan size = SKB_DATA_ALIGN(size); 59986fa01ccdSSowmini Varadhan 59996fa01ccdSSowmini Varadhan if (skb_pfmemalloc(skb)) 60006fa01ccdSSowmini Varadhan gfp_mask |= __GFP_MEMALLOC; 60016fa01ccdSSowmini Varadhan data = kmalloc_reserve(size + 60026fa01ccdSSowmini Varadhan SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 60036fa01ccdSSowmini Varadhan gfp_mask, NUMA_NO_NODE, NULL); 60046fa01ccdSSowmini Varadhan if (!data) 60056fa01ccdSSowmini Varadhan return -ENOMEM; 60066fa01ccdSSowmini Varadhan 60076fa01ccdSSowmini Varadhan size = SKB_WITH_OVERHEAD(ksize(data)); 60086fa01ccdSSowmini Varadhan 60096fa01ccdSSowmini Varadhan /* Copy real data, and all frags */ 60106fa01ccdSSowmini Varadhan skb_copy_from_linear_data_offset(skb, off, data, new_hlen); 60116fa01ccdSSowmini Varadhan skb->len -= off; 60126fa01ccdSSowmini Varadhan 60136fa01ccdSSowmini Varadhan memcpy((struct skb_shared_info *)(data + size), 60146fa01ccdSSowmini Varadhan skb_shinfo(skb), 60156fa01ccdSSowmini Varadhan offsetof(struct skb_shared_info, 
60166fa01ccdSSowmini Varadhan frags[skb_shinfo(skb)->nr_frags])); 60176fa01ccdSSowmini Varadhan if (skb_cloned(skb)) { 60186fa01ccdSSowmini Varadhan /* drop the old head gracefully */ 60196fa01ccdSSowmini Varadhan if (skb_orphan_frags(skb, gfp_mask)) { 60206fa01ccdSSowmini Varadhan kfree(data); 60216fa01ccdSSowmini Varadhan return -ENOMEM; 60226fa01ccdSSowmini Varadhan } 60236fa01ccdSSowmini Varadhan for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 60246fa01ccdSSowmini Varadhan skb_frag_ref(skb, i); 60256fa01ccdSSowmini Varadhan if (skb_has_frag_list(skb)) 60266fa01ccdSSowmini Varadhan skb_clone_fraglist(skb); 60276fa01ccdSSowmini Varadhan skb_release_data(skb); 60286fa01ccdSSowmini Varadhan } else { 60296fa01ccdSSowmini Varadhan /* we can reuse the existing refcount - all we did was 60306fa01ccdSSowmini Varadhan * relocate values 60316fa01ccdSSowmini Varadhan */ 60326fa01ccdSSowmini Varadhan skb_free_head(skb); 60336fa01ccdSSowmini Varadhan } 60346fa01ccdSSowmini Varadhan 60356fa01ccdSSowmini Varadhan skb->head = data; 60366fa01ccdSSowmini Varadhan skb->data = data; 60376fa01ccdSSowmini Varadhan skb->head_frag = 0; 60386fa01ccdSSowmini Varadhan #ifdef NET_SKBUFF_DATA_USES_OFFSET 60396fa01ccdSSowmini Varadhan skb->end = size; 60406fa01ccdSSowmini Varadhan #else 60416fa01ccdSSowmini Varadhan skb->end = skb->head + size; 60426fa01ccdSSowmini Varadhan #endif 60436fa01ccdSSowmini Varadhan skb_set_tail_pointer(skb, skb_headlen(skb)); 60446fa01ccdSSowmini Varadhan skb_headers_offset_update(skb, 0); 60456fa01ccdSSowmini Varadhan skb->cloned = 0; 60466fa01ccdSSowmini Varadhan skb->hdr_len = 0; 60476fa01ccdSSowmini Varadhan skb->nohdr = 0; 60486fa01ccdSSowmini Varadhan atomic_set(&skb_shinfo(skb)->dataref, 1); 60496fa01ccdSSowmini Varadhan 60506fa01ccdSSowmini Varadhan return 0; 60516fa01ccdSSowmini Varadhan } 60526fa01ccdSSowmini Varadhan 60536fa01ccdSSowmini Varadhan static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp); 60546fa01ccdSSowmini Varadhan 60556fa01ccdSSowmini Varadhan /* carve out the first eat bytes from skb's frag_list. May recurse into 60566fa01ccdSSowmini Varadhan * pskb_carve() 60576fa01ccdSSowmini Varadhan */ 60586fa01ccdSSowmini Varadhan static int pskb_carve_frag_list(struct sk_buff *skb, 60596fa01ccdSSowmini Varadhan struct skb_shared_info *shinfo, int eat, 60606fa01ccdSSowmini Varadhan gfp_t gfp_mask) 60616fa01ccdSSowmini Varadhan { 60626fa01ccdSSowmini Varadhan struct sk_buff *list = shinfo->frag_list; 60636fa01ccdSSowmini Varadhan struct sk_buff *clone = NULL; 60646fa01ccdSSowmini Varadhan struct sk_buff *insp = NULL; 60656fa01ccdSSowmini Varadhan 60666fa01ccdSSowmini Varadhan do { 60676fa01ccdSSowmini Varadhan if (!list) { 60686fa01ccdSSowmini Varadhan pr_err("Not enough bytes to eat. Want %d\n", eat); 60696fa01ccdSSowmini Varadhan return -EFAULT; 60706fa01ccdSSowmini Varadhan } 60716fa01ccdSSowmini Varadhan if (list->len <= eat) { 60726fa01ccdSSowmini Varadhan /* Eaten as whole. */ 60736fa01ccdSSowmini Varadhan eat -= list->len; 60746fa01ccdSSowmini Varadhan list = list->next; 60756fa01ccdSSowmini Varadhan insp = list; 60766fa01ccdSSowmini Varadhan } else { 60776fa01ccdSSowmini Varadhan /* Eaten partially.
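 * The split point lands inside this skb: clone it first if it is shared,
 * then let pskb_carve() trim the remaining 'eat' bytes from its head.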
*/ 60786fa01ccdSSowmini Varadhan if (skb_shared(list)) { 60796fa01ccdSSowmini Varadhan clone = skb_clone(list, gfp_mask); 60806fa01ccdSSowmini Varadhan if (!clone) 60816fa01ccdSSowmini Varadhan return -ENOMEM; 60826fa01ccdSSowmini Varadhan insp = list->next; 60836fa01ccdSSowmini Varadhan list = clone; 60846fa01ccdSSowmini Varadhan } else { 60856fa01ccdSSowmini Varadhan /* This may be pulled without problems. */ 60866fa01ccdSSowmini Varadhan insp = list; 60876fa01ccdSSowmini Varadhan } 60886fa01ccdSSowmini Varadhan if (pskb_carve(list, eat, gfp_mask) < 0) { 60896fa01ccdSSowmini Varadhan kfree_skb(clone); 60906fa01ccdSSowmini Varadhan return -ENOMEM; 60916fa01ccdSSowmini Varadhan } 60926fa01ccdSSowmini Varadhan break; 60936fa01ccdSSowmini Varadhan } 60946fa01ccdSSowmini Varadhan } while (eat); 60956fa01ccdSSowmini Varadhan 60966fa01ccdSSowmini Varadhan /* Free pulled out fragments. */ 60976fa01ccdSSowmini Varadhan while ((list = shinfo->frag_list) != insp) { 60986fa01ccdSSowmini Varadhan shinfo->frag_list = list->next; 60996fa01ccdSSowmini Varadhan kfree_skb(list); 61006fa01ccdSSowmini Varadhan } 61016fa01ccdSSowmini Varadhan /* And insert new clone at head. */ 61026fa01ccdSSowmini Varadhan if (clone) { 61036fa01ccdSSowmini Varadhan clone->next = list; 61046fa01ccdSSowmini Varadhan shinfo->frag_list = clone; 61056fa01ccdSSowmini Varadhan } 61066fa01ccdSSowmini Varadhan return 0; 61076fa01ccdSSowmini Varadhan } 61086fa01ccdSSowmini Varadhan 61096fa01ccdSSowmini Varadhan /* carve off first len bytes from skb. Split line (off) is in the 61106fa01ccdSSowmini Varadhan * non-linear part of skb 61116fa01ccdSSowmini Varadhan */ 61126fa01ccdSSowmini Varadhan static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, 61136fa01ccdSSowmini Varadhan int pos, gfp_t gfp_mask) 61146fa01ccdSSowmini Varadhan { 61156fa01ccdSSowmini Varadhan int i, k = 0; 61166fa01ccdSSowmini Varadhan int size = skb_end_offset(skb); 61176fa01ccdSSowmini Varadhan u8 *data; 61186fa01ccdSSowmini Varadhan const int nfrags = skb_shinfo(skb)->nr_frags; 61196fa01ccdSSowmini Varadhan struct skb_shared_info *shinfo; 61206fa01ccdSSowmini Varadhan 61216fa01ccdSSowmini Varadhan size = SKB_DATA_ALIGN(size); 61226fa01ccdSSowmini Varadhan 61236fa01ccdSSowmini Varadhan if (skb_pfmemalloc(skb)) 61246fa01ccdSSowmini Varadhan gfp_mask |= __GFP_MEMALLOC; 61256fa01ccdSSowmini Varadhan data = kmalloc_reserve(size + 61266fa01ccdSSowmini Varadhan SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 61276fa01ccdSSowmini Varadhan gfp_mask, NUMA_NO_NODE, NULL); 61286fa01ccdSSowmini Varadhan if (!data) 61296fa01ccdSSowmini Varadhan return -ENOMEM; 61306fa01ccdSSowmini Varadhan 61316fa01ccdSSowmini Varadhan size = SKB_WITH_OVERHEAD(ksize(data)); 61326fa01ccdSSowmini Varadhan 61336fa01ccdSSowmini Varadhan memcpy((struct skb_shared_info *)(data + size), 6134e3ec1e8cSMiaohe Lin skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); 61356fa01ccdSSowmini Varadhan if (skb_orphan_frags(skb, gfp_mask)) { 61366fa01ccdSSowmini Varadhan kfree(data); 61376fa01ccdSSowmini Varadhan return -ENOMEM; 61386fa01ccdSSowmini Varadhan } 61396fa01ccdSSowmini Varadhan shinfo = (struct skb_shared_info *)(data + size); 61406fa01ccdSSowmini Varadhan for (i = 0; i < nfrags; i++) { 61416fa01ccdSSowmini Varadhan int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); 61426fa01ccdSSowmini Varadhan 61436fa01ccdSSowmini Varadhan if (pos + fsize > off) { 61446fa01ccdSSowmini Varadhan shinfo->frags[k] = skb_shinfo(skb)->frags[i]; 61456fa01ccdSSowmini Varadhan 
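			/* This frag extends past the split point, so keep it.
			 * If the split lands inside it (pos < off), its offset
			 * and size are adjusted below.
			 */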
61466fa01ccdSSowmini Varadhan if (pos < off) { 61476fa01ccdSSowmini Varadhan /* Split frag. 61486fa01ccdSSowmini Varadhan * We have two variants in this case: 61496fa01ccdSSowmini Varadhan * 1. Move all the frag to the second 61506fa01ccdSSowmini Varadhan * part, if it is possible. F.e. 61516fa01ccdSSowmini Varadhan * this approach is mandatory for TUX, 61526fa01ccdSSowmini Varadhan * where splitting is expensive. 61536fa01ccdSSowmini Varadhan * 2. Split is accurately. We make this. 61546fa01ccdSSowmini Varadhan */ 6155b54c9d5bSJonathan Lemon skb_frag_off_add(&shinfo->frags[0], off - pos); 61566fa01ccdSSowmini Varadhan skb_frag_size_sub(&shinfo->frags[0], off - pos); 61576fa01ccdSSowmini Varadhan } 61586fa01ccdSSowmini Varadhan skb_frag_ref(skb, i); 61596fa01ccdSSowmini Varadhan k++; 61606fa01ccdSSowmini Varadhan } 61616fa01ccdSSowmini Varadhan pos += fsize; 61626fa01ccdSSowmini Varadhan } 61636fa01ccdSSowmini Varadhan shinfo->nr_frags = k; 61646fa01ccdSSowmini Varadhan if (skb_has_frag_list(skb)) 61656fa01ccdSSowmini Varadhan skb_clone_fraglist(skb); 61666fa01ccdSSowmini Varadhan 61676fa01ccdSSowmini Varadhan /* split line is in frag list */ 6168eabe8618SMiaohe Lin if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { 6169eabe8618SMiaohe Lin /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */ 6170eabe8618SMiaohe Lin if (skb_has_frag_list(skb)) 6171eabe8618SMiaohe Lin kfree_skb_list(skb_shinfo(skb)->frag_list); 6172eabe8618SMiaohe Lin kfree(data); 6173eabe8618SMiaohe Lin return -ENOMEM; 61746fa01ccdSSowmini Varadhan } 61756fa01ccdSSowmini Varadhan skb_release_data(skb); 61766fa01ccdSSowmini Varadhan 61776fa01ccdSSowmini Varadhan skb->head = data; 61786fa01ccdSSowmini Varadhan skb->head_frag = 0; 61796fa01ccdSSowmini Varadhan skb->data = data; 61806fa01ccdSSowmini Varadhan #ifdef NET_SKBUFF_DATA_USES_OFFSET 61816fa01ccdSSowmini Varadhan skb->end = size; 61826fa01ccdSSowmini Varadhan #else 61836fa01ccdSSowmini Varadhan skb->end = skb->head + size; 61846fa01ccdSSowmini Varadhan #endif 61856fa01ccdSSowmini Varadhan skb_reset_tail_pointer(skb); 61866fa01ccdSSowmini Varadhan skb_headers_offset_update(skb, 0); 61876fa01ccdSSowmini Varadhan skb->cloned = 0; 61886fa01ccdSSowmini Varadhan skb->hdr_len = 0; 61896fa01ccdSSowmini Varadhan skb->nohdr = 0; 61906fa01ccdSSowmini Varadhan skb->len -= off; 61916fa01ccdSSowmini Varadhan skb->data_len = skb->len; 61926fa01ccdSSowmini Varadhan atomic_set(&skb_shinfo(skb)->dataref, 1); 61936fa01ccdSSowmini Varadhan return 0; 61946fa01ccdSSowmini Varadhan } 61956fa01ccdSSowmini Varadhan 61966fa01ccdSSowmini Varadhan /* remove len bytes from the beginning of the skb */ 61976fa01ccdSSowmini Varadhan static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) 61986fa01ccdSSowmini Varadhan { 61996fa01ccdSSowmini Varadhan int headlen = skb_headlen(skb); 62006fa01ccdSSowmini Varadhan 62016fa01ccdSSowmini Varadhan if (len < headlen) 62026fa01ccdSSowmini Varadhan return pskb_carve_inside_header(skb, len, headlen, gfp); 62036fa01ccdSSowmini Varadhan else 62046fa01ccdSSowmini Varadhan return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); 62056fa01ccdSSowmini Varadhan } 62066fa01ccdSSowmini Varadhan 62076fa01ccdSSowmini Varadhan /* Extract to_copy bytes starting at off from skb, and return this in 62086fa01ccdSSowmini Varadhan * a new skb 62096fa01ccdSSowmini Varadhan */ 62106fa01ccdSSowmini Varadhan struct sk_buff *pskb_extract(struct sk_buff *skb, int off, 62116fa01ccdSSowmini Varadhan int to_copy, gfp_t gfp) 
62126fa01ccdSSowmini Varadhan { 62136fa01ccdSSowmini Varadhan struct sk_buff *clone = skb_clone(skb, gfp); 62146fa01ccdSSowmini Varadhan 62156fa01ccdSSowmini Varadhan if (!clone) 62166fa01ccdSSowmini Varadhan return NULL; 62176fa01ccdSSowmini Varadhan 62186fa01ccdSSowmini Varadhan if (pskb_carve(clone, off, gfp) < 0 || 62196fa01ccdSSowmini Varadhan pskb_trim(clone, to_copy)) { 62206fa01ccdSSowmini Varadhan kfree_skb(clone); 62216fa01ccdSSowmini Varadhan return NULL; 62226fa01ccdSSowmini Varadhan } 62236fa01ccdSSowmini Varadhan return clone; 62246fa01ccdSSowmini Varadhan } 62256fa01ccdSSowmini Varadhan EXPORT_SYMBOL(pskb_extract); 6226c8c8b127SEric Dumazet 6227c8c8b127SEric Dumazet /** 6228c8c8b127SEric Dumazet * skb_condense - try to get rid of fragments/frag_list if possible 6229c8c8b127SEric Dumazet * @skb: buffer 6230c8c8b127SEric Dumazet * 6231c8c8b127SEric Dumazet * Can be used to save memory before skb is added to a busy queue. 6232c8c8b127SEric Dumazet * If packet has bytes in frags and enough tail room in skb->head, 6233c8c8b127SEric Dumazet * pull all of them, so that we can free the frags right now and adjust 6234c8c8b127SEric Dumazet * truesize. 6235c8c8b127SEric Dumazet * Notes: 6236c8c8b127SEric Dumazet * We do not reallocate skb->head thus cannot fail. 6237c8c8b127SEric Dumazet * Caller must re-evaluate skb->truesize if needed. 6238c8c8b127SEric Dumazet */ 6239c8c8b127SEric Dumazet void skb_condense(struct sk_buff *skb) 6240c8c8b127SEric Dumazet { 62413174fed9SEric Dumazet if (skb->data_len) { 62423174fed9SEric Dumazet if (skb->data_len > skb->end - skb->tail || 6243c8c8b127SEric Dumazet skb_cloned(skb)) 6244c8c8b127SEric Dumazet return; 6245c8c8b127SEric Dumazet 6246c8c8b127SEric Dumazet /* Nice, we can free page frag(s) right now */ 6247c8c8b127SEric Dumazet __pskb_pull_tail(skb, skb->data_len); 62483174fed9SEric Dumazet } 62493174fed9SEric Dumazet /* At this point, skb->truesize might be overestimated, 62503174fed9SEric Dumazet * because skb had a fragment, and fragments do not tell 62513174fed9SEric Dumazet * their truesize. 62523174fed9SEric Dumazet * When we pulled its content into skb->head, fragment 62533174fed9SEric Dumazet * was freed, but __pskb_pull_tail() could not possibly 62543174fed9SEric Dumazet * adjust skb->truesize, not knowing the frag truesize. 6255c8c8b127SEric Dumazet */ 6256c8c8b127SEric Dumazet skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 6257c8c8b127SEric Dumazet } 6258df5042f4SFlorian Westphal 6259df5042f4SFlorian Westphal #ifdef CONFIG_SKB_EXTENSIONS 6260df5042f4SFlorian Westphal static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id) 6261df5042f4SFlorian Westphal { 6262df5042f4SFlorian Westphal return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE); 6263df5042f4SFlorian Westphal } 6264df5042f4SFlorian Westphal 62658b69a803SPaolo Abeni /** 62668b69a803SPaolo Abeni * __skb_ext_alloc - allocate a new skb extensions storage 62678b69a803SPaolo Abeni * 62684930f483SFlorian Westphal * @flags: See kmalloc(). 62694930f483SFlorian Westphal * 62708b69a803SPaolo Abeni * Returns the newly allocated pointer. The pointer can later be attached to a 62718b69a803SPaolo Abeni * skb via __skb_ext_set(). 62728b69a803SPaolo Abeni * Note: caller must handle the skb_ext as opaque data.
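 *
 * Illustrative sketch (not a caller in this file), pairing this helper with
 * __skb_ext_set() below:
 *
 *	struct skb_ext *ext = __skb_ext_alloc(GFP_ATOMIC);
 *
 *	if (ext)
 *		ptr = __skb_ext_set(skb, SKB_EXT_MPTCP, ext);
 *
 * where ptr is a hypothetical pointer to the caller's extension payload and
 * SKB_EXT_MPTCP stands in for whichever extension id the caller uses.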
62738b69a803SPaolo Abeni */ 62744930f483SFlorian Westphal struct skb_ext *__skb_ext_alloc(gfp_t flags) 6275df5042f4SFlorian Westphal { 62764930f483SFlorian Westphal struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags); 6277df5042f4SFlorian Westphal 6278df5042f4SFlorian Westphal if (new) { 6279df5042f4SFlorian Westphal memset(new->offset, 0, sizeof(new->offset)); 6280df5042f4SFlorian Westphal refcount_set(&new->refcnt, 1); 6281df5042f4SFlorian Westphal } 6282df5042f4SFlorian Westphal 6283df5042f4SFlorian Westphal return new; 6284df5042f4SFlorian Westphal } 6285df5042f4SFlorian Westphal 62864165079bSFlorian Westphal static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old, 62874165079bSFlorian Westphal unsigned int old_active) 6288df5042f4SFlorian Westphal { 6289df5042f4SFlorian Westphal struct skb_ext *new; 6290df5042f4SFlorian Westphal 6291df5042f4SFlorian Westphal if (refcount_read(&old->refcnt) == 1) 6292df5042f4SFlorian Westphal return old; 6293df5042f4SFlorian Westphal 6294df5042f4SFlorian Westphal new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC); 6295df5042f4SFlorian Westphal if (!new) 6296df5042f4SFlorian Westphal return NULL; 6297df5042f4SFlorian Westphal 6298df5042f4SFlorian Westphal memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE); 6299df5042f4SFlorian Westphal refcount_set(&new->refcnt, 1); 6300df5042f4SFlorian Westphal 63014165079bSFlorian Westphal #ifdef CONFIG_XFRM 63024165079bSFlorian Westphal if (old_active & (1 << SKB_EXT_SEC_PATH)) { 63034165079bSFlorian Westphal struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH); 63044165079bSFlorian Westphal unsigned int i; 63054165079bSFlorian Westphal 63064165079bSFlorian Westphal for (i = 0; i < sp->len; i++) 63074165079bSFlorian Westphal xfrm_state_hold(sp->xvec[i]); 63084165079bSFlorian Westphal } 63094165079bSFlorian Westphal #endif 6310df5042f4SFlorian Westphal __skb_ext_put(old); 6311df5042f4SFlorian Westphal return new; 6312df5042f4SFlorian Westphal } 6313df5042f4SFlorian Westphal 6314df5042f4SFlorian Westphal /** 63158b69a803SPaolo Abeni * __skb_ext_set - attach the specified extension storage to this skb 63168b69a803SPaolo Abeni * @skb: buffer 63178b69a803SPaolo Abeni * @id: extension id 63188b69a803SPaolo Abeni * @ext: extension storage previously allocated via __skb_ext_alloc() 63198b69a803SPaolo Abeni * 63208b69a803SPaolo Abeni * Existing extensions, if any, are cleared. 63218b69a803SPaolo Abeni * 63228b69a803SPaolo Abeni * Returns the pointer to the extension. 
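 *
 * Note: unlike skb_ext_add(), no copy-on-write is done here; any previously
 * attached extension storage is released and @ext simply takes its place,
 * with @id as the only active extension.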
63238b69a803SPaolo Abeni */ 63248b69a803SPaolo Abeni void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, 63258b69a803SPaolo Abeni struct skb_ext *ext) 63268b69a803SPaolo Abeni { 63278b69a803SPaolo Abeni unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext); 63288b69a803SPaolo Abeni 63298b69a803SPaolo Abeni skb_ext_put(skb); 63308b69a803SPaolo Abeni newlen = newoff + skb_ext_type_len[id]; 63318b69a803SPaolo Abeni ext->chunks = newlen; 63328b69a803SPaolo Abeni ext->offset[id] = newoff; 63338b69a803SPaolo Abeni skb->extensions = ext; 63348b69a803SPaolo Abeni skb->active_extensions = 1 << id; 63358b69a803SPaolo Abeni return skb_ext_get_ptr(ext, id); 63368b69a803SPaolo Abeni } 63378b69a803SPaolo Abeni 63388b69a803SPaolo Abeni /** 6339df5042f4SFlorian Westphal * skb_ext_add - allocate space for given extension, COW if needed 6340df5042f4SFlorian Westphal * @skb: buffer 6341df5042f4SFlorian Westphal * @id: extension to allocate space for 6342df5042f4SFlorian Westphal * 6343df5042f4SFlorian Westphal * Allocates enough space for the given extension. 6344df5042f4SFlorian Westphal * If the extension is already present, a pointer to that extension 6345df5042f4SFlorian Westphal * is returned. 6346df5042f4SFlorian Westphal * 6347df5042f4SFlorian Westphal * If the skb was cloned, COW applies and the returned memory can be 6348df5042f4SFlorian Westphal * modified without changing the extension space of clones buffers. 6349df5042f4SFlorian Westphal * 6350df5042f4SFlorian Westphal * Returns pointer to the extension or NULL on allocation failure. 6351df5042f4SFlorian Westphal */ 6352df5042f4SFlorian Westphal void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) 6353df5042f4SFlorian Westphal { 6354df5042f4SFlorian Westphal struct skb_ext *new, *old = NULL; 6355df5042f4SFlorian Westphal unsigned int newlen, newoff; 6356df5042f4SFlorian Westphal 6357df5042f4SFlorian Westphal if (skb->active_extensions) { 6358df5042f4SFlorian Westphal old = skb->extensions; 6359df5042f4SFlorian Westphal 63604165079bSFlorian Westphal new = skb_ext_maybe_cow(old, skb->active_extensions); 6361df5042f4SFlorian Westphal if (!new) 6362df5042f4SFlorian Westphal return NULL; 6363df5042f4SFlorian Westphal 6364682ec859SPaolo Abeni if (__skb_ext_exist(new, id)) 6365df5042f4SFlorian Westphal goto set_active; 6366df5042f4SFlorian Westphal 6367e94e50bdSPaolo Abeni newoff = new->chunks; 6368df5042f4SFlorian Westphal } else { 6369df5042f4SFlorian Westphal newoff = SKB_EXT_CHUNKSIZEOF(*new); 6370df5042f4SFlorian Westphal 63714930f483SFlorian Westphal new = __skb_ext_alloc(GFP_ATOMIC); 6372df5042f4SFlorian Westphal if (!new) 6373df5042f4SFlorian Westphal return NULL; 6374df5042f4SFlorian Westphal } 6375df5042f4SFlorian Westphal 6376df5042f4SFlorian Westphal newlen = newoff + skb_ext_type_len[id]; 6377df5042f4SFlorian Westphal new->chunks = newlen; 6378df5042f4SFlorian Westphal new->offset[id] = newoff; 6379df5042f4SFlorian Westphal set_active: 6380682ec859SPaolo Abeni skb->extensions = new; 6381df5042f4SFlorian Westphal skb->active_extensions |= 1 << id; 6382df5042f4SFlorian Westphal return skb_ext_get_ptr(new, id); 6383df5042f4SFlorian Westphal } 6384df5042f4SFlorian Westphal EXPORT_SYMBOL(skb_ext_add); 6385df5042f4SFlorian Westphal 63864165079bSFlorian Westphal #ifdef CONFIG_XFRM 63874165079bSFlorian Westphal static void skb_ext_put_sp(struct sec_path *sp) 63884165079bSFlorian Westphal { 63894165079bSFlorian Westphal unsigned int i; 63904165079bSFlorian Westphal 63914165079bSFlorian Westphal for (i = 0; i < sp->len; i++) 
63924165079bSFlorian Westphal xfrm_state_put(sp->xvec[i]); 63934165079bSFlorian Westphal } 63944165079bSFlorian Westphal #endif 63954165079bSFlorian Westphal 6396df5042f4SFlorian Westphal void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) 6397df5042f4SFlorian Westphal { 6398df5042f4SFlorian Westphal struct skb_ext *ext = skb->extensions; 6399df5042f4SFlorian Westphal 6400df5042f4SFlorian Westphal skb->active_extensions &= ~(1 << id); 6401df5042f4SFlorian Westphal if (skb->active_extensions == 0) { 6402df5042f4SFlorian Westphal skb->extensions = NULL; 6403df5042f4SFlorian Westphal __skb_ext_put(ext); 64044165079bSFlorian Westphal #ifdef CONFIG_XFRM 64054165079bSFlorian Westphal } else if (id == SKB_EXT_SEC_PATH && 64064165079bSFlorian Westphal refcount_read(&ext->refcnt) == 1) { 64074165079bSFlorian Westphal struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH); 64084165079bSFlorian Westphal 64094165079bSFlorian Westphal skb_ext_put_sp(sp); 64104165079bSFlorian Westphal sp->len = 0; 64114165079bSFlorian Westphal #endif 6412df5042f4SFlorian Westphal } 6413df5042f4SFlorian Westphal } 6414df5042f4SFlorian Westphal EXPORT_SYMBOL(__skb_ext_del); 6415df5042f4SFlorian Westphal 6416df5042f4SFlorian Westphal void __skb_ext_put(struct skb_ext *ext) 6417df5042f4SFlorian Westphal { 6418df5042f4SFlorian Westphal /* If this is last clone, nothing can increment 6419df5042f4SFlorian Westphal * it after check passes. Avoids one atomic op. 6420df5042f4SFlorian Westphal */ 6421df5042f4SFlorian Westphal if (refcount_read(&ext->refcnt) == 1) 6422df5042f4SFlorian Westphal goto free_now; 6423df5042f4SFlorian Westphal 6424df5042f4SFlorian Westphal if (!refcount_dec_and_test(&ext->refcnt)) 6425df5042f4SFlorian Westphal return; 6426df5042f4SFlorian Westphal free_now: 64274165079bSFlorian Westphal #ifdef CONFIG_XFRM 64284165079bSFlorian Westphal if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH)) 64294165079bSFlorian Westphal skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH)); 64304165079bSFlorian Westphal #endif 64314165079bSFlorian Westphal 6432df5042f4SFlorian Westphal kmem_cache_free(skbuff_ext_cache, ext); 6433df5042f4SFlorian Westphal } 6434df5042f4SFlorian Westphal EXPORT_SYMBOL(__skb_ext_put); 6435df5042f4SFlorian Westphal #endif /* CONFIG_SKB_EXTENSIONS */ 6436