/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>

/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (tun->debug)						\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (debug == 2)						\
		printk(level fmt, ##args);			\
} while (0)
#else
#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (0)							\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (0)							\
		printk(level fmt, ##args);			\
} while (0)
#endif
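/* Illustrative note (editor's sketch, not part of the original source):
 * with TUN_DEBUG defined, per-device messages are gated by tun->debug,
 * which userspace toggles via the TUNSETDEBUG ioctl, e.g.:
 *
 *	tun_debug(KERN_DEBUG, tun, "queue %u attached\n", tfile->queue_index);
 *
 * Without TUN_DEBUG, both macros compile to dead "if (0)" blocks so the
 * format arguments are still type-checked but emit no code.
 */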
#define TUN_HEADROOM 256
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE	0x80000000
#define TUN_VNET_BE	0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and
 * tap_filter) to serve as one transmit queue for the tuntap device. The
 * sock_fprog and tap_filter are kept in tun_struct since they are used for
 * filtering on the netdevice, not on a specific queue (at least I didn't
 * see the requirement for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to
 * the other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct skb_array tx_array;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated;
};

#define TUN_NUM_FLOW_ENTRIES 1024

/* Since the socket was moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when the file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int		numqueues;
	unsigned int 		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
};

static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en)
{
	tfile->napi_enabled = napi_en;
	if (napi_en) {
		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
		mutex_init(&tfile->napi_mutex);
	}
}
static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_struct *tun)
{
	return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
}

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}
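/* Editor's illustration (assumed usage, mirroring how the driver reads
 * virtio-net headers elsewhere in this file): vnet header fields are
 * converted with the helpers above, e.g.
 *
 *	len = tun16_to_cpu(tun, gso.hdr_len);
 *
 * so the rest of the code never tests TUN_VNET_LE/TUN_VNET_BE directly.
 *
 * Note on the flow hash below: the 0x3ff mask keeps the low 10 bits of
 * the hash, matching the TUN_NUM_FLOW_ENTRIES (1024) buckets.
 */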
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & 0x3ff;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}
static void tun_flow_cleanup(unsigned long data)
{
	struct tun_struct *tun = (struct tun_struct *)data;
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			count++;
			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies))
				tun_flow_delete(tun, e);
			else if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	if (!rxhash)
		return;
	else
		head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	/* There is a small possibility of out-of-order delivery while a
	 * flow switches queues; it is not worth optimizing for. */
	if (tun->numqueues == 1 || tfile->detached)
		goto unlock;

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

unlock:
	rcu_read_unlock();
}
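/* Editor's sketch of the receive-path sequence that feeds the flow table
 * (tun_get_user(), later in this file, does essentially this):
 *
 *	rxhash = __skb_get_hash_symmetric(skb);
 *	...
 *	tun_flow_update(tun, rxhash, tfile);
 *
 * so each receive flow remembers which queue it last used.
 */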
/**
 * Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash first. We do not check the
 * rx queue number because some NICs (e.g. the 82599) choose the rx queue
 * based on the tx queue where the last packet of the flow was sent. As the
 * userspace application moves between processors, we may see a different
 * rx queue number here. If we cannot compute an rxhash, we hope the
 * recorded rx queue number helps instead.
 */
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	rcu_read_lock();
	numqueues = ACCESS_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	if (txq) {
		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
		if (e) {
			tun_flow_save_rps_rxhash(e, txq);
			txq = e->queue_index;
		} else
			/* use multiply and shift instead of expensive divide */
			txq = ((u64)txq * numqueues) >> 32;
	} else if (likely(skb_rx_queue_recorded(skb))) {
		txq = skb_get_rx_queue(skb);
		while (unlikely(txq >= numqueues))
			txq -= numqueues;
	}

	rcu_read_unlock();
	return txq;
}
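/* Worked example for the multiply-and-shift above (editor's addition):
 * with numqueues = 4 and hash txq = 0xC0000001,
 * ((u64)0xC0000001 * 4) >> 32 = 3, i.e. the 32-bit hash is scaled into
 * [0, numqueues) without a divide.
 */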
static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

static void tun_queue_purge(struct tun_file *tfile)
{
	struct sk_buff *skb;

	while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
		kfree_skb(skb);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tun, tfile);
		tun_napi_del(tun, tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);
			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			skb_array_cleanup(&tfile->tx_array);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	rtnl_lock();
	__tun_detach(tfile, clean);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct bpf_prog *xdp_prog = rtnl_dereference(tun->xdp_prog);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tun, tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tun, tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (xdp_prog)
		bpf_prog_put(xdp_prog);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}
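/* Editor's summary of the invariants the attach/detach paths above and
 * below maintain: tun->tfiles[0 .. numqueues-1] are the live queues,
 * tun->disabled chains the persistent-but-detached ones, and tun_attach()
 * refuses to let numqueues + numdisabled exceed MAX_TAP_QUEUES (-E2BIG).
 */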
static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to the persistent device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (err)
			goto out;
	}

	if (!tfile->detached &&
	    skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi);
	}

	tun_set_real_num_queues(tun);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}
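/* Typical usage of the pair above (editor's sketch; this is the pattern
 * tun_chr_poll() below follows):
 *
 *	struct tun_struct *tun = tun_get(tfile);
 *
 *	if (!tun)
 *		return POLLERR;
 *	...use tun->dev under the temporary device reference...
 *	tun_put(tun);
 */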
/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}
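/* Worked example (editor's addition): ether_crc() yields a 32-bit CRC, so
 * n = crc >> 26 lies in [0, 63]; mask[n >> 5] selects one of the two u32
 * words and (1 << (n & 31)) one bit within it, i.e. a 64-bit single-hash
 * membership filter.
 */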
static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks, which is
	 * perfectly safe: we disable it first, and in the worst case
	 * we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed; a unicast address
	 * will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}
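/* Worked example (editor's addition): with uf.count == 10, the first
 * FLT_EXACT_COUNT (8) addresses become exact-match entries and the last
 * two must be multicast, each setting one bit in the hash mask; a unicast
 * address among those last two makes update_filter() return 0 with the
 * filter left disabled.
 */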
/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	netif_tx_start_all_queues(dev);

	for (i = 0; i < tun->numqueues; i++) {
		struct tun_file *tfile;

		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_write_space(tfile->socket.sk);
	}

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	u32 numqueues = 0;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);
	numqueues = ACCESS_ONCE(tun->numqueues);

	/* Drop packet if interface is not attached */
	if (txq >= numqueues)
		goto drop;

#ifdef CONFIG_RPS
	if (numqueues == 1 && static_key_false(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		if (rxhash) {
			struct tun_flow_entry *e;
			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
					  rxhash);
			if (e)
				tun_flow_save_rps_rxhash(e, rxhash);
		}
	}
#endif

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time.
	 */
	skb_orphan(skb);

	nf_reset(skb);

	if (skb_array_produce(&tfile->tx_array, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}
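/* Editor's illustration (assumed mapping, based on the TUNSETOFFLOAD
 * handling elsewhere in the driver): if userspace enabled
 * TUN_F_CSUM | TUN_F_TSO4, tun->set_features contains
 * NETIF_F_HW_CSUM | NETIF_F_TSO, and the expression above keeps ethtool
 * from switching on user-visible offloads userspace did not request,
 * while passing non-TUN_USER_FEATURES bits through untouched.
 */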
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tun_poll_controller(struct net_device *dev)
{
	/*
	 * Tun only receives frames when:
	 * 1) the char device endpoint gets data from user space
	 * 2) the tun socket gets a sendmsg call from user space
	 * If NAPI is not enabled, since both of those are synchronous
	 * operations, we are guaranteed never to have pending data when we
	 * poll for it so there is nothing to do here but return.
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole.
	 * If NAPI is enabled, however, we need to schedule polling for all
	 * queues unless we are using napi_gro_frags(), which we call in
	 * process context and not in NAPI context.
	 */
	struct tun_struct *tun = netdev_priv(dev);

	if (tun->flags & IFF_NAPI) {
		struct tun_file *tfile;
		int i;

		if (tun_napi_frags_enabled(tun))
			return;

		rcu_read_lock();
		for (i = 0; i < tun->numqueues; i++) {
			tfile = rcu_dereference(tun->tfiles[i]);
			if (tfile->napi_enabled)
				napi_schedule(&tfile->napi);
		}
		rcu_read_unlock();
	}
	return;
}
#endif

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}

static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_pcpu_stats *p;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(tun->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets	= p->rx_packets;
			rxbytes		= p->rx_bytes;
			txpackets	= p->tx_packets;
			txbytes		= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;

		/* u32 counters */
		rx_dropped	+= p->rx_dropped;
		rx_frame_errors	+= p->rx_frame_errors;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped  = rx_dropped;
	stats->rx_frame_errors = rx_frame_errors;
	stats->tx_dropped = tx_dropped;
}

static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct bpf_prog *old_prog;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}

static u32 tun_xdp_query(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;

	xdp_prog = rtnl_dereference(tun->xdp_prog);
	if (xdp_prog)
		return xdp_prog->aux->id;

	return 0;
}

static int tun_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return tun_xdp_set(dev, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = tun_xdp_query(dev);
		xdp->prog_attached = !!xdp->prog_id;
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops tun_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
};
static const struct net_device_ops tap_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_xdp		= tun_xdp,
};

static void tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));
}

static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}

#define MIN_MTU 68
#define MAX_MTU 65535

/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		dev->netdev_ops = &tun_netdev_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		break;
Tsirkin case IFF_TAP: 12307a0a9608SKusanagi Kouichi dev->netdev_ops = &tap_netdev_ops; 12311da177e4SLinus Torvalds /* Ethernet TAP Device */ 12321da177e4SLinus Torvalds ether_setup(dev); 1233550fd08cSNeil Horman dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1234a676847bSstephen hemminger dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 123536226a8dSBrian Braunstein 1236f2cedb63SDanny Kukawka eth_hw_addr_random(dev); 123736226a8dSBrian Braunstein 12381da177e4SLinus Torvalds break; 12391da177e4SLinus Torvalds } 124091572088SJarod Wilson 124191572088SJarod Wilson dev->min_mtu = MIN_MTU; 124291572088SJarod Wilson dev->max_mtu = MAX_MTU - dev->hard_header_len; 12431da177e4SLinus Torvalds } 12441da177e4SLinus Torvalds 12451da177e4SLinus Torvalds /* Character device part */ 12461da177e4SLinus Torvalds 12471da177e4SLinus Torvalds /* Poll */ 12481da177e4SLinus Torvalds static unsigned int tun_chr_poll(struct file *file, poll_table *wait) 12491da177e4SLinus Torvalds { 1250b2430de3SEric W. Biederman struct tun_file *tfile = file->private_data; 12519484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 12523c8a9c63SMariusz Kozlowski struct sock *sk; 125333dccbb0SHerbert Xu unsigned int mask = 0; 12541da177e4SLinus Torvalds 12551da177e4SLinus Torvalds if (!tun) 1256eac9e902SEric W. Biederman return POLLERR; 12571da177e4SLinus Torvalds 125854f968d6SJason Wang sk = tfile->socket.sk; 12593c8a9c63SMariusz Kozlowski 12606b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_chr_poll\n"); 12611da177e4SLinus Torvalds 12629e641bdcSXi Wang poll_wait(file, sk_sleep(sk), wait); 12631da177e4SLinus Torvalds 12641576d986SJason Wang if (!skb_array_empty(&tfile->tx_array)) 12651da177e4SLinus Torvalds mask |= POLLIN | POLLRDNORM; 12661da177e4SLinus Torvalds 1267b20e2d54SHannes Frederic Sowa if (tun->dev->flags & IFF_UP && 1268b20e2d54SHannes Frederic Sowa (sock_writeable(sk) || 12699cd3e072SEric Dumazet (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 1270b20e2d54SHannes Frederic Sowa sock_writeable(sk)))) 127133dccbb0SHerbert Xu mask |= POLLOUT | POLLWRNORM; 127233dccbb0SHerbert Xu 1273c70f1829SEric W. Biederman if (tun->dev->reg_state != NETREG_REGISTERED) 1274c70f1829SEric W. Biederman mask = POLLERR; 1275c70f1829SEric W. Biederman 1276631ab46bSEric W. 
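/*
 * Note the double sock_writeable() test above: if the first test fails,
 * SOCKWQ_ASYNC_NOSPACE is set (arming the wakeup issued later by
 * tun_sock_write_space()) and the check is retried, so a socket that
 * drained between the two tests is not reported as blocked.
 */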
Biederman tun_put(tun); 12771da177e4SLinus Torvalds return mask; 12781da177e4SLinus Torvalds } 12791da177e4SLinus Torvalds 128090e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, 128190e33d45SPetar Penkov size_t len, 128290e33d45SPetar Penkov const struct iov_iter *it) 128390e33d45SPetar Penkov { 128490e33d45SPetar Penkov struct sk_buff *skb; 128590e33d45SPetar Penkov size_t linear; 128690e33d45SPetar Penkov int err; 128790e33d45SPetar Penkov int i; 128890e33d45SPetar Penkov 128990e33d45SPetar Penkov if (it->nr_segs > MAX_SKB_FRAGS + 1) 129090e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 129190e33d45SPetar Penkov 129290e33d45SPetar Penkov local_bh_disable(); 129390e33d45SPetar Penkov skb = napi_get_frags(&tfile->napi); 129490e33d45SPetar Penkov local_bh_enable(); 129590e33d45SPetar Penkov if (!skb) 129690e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 129790e33d45SPetar Penkov 129890e33d45SPetar Penkov linear = iov_iter_single_seg_count(it); 129990e33d45SPetar Penkov err = __skb_grow(skb, linear); 130090e33d45SPetar Penkov if (err) 130190e33d45SPetar Penkov goto free; 130290e33d45SPetar Penkov 130390e33d45SPetar Penkov skb->len = len; 130490e33d45SPetar Penkov skb->data_len = len - linear; 130590e33d45SPetar Penkov skb->truesize += skb->data_len; 130690e33d45SPetar Penkov 130790e33d45SPetar Penkov for (i = 1; i < it->nr_segs; i++) { 130890e33d45SPetar Penkov size_t fragsz = it->iov[i].iov_len; 130990e33d45SPetar Penkov unsigned long offset; 131090e33d45SPetar Penkov struct page *page; 131190e33d45SPetar Penkov void *data; 131290e33d45SPetar Penkov 131390e33d45SPetar Penkov if (fragsz == 0 || fragsz > PAGE_SIZE) { 131490e33d45SPetar Penkov err = -EINVAL; 131590e33d45SPetar Penkov goto free; 131690e33d45SPetar Penkov } 131790e33d45SPetar Penkov 131890e33d45SPetar Penkov local_bh_disable(); 131990e33d45SPetar Penkov data = napi_alloc_frag(fragsz); 132090e33d45SPetar Penkov local_bh_enable(); 132190e33d45SPetar Penkov if (!data) { 132290e33d45SPetar Penkov err = -ENOMEM; 132390e33d45SPetar Penkov goto free; 132490e33d45SPetar Penkov } 132590e33d45SPetar Penkov 132690e33d45SPetar Penkov page = virt_to_head_page(data); 132790e33d45SPetar Penkov offset = data - page_address(page); 132890e33d45SPetar Penkov skb_fill_page_desc(skb, i - 1, page, offset, fragsz); 132990e33d45SPetar Penkov } 133090e33d45SPetar Penkov 133190e33d45SPetar Penkov return skb; 133290e33d45SPetar Penkov free: 133390e33d45SPetar Penkov /* frees skb and all frags allocated with napi_alloc_frag() */ 133490e33d45SPetar Penkov napi_free_frags(&tfile->napi); 133590e33d45SPetar Penkov return ERR_PTR(err); 133690e33d45SPetar Penkov } 133790e33d45SPetar Penkov 1338f42157cbSRusty Russell /* prepad is the amount to reserve at front. len is length after that. 1339f42157cbSRusty Russell * linear is a hint as to how much to copy (usually headers). */ 134054f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, 134133dccbb0SHerbert Xu size_t prepad, size_t len, 134233dccbb0SHerbert Xu size_t linear, int noblock) 1343f42157cbSRusty Russell { 134454f968d6SJason Wang struct sock *sk = tfile->socket.sk; 1345f42157cbSRusty Russell struct sk_buff *skb; 134633dccbb0SHerbert Xu int err; 1347f42157cbSRusty Russell 1348f42157cbSRusty Russell /* Under a page? Don't bother with paged skb. 
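 * For those small packets we ask sock_alloc_send_pskb() below for a
 * fully linear skb (linear == len), avoiding page-frag overhead.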
*/ 13490eca93bcSHerbert Xu if (prepad + len < PAGE_SIZE || !linear) 135033dccbb0SHerbert Xu linear = len; 1351f42157cbSRusty Russell 135233dccbb0SHerbert Xu skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 135328d64271SEric Dumazet &err, 0); 1354f42157cbSRusty Russell if (!skb) 135533dccbb0SHerbert Xu return ERR_PTR(err); 1356f42157cbSRusty Russell 1357f42157cbSRusty Russell skb_reserve(skb, prepad); 1358f42157cbSRusty Russell skb_put(skb, linear); 135933dccbb0SHerbert Xu skb->data_len = len - linear; 136033dccbb0SHerbert Xu skb->len += len - linear; 1361f42157cbSRusty Russell 1362f42157cbSRusty Russell return skb; 1363f42157cbSRusty Russell } 1364f42157cbSRusty Russell 13655503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, 13665503fcecSJason Wang struct sk_buff *skb, int more) 13675503fcecSJason Wang { 13685503fcecSJason Wang struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 13695503fcecSJason Wang struct sk_buff_head process_queue; 13705503fcecSJason Wang u32 rx_batched = tun->rx_batched; 13715503fcecSJason Wang bool rcv = false; 13725503fcecSJason Wang 13735503fcecSJason Wang if (!rx_batched || (!more && skb_queue_empty(queue))) { 13745503fcecSJason Wang local_bh_disable(); 13755503fcecSJason Wang netif_receive_skb(skb); 13765503fcecSJason Wang local_bh_enable(); 13775503fcecSJason Wang return; 13785503fcecSJason Wang } 13795503fcecSJason Wang 13805503fcecSJason Wang spin_lock(&queue->lock); 13815503fcecSJason Wang if (!more || skb_queue_len(queue) == rx_batched) { 13825503fcecSJason Wang __skb_queue_head_init(&process_queue); 13835503fcecSJason Wang skb_queue_splice_tail_init(queue, &process_queue); 13845503fcecSJason Wang rcv = true; 13855503fcecSJason Wang } else { 13865503fcecSJason Wang __skb_queue_tail(queue, skb); 13875503fcecSJason Wang } 13885503fcecSJason Wang spin_unlock(&queue->lock); 13895503fcecSJason Wang 13905503fcecSJason Wang if (rcv) { 13915503fcecSJason Wang struct sk_buff *nskb; 13925503fcecSJason Wang 13935503fcecSJason Wang local_bh_disable(); 13945503fcecSJason Wang while ((nskb = __skb_dequeue(&process_queue))) 13955503fcecSJason Wang netif_receive_skb(nskb); 13965503fcecSJason Wang netif_receive_skb(skb); 13975503fcecSJason Wang local_bh_enable(); 13985503fcecSJason Wang } 13995503fcecSJason Wang } 14005503fcecSJason Wang 140166ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, 140266ccbc9cSJason Wang int len, int noblock, bool zerocopy) 140366ccbc9cSJason Wang { 140466ccbc9cSJason Wang if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 140566ccbc9cSJason Wang return false; 140666ccbc9cSJason Wang 140766ccbc9cSJason Wang if (tfile->socket.sk->sk_sndbuf != INT_MAX) 140866ccbc9cSJason Wang return false; 140966ccbc9cSJason Wang 141066ccbc9cSJason Wang if (!noblock) 141166ccbc9cSJason Wang return false; 141266ccbc9cSJason Wang 141366ccbc9cSJason Wang if (zerocopy) 141466ccbc9cSJason Wang return false; 141566ccbc9cSJason Wang 141666ccbc9cSJason Wang if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + 141766ccbc9cSJason Wang SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) 141866ccbc9cSJason Wang return false; 141966ccbc9cSJason Wang 142066ccbc9cSJason Wang return true; 142166ccbc9cSJason Wang } 142266ccbc9cSJason Wang 1423761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun, 1424761876c8SJason Wang struct tun_file *tfile, 142566ccbc9cSJason Wang struct iov_iter *from, 1426761876c8SJason Wang struct virtio_net_hdr *hdr, 
14271cfe6e93SJason Wang int len, int *skb_xdp) 142866ccbc9cSJason Wang { 14290bbd7dadSEric Dumazet struct page_frag *alloc_frag = &current->task_frag; 143066ccbc9cSJason Wang struct sk_buff *skb; 1431761876c8SJason Wang struct bpf_prog *xdp_prog; 14327df13219SJason Wang int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1433761876c8SJason Wang unsigned int delta = 0; 143466ccbc9cSJason Wang char *buf; 143566ccbc9cSJason Wang size_t copied; 1436761876c8SJason Wang bool xdp_xmit = false; 14377df13219SJason Wang int err, pad = TUN_RX_PAD; 14387df13219SJason Wang 14397df13219SJason Wang rcu_read_lock(); 14407df13219SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 14417df13219SJason Wang if (xdp_prog) 14427df13219SJason Wang pad += TUN_HEADROOM; 14437df13219SJason Wang buflen += SKB_DATA_ALIGN(len + pad); 14447df13219SJason Wang rcu_read_unlock(); 144566ccbc9cSJason Wang 144666ccbc9cSJason Wang if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) 144766ccbc9cSJason Wang return ERR_PTR(-ENOMEM); 144866ccbc9cSJason Wang 144966ccbc9cSJason Wang buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 145066ccbc9cSJason Wang copied = copy_page_from_iter(alloc_frag->page, 14517df13219SJason Wang alloc_frag->offset + pad, 145266ccbc9cSJason Wang len, from); 145366ccbc9cSJason Wang if (copied != len) 145466ccbc9cSJason Wang return ERR_PTR(-EFAULT); 145566ccbc9cSJason Wang 14567df13219SJason Wang /* There's a small window where XDP may be attached after the check 14577df13219SJason Wang * of xdp_prog above; this should be rare, and for simplicity we 14587df13219SJason Wang * fall back to XDP on the skb when the headroom is not enough. 14597df13219SJason Wang */ 14607df13219SJason Wang if (hdr->gso_type || !xdp_prog) 14611cfe6e93SJason Wang *skb_xdp = 1; 1462761876c8SJason Wang else 14631cfe6e93SJason Wang *skb_xdp = 0; 146466ccbc9cSJason Wang 1465761876c8SJason Wang rcu_read_lock(); 1466761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 14671cfe6e93SJason Wang if (xdp_prog && !*skb_xdp) { 1468761876c8SJason Wang struct xdp_buff xdp; 1469761876c8SJason Wang void *orig_data; 1470761876c8SJason Wang u32 act; 1471761876c8SJason Wang 1472761876c8SJason Wang xdp.data_hard_start = buf; 14737df13219SJason Wang xdp.data = buf + pad; 1474de8f3a83SDaniel Borkmann xdp_set_data_meta_invalid(&xdp); 1475761876c8SJason Wang xdp.data_end = xdp.data + len; 1476761876c8SJason Wang orig_data = xdp.data; 1477761876c8SJason Wang act = bpf_prog_run_xdp(xdp_prog, &xdp); 1478761876c8SJason Wang 1479761876c8SJason Wang switch (act) { 1480761876c8SJason Wang case XDP_REDIRECT: 1481761876c8SJason Wang get_page(alloc_frag->page); 1482761876c8SJason Wang alloc_frag->offset += buflen; 1483761876c8SJason Wang err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); 1484761876c8SJason Wang if (err) 1485761876c8SJason Wang goto err_redirect; 1486761876c8SJason Wang return NULL; 1487761876c8SJason Wang case XDP_TX: 1488761876c8SJason Wang xdp_xmit = true; 1489761876c8SJason Wang /* fall through */ 1490761876c8SJason Wang case XDP_PASS: 1491761876c8SJason Wang delta = orig_data - xdp.data; 1492761876c8SJason Wang break; 1493761876c8SJason Wang default: 1494761876c8SJason Wang bpf_warn_invalid_xdp_action(act); 1495761876c8SJason Wang /* fall through */ 1496761876c8SJason Wang case XDP_ABORTED: 1497761876c8SJason Wang trace_xdp_exception(tun->dev, xdp_prog, act); 1498761876c8SJason Wang /* fall through */ 1499761876c8SJason Wang case XDP_DROP: 1500761876c8SJason Wang goto err_xdp; 1501761876c8SJason Wang }
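/*
 * Verdict handling above: XDP_REDIRECT takes a page reference and hands
 * the buffer to xdp_do_redirect(); XDP_TX defers to generic_xdp_tx()
 * once an skb has been built; XDP_PASS records in `delta` how far the
 * program moved xdp.data, so that the skb_reserve()/skb_put() calls
 * below stay consistent with the adjusted packet.
 */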
1502761876c8SJason Wang } 1503761876c8SJason Wang 1504761876c8SJason Wang skb = build_skb(buf, buflen); 1505761876c8SJason Wang if (!skb) { 1506761876c8SJason Wang rcu_read_unlock(); 1507761876c8SJason Wang return ERR_PTR(-ENOMEM); 1508761876c8SJason Wang } 1509761876c8SJason Wang 15107df13219SJason Wang skb_reserve(skb, pad - delta); 1511761876c8SJason Wang skb_put(skb, len + delta); 151266ccbc9cSJason Wang get_page(alloc_frag->page); 151366ccbc9cSJason Wang alloc_frag->offset += buflen; 151466ccbc9cSJason Wang 1515761876c8SJason Wang if (xdp_xmit) { 1516761876c8SJason Wang skb->dev = tun->dev; 1517761876c8SJason Wang generic_xdp_tx(skb, xdp_prog); 1518761876c8SJason Wang rcu_read_lock(); 1519761876c8SJason Wang return NULL; 1520761876c8SJason Wang } 1521761876c8SJason Wang 1522761876c8SJason Wang rcu_read_unlock(); 1523761876c8SJason Wang 152466ccbc9cSJason Wang return skb; 1525761876c8SJason Wang 1526761876c8SJason Wang err_redirect: 1527761876c8SJason Wang put_page(alloc_frag->page); 1528761876c8SJason Wang err_xdp: 1529761876c8SJason Wang rcu_read_unlock(); 1530761876c8SJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped); 1531761876c8SJason Wang return NULL; 153266ccbc9cSJason Wang } 153366ccbc9cSJason Wang 15341da177e4SLinus Torvalds /* Get packet from user space buffer */ 153554f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, 1536f5ff53b4SAl Viro void *msg_control, struct iov_iter *from, 15375503fcecSJason Wang int noblock, bool more) 15381da177e4SLinus Torvalds { 153909640e63SHarvey Harrison struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; 15401da177e4SLinus Torvalds struct sk_buff *skb; 1541f5ff53b4SAl Viro size_t total_len = iov_iter_count(from); 1542eaea34b2SPaolo Abeni size_t len = total_len, align = tun->align, linear; 1543f43798c2SRusty Russell struct virtio_net_hdr gso = { 0 }; 1544608b9977SPaolo Abeni struct tun_pcpu_stats *stats; 154596f8d9ecSJason Wang int good_linear; 15460690899bSMichael S. Tsirkin int copylen; 15470690899bSMichael S. Tsirkin bool zerocopy = false; 15480690899bSMichael S. Tsirkin int err; 154949974420SEric Dumazet u32 rxhash; 15501cfe6e93SJason Wang int skb_xdp = 1; 155190e33d45SPetar Penkov bool frags = tun_napi_frags_enabled(tun); 15521da177e4SLinus Torvalds 15531bd4978aSEric Dumazet if (!(tun->dev->flags & IFF_UP)) 15541bd4978aSEric Dumazet return -EIO; 15551bd4978aSEric Dumazet 155640630b82SMichael S. Tsirkin if (!(tun->flags & IFF_NO_PI)) { 155715718ea0SDan Carpenter if (len < sizeof(pi)) 15581da177e4SLinus Torvalds return -EINVAL; 155915718ea0SDan Carpenter len -= sizeof(pi); 15601da177e4SLinus Torvalds 1561cbbd26b8SAl Viro if (!copy_from_iter_full(&pi, sizeof(pi), from)) 15621da177e4SLinus Torvalds return -EFAULT; 15631da177e4SLinus Torvalds } 15641da177e4SLinus Torvalds 156540630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR) { 1566e1edab87SWillem de Bruijn int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 1567e1edab87SWillem de Bruijn 1568e1edab87SWillem de Bruijn if (len < vnet_hdr_sz) 1569f43798c2SRusty Russell return -EINVAL; 1570e1edab87SWillem de Bruijn len -= vnet_hdr_sz; 1571f43798c2SRusty Russell 1572cbbd26b8SAl Viro if (!copy_from_iter_full(&gso, sizeof(gso), from)) 1573f43798c2SRusty Russell return -EFAULT; 1574f43798c2SRusty Russell 15754909122fSHerbert Xu if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 157656f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len)) 157756f0dcc5SMichael S. 
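/*
 * The header requests checksum offload: make hdr_len large enough to
 * cover the 2-byte checksum field that will be written at
 * csum_start + csum_offset.
 */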
Tsirkin gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2); 15784909122fSHerbert Xu 157956f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > len) 1580f43798c2SRusty Russell return -EINVAL; 1581e1edab87SWillem de Bruijn iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); 1582f43798c2SRusty Russell } 1583f43798c2SRusty Russell 158440630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { 1585a504b86eSstephen hemminger align += NET_IP_ALIGN; 15860eca93bcSHerbert Xu if (unlikely(len < ETH_HLEN || 158756f0dcc5SMichael S. Tsirkin (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN))) 1588e01bf1c8SRusty Russell return -EINVAL; 1589e01bf1c8SRusty Russell } 15901da177e4SLinus Torvalds 159196f8d9ecSJason Wang good_linear = SKB_MAX_HEAD(align); 159296f8d9ecSJason Wang 159388529176SJason Wang if (msg_control) { 1594f5ff53b4SAl Viro struct iov_iter i = *from; 1595f5ff53b4SAl Viro 159688529176SJason Wang /* There are 256 bytes to be copied into the skb, so there is 159788529176SJason Wang * enough room to expand the skb head in case it is needed. 15980690899bSMichael S. Tsirkin * The rest of the buffer is mapped from userspace. 15990690899bSMichael S. Tsirkin */ 160056f0dcc5SMichael S. Tsirkin copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN; 160196f8d9ecSJason Wang if (copylen > good_linear) 160296f8d9ecSJason Wang copylen = good_linear; 16033dd5c330SJason Wang linear = copylen; 1604f5ff53b4SAl Viro iov_iter_advance(&i, copylen); 1605f5ff53b4SAl Viro if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) 160688529176SJason Wang zerocopy = true; 160788529176SJason Wang } 160888529176SJason Wang 160990e33d45SPetar Penkov if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { 16101cfe6e93SJason Wang /* For packets that are not easy to process here 16111cfe6e93SJason Wang * (e.g. GSO or jumbo packets), we do XDP after the 16121cfe6e93SJason Wang * skb has been created, via the generic XDP routine. 16131cfe6e93SJason Wang */ 16141cfe6e93SJason Wang skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); 161566ccbc9cSJason Wang if (IS_ERR(skb)) { 161666ccbc9cSJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped); 161766ccbc9cSJason Wang return PTR_ERR(skb); 161866ccbc9cSJason Wang } 1619761876c8SJason Wang if (!skb) 1620761876c8SJason Wang return total_len; 162166ccbc9cSJason Wang } else { 162288529176SJason Wang if (!zerocopy) { 16230690899bSMichael S. Tsirkin copylen = len; 162456f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) 162596f8d9ecSJason Wang linear = good_linear; 162696f8d9ecSJason Wang else 162756f0dcc5SMichael S. Tsirkin linear = tun16_to_cpu(tun, gso.hdr_len); 16283dd5c330SJason Wang } 16290690899bSMichael S. Tsirkin 163090e33d45SPetar Penkov if (frags) { 163190e33d45SPetar Penkov mutex_lock(&tfile->napi_mutex); 163290e33d45SPetar Penkov skb = tun_napi_alloc_frags(tfile, copylen, from); 163390e33d45SPetar Penkov /* tun_napi_alloc_frags() enforces a layout for the skb. 163490e33d45SPetar Penkov * If zerocopy is enabled, then this layout will be 163590e33d45SPetar Penkov * overwritten by zerocopy_sg_from_iter().
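 * Rather than fight that, zerocopy is simply disabled for the
 * IFF_NAPI_FRAGS path just below.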
163690e33d45SPetar Penkov */ 163790e33d45SPetar Penkov zerocopy = false; 163890e33d45SPetar Penkov } else { 163990e33d45SPetar Penkov skb = tun_alloc_skb(tfile, align, copylen, linear, 164090e33d45SPetar Penkov noblock); 164190e33d45SPetar Penkov } 164290e33d45SPetar Penkov 164333dccbb0SHerbert Xu if (IS_ERR(skb)) { 164433dccbb0SHerbert Xu if (PTR_ERR(skb) != -EAGAIN) 1645608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 164690e33d45SPetar Penkov if (frags) 164790e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 164833dccbb0SHerbert Xu return PTR_ERR(skb); 16491da177e4SLinus Torvalds } 16501da177e4SLinus Torvalds 16510690899bSMichael S. Tsirkin if (zerocopy) 1652f5ff53b4SAl Viro err = zerocopy_sg_from_iter(skb, from); 1653af1cc7a2SJason Wang else 1654f5ff53b4SAl Viro err = skb_copy_datagram_from_iter(skb, 0, from, len); 16550690899bSMichael S. Tsirkin 16560690899bSMichael S. Tsirkin if (err) { 1657608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 16588f22757eSDave Jones kfree_skb(skb); 165990e33d45SPetar Penkov if (frags) { 166090e33d45SPetar Penkov tfile->napi.skb = NULL; 166190e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 166290e33d45SPetar Penkov } 166390e33d45SPetar Penkov 16641da177e4SLinus Torvalds return -EFAULT; 16658f22757eSDave Jones } 166666ccbc9cSJason Wang } 16671da177e4SLinus Torvalds 16683e9e40e7SJarno Rajahalme if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { 1669df10db98SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_frame_errors); 1670df10db98SPaolo Abeni kfree_skb(skb); 167190e33d45SPetar Penkov if (frags) { 167290e33d45SPetar Penkov tfile->napi.skb = NULL; 167390e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 167490e33d45SPetar Penkov } 167590e33d45SPetar Penkov 1676df10db98SPaolo Abeni return -EINVAL; 1677df10db98SPaolo Abeni } 1678df10db98SPaolo Abeni 16791da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 168040630b82SMichael S. Tsirkin case IFF_TUN: 168140630b82SMichael S. Tsirkin if (tun->flags & IFF_NO_PI) { 16822580c4c1SAlexander Potapenko u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; 16832580c4c1SAlexander Potapenko 16842580c4c1SAlexander Potapenko switch (ip_version) { 16852580c4c1SAlexander Potapenko case 4: 1686f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IP); 1687f09f7ee2SAng Way Chuang break; 16882580c4c1SAlexander Potapenko case 6: 1689f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IPV6); 1690f09f7ee2SAng Way Chuang break; 1691f09f7ee2SAng Way Chuang default: 1692608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 1693f09f7ee2SAng Way Chuang kfree_skb(skb); 1694f09f7ee2SAng Way Chuang return -EINVAL; 1695f09f7ee2SAng Way Chuang } 1696f09f7ee2SAng Way Chuang } 1697f09f7ee2SAng Way Chuang 1698459a98edSArnaldo Carvalho de Melo skb_reset_mac_header(skb); 16991da177e4SLinus Torvalds skb->protocol = pi.proto; 17004c13eb66SArnaldo Carvalho de Melo skb->dev = tun->dev; 17011da177e4SLinus Torvalds break; 170240630b82SMichael S. Tsirkin case IFF_TAP: 170390e33d45SPetar Penkov if (!frags) 17041da177e4SLinus Torvalds skb->protocol = eth_type_trans(skb, tun->dev); 17051da177e4SLinus Torvalds break; 17066403eab1SJoe Perches } 17071da177e4SLinus Torvalds 17080690899bSMichael S. Tsirkin /* copy skb_ubuf_info for callback when skb has no error */ 17090690899bSMichael S. Tsirkin if (zerocopy) { 17100690899bSMichael S. Tsirkin skb_shinfo(skb)->destructor_arg = msg_control; 17110690899bSMichael S. 
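/*
 * msg_control is a struct ubuf_info supplied by the sender (typically
 * vhost-net); its callback releases the pinned userspace pages once the
 * skb is truly consumed. When zerocopy was abandoned, the callback is
 * invoked immediately in the else branch below.
 */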
Tsirkin skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1712c9af6db4SPravin B Shelar skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1713af1cc7a2SJason Wang } else if (msg_control) { 1714af1cc7a2SJason Wang struct ubuf_info *uarg = msg_control; 1715af1cc7a2SJason Wang uarg->callback(uarg, false); 17160690899bSMichael S. Tsirkin } 17170690899bSMichael S. Tsirkin 171872f65107SVlad Yasevich skb_reset_network_header(skb); 171940893fd0SJason Wang skb_probe_transport_header(skb, 0); 172038502af7SJason Wang 17211cfe6e93SJason Wang if (skb_xdp) { 1722761876c8SJason Wang struct bpf_prog *xdp_prog; 1723761876c8SJason Wang int ret; 1724761876c8SJason Wang 1725761876c8SJason Wang rcu_read_lock(); 1726761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 1727761876c8SJason Wang if (xdp_prog) { 1728761876c8SJason Wang ret = do_xdp_generic(xdp_prog, skb); 1729761876c8SJason Wang if (ret != XDP_PASS) { 1730761876c8SJason Wang rcu_read_unlock(); 1731761876c8SJason Wang return total_len; 1732761876c8SJason Wang } 1733761876c8SJason Wang } 1734761876c8SJason Wang rcu_read_unlock(); 1735761876c8SJason Wang } 1736761876c8SJason Wang 1737feec084aSJason Wang rxhash = __skb_get_hash_symmetric(skb); 173894317099SPetar Penkov 173990e33d45SPetar Penkov if (frags) { 174090e33d45SPetar Penkov /* Exercise flow dissector code path. */ 174190e33d45SPetar Penkov u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb)); 174290e33d45SPetar Penkov 1743010f245bSEric Dumazet if (unlikely(headlen > skb_headlen(skb))) { 174490e33d45SPetar Penkov this_cpu_inc(tun->pcpu_stats->rx_dropped); 174590e33d45SPetar Penkov napi_free_frags(&tfile->napi); 174690e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 174790e33d45SPetar Penkov WARN_ON(1); 174890e33d45SPetar Penkov return -ENOMEM; 174990e33d45SPetar Penkov } 175090e33d45SPetar Penkov 175190e33d45SPetar Penkov local_bh_disable(); 175290e33d45SPetar Penkov napi_gro_frags(&tfile->napi); 175390e33d45SPetar Penkov local_bh_enable(); 175490e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 1755aec72f33SEric Dumazet } else if (tfile->napi_enabled) { 175694317099SPetar Penkov struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 175794317099SPetar Penkov int queue_len; 175894317099SPetar Penkov 175994317099SPetar Penkov spin_lock_bh(&queue->lock); 176094317099SPetar Penkov __skb_queue_tail(queue, skb); 176194317099SPetar Penkov queue_len = skb_queue_len(queue); 176294317099SPetar Penkov spin_unlock(&queue->lock); 176394317099SPetar Penkov 176494317099SPetar Penkov if (!more || queue_len > NAPI_POLL_WEIGHT) 176594317099SPetar Penkov napi_schedule(&tfile->napi); 176694317099SPetar Penkov 176794317099SPetar Penkov local_bh_enable(); 176894317099SPetar Penkov } else if (!IS_ENABLED(CONFIG_4KSTACKS)) { 17695503fcecSJason Wang tun_rx_batched(tun, tfile, skb, more); 177094317099SPetar Penkov } else { 17711da177e4SLinus Torvalds netif_rx_ni(skb); 177294317099SPetar Penkov } 17731da177e4SLinus Torvalds 1774608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats); 1775608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp); 1776608b9977SPaolo Abeni stats->rx_packets++; 1777608b9977SPaolo Abeni stats->rx_bytes += len; 1778608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp); 1779608b9977SPaolo Abeni put_cpu_ptr(stats); 17801da177e4SLinus Torvalds 17819e85722dSJason Wang tun_flow_update(tun, rxhash, tfile); 17820690899bSMichael S. 
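/*
 * At this point the packet has been handed to the stack through one of
 * four paths: napi_gro_frags() for IFF_NAPI_FRAGS, the per-queue NAPI
 * backlog, tun_rx_batched(), or plain netif_rx_ni(); the stats and the
 * receive flow table have been updated accordingly.
 */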
Tsirkin return total_len; 17831da177e4SLinus Torvalds } 17841da177e4SLinus Torvalds 1785f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) 17861da177e4SLinus Torvalds { 178733dccbb0SHerbert Xu struct file *file = iocb->ki_filp; 178854f968d6SJason Wang struct tun_file *tfile = file->private_data; 17899484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 1790631ab46bSEric W. Biederman ssize_t result; 17911da177e4SLinus Torvalds 17921da177e4SLinus Torvalds if (!tun) 17931da177e4SLinus Torvalds return -EBADFD; 17941da177e4SLinus Torvalds 17955503fcecSJason Wang result = tun_get_user(tun, tfile, NULL, from, 17965503fcecSJason Wang file->f_flags & O_NONBLOCK, false); 1797631ab46bSEric W. Biederman 1798631ab46bSEric W. Biederman tun_put(tun); 1799631ab46bSEric W. Biederman return result; 18001da177e4SLinus Torvalds } 18011da177e4SLinus Torvalds 18021da177e4SLinus Torvalds /* Put packet into the user space buffer */ 18036f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun, 180454f968d6SJason Wang struct tun_file *tfile, 18051da177e4SLinus Torvalds struct sk_buff *skb, 1806e0b46d0eSHerbert Xu struct iov_iter *iter) 18071da177e4SLinus Torvalds { 18081da177e4SLinus Torvalds struct tun_pi pi = { 0, skb->protocol }; 1809608b9977SPaolo Abeni struct tun_pcpu_stats *stats; 1810e0b46d0eSHerbert Xu ssize_t total; 18118c847d25SJason Wang int vlan_offset = 0; 1812a8f9bfdfSHerbert Xu int vlan_hlen = 0; 18132eb783c4SHerbert Xu int vnet_hdr_sz = 0; 1814a8f9bfdfSHerbert Xu 1815df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) 1816a8f9bfdfSHerbert Xu vlan_hlen = VLAN_HLEN; 18171da177e4SLinus Torvalds 181840630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR) 1819e1edab87SWillem de Bruijn vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 18201da177e4SLinus Torvalds 1821e0b46d0eSHerbert Xu total = skb->len + vlan_hlen + vnet_hdr_sz; 1822e0b46d0eSHerbert Xu 182340630b82SMichael S. Tsirkin if (!(tun->flags & IFF_NO_PI)) { 1824e0b46d0eSHerbert Xu if (iov_iter_count(iter) < sizeof(pi)) 18251da177e4SLinus Torvalds return -EINVAL; 18261da177e4SLinus Torvalds 1827e0b46d0eSHerbert Xu total += sizeof(pi); 1828e0b46d0eSHerbert Xu if (iov_iter_count(iter) < total) { 18291da177e4SLinus Torvalds /* Packet will be stripped */ 18301da177e4SLinus Torvalds pi.flags |= TUN_PKT_STRIP; 18311da177e4SLinus Torvalds } 18321da177e4SLinus Torvalds 1833e0b46d0eSHerbert Xu if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi)) 18341da177e4SLinus Torvalds return -EFAULT; 18351da177e4SLinus Torvalds } 18361da177e4SLinus Torvalds 18372eb783c4SHerbert Xu if (vnet_hdr_sz) { 18389403cd7cSJarno Rajahalme struct virtio_net_hdr gso; 183934166093SMike Rapoport 1840e0b46d0eSHerbert Xu if (iov_iter_count(iter) < vnet_hdr_sz) 1841f43798c2SRusty Russell return -EINVAL; 1842f43798c2SRusty Russell 18433e9e40e7SJarno Rajahalme if (virtio_net_hdr_from_skb(skb, &gso, 18446391a448SJason Wang tun_is_little_endian(tun), true)) { 1845f43798c2SRusty Russell struct skb_shared_info *sinfo = skb_shinfo(skb); 18466b8a66eeSJoe Perches pr_err("unexpected GSO type: " 1847ef3db4a5SMichael S. Tsirkin "0x%x, gso_size %d, hdr_len %d\n", 184856f0dcc5SMichael S. Tsirkin sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), 184956f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.hdr_len)); 1850ef3db4a5SMichael S. Tsirkin print_hex_dump(KERN_ERR, "tun: ", 1851ef3db4a5SMichael S. Tsirkin DUMP_PREFIX_NONE, 1852ef3db4a5SMichael S. Tsirkin 16, 1, skb->head, 185356f0dcc5SMichael S.
Tsirkin min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); 1854ef3db4a5SMichael S. Tsirkin WARN_ON_ONCE(1); 1855ef3db4a5SMichael S. Tsirkin return -EINVAL; 1856ef3db4a5SMichael S. Tsirkin } 1857f43798c2SRusty Russell 1858e0b46d0eSHerbert Xu if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) 1859f43798c2SRusty Russell return -EFAULT; 18608c847d25SJason Wang 18618c847d25SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 1862f43798c2SRusty Russell } 1863f43798c2SRusty Russell 1864a8f9bfdfSHerbert Xu if (vlan_hlen) { 1865e0b46d0eSHerbert Xu int ret; 18666680ec68SJason Wang struct { 18676680ec68SJason Wang __be16 h_vlan_proto; 18686680ec68SJason Wang __be16 h_vlan_TCI; 18696680ec68SJason Wang } veth; 18701da177e4SLinus Torvalds 18716680ec68SJason Wang veth.h_vlan_proto = skb->vlan_proto; 1872df8a39deSJiri Pirko veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); 18731da177e4SLinus Torvalds 18746680ec68SJason Wang vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 18756680ec68SJason Wang 1876e0b46d0eSHerbert Xu ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); 1877e0b46d0eSHerbert Xu if (ret || !iov_iter_count(iter)) 18786680ec68SJason Wang goto done; 18796680ec68SJason Wang 1880e0b46d0eSHerbert Xu ret = copy_to_iter(&veth, sizeof(veth), iter); 1881e0b46d0eSHerbert Xu if (ret != sizeof(veth) || !iov_iter_count(iter)) 18826680ec68SJason Wang goto done; 18836680ec68SJason Wang } 18846680ec68SJason Wang 1885e0b46d0eSHerbert Xu skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); 18866680ec68SJason Wang 18876680ec68SJason Wang done: 1888608b9977SPaolo Abeni /* caller is in process context, */ 1889608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats); 1890608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp); 1891608b9977SPaolo Abeni stats->tx_packets++; 1892608b9977SPaolo Abeni stats->tx_bytes += skb->len + vlan_hlen; 1893608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp); 1894608b9977SPaolo Abeni put_cpu_ptr(tun->pcpu_stats); 18951da177e4SLinus Torvalds 18961da177e4SLinus Torvalds return total; 18971da177e4SLinus Torvalds } 18981da177e4SLinus Torvalds 18991576d986SJason Wang static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock, 19001576d986SJason Wang int *err) 19011576d986SJason Wang { 19021576d986SJason Wang DECLARE_WAITQUEUE(wait, current); 19031576d986SJason Wang struct sk_buff *skb = NULL; 1904f48cc6b2SJason Wang int error = 0; 19051576d986SJason Wang 19061576d986SJason Wang skb = skb_array_consume(&tfile->tx_array); 19071576d986SJason Wang if (skb) 19081576d986SJason Wang goto out; 19091576d986SJason Wang if (noblock) { 1910f48cc6b2SJason Wang error = -EAGAIN; 19111576d986SJason Wang goto out; 19121576d986SJason Wang } 19131576d986SJason Wang 19141576d986SJason Wang add_wait_queue(&tfile->wq.wait, &wait); 19151576d986SJason Wang current->state = TASK_INTERRUPTIBLE; 19161576d986SJason Wang 19171576d986SJason Wang while (1) { 19181576d986SJason Wang skb = skb_array_consume(&tfile->tx_array); 19191576d986SJason Wang if (skb) 19201576d986SJason Wang break; 19211576d986SJason Wang if (signal_pending(current)) { 1922f48cc6b2SJason Wang error = -ERESTARTSYS; 19231576d986SJason Wang break; 19241576d986SJason Wang } 19251576d986SJason Wang if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { 1926f48cc6b2SJason Wang error = -EFAULT; 19271576d986SJason Wang break; 19281576d986SJason Wang } 19291576d986SJason Wang 19301576d986SJason Wang schedule(); 19311576d986SJason Wang } 19321576d986SJason Wang 19331576d986SJason Wang 
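/*
 * Standard wait-loop epilogue: the task state is reset and the wait
 * queue entry removed regardless of whether the loop produced an skb,
 * was interrupted by a signal (-ERESTARTSYS), or saw the socket shut
 * down (-EFAULT).
 */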
current->state = TASK_RUNNING; 19341576d986SJason Wang remove_wait_queue(&tfile->wq.wait, &wait); 19351576d986SJason Wang 19361576d986SJason Wang out: 1937f48cc6b2SJason Wang *err = error; 19381576d986SJason Wang return skb; 19391576d986SJason Wang } 19401576d986SJason Wang 194154f968d6SJason Wang static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, 19429b067034SAl Viro struct iov_iter *to, 1943ac77cfd4SJason Wang int noblock, struct sk_buff *skb) 19441da177e4SLinus Torvalds { 19459b067034SAl Viro ssize_t ret; 19461576d986SJason Wang int err; 19471da177e4SLinus Torvalds 19483872baf6SRami Rosen tun_debug(KERN_INFO, tun, "tun_do_read\n"); 19491da177e4SLinus Torvalds 19509b067034SAl Viro if (!iov_iter_count(to)) 19519b067034SAl Viro return 0; 19521da177e4SLinus Torvalds 1953ac77cfd4SJason Wang if (!skb) { 19541576d986SJason Wang /* Read frames from ring */ 19551576d986SJason Wang skb = tun_ring_recv(tfile, noblock, &err); 1956e0b46d0eSHerbert Xu if (!skb) 1957957f094fSAlex Gartrell return err; 1958ac77cfd4SJason Wang } 1959e0b46d0eSHerbert Xu 19609b067034SAl Viro ret = tun_put_user(tun, tfile, skb, to); 1961f51a5e82SJason Wang if (unlikely(ret < 0)) 19621da177e4SLinus Torvalds kfree_skb(skb); 1963f51a5e82SJason Wang else 1964f51a5e82SJason Wang consume_skb(skb); 19651da177e4SLinus Torvalds 196605c2828cSMichael S. Tsirkin return ret; 196705c2828cSMichael S. Tsirkin } 196805c2828cSMichael S. Tsirkin 19699b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) 197005c2828cSMichael S. Tsirkin { 197105c2828cSMichael S. Tsirkin struct file *file = iocb->ki_filp; 197205c2828cSMichael S. Tsirkin struct tun_file *tfile = file->private_data; 19739484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 19749b067034SAl Viro ssize_t len = iov_iter_count(to), ret; 197505c2828cSMichael S. Tsirkin 197605c2828cSMichael S. Tsirkin if (!tun) 197705c2828cSMichael S. Tsirkin return -EBADFD; 1978ac77cfd4SJason Wang ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL); 197942404c09SDavid S. Miller ret = min_t(ssize_t, ret, len); 1980d0b7da8aSZhi Yong Wu if (ret > 0) 1981d0b7da8aSZhi Yong Wu iocb->ki_pos = ret; 1982631ab46bSEric W. Biederman tun_put(tun); 19831da177e4SLinus Torvalds return ret; 19841da177e4SLinus Torvalds } 19851da177e4SLinus Torvalds 198696442e42SJason Wang static void tun_free_netdev(struct net_device *dev) 198796442e42SJason Wang { 198896442e42SJason Wang struct tun_struct *tun = netdev_priv(dev); 198996442e42SJason Wang 19904008e97fSJason Wang BUG_ON(!(list_empty(&tun->disabled))); 1991608b9977SPaolo Abeni free_percpu(tun->pcpu_stats); 199296442e42SJason Wang tun_flow_uninit(tun); 19935dbbaf2dSPaul Moore security_tun_dev_free_security(tun->security); 199496442e42SJason Wang } 199596442e42SJason Wang 19961da177e4SLinus Torvalds static void tun_setup(struct net_device *dev) 19971da177e4SLinus Torvalds { 19981da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 19991da177e4SLinus Torvalds 20000625c883SEric W. Biederman tun->owner = INVALID_UID; 20010625c883SEric W. Biederman tun->group = INVALID_GID; 20021da177e4SLinus Torvalds 20031da177e4SLinus Torvalds dev->ethtool_ops = &tun_ethtool_ops; 2004cf124db5SDavid S. Miller dev->needs_free_netdev = true; 2005cf124db5SDavid S. 
Miller dev->priv_destructor = tun_free_netdev; 2006016adb72SJason Wang /* We prefer our own queue length */ 2007016adb72SJason Wang dev->tx_queue_len = TUN_READQ_SIZE; 20081da177e4SLinus Torvalds } 20091da177e4SLinus Torvalds 2010f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting tun or tap 2011f019a7a5SEric W. Biederman * device with netlink. 2012f019a7a5SEric W. Biederman */ 2013a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[], 2014a8b8a889SMatthias Schiffer struct netlink_ext_ack *extack) 2015f019a7a5SEric W. Biederman { 2016f019a7a5SEric W. Biederman return -EINVAL; 2017f019a7a5SEric W. Biederman } 2018f019a7a5SEric W. Biederman 2019f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = { 2020f019a7a5SEric W. Biederman .kind = DRV_NAME, 2021f019a7a5SEric W. Biederman .priv_size = sizeof(struct tun_struct), 2022f019a7a5SEric W. Biederman .setup = tun_setup, 2023f019a7a5SEric W. Biederman .validate = tun_validate, 2024f019a7a5SEric W. Biederman }; 2025f019a7a5SEric W. Biederman 202633dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk) 202733dccbb0SHerbert Xu { 202854f968d6SJason Wang struct tun_file *tfile; 202943815482SEric Dumazet wait_queue_head_t *wqueue; 203033dccbb0SHerbert Xu 203133dccbb0SHerbert Xu if (!sock_writeable(sk)) 203233dccbb0SHerbert Xu return; 203333dccbb0SHerbert Xu 20349cd3e072SEric Dumazet if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) 203533dccbb0SHerbert Xu return; 203633dccbb0SHerbert Xu 203743815482SEric Dumazet wqueue = sk_sleep(sk); 203843815482SEric Dumazet if (wqueue && waitqueue_active(wqueue)) 203943815482SEric Dumazet wake_up_interruptible_sync_poll(wqueue, POLLOUT | 204005c2828cSMichael S. Tsirkin POLLWRNORM | POLLWRBAND); 2041c722c625SHerbert Xu 204254f968d6SJason Wang tfile = container_of(sk, struct tun_file, sk); 204354f968d6SJason Wang kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); 204433dccbb0SHerbert Xu } 204533dccbb0SHerbert Xu 20461b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) 204705c2828cSMichael S. Tsirkin { 204854f968d6SJason Wang int ret; 204954f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 20509484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 205154f968d6SJason Wang 205254f968d6SJason Wang if (!tun) 205354f968d6SJason Wang return -EBADFD; 2054f5ff53b4SAl Viro 2055c0371da6SAl Viro ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, 20565503fcecSJason Wang m->msg_flags & MSG_DONTWAIT, 20575503fcecSJason Wang m->msg_flags & MSG_MORE); 205854f968d6SJason Wang tun_put(tun); 205954f968d6SJason Wang return ret; 206005c2828cSMichael S. Tsirkin } 206105c2828cSMichael S. Tsirkin 20621b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, 206305c2828cSMichael S. Tsirkin int flags) 206405c2828cSMichael S. Tsirkin { 206554f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 20669484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 206705c2828cSMichael S. 
Tsirkin int ret; 206854f968d6SJason Wang 206954f968d6SJason Wang if (!tun) 207054f968d6SJason Wang return -EBADFD; 207154f968d6SJason Wang 2072eda29772SRichard Cochran if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { 20733811ae76SGao feng ret = -EINVAL; 20743811ae76SGao feng goto out; 20753811ae76SGao feng } 2076eda29772SRichard Cochran if (flags & MSG_ERRQUEUE) { 2077eda29772SRichard Cochran ret = sock_recv_errqueue(sock->sk, m, total_len, 2078eda29772SRichard Cochran SOL_PACKET, TUN_TX_TIMESTAMP); 2079eda29772SRichard Cochran goto out; 2080eda29772SRichard Cochran } 2081ac77cfd4SJason Wang ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, 2082ac77cfd4SJason Wang m->msg_control); 208387897931SAlex Gartrell if (ret > (ssize_t)total_len) { 208442404c09SDavid S. Miller m->msg_flags |= MSG_TRUNC; 208542404c09SDavid S. Miller ret = flags & MSG_TRUNC ? ret : total_len; 208642404c09SDavid S. Miller } 20873811ae76SGao feng out: 208854f968d6SJason Wang tun_put(tun); 208905c2828cSMichael S. Tsirkin return ret; 209005c2828cSMichael S. Tsirkin } 209105c2828cSMichael S. Tsirkin 20921576d986SJason Wang static int tun_peek_len(struct socket *sock) 20931576d986SJason Wang { 20941576d986SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 20951576d986SJason Wang struct tun_struct *tun; 20961576d986SJason Wang int ret = 0; 20971576d986SJason Wang 20989484dc74Syuan linyu tun = tun_get(tfile); 20991576d986SJason Wang if (!tun) 21001576d986SJason Wang return 0; 21011576d986SJason Wang 21021576d986SJason Wang ret = skb_array_peek_len(&tfile->tx_array); 21031576d986SJason Wang tun_put(tun); 21041576d986SJason Wang 21051576d986SJason Wang return ret; 21061576d986SJason Wang } 21071576d986SJason Wang 210805c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */ 210905c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = { 21101576d986SJason Wang .peek_len = tun_peek_len, 211105c2828cSMichael S. Tsirkin .sendmsg = tun_sendmsg, 211205c2828cSMichael S. Tsirkin .recvmsg = tun_recvmsg, 211305c2828cSMichael S. Tsirkin }; 211405c2828cSMichael S. Tsirkin 211533dccbb0SHerbert Xu static struct proto tun_proto = { 211633dccbb0SHerbert Xu .name = "tun", 211733dccbb0SHerbert Xu .owner = THIS_MODULE, 211854f968d6SJason Wang .obj_size = sizeof(struct tun_file), 211933dccbb0SHerbert Xu }; 2120f019a7a5SEric W. Biederman 2121980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun) 2122980c9e8cSDavid Woodhouse { 2123031f5e03SMichael S. Tsirkin return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); 2124980c9e8cSDavid Woodhouse } 2125980c9e8cSDavid Woodhouse 2126980c9e8cSDavid Woodhouse static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr, 2127980c9e8cSDavid Woodhouse char *buf) 2128980c9e8cSDavid Woodhouse { 2129980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2130980c9e8cSDavid Woodhouse return sprintf(buf, "0x%x\n", tun_flags(tun)); 2131980c9e8cSDavid Woodhouse } 2132980c9e8cSDavid Woodhouse 2133980c9e8cSDavid Woodhouse static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, 2134980c9e8cSDavid Woodhouse char *buf) 2135980c9e8cSDavid Woodhouse { 2136980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 21370625c883SEric W. Biederman return uid_valid(tun->owner)? 21380625c883SEric W. Biederman sprintf(buf, "%u\n", 21390625c883SEric W. 
Biederman from_kuid_munged(current_user_ns(), tun->owner)): 21400625c883SEric W. Biederman sprintf(buf, "-1\n"); 2141980c9e8cSDavid Woodhouse } 2142980c9e8cSDavid Woodhouse 2143980c9e8cSDavid Woodhouse static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, 2144980c9e8cSDavid Woodhouse char *buf) 2145980c9e8cSDavid Woodhouse { 2146980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 21470625c883SEric W. Biederman return gid_valid(tun->group) ? 21480625c883SEric W. Biederman sprintf(buf, "%u\n", 21490625c883SEric W. Biederman from_kgid_munged(current_user_ns(), tun->group)): 21500625c883SEric W. Biederman sprintf(buf, "-1\n"); 2151980c9e8cSDavid Woodhouse } 2152980c9e8cSDavid Woodhouse 2153980c9e8cSDavid Woodhouse static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); 2154980c9e8cSDavid Woodhouse static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL); 2155980c9e8cSDavid Woodhouse static DEVICE_ATTR(group, 0444, tun_show_group, NULL); 2156980c9e8cSDavid Woodhouse 2157c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = { 2158c4d33e24STakashi Iwai &dev_attr_tun_flags.attr, 2159c4d33e24STakashi Iwai &dev_attr_owner.attr, 2160c4d33e24STakashi Iwai &dev_attr_group.attr, 2161c4d33e24STakashi Iwai NULL 2162c4d33e24STakashi Iwai }; 2163c4d33e24STakashi Iwai 2164c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = { 2165c4d33e24STakashi Iwai .attrs = tun_dev_attrs 2166c4d33e24STakashi Iwai }; 2167c4d33e24STakashi Iwai 2168d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 21691da177e4SLinus Torvalds { 21701da177e4SLinus Torvalds struct tun_struct *tun; 217154f968d6SJason Wang struct tun_file *tfile = file->private_data; 21721da177e4SLinus Torvalds struct net_device *dev; 21731da177e4SLinus Torvalds int err; 21741da177e4SLinus Torvalds 21757c0c3b1aSJason Wang if (tfile->detached) 21767c0c3b1aSJason Wang return -EINVAL; 21777c0c3b1aSJason Wang 217890e33d45SPetar Penkov if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { 217990e33d45SPetar Penkov if (!capable(CAP_NET_ADMIN)) 218090e33d45SPetar Penkov return -EPERM; 218190e33d45SPetar Penkov 218290e33d45SPetar Penkov if (!(ifr->ifr_flags & IFF_NAPI) || 218390e33d45SPetar Penkov (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) 218490e33d45SPetar Penkov return -EINVAL; 218590e33d45SPetar Penkov } 218690e33d45SPetar Penkov 218774a3e5a7SEric W. Biederman dev = __dev_get_by_name(net, ifr->ifr_name); 218874a3e5a7SEric W. Biederman if (dev) { 2189f85ba780SDavid Woodhouse if (ifr->ifr_flags & IFF_TUN_EXCL) 2190f85ba780SDavid Woodhouse return -EBUSY; 219174a3e5a7SEric W. Biederman if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) 219274a3e5a7SEric W. Biederman tun = netdev_priv(dev); 219374a3e5a7SEric W. Biederman else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) 219474a3e5a7SEric W. Biederman tun = netdev_priv(dev); 219574a3e5a7SEric W. Biederman else 219674a3e5a7SEric W. Biederman return -EINVAL; 219774a3e5a7SEric W. Biederman 21988e6d91aeSJason Wang if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != 219940630b82SMichael S. 
Tsirkin !!(tun->flags & IFF_MULTI_QUEUE)) 22008e6d91aeSJason Wang return -EINVAL; 22018e6d91aeSJason Wang 2202cde8b15fSJason Wang if (tun_not_capable(tun)) 22032b980dbdSPaul Moore return -EPERM; 22045dbbaf2dSPaul Moore err = security_tun_dev_open(tun->security); 22052b980dbdSPaul Moore if (err < 0) 22062b980dbdSPaul Moore return err; 22072b980dbdSPaul Moore 220894317099SPetar Penkov err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, 220994317099SPetar Penkov ifr->ifr_flags & IFF_NAPI); 2210a7385ba2SEric W. Biederman if (err < 0) 2211a7385ba2SEric W. Biederman return err; 22124008e97fSJason Wang 221340630b82SMichael S. Tsirkin if (tun->flags & IFF_MULTI_QUEUE && 2214e8dbad66SJason Wang (tun->numqueues + tun->numdisabled > 1)) { 2215e8dbad66SJason Wang /* One or more queue has already been attached, no need 2216e8dbad66SJason Wang * to initialize the device again. 2217e8dbad66SJason Wang */ 2218e8dbad66SJason Wang return 0; 2219e8dbad66SJason Wang } 222086a264abSDavid Howells } 22211da177e4SLinus Torvalds else { 22221da177e4SLinus Torvalds char *name; 22231da177e4SLinus Torvalds unsigned long flags = 0; 2224edfb6a14SJason Wang int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? 2225edfb6a14SJason Wang MAX_TAP_QUEUES : 1; 22261da177e4SLinus Torvalds 2227c260b772SEric W. Biederman if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2228ca6bb5d7SDavid Woodhouse return -EPERM; 22292b980dbdSPaul Moore err = security_tun_dev_create(); 22302b980dbdSPaul Moore if (err < 0) 22312b980dbdSPaul Moore return err; 2232ca6bb5d7SDavid Woodhouse 22331da177e4SLinus Torvalds /* Set dev type */ 22341da177e4SLinus Torvalds if (ifr->ifr_flags & IFF_TUN) { 22351da177e4SLinus Torvalds /* TUN device */ 223640630b82SMichael S. Tsirkin flags |= IFF_TUN; 22371da177e4SLinus Torvalds name = "tun%d"; 22381da177e4SLinus Torvalds } else if (ifr->ifr_flags & IFF_TAP) { 22391da177e4SLinus Torvalds /* TAP device */ 224040630b82SMichael S. Tsirkin flags |= IFF_TAP; 22411da177e4SLinus Torvalds name = "tap%d"; 22421da177e4SLinus Torvalds } else 224336989b90SKusanagi Kouichi return -EINVAL; 22441da177e4SLinus Torvalds 22451da177e4SLinus Torvalds if (*ifr->ifr_name) 22461da177e4SLinus Torvalds name = ifr->ifr_name; 22471da177e4SLinus Torvalds 2248c8d68e6bSJason Wang dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, 2249c835a677STom Gundersen NET_NAME_UNKNOWN, tun_setup, queues, 2250c835a677STom Gundersen queues); 2251edfb6a14SJason Wang 22521da177e4SLinus Torvalds if (!dev) 22531da177e4SLinus Torvalds return -ENOMEM; 22541da177e4SLinus Torvalds 2255fc54c658SPavel Emelyanov dev_net_set(dev, net); 2256f019a7a5SEric W. Biederman dev->rtnl_link_ops = &tun_link_ops; 2257fb7589a1SPavel Emelyanov dev->ifindex = tfile->ifindex; 2258c4d33e24STakashi Iwai dev->sysfs_groups[0] = &tun_attr_group; 2259758e43b7SStephen Hemminger 22601da177e4SLinus Torvalds tun = netdev_priv(dev); 22611da177e4SLinus Torvalds tun->dev = dev; 22621da177e4SLinus Torvalds tun->flags = flags; 2263f271b2ccSMax Krasnyansky tun->txflt.count = 0; 2264d9d52b51SMichael S. 
Tsirkin tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); 22651da177e4SLinus Torvalds 2266eaea34b2SPaolo Abeni tun->align = NET_SKB_PAD; 226754f968d6SJason Wang tun->filter_attached = false; 226854f968d6SJason Wang tun->sndbuf = tfile->socket.sk->sk_sndbuf; 22695503fcecSJason Wang tun->rx_batched = 0; 227033dccbb0SHerbert Xu 2271608b9977SPaolo Abeni tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); 2272608b9977SPaolo Abeni if (!tun->pcpu_stats) { 2273608b9977SPaolo Abeni err = -ENOMEM; 2274608b9977SPaolo Abeni goto err_free_dev; 2275608b9977SPaolo Abeni } 2276608b9977SPaolo Abeni 227796442e42SJason Wang spin_lock_init(&tun->lock); 227896442e42SJason Wang 22795dbbaf2dSPaul Moore err = security_tun_dev_alloc_security(&tun->security); 22805dbbaf2dSPaul Moore if (err < 0) 2281608b9977SPaolo Abeni goto err_free_stat; 22822b980dbdSPaul Moore 22831da177e4SLinus Torvalds tun_net_init(dev); 2284944a1376SPavel Emelyanov tun_flow_init(tun); 228596442e42SJason Wang 228688255375SMichał Mirosław dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | 22876680ec68SJason Wang TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | 22886680ec68SJason Wang NETIF_F_HW_VLAN_STAG_TX; 22892a2bbf17SPaolo Abeni dev->features = dev->hw_features | NETIF_F_LLTX; 22906671b224SFernando Luis Vazquez Cao dev->vlan_features = dev->features & 22916671b224SFernando Luis Vazquez Cao ~(NETIF_F_HW_VLAN_CTAG_TX | 22926671b224SFernando Luis Vazquez Cao NETIF_F_HW_VLAN_STAG_TX); 229388255375SMichał Mirosław 22944008e97fSJason Wang INIT_LIST_HEAD(&tun->disabled); 229594317099SPetar Penkov err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI); 2296eb0fb363SJason Wang if (err < 0) 2297662ca437SJason Wang goto err_free_flow; 2298eb0fb363SJason Wang 22991da177e4SLinus Torvalds err = register_netdevice(tun->dev); 23001da177e4SLinus Torvalds if (err < 0) 2301662ca437SJason Wang goto err_detach; 2302af668b3cSMichael S. Tsirkin } 2303980c9e8cSDavid Woodhouse 2304eb0fb363SJason Wang netif_carrier_on(tun->dev); 23051da177e4SLinus Torvalds 23066b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_set_iff\n"); 23071da177e4SLinus Torvalds 2308031f5e03SMichael S. Tsirkin tun->flags = (tun->flags & ~TUN_FEATURES) | 2309031f5e03SMichael S. Tsirkin (ifr->ifr_flags & TUN_FEATURES); 2310c8d68e6bSJason Wang 2311e35259a9SMax Krasnyansky /* Make sure persistent devices do not get stuck in 2312e35259a9SMax Krasnyansky * xoff state. 
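 * (a persistent device keeps its tx queues alive across detaches,
 * so a queue stopped under a previous owner must be woken here).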
2313e35259a9SMax Krasnyansky */ 2314e35259a9SMax Krasnyansky if (netif_running(tun->dev)) 2315c8d68e6bSJason Wang netif_tx_wake_all_queues(tun->dev); 2316e35259a9SMax Krasnyansky 23171da177e4SLinus Torvalds strcpy(ifr->ifr_name, tun->dev->name); 23181da177e4SLinus Torvalds return 0; 23191da177e4SLinus Torvalds 2320662ca437SJason Wang err_detach: 2321662ca437SJason Wang tun_detach_all(dev); 2322ff244c6bSEric Dumazet /* register_netdevice() already called tun_free_netdev() */ 2323ff244c6bSEric Dumazet goto err_free_dev; 2324ff244c6bSEric Dumazet 2325662ca437SJason Wang err_free_flow: 2326662ca437SJason Wang tun_flow_uninit(tun); 2327662ca437SJason Wang security_tun_dev_free_security(tun->security); 2328608b9977SPaolo Abeni err_free_stat: 2329608b9977SPaolo Abeni free_percpu(tun->pcpu_stats); 23301da177e4SLinus Torvalds err_free_dev: 23311da177e4SLinus Torvalds free_netdev(dev); 23321da177e4SLinus Torvalds return err; 23331da177e4SLinus Torvalds } 23341da177e4SLinus Torvalds 23359ce99cf6SRami Rosen static void tun_get_iff(struct net *net, struct tun_struct *tun, 2336876bfd4dSHerbert Xu struct ifreq *ifr) 2337e3b99556SMark McLoughlin { 23386b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_get_iff\n"); 2339e3b99556SMark McLoughlin 2340e3b99556SMark McLoughlin strcpy(ifr->ifr_name, tun->dev->name); 2341e3b99556SMark McLoughlin 2342980c9e8cSDavid Woodhouse ifr->ifr_flags = tun_flags(tun); 2343e3b99556SMark McLoughlin 2344e3b99556SMark McLoughlin } 2345e3b99556SMark McLoughlin 23465228ddc9SRusty Russell /* This is like a cut-down ethtool ops, except done via tun fd so no 23475228ddc9SRusty Russell * privs required. */ 234888255375SMichał Mirosław static int set_offload(struct tun_struct *tun, unsigned long arg) 23495228ddc9SRusty Russell { 2350c8f44affSMichał Mirosław netdev_features_t features = 0; 23515228ddc9SRusty Russell 23525228ddc9SRusty Russell if (arg & TUN_F_CSUM) { 235388255375SMichał Mirosław features |= NETIF_F_HW_CSUM; 23545228ddc9SRusty Russell arg &= ~TUN_F_CSUM; 23555228ddc9SRusty Russell 23565228ddc9SRusty Russell if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { 23575228ddc9SRusty Russell if (arg & TUN_F_TSO_ECN) { 23585228ddc9SRusty Russell features |= NETIF_F_TSO_ECN; 23595228ddc9SRusty Russell arg &= ~TUN_F_TSO_ECN; 23605228ddc9SRusty Russell } 23615228ddc9SRusty Russell if (arg & TUN_F_TSO4) 23625228ddc9SRusty Russell features |= NETIF_F_TSO; 23635228ddc9SRusty Russell if (arg & TUN_F_TSO6) 23645228ddc9SRusty Russell features |= NETIF_F_TSO6; 23655228ddc9SRusty Russell arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 23665228ddc9SRusty Russell } 23675228ddc9SRusty Russell } 23685228ddc9SRusty Russell 23695228ddc9SRusty Russell /* This gives the user a way to test for new features in future by 23705228ddc9SRusty Russell * trying to set them. 
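 * Any bits still set in arg are unknown to this kernel, so the
 * request fails with -EINVAL below instead of being silently ignored.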
*/ 23715228ddc9SRusty Russell if (arg) 23725228ddc9SRusty Russell return -EINVAL; 23735228ddc9SRusty Russell 237488255375SMichał Mirosław tun->set_features = features; 237509050957SYaroslav Isakov tun->dev->wanted_features &= ~TUN_USER_FEATURES; 237609050957SYaroslav Isakov tun->dev->wanted_features |= features; 237788255375SMichał Mirosław netdev_update_features(tun->dev); 23785228ddc9SRusty Russell 23795228ddc9SRusty Russell return 0; 23805228ddc9SRusty Russell } 23815228ddc9SRusty Russell 2382c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n) 2383c8d68e6bSJason Wang { 2384c8d68e6bSJason Wang int i; 2385c8d68e6bSJason Wang struct tun_file *tfile; 2386c8d68e6bSJason Wang 2387c8d68e6bSJason Wang for (i = 0; i < n; i++) { 2388b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 23898ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 23908ced425eSHannes Frederic Sowa sk_detach_filter(tfile->socket.sk); 23918ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2392c8d68e6bSJason Wang } 2393c8d68e6bSJason Wang 2394c8d68e6bSJason Wang tun->filter_attached = false; 2395c8d68e6bSJason Wang } 2396c8d68e6bSJason Wang 2397c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun) 2398c8d68e6bSJason Wang { 2399c8d68e6bSJason Wang int i, ret = 0; 2400c8d68e6bSJason Wang struct tun_file *tfile; 2401c8d68e6bSJason Wang 2402c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2403b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 24048ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 24058ced425eSHannes Frederic Sowa ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); 24068ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2407c8d68e6bSJason Wang if (ret) { 2408c8d68e6bSJason Wang tun_detach_filter(tun, i); 2409c8d68e6bSJason Wang return ret; 2410c8d68e6bSJason Wang } 2411c8d68e6bSJason Wang } 2412c8d68e6bSJason Wang 2413c8d68e6bSJason Wang tun->filter_attached = true; 2414c8d68e6bSJason Wang return ret; 2415c8d68e6bSJason Wang } 2416c8d68e6bSJason Wang 2417c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun) 2418c8d68e6bSJason Wang { 2419c8d68e6bSJason Wang struct tun_file *tfile; 2420c8d68e6bSJason Wang int i; 2421c8d68e6bSJason Wang 2422c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2423b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 2424c8d68e6bSJason Wang tfile->socket.sk->sk_sndbuf = tun->sndbuf; 2425c8d68e6bSJason Wang } 2426c8d68e6bSJason Wang } 2427c8d68e6bSJason Wang 2428cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr) 2429cde8b15fSJason Wang { 2430cde8b15fSJason Wang struct tun_file *tfile = file->private_data; 2431cde8b15fSJason Wang struct tun_struct *tun; 2432cde8b15fSJason Wang int ret = 0; 2433cde8b15fSJason Wang 2434cde8b15fSJason Wang rtnl_lock(); 2435cde8b15fSJason Wang 2436cde8b15fSJason Wang if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { 24374008e97fSJason Wang tun = tfile->detached; 24385dbbaf2dSPaul Moore if (!tun) { 2439cde8b15fSJason Wang ret = -EINVAL; 24405dbbaf2dSPaul Moore goto unlock; 24415dbbaf2dSPaul Moore } 24425dbbaf2dSPaul Moore ret = security_tun_dev_attach_queue(tun->security); 24435dbbaf2dSPaul Moore if (ret < 0) 24445dbbaf2dSPaul Moore goto unlock; 244594317099SPetar Penkov ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI); 24464008e97fSJason Wang } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 2447b8deabd3SJason Wang tun = rtnl_dereference(tfile->tun); 244840630b82SMichael S. 
2382c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n)
2383c8d68e6bSJason Wang {
2384c8d68e6bSJason Wang int i;
2385c8d68e6bSJason Wang struct tun_file *tfile;
2386c8d68e6bSJason Wang
2387c8d68e6bSJason Wang for (i = 0; i < n; i++) {
2388b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]);
23898ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk);
23908ced425eSHannes Frederic Sowa sk_detach_filter(tfile->socket.sk);
23918ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk);
2392c8d68e6bSJason Wang }
2393c8d68e6bSJason Wang
2394c8d68e6bSJason Wang tun->filter_attached = false;
2395c8d68e6bSJason Wang }
2396c8d68e6bSJason Wang
2397c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun)
2398c8d68e6bSJason Wang {
2399c8d68e6bSJason Wang int i, ret = 0;
2400c8d68e6bSJason Wang struct tun_file *tfile;
2401c8d68e6bSJason Wang
2402c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) {
2403b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]);
24048ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk);
24058ced425eSHannes Frederic Sowa ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
24068ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk);
2407c8d68e6bSJason Wang if (ret) {
2408c8d68e6bSJason Wang tun_detach_filter(tun, i);
2409c8d68e6bSJason Wang return ret;
2410c8d68e6bSJason Wang }
2411c8d68e6bSJason Wang }
2412c8d68e6bSJason Wang
2413c8d68e6bSJason Wang tun->filter_attached = true;
2414c8d68e6bSJason Wang return ret;
2415c8d68e6bSJason Wang }
2416c8d68e6bSJason Wang
2417c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun)
2418c8d68e6bSJason Wang {
2419c8d68e6bSJason Wang struct tun_file *tfile;
2420c8d68e6bSJason Wang int i;
2421c8d68e6bSJason Wang
2422c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) {
2423b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]);
2424c8d68e6bSJason Wang tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2425c8d68e6bSJason Wang }
2426c8d68e6bSJason Wang }
2427c8d68e6bSJason Wang
2428cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr)
2429cde8b15fSJason Wang {
2430cde8b15fSJason Wang struct tun_file *tfile = file->private_data;
2431cde8b15fSJason Wang struct tun_struct *tun;
2432cde8b15fSJason Wang int ret = 0;
2433cde8b15fSJason Wang
2434cde8b15fSJason Wang rtnl_lock();
2435cde8b15fSJason Wang
2436cde8b15fSJason Wang if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
24374008e97fSJason Wang tun = tfile->detached;
24385dbbaf2dSPaul Moore if (!tun) {
2439cde8b15fSJason Wang ret = -EINVAL;
24405dbbaf2dSPaul Moore goto unlock;
24415dbbaf2dSPaul Moore }
24425dbbaf2dSPaul Moore ret = security_tun_dev_attach_queue(tun->security);
24435dbbaf2dSPaul Moore if (ret < 0)
24445dbbaf2dSPaul Moore goto unlock;
244594317099SPetar Penkov ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
24464008e97fSJason Wang } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2447b8deabd3SJason Wang tun = rtnl_dereference(tfile->tun);
244840630b82SMichael S. Tsirkin if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
24494008e97fSJason Wang ret = -EINVAL;
2450cde8b15fSJason Wang else
24514008e97fSJason Wang __tun_detach(tfile, false);
24524008e97fSJason Wang } else
2453cde8b15fSJason Wang ret = -EINVAL;
2454cde8b15fSJason Wang
24555dbbaf2dSPaul Moore unlock:
2456cde8b15fSJason Wang rtnl_unlock();
2457cde8b15fSJason Wang return ret;
2458cde8b15fSJason Wang }
2459cde8b15fSJason Wang
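/*
 * Editor's note, a hedged sketch rather than driver code: tun_set_queue()
 * above services TUNSETQUEUE. A queue fd (created earlier with
 * IFF_MULTI_QUEUE in the TUNSETIFF flags) can leave and rejoin the device's
 * fanout without being closed; queue_fd below is such a descriptor.
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;
 *	if (ioctl(queue_fd, TUNSETQUEUE, &ifr) < 0)
 *		perror("IFF_DETACH_QUEUE");
 *
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	if (ioctl(queue_fd, TUNSETQUEUE, &ifr) < 0)
 *		perror("IFF_ATTACH_QUEUE");
 *
 * Note the asymmetry mirrored in the code above: attach goes through
 * security_tun_dev_attach_queue(), detach does not.
 */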
246050857e2aSArnd Bergmann static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
246150857e2aSArnd Bergmann unsigned long arg, int ifreq_len)
24621da177e4SLinus Torvalds {
246336b50babSEric W. Biederman struct tun_file *tfile = file->private_data;
2464631ab46bSEric W. Biederman struct tun_struct *tun;
24651da177e4SLinus Torvalds void __user *argp = (void __user *)arg;
24661da177e4SLinus Torvalds struct ifreq ifr;
24670625c883SEric W. Biederman kuid_t owner;
24680625c883SEric W. Biederman kgid_t group;
246933dccbb0SHerbert Xu int sndbuf;
2470d9d52b51SMichael S. Tsirkin int vnet_hdr_sz;
2471fb7589a1SPavel Emelyanov unsigned int ifindex;
24721cf8e410SMichael S. Tsirkin int le;
2473f271b2ccSMax Krasnyansky int ret;
24741da177e4SLinus Torvalds
247520861f26SGao Feng if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == SOCK_IOC_TYPE) {
247650857e2aSArnd Bergmann if (copy_from_user(&ifr, argp, ifreq_len))
24771da177e4SLinus Torvalds return -EFAULT;
24788bbb1813SDavid S. Miller } else {
2479a117dacdSMathias Krause memset(&ifr, 0, sizeof(ifr));
24808bbb1813SDavid S. Miller }
2481631ab46bSEric W. Biederman if (cmd == TUNGETFEATURES) {
2482631ab46bSEric W. Biederman /* Currently this just means: "what IFF flags are valid?".
2483631ab46bSEric W. Biederman * This is needed because we never checked for invalid flags on
2484031f5e03SMichael S. Tsirkin * TUNSETIFF.
2485031f5e03SMichael S. Tsirkin */
2486031f5e03SMichael S. Tsirkin return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
2487631ab46bSEric W. Biederman (unsigned int __user *)argp);
2488cde8b15fSJason Wang } else if (cmd == TUNSETQUEUE)
2489cde8b15fSJason Wang return tun_set_queue(file, &ifr);
2490631ab46bSEric W. Biederman
2491c8d68e6bSJason Wang ret = 0;
2492876bfd4dSHerbert Xu rtnl_lock();
2493876bfd4dSHerbert Xu
24949484dc74Syuan linyu tun = tun_get(tfile);
24950f16bc13SGao Feng if (cmd == TUNSETIFF) {
24960f16bc13SGao Feng ret = -EEXIST;
24970f16bc13SGao Feng if (tun)
24980f16bc13SGao Feng goto unlock;
24990f16bc13SGao Feng
25001da177e4SLinus Torvalds ifr.ifr_name[IFNAMSIZ-1] = '\0';
25011da177e4SLinus Torvalds
2502140e807dSEric W. Biederman ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr);
25031da177e4SLinus Torvalds
2504876bfd4dSHerbert Xu if (ret)
2505876bfd4dSHerbert Xu goto unlock;
25061da177e4SLinus Torvalds
250750857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len))
2508876bfd4dSHerbert Xu ret = -EFAULT;
2509876bfd4dSHerbert Xu goto unlock;
25101da177e4SLinus Torvalds }
2511fb7589a1SPavel Emelyanov if (cmd == TUNSETIFINDEX) {
2512fb7589a1SPavel Emelyanov ret = -EPERM;
2513fb7589a1SPavel Emelyanov if (tun)
2514fb7589a1SPavel Emelyanov goto unlock;
2515fb7589a1SPavel Emelyanov
2516fb7589a1SPavel Emelyanov ret = -EFAULT;
2517fb7589a1SPavel Emelyanov if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
2518fb7589a1SPavel Emelyanov goto unlock;
2519fb7589a1SPavel Emelyanov
2520fb7589a1SPavel Emelyanov ret = 0;
2521fb7589a1SPavel Emelyanov tfile->ifindex = ifindex;
2522fb7589a1SPavel Emelyanov goto unlock;
2523fb7589a1SPavel Emelyanov }
25241da177e4SLinus Torvalds
2525876bfd4dSHerbert Xu ret = -EBADFD;
25261da177e4SLinus Torvalds if (!tun)
2527876bfd4dSHerbert Xu goto unlock;
25281da177e4SLinus Torvalds
25291e588338SJason Wang tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
25301da177e4SLinus Torvalds
2531631ab46bSEric W. Biederman ret = 0;
25321da177e4SLinus Torvalds switch (cmd) {
2533e3b99556SMark McLoughlin case TUNGETIFF:
25349ce99cf6SRami Rosen tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
2535e3b99556SMark McLoughlin
25363d407a80SPavel Emelyanov if (tfile->detached)
25373d407a80SPavel Emelyanov ifr.ifr_flags |= IFF_DETACH_QUEUE;
2538849c9b6fSPavel Emelyanov if (!tfile->socket.sk->sk_filter)
2539849c9b6fSPavel Emelyanov ifr.ifr_flags |= IFF_NOFILTER;
25403d407a80SPavel Emelyanov
254150857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len))
2542631ab46bSEric W. Biederman ret = -EFAULT;
2543e3b99556SMark McLoughlin break;
2544e3b99556SMark McLoughlin
25451da177e4SLinus Torvalds case TUNSETNOCSUM:
25461da177e4SLinus Torvalds /* Disable/Enable checksum */
25471da177e4SLinus Torvalds
254888255375SMichał Mirosław /* [unimplemented] */
254988255375SMichał Mirosław tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
25506b8a66eeSJoe Perches arg ? "disabled" : "enabled");
25511da177e4SLinus Torvalds break;
25521da177e4SLinus Torvalds
25531da177e4SLinus Torvalds case TUNSETPERSIST:
255454f968d6SJason Wang /* Disable/Enable persist mode. Keep an extra reference to the
255454f968d6SJason Wang * module to prevent the module being unloaded.
255654f968d6SJason Wang */
255740630b82SMichael S. Tsirkin if (arg && !(tun->flags & IFF_PERSIST)) {
255840630b82SMichael S. Tsirkin tun->flags |= IFF_PERSIST;
255954f968d6SJason Wang __module_get(THIS_MODULE);
2560dd38bd85SJason Wang }
256140630b82SMichael S. Tsirkin if (!arg && (tun->flags & IFF_PERSIST)) {
256240630b82SMichael S. Tsirkin tun->flags &= ~IFF_PERSIST;
256354f968d6SJason Wang module_put(THIS_MODULE);
256454f968d6SJason Wang }
25651da177e4SLinus Torvalds
25666b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "persist %s\n",
25676b8a66eeSJoe Perches arg ? "enabled" : "disabled");
25681da177e4SLinus Torvalds break;
25691da177e4SLinus Torvalds
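/*
 * Editor's note, illustrative only: from userspace the case above is simply
 * ioctl(fd, TUNSETPERSIST, 1) to keep the interface alive after the
 * descriptor is closed, and ioctl(fd, TUNSETPERSIST, 0) to make it
 * transient again; `ip tuntap add` and `ip tuntap del` drive this same path.
 */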
25701da177e4SLinus Torvalds case TUNSETOWNER:
25711da177e4SLinus Torvalds /* Set owner of the device */
25720625c883SEric W. Biederman owner = make_kuid(current_user_ns(), arg);
25730625c883SEric W. Biederman if (!uid_valid(owner)) {
25740625c883SEric W. Biederman ret = -EINVAL;
25750625c883SEric W. Biederman break;
25760625c883SEric W. Biederman }
25770625c883SEric W. Biederman tun->owner = owner;
25781e588338SJason Wang tun_debug(KERN_INFO, tun, "owner set to %u\n",
25790625c883SEric W. Biederman from_kuid(&init_user_ns, tun->owner));
25801da177e4SLinus Torvalds break;
25811da177e4SLinus Torvalds
25828c644623SGuido Guenther case TUNSETGROUP:
25838c644623SGuido Guenther /* Set group of the device */
25840625c883SEric W. Biederman group = make_kgid(current_user_ns(), arg);
25850625c883SEric W. Biederman if (!gid_valid(group)) {
25860625c883SEric W. Biederman ret = -EINVAL;
25870625c883SEric W. Biederman break;
25880625c883SEric W. Biederman }
25890625c883SEric W. Biederman tun->group = group;
25901e588338SJason Wang tun_debug(KERN_INFO, tun, "group set to %u\n",
25910625c883SEric W. Biederman from_kgid(&init_user_ns, tun->group));
25928c644623SGuido Guenther break;
25938c644623SGuido Guenther
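/*
 * Editor's note, illustrative only: the two cases above restrict who may
 * later attach to a persistent device. A typical sequence run by a
 * privileged setup tool looks like the following; getuid()/getgid() are
 * just one plausible choice of owner.
 *
 *	ioctl(fd, TUNSETOWNER, getuid());
 *	ioctl(fd, TUNSETGROUP, getgid());
 *	ioctl(fd, TUNSETPERSIST, 1);
 */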
2594ff4cc3acSMike Kershaw case TUNSETLINK:
2595ff4cc3acSMike Kershaw /* Only allow setting the type when the interface is down */
2596ff4cc3acSMike Kershaw if (tun->dev->flags & IFF_UP) {
25976b8a66eeSJoe Perches tun_debug(KERN_INFO, tun,
25986b8a66eeSJoe Perches "Linktype set failed because interface is up\n");
259948abfe05SDavid S. Miller ret = -EBUSY;
2600ff4cc3acSMike Kershaw } else {
2601ff4cc3acSMike Kershaw tun->dev->type = (int) arg;
26026b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "linktype set to %d\n",
26036b8a66eeSJoe Perches tun->dev->type);
260448abfe05SDavid S. Miller ret = 0;
2605ff4cc3acSMike Kershaw }
2606631ab46bSEric W. Biederman break;
2607ff4cc3acSMike Kershaw
26081da177e4SLinus Torvalds #ifdef TUN_DEBUG
26091da177e4SLinus Torvalds case TUNSETDEBUG:
26101da177e4SLinus Torvalds tun->debug = arg;
26111da177e4SLinus Torvalds break;
26121da177e4SLinus Torvalds #endif
26135228ddc9SRusty Russell case TUNSETOFFLOAD:
261488255375SMichał Mirosław ret = set_offload(tun, arg);
2615631ab46bSEric W. Biederman break;
26165228ddc9SRusty Russell
2617f271b2ccSMax Krasnyansky case TUNSETTXFILTER:
2618f271b2ccSMax Krasnyansky /* Can be set only for TAPs */
2619631ab46bSEric W. Biederman ret = -EINVAL;
262040630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2621631ab46bSEric W. Biederman break;
2622c0e5a8c2SHarvey Harrison ret = update_filter(&tun->txflt, (void __user *)arg);
2623631ab46bSEric W. Biederman break;
26241da177e4SLinus Torvalds
26251da177e4SLinus Torvalds case SIOCGIFHWADDR:
2626b595076aSUwe Kleine-König /* Get hw address */
2627f271b2ccSMax Krasnyansky memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
2628f271b2ccSMax Krasnyansky ifr.ifr_hwaddr.sa_family = tun->dev->type;
262950857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len))
2630631ab46bSEric W. Biederman ret = -EFAULT;
2631631ab46bSEric W. Biederman break;
26321da177e4SLinus Torvalds
26331da177e4SLinus Torvalds case SIOCSIFHWADDR:
2634f271b2ccSMax Krasnyansky /* Set hw address */
26356b8a66eeSJoe Perches tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
26366b8a66eeSJoe Perches ifr.ifr_hwaddr.sa_data);
263740102371SKim B. Heino
263840102371SKim B. Heino ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
2639631ab46bSEric W. Biederman break;
264033dccbb0SHerbert Xu
264133dccbb0SHerbert Xu case TUNGETSNDBUF:
264254f968d6SJason Wang sndbuf = tfile->socket.sk->sk_sndbuf;
264333dccbb0SHerbert Xu if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
264433dccbb0SHerbert Xu ret = -EFAULT;
264533dccbb0SHerbert Xu break;
264633dccbb0SHerbert Xu
264733dccbb0SHerbert Xu case TUNSETSNDBUF:
264833dccbb0SHerbert Xu if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
264933dccbb0SHerbert Xu ret = -EFAULT;
265033dccbb0SHerbert Xu break;
265133dccbb0SHerbert Xu }
265233dccbb0SHerbert Xu
2653c8d68e6bSJason Wang tun->sndbuf = sndbuf;
2654c8d68e6bSJason Wang tun_set_sndbuf(tun);
265533dccbb0SHerbert Xu break;
265633dccbb0SHerbert Xu
2657d9d52b51SMichael S. Tsirkin case TUNGETVNETHDRSZ:
2658d9d52b51SMichael S. Tsirkin vnet_hdr_sz = tun->vnet_hdr_sz;
2659d9d52b51SMichael S. Tsirkin if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
2660d9d52b51SMichael S. Tsirkin ret = -EFAULT;
2661d9d52b51SMichael S. Tsirkin break;
2662d9d52b51SMichael S. Tsirkin
2663d9d52b51SMichael S. Tsirkin case TUNSETVNETHDRSZ:
2664d9d52b51SMichael S. Tsirkin if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
2665d9d52b51SMichael S. Tsirkin ret = -EFAULT;
2666d9d52b51SMichael S. Tsirkin break;
2667d9d52b51SMichael S. Tsirkin }
2668d9d52b51SMichael S. Tsirkin if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
2669d9d52b51SMichael S. Tsirkin ret = -EINVAL;
2670d9d52b51SMichael S. Tsirkin break;
2671d9d52b51SMichael S. Tsirkin }
2672d9d52b51SMichael S. Tsirkin
2673d9d52b51SMichael S. Tsirkin tun->vnet_hdr_sz = vnet_hdr_sz;
2674d9d52b51SMichael S. Tsirkin break;
2675d9d52b51SMichael S. Tsirkin
26761cf8e410SMichael S. Tsirkin case TUNGETVNETLE:
26771cf8e410SMichael S. Tsirkin le = !!(tun->flags & TUN_VNET_LE);
26781cf8e410SMichael S. Tsirkin if (put_user(le, (int __user *)argp))
26791cf8e410SMichael S. Tsirkin ret = -EFAULT;
26801cf8e410SMichael S. Tsirkin break;
26811cf8e410SMichael S. Tsirkin
26821cf8e410SMichael S. Tsirkin case TUNSETVNETLE:
26831cf8e410SMichael S. Tsirkin if (get_user(le, (int __user *)argp)) {
26841cf8e410SMichael S. Tsirkin ret = -EFAULT;
26851cf8e410SMichael S. Tsirkin break;
26861cf8e410SMichael S. Tsirkin }
26871cf8e410SMichael S. Tsirkin if (le)
26881cf8e410SMichael S. Tsirkin tun->flags |= TUN_VNET_LE;
26891cf8e410SMichael S. Tsirkin else
26901cf8e410SMichael S. Tsirkin tun->flags &= ~TUN_VNET_LE;
26911cf8e410SMichael S. Tsirkin break;
26921cf8e410SMichael S. Tsirkin
26938b8e658bSGreg Kurz case TUNGETVNETBE:
26948b8e658bSGreg Kurz ret = tun_get_vnet_be(tun, argp);
26958b8e658bSGreg Kurz break;
26968b8e658bSGreg Kurz
26978b8e658bSGreg Kurz case TUNSETVNETBE:
26988b8e658bSGreg Kurz ret = tun_set_vnet_be(tun, argp);
26998b8e658bSGreg Kurz break;
27008b8e658bSGreg Kurz
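/*
 * Editor's note, illustrative only: a virtio-style consumer typically pairs
 * the vnet knobs above, growing the header for the mergeable-buffers layout
 * and pinning the header fields to little-endian (error handling omitted;
 * fd is a configured tun/tap descriptor):
 *
 *	int hdr_sz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 *	int le = 1;
 *
 *	ioctl(fd, TUNSETVNETHDRSZ, &hdr_sz);
 *	ioctl(fd, TUNSETVNETLE, &le);
 */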
270199405162SMichael S. Tsirkin case TUNATTACHFILTER:
270299405162SMichael S. Tsirkin /* Can be set only for TAPs */
270399405162SMichael S. Tsirkin ret = -EINVAL;
270440630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
270599405162SMichael S. Tsirkin break;
270699405162SMichael S. Tsirkin ret = -EFAULT;
270754f968d6SJason Wang if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
270899405162SMichael S. Tsirkin break;
270999405162SMichael S. Tsirkin
2710c8d68e6bSJason Wang ret = tun_attach_filter(tun);
271199405162SMichael S. Tsirkin break;
271299405162SMichael S. Tsirkin
271399405162SMichael S. Tsirkin case TUNDETACHFILTER:
271499405162SMichael S. Tsirkin /* Can be set only for TAPs */
271599405162SMichael S. Tsirkin ret = -EINVAL;
271640630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
271799405162SMichael S. Tsirkin break;
2718c8d68e6bSJason Wang ret = 0;
2719c8d68e6bSJason Wang tun_detach_filter(tun, tun->numqueues);
272099405162SMichael S. Tsirkin break;
272199405162SMichael S. Tsirkin
272276975e9cSPavel Emelyanov case TUNGETFILTER:
272376975e9cSPavel Emelyanov ret = -EINVAL;
272440630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
272576975e9cSPavel Emelyanov break;
272676975e9cSPavel Emelyanov ret = -EFAULT;
272776975e9cSPavel Emelyanov if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
272876975e9cSPavel Emelyanov break;
272976975e9cSPavel Emelyanov ret = 0;
273076975e9cSPavel Emelyanov break;
273176975e9cSPavel Emelyanov
27321da177e4SLinus Torvalds default:
2733631ab46bSEric W. Biederman ret = -EINVAL;
2734631ab46bSEric W. Biederman break;
2735ee289b64SJoe Perches }
27361da177e4SLinus Torvalds
2737876bfd4dSHerbert Xu unlock:
2738876bfd4dSHerbert Xu rtnl_unlock();
2739876bfd4dSHerbert Xu if (tun)
2740631ab46bSEric W. Biederman tun_put(tun);
2741631ab46bSEric W. Biederman return ret;
27421da177e4SLinus Torvalds }
27431da177e4SLinus Torvalds
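/*
 * Editor's note, a hedged sketch rather than driver code: the
 * TUNATTACHFILTER case above takes a classic-BPF struct sock_fprog, the
 * same format the SO_ATTACH_FILTER setsockopt uses, and tun_attach_filter()
 * applies it to every queue's socket. A trivial accept-all program, where
 * 0x06 is BPF_RET|BPF_K and 0xffff the accepted length:
 *
 *	struct sock_filter accept_all = { 0x06, 0, 0, 0x0000ffff };
 *	struct sock_fprog fprog = { 1, &accept_all };
 *
 *	if (ioctl(fd, TUNATTACHFILTER, &fprog) < 0)
 *		perror("TUNATTACHFILTER");
 */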
274450857e2aSArnd Bergmann static long tun_chr_ioctl(struct file *file,
274550857e2aSArnd Bergmann unsigned int cmd, unsigned long arg)
274650857e2aSArnd Bergmann {
274750857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
274850857e2aSArnd Bergmann }
274950857e2aSArnd Bergmann
275050857e2aSArnd Bergmann #ifdef CONFIG_COMPAT
275150857e2aSArnd Bergmann static long tun_chr_compat_ioctl(struct file *file,
275250857e2aSArnd Bergmann unsigned int cmd, unsigned long arg)
275350857e2aSArnd Bergmann {
275450857e2aSArnd Bergmann switch (cmd) {
275550857e2aSArnd Bergmann case TUNSETIFF:
275650857e2aSArnd Bergmann case TUNGETIFF:
275750857e2aSArnd Bergmann case TUNSETTXFILTER:
275850857e2aSArnd Bergmann case TUNGETSNDBUF:
275950857e2aSArnd Bergmann case TUNSETSNDBUF:
276050857e2aSArnd Bergmann case SIOCGIFHWADDR:
276150857e2aSArnd Bergmann case SIOCSIFHWADDR:
276250857e2aSArnd Bergmann arg = (unsigned long)compat_ptr(arg);
276350857e2aSArnd Bergmann break;
276450857e2aSArnd Bergmann default:
276550857e2aSArnd Bergmann arg = (compat_ulong_t)arg;
276650857e2aSArnd Bergmann break;
276750857e2aSArnd Bergmann }
276850857e2aSArnd Bergmann
276950857e2aSArnd Bergmann /*
277050857e2aSArnd Bergmann * compat_ifreq is shorter than ifreq, so we must not access beyond
277150857e2aSArnd Bergmann * the end of that structure. All fields that are used in this
277250857e2aSArnd Bergmann * driver are compatible though, we don't need to convert the
277350857e2aSArnd Bergmann * contents.
277450857e2aSArnd Bergmann */
277550857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
277650857e2aSArnd Bergmann }
277750857e2aSArnd Bergmann #endif /* CONFIG_COMPAT */
277850857e2aSArnd Bergmann
27791da177e4SLinus Torvalds static int tun_chr_fasync(int fd, struct file *file, int on)
27801da177e4SLinus Torvalds {
278154f968d6SJason Wang struct tun_file *tfile = file->private_data;
27821da177e4SLinus Torvalds int ret;
27831da177e4SLinus Torvalds
278454f968d6SJason Wang if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
27859d319522SJonathan Corbet goto out;
27861da177e4SLinus Torvalds
27871da177e4SLinus Torvalds if (on) {
2788e0b93eddSJeff Layton __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
278954f968d6SJason Wang tfile->flags |= TUN_FASYNC;
27901da177e4SLinus Torvalds } else
279154f968d6SJason Wang tfile->flags &= ~TUN_FASYNC;
27929d319522SJonathan Corbet ret = 0;
27939d319522SJonathan Corbet out:
27949d319522SJonathan Corbet return ret;
27951da177e4SLinus Torvalds }
27961da177e4SLinus Torvalds
27971da177e4SLinus Torvalds static int tun_chr_open(struct inode *inode, struct file *file)
27981da177e4SLinus Torvalds {
2799140e807dSEric W. Biederman struct net *net = current->nsproxy->net_ns;
2800631ab46bSEric W. Biederman struct tun_file *tfile;
2801deed49fbSThomas Gleixner
28026b8a66eeSJoe Perches DBG1(KERN_INFO, "tunX: tun_chr_open\n");
2803631ab46bSEric W. Biederman
2804140e807dSEric W. Biederman tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
280511aa9c28SEric W. Biederman &tun_proto, 0);
2806631ab46bSEric W. Biederman if (!tfile)
2807631ab46bSEric W. Biederman return -ENOMEM;
2808c956674bSMonam Agarwal RCU_INIT_POINTER(tfile->tun, NULL);
280954f968d6SJason Wang tfile->flags = 0;
2810fb7589a1SPavel Emelyanov tfile->ifindex = 0;
281154f968d6SJason Wang
281254f968d6SJason Wang init_waitqueue_head(&tfile->wq.wait);
28139e641bdcSXi Wang RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);
281454f968d6SJason Wang
281554f968d6SJason Wang tfile->socket.file = file;
281654f968d6SJason Wang tfile->socket.ops = &tun_socket_ops;
281754f968d6SJason Wang
281854f968d6SJason Wang sock_init_data(&tfile->socket, &tfile->sk);
281954f968d6SJason Wang
282054f968d6SJason Wang tfile->sk.sk_write_space = tun_sock_write_space;
282154f968d6SJason Wang tfile->sk.sk_sndbuf = INT_MAX;
282254f968d6SJason Wang
2823631ab46bSEric W. Biederman file->private_data = tfile;
28244008e97fSJason Wang INIT_LIST_HEAD(&tfile->next);
282554f968d6SJason Wang
282619a6afb2SJason Wang sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
282719a6afb2SJason Wang
28281da177e4SLinus Torvalds return 0;
28291da177e4SLinus Torvalds }
28301da177e4SLinus Torvalds
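/*
 * Editor's note, illustrative only: tun_chr_fasync() above is what makes
 * SIGIO delivery work. A userspace reader opts in with the usual fcntl()
 * sequence (error handling omitted):
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC | O_NONBLOCK);
 *
 * after which a pending packet raises SIGIO instead of requiring a blocking
 * read() or a poll() loop.
 */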
28311da177e4SLinus Torvalds static int tun_chr_close(struct inode *inode, struct file *file)
28321da177e4SLinus Torvalds {
2833631ab46bSEric W. Biederman struct tun_file *tfile = file->private_data;
28341da177e4SLinus Torvalds
2835c8d68e6bSJason Wang tun_detach(tfile, true);
28361da177e4SLinus Torvalds
28371da177e4SLinus Torvalds return 0;
28381da177e4SLinus Torvalds }
28391da177e4SLinus Torvalds
284093e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS
28419484dc74Syuan linyu static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
284293e14b6dSMasatake YAMATO {
28439484dc74Syuan linyu struct tun_file *tfile = file->private_data;
284493e14b6dSMasatake YAMATO struct tun_struct *tun;
284593e14b6dSMasatake YAMATO struct ifreq ifr;
284693e14b6dSMasatake YAMATO
284793e14b6dSMasatake YAMATO memset(&ifr, 0, sizeof(ifr));
284893e14b6dSMasatake YAMATO
284993e14b6dSMasatake YAMATO rtnl_lock();
28509484dc74Syuan linyu tun = tun_get(tfile);
285193e14b6dSMasatake YAMATO if (tun)
285293e14b6dSMasatake YAMATO tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
285393e14b6dSMasatake YAMATO rtnl_unlock();
285493e14b6dSMasatake YAMATO
285593e14b6dSMasatake YAMATO if (tun)
285693e14b6dSMasatake YAMATO tun_put(tun);
285793e14b6dSMasatake YAMATO
2858a3816ab0SJoe Perches seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
285993e14b6dSMasatake YAMATO }
286093e14b6dSMasatake YAMATO #endif
286193e14b6dSMasatake YAMATO
2862d54b1fdbSArjan van de Ven static const struct file_operations tun_fops = {
28631da177e4SLinus Torvalds .owner = THIS_MODULE,
28641da177e4SLinus Torvalds .llseek = no_llseek,
28659b067034SAl Viro .read_iter = tun_chr_read_iter,
2866f5ff53b4SAl Viro .write_iter = tun_chr_write_iter,
28671da177e4SLinus Torvalds .poll = tun_chr_poll,
2868876bfd4dSHerbert Xu .unlocked_ioctl = tun_chr_ioctl,
286950857e2aSArnd Bergmann #ifdef CONFIG_COMPAT
287050857e2aSArnd Bergmann .compat_ioctl = tun_chr_compat_ioctl,
287150857e2aSArnd Bergmann #endif
28721da177e4SLinus Torvalds .open = tun_chr_open,
28731da177e4SLinus Torvalds .release = tun_chr_close,
287493e14b6dSMasatake YAMATO .fasync = tun_chr_fasync,
287593e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS
287693e14b6dSMasatake YAMATO .show_fdinfo = tun_chr_show_fdinfo,
287793e14b6dSMasatake YAMATO #endif
28781da177e4SLinus Torvalds };
28791da177e4SLinus Torvalds
28801da177e4SLinus Torvalds static struct miscdevice tun_miscdev = {
28811da177e4SLinus Torvalds .minor = TUN_MINOR,
28821da177e4SLinus Torvalds .name = "tun",
2883e454cea2SKay Sievers .nodename = "net/tun",
28841da177e4SLinus Torvalds .fops = &tun_fops,
28851da177e4SLinus Torvalds };
28861da177e4SLinus Torvalds
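/*
 * Editor's note, illustrative only: the show_fdinfo hook above lets a
 * process (or a debugger) recover which interface a tun fd is attached to
 * without issuing any ioctl, by reading procfs. A minimal sketch:
 *
 *	char path[64], buf[256];
 *	int pfd, n;
 *
 *	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
 *	pfd = open(path, O_RDONLY);
 *	n = read(pfd, buf, sizeof(buf) - 1);
 *	buf[n > 0 ? n : 0] = '\0';
 *	close(pfd);
 *
 * buf then contains, among the generic fdinfo fields, a line such as
 * "iff:	tap0" produced by the seq_printf() above.
 */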
28871da177e4SLinus Torvalds /* ethtool interface */
28881da177e4SLinus Torvalds
288929ccc49dSPhilippe Reynes static int tun_get_link_ksettings(struct net_device *dev,
289029ccc49dSPhilippe Reynes struct ethtool_link_ksettings *cmd)
28911da177e4SLinus Torvalds {
289229ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, supported);
289329ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, advertising);
289429ccc49dSPhilippe Reynes cmd->base.speed = SPEED_10;
289529ccc49dSPhilippe Reynes cmd->base.duplex = DUPLEX_FULL;
289629ccc49dSPhilippe Reynes cmd->base.port = PORT_TP;
289729ccc49dSPhilippe Reynes cmd->base.phy_address = 0;
289829ccc49dSPhilippe Reynes cmd->base.autoneg = AUTONEG_DISABLE;
28991da177e4SLinus Torvalds return 0;
29001da177e4SLinus Torvalds }
29011da177e4SLinus Torvalds
29021da177e4SLinus Torvalds static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
29031da177e4SLinus Torvalds {
29041da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev);
29051da177e4SLinus Torvalds
290633a5ba14SRick Jones strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
290733a5ba14SRick Jones strlcpy(info->version, DRV_VERSION, sizeof(info->version));
29081da177e4SLinus Torvalds
29091da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) {
291040630b82SMichael S. Tsirkin case IFF_TUN:
291133a5ba14SRick Jones strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
29121da177e4SLinus Torvalds break;
291340630b82SMichael S. Tsirkin case IFF_TAP:
291433a5ba14SRick Jones strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
29151da177e4SLinus Torvalds break;
29161da177e4SLinus Torvalds }
29171da177e4SLinus Torvalds }
29181da177e4SLinus Torvalds
29191da177e4SLinus Torvalds static u32 tun_get_msglevel(struct net_device *dev)
29201da177e4SLinus Torvalds {
29211da177e4SLinus Torvalds #ifdef TUN_DEBUG
29221da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev);
29231da177e4SLinus Torvalds return tun->debug;
29241da177e4SLinus Torvalds #else
29251da177e4SLinus Torvalds return -EOPNOTSUPP;
29261da177e4SLinus Torvalds #endif
29271da177e4SLinus Torvalds }
29281da177e4SLinus Torvalds
29291da177e4SLinus Torvalds static void tun_set_msglevel(struct net_device *dev, u32 value)
29301da177e4SLinus Torvalds {
29311da177e4SLinus Torvalds #ifdef TUN_DEBUG
29321da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev);
29331da177e4SLinus Torvalds tun->debug = value;
29341da177e4SLinus Torvalds #endif
29351da177e4SLinus Torvalds }
29361da177e4SLinus Torvalds
29375503fcecSJason Wang static int tun_get_coalesce(struct net_device *dev,
29385503fcecSJason Wang struct ethtool_coalesce *ec)
29395503fcecSJason Wang {
29405503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev);
29415503fcecSJason Wang
29425503fcecSJason Wang ec->rx_max_coalesced_frames = tun->rx_batched;
29435503fcecSJason Wang
29445503fcecSJason Wang return 0;
29455503fcecSJason Wang }
29465503fcecSJason Wang
29475503fcecSJason Wang static int tun_set_coalesce(struct net_device *dev,
29485503fcecSJason Wang struct ethtool_coalesce *ec)
29495503fcecSJason Wang {
29505503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev);
29515503fcecSJason Wang
29525503fcecSJason Wang if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
29535503fcecSJason Wang tun->rx_batched = NAPI_POLL_WEIGHT;
29545503fcecSJason Wang else
29555503fcecSJason Wang tun->rx_batched = ec->rx_max_coalesced_frames;
29565503fcecSJason Wang
29575503fcecSJason Wang return 0;
29585503fcecSJason Wang }
29595503fcecSJason Wang
29607282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops = {
29611da177e4SLinus Torvalds .get_drvinfo = tun_get_drvinfo,
29621da177e4SLinus Torvalds .get_msglevel = tun_get_msglevel,
29631da177e4SLinus Torvalds .set_msglevel = tun_set_msglevel,
2964bee31369SNolan Leake .get_link = ethtool_op_get_link,
2965eda29772SRichard Cochran .get_ts_info = ethtool_op_get_ts_info,
29665503fcecSJason Wang .get_coalesce = tun_get_coalesce,
29675503fcecSJason Wang .set_coalesce = tun_set_coalesce,
296829ccc49dSPhilippe Reynes .get_link_ksettings = tun_get_link_ksettings,
29691da177e4SLinus Torvalds };
29701da177e4SLinus Torvalds
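/*
 * Editor's note, illustrative only: the coalesce pair above maps ethtool's
 * rx-frames parameter onto tun->rx_batched, so up to that many packets are
 * queued before being flushed into the network stack (capped at
 * NAPI_POLL_WEIGHT). Equivalent to `ethtool -C tap0 rx-frames 32`; tap0 and
 * 32 are placeholders, sock is any AF_INET datagram socket.
 *
 *	struct ethtool_coalesce ec;
 *	struct ifreq ifr;
 *
 *	memset(&ec, 0, sizeof(ec));
 *	ec.cmd = ETHTOOL_SCOALESCE;
 *	ec.rx_max_coalesced_frames = 32;
 *	memset(&ifr, 0, sizeof(ifr));
 *	strcpy(ifr.ifr_name, "tap0");
 *	ifr.ifr_data = (char *)&ec;
 *	ioctl(sock, SIOCETHTOOL, &ifr);
 */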
29711576d986SJason Wang static int tun_queue_resize(struct tun_struct *tun)
29721576d986SJason Wang {
29731576d986SJason Wang struct net_device *dev = tun->dev;
29741576d986SJason Wang struct tun_file *tfile;
29751576d986SJason Wang struct skb_array **arrays;
29761576d986SJason Wang int n = tun->numqueues + tun->numdisabled;
29771576d986SJason Wang int ret, i;
29781576d986SJason Wang
297912039046Sstephen hemminger arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
29801576d986SJason Wang if (!arrays)
29811576d986SJason Wang return -ENOMEM;
29821576d986SJason Wang
29831576d986SJason Wang for (i = 0; i < tun->numqueues; i++) {
29841576d986SJason Wang tfile = rtnl_dereference(tun->tfiles[i]);
29851576d986SJason Wang arrays[i] = &tfile->tx_array;
29861576d986SJason Wang }
29871576d986SJason Wang list_for_each_entry(tfile, &tun->disabled, next)
29881576d986SJason Wang arrays[i++] = &tfile->tx_array;
29891576d986SJason Wang
29901576d986SJason Wang ret = skb_array_resize_multiple(arrays, n,
29911576d986SJason Wang dev->tx_queue_len, GFP_KERNEL);
29921576d986SJason Wang
29931576d986SJason Wang kfree(arrays);
29941576d986SJason Wang return ret;
29951576d986SJason Wang }
29961576d986SJason Wang
29971576d986SJason Wang static int tun_device_event(struct notifier_block *unused,
29981576d986SJason Wang unsigned long event, void *ptr)
29991576d986SJason Wang {
30001576d986SJason Wang struct net_device *dev = netdev_notifier_info_to_dev(ptr);
30011576d986SJason Wang struct tun_struct *tun = netdev_priv(dev);
30021576d986SJason Wang
300386dfb4acSCraig Gallek if (dev->rtnl_link_ops != &tun_link_ops)
300486dfb4acSCraig Gallek return NOTIFY_DONE;
300586dfb4acSCraig Gallek
30061576d986SJason Wang switch (event) {
30071576d986SJason Wang case NETDEV_CHANGE_TX_QUEUE_LEN:
30081576d986SJason Wang if (tun_queue_resize(tun))
30091576d986SJason Wang return NOTIFY_BAD;
30101576d986SJason Wang break;
30111576d986SJason Wang default:
30121576d986SJason Wang break;
30131576d986SJason Wang }
30141576d986SJason Wang
30151576d986SJason Wang return NOTIFY_DONE;
30161576d986SJason Wang }
30171576d986SJason Wang
30181576d986SJason Wang static struct notifier_block tun_notifier_block __read_mostly = {
30191576d986SJason Wang .notifier_call = tun_device_event,
30201576d986SJason Wang };
302179d17604SPavel Emelyanov
30221da177e4SLinus Torvalds static int __init tun_init(void)
30231da177e4SLinus Torvalds {
30241da177e4SLinus Torvalds int ret = 0;
30251da177e4SLinus Torvalds
30266b8a66eeSJoe Perches pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
30271da177e4SLinus Torvalds
3028f019a7a5SEric W. Biederman ret = rtnl_link_register(&tun_link_ops);
302979d17604SPavel Emelyanov if (ret) {
30306b8a66eeSJoe Perches pr_err("Can't register link_ops\n");
3031f019a7a5SEric W. Biederman goto err_linkops;
303279d17604SPavel Emelyanov }
303379d17604SPavel Emelyanov
30341da177e4SLinus Torvalds ret = misc_register(&tun_miscdev);
303579d17604SPavel Emelyanov if (ret) {
30366b8a66eeSJoe Perches pr_err("Can't register misc device %d\n", TUN_MINOR);
303779d17604SPavel Emelyanov goto err_misc;
303879d17604SPavel Emelyanov }
30391576d986SJason Wang
30405edfbd3cSTonghao Zhang ret = register_netdevice_notifier(&tun_notifier_block);
30415edfbd3cSTonghao Zhang if (ret) {
30425edfbd3cSTonghao Zhang pr_err("Can't register netdevice notifier\n");
30435edfbd3cSTonghao Zhang goto err_notifier;
30445edfbd3cSTonghao Zhang }
30455edfbd3cSTonghao Zhang
304679d17604SPavel Emelyanov return 0;
30475edfbd3cSTonghao Zhang
30485edfbd3cSTonghao Zhang err_notifier:
30495edfbd3cSTonghao Zhang misc_deregister(&tun_miscdev);
305079d17604SPavel Emelyanov err_misc:
3051f019a7a5SEric W. Biederman rtnl_link_unregister(&tun_link_ops);
3052f019a7a5SEric W. Biederman err_linkops:
30531da177e4SLinus Torvalds return ret;
30541da177e4SLinus Torvalds }
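/*
 * Editor's note, illustrative only: the NETDEV_CHANGE_TX_QUEUE_LEN notifier
 * above means every per-queue skb_array is resized when userspace changes
 * the tx queue length, e.g. via SIOCSIFTXQLEN (equivalently
 * `ip link set tap0 txqueuelen 2000`). tap0 and 2000 are placeholders and
 * sock is any AF_INET datagram socket:
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strcpy(ifr.ifr_name, "tap0");
 *	ifr.ifr_qlen = 2000;
 *	ioctl(sock, SIOCSIFTXQLEN, &ifr);
 */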
30551da177e4SLinus Torvalds
30561da177e4SLinus Torvalds static void tun_cleanup(void)
30571da177e4SLinus Torvalds {
30581da177e4SLinus Torvalds misc_deregister(&tun_miscdev);
3059f019a7a5SEric W. Biederman rtnl_link_unregister(&tun_link_ops);
30601576d986SJason Wang unregister_netdevice_notifier(&tun_notifier_block);
30611da177e4SLinus Torvalds }
30621da177e4SLinus Torvalds
306305c2828cSMichael S. Tsirkin /* Get an underlying socket object from tun file. Returns error unless file is
306405c2828cSMichael S. Tsirkin * attached to a device. The returned object works like a packet socket, it
306505c2828cSMichael S. Tsirkin * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
306605c2828cSMichael S. Tsirkin * holding a reference to the file for as long as the socket is in use. */
306705c2828cSMichael S. Tsirkin struct socket *tun_get_socket(struct file *file)
306805c2828cSMichael S. Tsirkin {
30696e914fc7SJason Wang struct tun_file *tfile;
307005c2828cSMichael S. Tsirkin if (file->f_op != &tun_fops)
307105c2828cSMichael S. Tsirkin return ERR_PTR(-EINVAL);
30726e914fc7SJason Wang tfile = file->private_data;
30736e914fc7SJason Wang if (!tfile)
307405c2828cSMichael S. Tsirkin return ERR_PTR(-EBADFD);
307554f968d6SJason Wang return &tfile->socket;
307605c2828cSMichael S. Tsirkin }
307705c2828cSMichael S. Tsirkin EXPORT_SYMBOL_GPL(tun_get_socket);
307805c2828cSMichael S. Tsirkin
307983339c6bSJason Wang struct skb_array *tun_get_skb_array(struct file *file)
308083339c6bSJason Wang {
308183339c6bSJason Wang struct tun_file *tfile;
308283339c6bSJason Wang
308383339c6bSJason Wang if (file->f_op != &tun_fops)
308483339c6bSJason Wang return ERR_PTR(-EINVAL);
308583339c6bSJason Wang tfile = file->private_data;
308683339c6bSJason Wang if (!tfile)
308783339c6bSJason Wang return ERR_PTR(-EBADFD);
308883339c6bSJason Wang return &tfile->tx_array;
308983339c6bSJason Wang }
309083339c6bSJason Wang EXPORT_SYMBOL_GPL(tun_get_skb_array);
309183339c6bSJason Wang
30921da177e4SLinus Torvalds module_init(tun_init);
30931da177e4SLinus Torvalds module_exit(tun_cleanup);
30941da177e4SLinus Torvalds MODULE_DESCRIPTION(DRV_DESCRIPTION);
30951da177e4SLinus Torvalds MODULE_AUTHOR(DRV_COPYRIGHT);
30961da177e4SLinus Torvalds MODULE_LICENSE("GPL");
30971da177e4SLinus Torvalds MODULE_ALIAS_MISCDEV(TUN_MINOR);
3098578454ffSKay Sievers MODULE_ALIAS("devname:net/tun");
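/*
 * Editor's note -- a hedged sketch, not part of this file. tun_get_socket()
 * exists for in-kernel consumers such as vhost-net, which resolve a
 * userspace-supplied tun fd roughly like this (simplified from the pattern
 * in drivers/vhost/net.c; error handling abbreviated):
 *
 *	struct socket *get_tun_socket(int fd)
 *	{
 *		struct file *file = fget(fd);
 *		struct socket *sock;
 *
 *		if (!file)
 *			return ERR_PTR(-EBADF);
 *		sock = tun_get_socket(file);
 *		if (IS_ERR(sock))
 *			fput(file);
 *		return sock;
 *	}
 *
 * matching the comment above tun_get_socket(): the caller keeps the file
 * reference for as long as it uses the socket.
 */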