/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */
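/*
 * Userspace talks to this driver through the /dev/net/tun character
 * device.  A minimal, illustrative creation sequence for a TAP interface
 * (example only, error handling omitted; see
 * Documentation/networking/tuntap.txt for the canonical version):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	int tun_alloc(const char *name)
 *	{
 *		struct ifreq ifr;
 *		int fd = open("/dev/net/tun", O_RDWR);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
 *		strncpy(ifr.ifr_name, name, IFNAMSIZ);
 *		ioctl(fd, TUNSETIFF, &ifr);
 *		return fd;	// read()/write() now carry frames
 *	}
 */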
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>

/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (tun->debug)						\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (debug == 2)						\
		printk(level fmt, ##args);			\
} while (0)
#else
#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (0)							\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (0)							\
		printk(level fmt, ##args);			\
} while (0)
#endif
#define TUN_HEADROOM 256
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE     0x80000000
#define TUN_VNET_BE     0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};
/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and
 * tap_filter) to serve as one transmit queue for the tuntap device. The
 * sock_fprog and tap_filter are kept in tun_struct since they are used for
 * filtering on the netdevice, not on a specific queue (at least I didn't
 * see the requirement for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to
 * the other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct skb_array tx_array;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated;
};

#define TUN_NUM_FLOW_ENTRIES 1024

/* Since the sockets were moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when a file is attached to the persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int            numqueues;
	unsigned int 		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
};

static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en)
{
	if (napi_en) {
		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
		mutex_init(&tfile->napi_mutex);
	}
}
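/*
 * Note on the IFF_NAPI receive path (sketch of how the pieces connect):
 * when NAPI is enabled, the char-device write side (tun_get_user(), later
 * in this file) queues incoming skbs on tfile->sk.sk_write_queue and
 * schedules the per-queue NAPI instance; tun_napi_receive() above then
 * drains that queue into the GRO engine via napi_gro_receive().  Anything
 * not consumed within the budget is spliced back and picked up on the
 * next poll.
 */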
static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
{
	if (tun->flags & IFF_NAPI)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
{
	if (tun->flags & IFF_NAPI)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_struct *tun)
{
	return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
}

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}
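/*
 * The helpers above decide how the __virtio16 fields of the vnet header
 * are interpreted (e.g. tun16_to_cpu(tun, gso.hdr_len)).  Userspace can
 * flip this explicitly; an illustrative sketch (the *VNETBE ioctls return
 * -EINVAL unless the kernel was built with CONFIG_TUN_VNET_CROSS_LE):
 *
 *	int be = 1;
 *	ioctl(fd, TUNSETVNETBE, &be);	// legacy big-endian vnet headers
 *	ioctl(fd, TUNGETVNETBE, &be);
 *
 * TUNSETVNETLE/TUNGETVNETLE set and query TUN_VNET_LE the same way and
 * are always available.
 */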
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & 0x3ff;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}
static void tun_flow_cleanup(unsigned long data)
{
	struct tun_struct *tun = (struct tun_struct *)data;
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;
			count++;
			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies))
				tun_flow_delete(tun, e);
			else if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	if (!rxhash)
		return;
	else
		head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	/* There is a small possibility of out-of-order delivery while a
	 * flow switches queues; not worth optimizing for. */
	if (tun->numqueues == 1 || tfile->detached)
		goto unlock;

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

unlock:
	rcu_read_unlock();
}
/**
 * Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash first. The reason we do
 * not check the rx queue number is that some NICs (e.g. the 82599) choose
 * the rx queue based on the tx queue where the last packet of the flow
 * was sent. As the userspace application moves between processors, we may
 * get a different rx queue number here. If we cannot get an rxhash, we
 * hope the rx queue number helps.
 */
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	rcu_read_lock();
	numqueues = ACCESS_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	if (txq) {
		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
		if (e) {
			tun_flow_save_rps_rxhash(e, txq);
			txq = e->queue_index;
		} else
			/* use multiply and shift instead of expensive divide */
			txq = ((u64)txq * numqueues) >> 32;
	} else if (likely(skb_rx_queue_recorded(skb))) {
		txq = skb_get_rx_queue(skb);
		while (unlikely(txq >= numqueues))
			txq -= numqueues;
	}

	rcu_read_unlock();
	return txq;
}
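/*
 * The multiply-and-shift above maps a 32-bit hash uniformly onto
 * [0, numqueues): e.g. with numqueues = 4, a hash of 0x80000000 yields
 * ((u64)0x80000000 * 4) >> 32 = 2.  It replaces "hash % numqueues" with
 * a single multiply.
 */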
static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

static void tun_queue_purge(struct tun_file *tfile)
{
	struct sk_buff *skb;

	while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
		kfree_skb(skb);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tun, tfile);
		tun_napi_del(tun, tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}
	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			skb_array_cleanup(&tfile->tx_array);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	rtnl_lock();
	__tun_detach(tfile, clean);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct bpf_prog *xdp_prog = rtnl_dereference(tun->xdp_prog);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tun, tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tun, tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (xdp_prog)
		bpf_prog_put(xdp_prog);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;
	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to the persistent device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (!err)
			goto out;
	}

	if (!tfile->detached &&
	    skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi);
	}

	tun_set_real_num_queues(tun);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}
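/*
 * tun_attach()/__tun_detach() back the multiqueue API, where each queue
 * is one open fd.  Illustrative userspace flow (error handling omitted):
 *
 *	struct ifreq ifr = { .ifr_flags = IFF_TAP | IFF_NO_PI |
 *					  IFF_MULTI_QUEUE };
 *	strcpy(ifr.ifr_name, "tap0");
 *	int q1 = open("/dev/net/tun", O_RDWR);
 *	ioctl(q1, TUNSETIFF, &ifr);		// queue 0
 *	int q2 = open("/dev/net/tun", O_RDWR);
 *	ioctl(q2, TUNSETIFF, &ifr);		// queue 1, same name
 *
 *	// temporarily unplug/replug a queue:
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;
 *	ioctl(q2, TUNSETQUEUE, &ifr);		// -> tun_disable_queue()
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	ioctl(q2, TUNSETQUEUE, &ifr);		// -> tun_attach() again
 */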
/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}
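/*
 * The hash filter above is, in effect, a tiny one-hash Bloom filter: the
 * top six bits of the Ethernet CRC of a MAC address (ether_crc() >> 26,
 * range 0..63) select one bit in mask[2] (2 x 32 bits).  addr_hash_set()
 * sets that bit and addr_hash_test() checks it, so inexact matching may
 * accept the odd stray multicast frame but never drops an address that
 * was programmed.
 */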
static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks, which is
	 * perfectly safe: we disable it first, and in the worst case
	 * we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}
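/*
 * update_filter() above is driven by the TUNSETTXFILTER ioctl, whose
 * argument is a struct tun_filter immediately followed by the address
 * array.  A sketch of programming one exact MAC plus "accept all
 * multicast" (illustrative only):
 *
 *	struct {
 *		struct tun_filter uf;
 *		__u8 addrs[1][ETH_ALEN];
 *	} f = {
 *		.uf = { .flags = TUN_FLT_ALLMULTI, .count = 1 },
 *		.addrs = { { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } },
 *	};
 *	ioctl(fd, TUNSETTXFILTER, &f);
 */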
/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	netif_tx_start_all_queues(dev);

	for (i = 0; i < tun->numqueues; i++) {
		struct tun_file *tfile;

		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_write_space(tfile->socket.sk);
	}

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	u32 numqueues = 0;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);
	numqueues = ACCESS_ONCE(tun->numqueues);

	/* Drop packet if interface is not attached */
	if (txq >= numqueues)
		goto drop;

#ifdef CONFIG_RPS
	if (numqueues == 1 && static_key_false(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		if (rxhash) {
			struct tun_flow_entry *e;
			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
					  rxhash);
			if (e)
				tun_flow_save_rps_rxhash(e, rxhash);
		}
	}
#endif

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time.
	 */
	skb_orphan(skb);

	nf_reset(skb);

	if (skb_array_produce(&tfile->tx_array, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}
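/*
 * tun->set_features tracks what userspace enabled with TUNSETOFFLOAD;
 * tun_net_fix_features() above masks only the TUN_USER_FEATURES bits
 * accordingly and leaves all other feature bits alone.  Illustrative
 * userspace side (the flag word is passed by value):
 *
 *	ioctl(fd, TUNSETOFFLOAD, TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6);
 */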
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tun_poll_controller(struct net_device *dev)
{
	/*
	 * Tun only receives frames when:
	 * 1) the char device endpoint gets data from user space
	 * 2) the tun socket gets a sendmsg call from user space
	 * If NAPI is not enabled, since both of those are synchronous
	 * operations, we are guaranteed never to have pending data when we
	 * poll for it, so there is nothing to do here but return.
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole.
	 * If NAPI is enabled, however, we need to schedule polling for all
	 * queues unless we are using napi_gro_frags(), which we call in
	 * process context and not in NAPI context.
	 */
	struct tun_struct *tun = netdev_priv(dev);

	if (tun->flags & IFF_NAPI) {
		struct tun_file *tfile;
		int i;

		if (tun_napi_frags_enabled(tun))
			return;

		rcu_read_lock();
		for (i = 0; i < tun->numqueues; i++) {
			tfile = rcu_dereference(tun->tfiles[i]);
			napi_schedule(&tfile->napi);
		}
		rcu_read_unlock();
	}
	return;
}
#endif

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}

static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_pcpu_stats *p;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(tun->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets	= p->rx_packets;
			rxbytes		= p->rx_bytes;
			txpackets	= p->tx_packets;
			txbytes		= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;

		/* u32 counters */
		rx_dropped	+= p->rx_dropped;
		rx_frame_errors	+= p->rx_frame_errors;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped  = rx_dropped;
	stats->rx_frame_errors = rx_frame_errors;
	stats->tx_dropped = tx_dropped;
}
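/*
 * The read side above pairs with the per-CPU writers elsewhere in this
 * file; the u64 counters are updated under the syncp sequence counter
 * (sketch of the receive-path accounting):
 *
 *	struct tun_pcpu_stats *stats = get_cpu_ptr(tun->pcpu_stats);
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->rx_packets++;
 *	stats->rx_bytes += len;
 *	u64_stats_update_end(&stats->syncp);
 *	put_cpu_ptr(stats);
 *
 * The u32 counters (rx_dropped etc.) are updated with this_cpu_inc(), as
 * in tun_net_xmit() above, and need no syncp protection.
 */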
static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct bpf_prog *old_prog;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}

static u32 tun_xdp_query(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;

	xdp_prog = rtnl_dereference(tun->xdp_prog);
	if (xdp_prog)
		return xdp_prog->aux->id;

	return 0;
}

static int tun_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return tun_xdp_set(dev, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = tun_xdp_query(dev);
		xdp->prog_attached = !!xdp->prog_id;
		return 0;
	default:
		return -EINVAL;
	}
}
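/*
 * tun_xdp() is wired to .ndo_xdp in tap_netdev_ops below, so a program
 * can be attached with the standard tooling, e.g. (illustrative; the
 * object file name is hypothetical):
 *
 *	# ip link set dev tap0 xdp obj xdp_prog.o
 *	# ip link show dev tap0		# reports the attached prog id
 */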
static const struct net_device_ops tun_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
};

static const struct net_device_ops tap_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_xdp		= tun_xdp,
};

static void tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));
}

static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}

#define MIN_MTU 68
#define MAX_MTU 65535

/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		dev->netdev_ops = &tun_netdev_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		break;
Tsirkin case IFF_TAP: 12277a0a9608SKusanagi Kouichi dev->netdev_ops = &tap_netdev_ops; 12281da177e4SLinus Torvalds /* Ethernet TAP Device */ 12291da177e4SLinus Torvalds ether_setup(dev); 1230550fd08cSNeil Horman dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1231a676847bSstephen hemminger dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 123236226a8dSBrian Braunstein 1233f2cedb63SDanny Kukawka eth_hw_addr_random(dev); 123436226a8dSBrian Braunstein 12351da177e4SLinus Torvalds break; 12361da177e4SLinus Torvalds } 123791572088SJarod Wilson 123891572088SJarod Wilson dev->min_mtu = MIN_MTU; 123991572088SJarod Wilson dev->max_mtu = MAX_MTU - dev->hard_header_len; 12401da177e4SLinus Torvalds } 12411da177e4SLinus Torvalds 12421da177e4SLinus Torvalds /* Character device part */ 12431da177e4SLinus Torvalds 12441da177e4SLinus Torvalds /* Poll */ 12451da177e4SLinus Torvalds static unsigned int tun_chr_poll(struct file *file, poll_table *wait) 12461da177e4SLinus Torvalds { 1247b2430de3SEric W. Biederman struct tun_file *tfile = file->private_data; 12489484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 12493c8a9c63SMariusz Kozlowski struct sock *sk; 125033dccbb0SHerbert Xu unsigned int mask = 0; 12511da177e4SLinus Torvalds 12521da177e4SLinus Torvalds if (!tun) 1253eac9e902SEric W. Biederman return POLLERR; 12541da177e4SLinus Torvalds 125554f968d6SJason Wang sk = tfile->socket.sk; 12563c8a9c63SMariusz Kozlowski 12576b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_chr_poll\n"); 12581da177e4SLinus Torvalds 12599e641bdcSXi Wang poll_wait(file, sk_sleep(sk), wait); 12601da177e4SLinus Torvalds 12611576d986SJason Wang if (!skb_array_empty(&tfile->tx_array)) 12621da177e4SLinus Torvalds mask |= POLLIN | POLLRDNORM; 12631da177e4SLinus Torvalds 1264b20e2d54SHannes Frederic Sowa if (tun->dev->flags & IFF_UP && 1265b20e2d54SHannes Frederic Sowa (sock_writeable(sk) || 12669cd3e072SEric Dumazet (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 1267b20e2d54SHannes Frederic Sowa sock_writeable(sk)))) 126833dccbb0SHerbert Xu mask |= POLLOUT | POLLWRNORM; 126933dccbb0SHerbert Xu 1270c70f1829SEric W. Biederman if (tun->dev->reg_state != NETREG_REGISTERED) 1271c70f1829SEric W. Biederman mask = POLLERR; 1272c70f1829SEric W. Biederman 1273631ab46bSEric W. 
Biederman tun_put(tun); 12741da177e4SLinus Torvalds return mask; 12751da177e4SLinus Torvalds } 12761da177e4SLinus Torvalds 127790e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, 127890e33d45SPetar Penkov size_t len, 127990e33d45SPetar Penkov const struct iov_iter *it) 128090e33d45SPetar Penkov { 128190e33d45SPetar Penkov struct sk_buff *skb; 128290e33d45SPetar Penkov size_t linear; 128390e33d45SPetar Penkov int err; 128490e33d45SPetar Penkov int i; 128590e33d45SPetar Penkov 128690e33d45SPetar Penkov if (it->nr_segs > MAX_SKB_FRAGS + 1) 128790e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 128890e33d45SPetar Penkov 128990e33d45SPetar Penkov local_bh_disable(); 129090e33d45SPetar Penkov skb = napi_get_frags(&tfile->napi); 129190e33d45SPetar Penkov local_bh_enable(); 129290e33d45SPetar Penkov if (!skb) 129390e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 129490e33d45SPetar Penkov 129590e33d45SPetar Penkov linear = iov_iter_single_seg_count(it); 129690e33d45SPetar Penkov err = __skb_grow(skb, linear); 129790e33d45SPetar Penkov if (err) 129890e33d45SPetar Penkov goto free; 129990e33d45SPetar Penkov 130090e33d45SPetar Penkov skb->len = len; 130190e33d45SPetar Penkov skb->data_len = len - linear; 130290e33d45SPetar Penkov skb->truesize += skb->data_len; 130390e33d45SPetar Penkov 130490e33d45SPetar Penkov for (i = 1; i < it->nr_segs; i++) { 130590e33d45SPetar Penkov size_t fragsz = it->iov[i].iov_len; 130690e33d45SPetar Penkov unsigned long offset; 130790e33d45SPetar Penkov struct page *page; 130890e33d45SPetar Penkov void *data; 130990e33d45SPetar Penkov 131090e33d45SPetar Penkov if (fragsz == 0 || fragsz > PAGE_SIZE) { 131190e33d45SPetar Penkov err = -EINVAL; 131290e33d45SPetar Penkov goto free; 131390e33d45SPetar Penkov } 131490e33d45SPetar Penkov 131590e33d45SPetar Penkov local_bh_disable(); 131690e33d45SPetar Penkov data = napi_alloc_frag(fragsz); 131790e33d45SPetar Penkov local_bh_enable(); 131890e33d45SPetar Penkov if (!data) { 131990e33d45SPetar Penkov err = -ENOMEM; 132090e33d45SPetar Penkov goto free; 132190e33d45SPetar Penkov } 132290e33d45SPetar Penkov 132390e33d45SPetar Penkov page = virt_to_head_page(data); 132490e33d45SPetar Penkov offset = data - page_address(page); 132590e33d45SPetar Penkov skb_fill_page_desc(skb, i - 1, page, offset, fragsz); 132690e33d45SPetar Penkov } 132790e33d45SPetar Penkov 132890e33d45SPetar Penkov return skb; 132990e33d45SPetar Penkov free: 133090e33d45SPetar Penkov /* frees skb and all frags allocated with napi_alloc_frag() */ 133190e33d45SPetar Penkov napi_free_frags(&tfile->napi); 133290e33d45SPetar Penkov return ERR_PTR(err); 133390e33d45SPetar Penkov } 133490e33d45SPetar Penkov 1335f42157cbSRusty Russell /* prepad is the amount to reserve at front. len is length after that. 1336f42157cbSRusty Russell * linear is a hint as to how much to copy (usually headers). */ 133754f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, 133833dccbb0SHerbert Xu size_t prepad, size_t len, 133933dccbb0SHerbert Xu size_t linear, int noblock) 1340f42157cbSRusty Russell { 134154f968d6SJason Wang struct sock *sk = tfile->socket.sk; 1342f42157cbSRusty Russell struct sk_buff *skb; 134333dccbb0SHerbert Xu int err; 1344f42157cbSRusty Russell 1345f42157cbSRusty Russell /* Under a page? Don't bother with paged skb. 
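 * If the whole payload plus prepad fits in a single page, or no
 * linear hint was given, make everything linear so that
 * sock_alloc_send_pskb() below allocates no paged frags at all.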
*/ 13460eca93bcSHerbert Xu if (prepad + len < PAGE_SIZE || !linear) 134733dccbb0SHerbert Xu linear = len; 1348f42157cbSRusty Russell 134933dccbb0SHerbert Xu skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 135028d64271SEric Dumazet &err, 0); 1351f42157cbSRusty Russell if (!skb) 135233dccbb0SHerbert Xu return ERR_PTR(err); 1353f42157cbSRusty Russell 1354f42157cbSRusty Russell skb_reserve(skb, prepad); 1355f42157cbSRusty Russell skb_put(skb, linear); 135633dccbb0SHerbert Xu skb->data_len = len - linear; 135733dccbb0SHerbert Xu skb->len += len - linear; 1358f42157cbSRusty Russell 1359f42157cbSRusty Russell return skb; 1360f42157cbSRusty Russell } 1361f42157cbSRusty Russell 13625503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, 13635503fcecSJason Wang struct sk_buff *skb, int more) 13645503fcecSJason Wang { 13655503fcecSJason Wang struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 13665503fcecSJason Wang struct sk_buff_head process_queue; 13675503fcecSJason Wang u32 rx_batched = tun->rx_batched; 13685503fcecSJason Wang bool rcv = false; 13695503fcecSJason Wang 13705503fcecSJason Wang if (!rx_batched || (!more && skb_queue_empty(queue))) { 13715503fcecSJason Wang local_bh_disable(); 13725503fcecSJason Wang netif_receive_skb(skb); 13735503fcecSJason Wang local_bh_enable(); 13745503fcecSJason Wang return; 13755503fcecSJason Wang } 13765503fcecSJason Wang 13775503fcecSJason Wang spin_lock(&queue->lock); 13785503fcecSJason Wang if (!more || skb_queue_len(queue) == rx_batched) { 13795503fcecSJason Wang __skb_queue_head_init(&process_queue); 13805503fcecSJason Wang skb_queue_splice_tail_init(queue, &process_queue); 13815503fcecSJason Wang rcv = true; 13825503fcecSJason Wang } else { 13835503fcecSJason Wang __skb_queue_tail(queue, skb); 13845503fcecSJason Wang } 13855503fcecSJason Wang spin_unlock(&queue->lock); 13865503fcecSJason Wang 13875503fcecSJason Wang if (rcv) { 13885503fcecSJason Wang struct sk_buff *nskb; 13895503fcecSJason Wang 13905503fcecSJason Wang local_bh_disable(); 13915503fcecSJason Wang while ((nskb = __skb_dequeue(&process_queue))) 13925503fcecSJason Wang netif_receive_skb(nskb); 13935503fcecSJason Wang netif_receive_skb(skb); 13945503fcecSJason Wang local_bh_enable(); 13955503fcecSJason Wang } 13965503fcecSJason Wang } 13975503fcecSJason Wang 139866ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, 139966ccbc9cSJason Wang int len, int noblock, bool zerocopy) 140066ccbc9cSJason Wang { 140166ccbc9cSJason Wang if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 140266ccbc9cSJason Wang return false; 140366ccbc9cSJason Wang 140466ccbc9cSJason Wang if (tfile->socket.sk->sk_sndbuf != INT_MAX) 140566ccbc9cSJason Wang return false; 140666ccbc9cSJason Wang 140766ccbc9cSJason Wang if (!noblock) 140866ccbc9cSJason Wang return false; 140966ccbc9cSJason Wang 141066ccbc9cSJason Wang if (zerocopy) 141166ccbc9cSJason Wang return false; 141266ccbc9cSJason Wang 141366ccbc9cSJason Wang if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + 141466ccbc9cSJason Wang SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) 141566ccbc9cSJason Wang return false; 141666ccbc9cSJason Wang 141766ccbc9cSJason Wang return true; 141866ccbc9cSJason Wang } 141966ccbc9cSJason Wang 1420761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun, 1421761876c8SJason Wang struct tun_file *tfile, 142266ccbc9cSJason Wang struct iov_iter *from, 1423761876c8SJason Wang struct virtio_net_hdr *hdr, 
14241cfe6e93SJason Wang int len, int *skb_xdp)
142566ccbc9cSJason Wang {
14260bbd7dadSEric Dumazet struct page_frag *alloc_frag = &current->task_frag;
142766ccbc9cSJason Wang struct sk_buff *skb;
1428761876c8SJason Wang struct bpf_prog *xdp_prog;
14297df13219SJason Wang int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1430761876c8SJason Wang unsigned int delta = 0;
143166ccbc9cSJason Wang char *buf;
143266ccbc9cSJason Wang size_t copied;
1433761876c8SJason Wang bool xdp_xmit = false;
14347df13219SJason Wang int err, pad = TUN_RX_PAD;
14357df13219SJason Wang
14367df13219SJason Wang rcu_read_lock();
14377df13219SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog);
14387df13219SJason Wang if (xdp_prog)
14397df13219SJason Wang pad += TUN_HEADROOM;
14407df13219SJason Wang buflen += SKB_DATA_ALIGN(len + pad);
14417df13219SJason Wang rcu_read_unlock();
144266ccbc9cSJason Wang
144366ccbc9cSJason Wang if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
144466ccbc9cSJason Wang return ERR_PTR(-ENOMEM);
144566ccbc9cSJason Wang
144666ccbc9cSJason Wang buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
144766ccbc9cSJason Wang copied = copy_page_from_iter(alloc_frag->page,
14487df13219SJason Wang alloc_frag->offset + pad,
144966ccbc9cSJason Wang len, from);
145066ccbc9cSJason Wang if (copied != len)
145166ccbc9cSJason Wang return ERR_PTR(-EFAULT);
145266ccbc9cSJason Wang
14537df13219SJason Wang /* There's a small window where an XDP program may be attached
14547df13219SJason Wang * after the check of xdp_prog above; this should be rare, and
14557df13219SJason Wang * for simplicity we then do XDP on the skb instead, since the
14567df13219SJason Wang * headroom reserved here may not be enough. */
14577df13219SJason Wang if (hdr->gso_type || !xdp_prog)
14581cfe6e93SJason Wang *skb_xdp = 1;
1459761876c8SJason Wang else
14601cfe6e93SJason Wang *skb_xdp = 0;
146166ccbc9cSJason Wang
1462761876c8SJason Wang rcu_read_lock();
1463761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog);
14641cfe6e93SJason Wang if (xdp_prog && !*skb_xdp) {
1465761876c8SJason Wang struct xdp_buff xdp;
1466761876c8SJason Wang void *orig_data;
1467761876c8SJason Wang u32 act;
1468761876c8SJason Wang
1469761876c8SJason Wang xdp.data_hard_start = buf;
14707df13219SJason Wang xdp.data = buf + pad;
1471de8f3a83SDaniel Borkmann xdp_set_data_meta_invalid(&xdp);
1472761876c8SJason Wang xdp.data_end = xdp.data + len;
1473761876c8SJason Wang orig_data = xdp.data;
1474761876c8SJason Wang act = bpf_prog_run_xdp(xdp_prog, &xdp);
1475761876c8SJason Wang
1476761876c8SJason Wang switch (act) {
1477761876c8SJason Wang case XDP_REDIRECT:
1478761876c8SJason Wang get_page(alloc_frag->page);
1479761876c8SJason Wang alloc_frag->offset += buflen;
1480761876c8SJason Wang err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
1481761876c8SJason Wang if (err)
1482761876c8SJason Wang goto err_redirect;
1483761876c8SJason Wang return NULL;
1484761876c8SJason Wang case XDP_TX:
1485761876c8SJason Wang xdp_xmit = true;
1486761876c8SJason Wang /* fall through */
1487761876c8SJason Wang case XDP_PASS:
1488761876c8SJason Wang delta = orig_data - xdp.data;
1489761876c8SJason Wang break;
1490761876c8SJason Wang default:
1491761876c8SJason Wang bpf_warn_invalid_xdp_action(act);
1492761876c8SJason Wang /* fall through */
1493761876c8SJason Wang case XDP_ABORTED:
1494761876c8SJason Wang trace_xdp_exception(tun->dev, xdp_prog, act);
1495761876c8SJason Wang /* fall through */
1496761876c8SJason Wang case XDP_DROP:
1497761876c8SJason Wang goto err_xdp;
1498761876c8SJason Wang }
1499761876c8SJason Wang }
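/* If XDP ran above, the verdict was XDP_PASS or XDP_TX, and
 * xdp.data may have been moved by bpf_xdp_adjust_head(); "delta"
 * records that shift so the skb built below is reserved and sized
 * accordingly.  Otherwise delta is still zero.
 */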
1500761876c8SJason Wang
1501761876c8SJason Wang skb = build_skb(buf, buflen);
1502761876c8SJason Wang if (!skb) {
1503761876c8SJason Wang rcu_read_unlock();
1504761876c8SJason Wang return ERR_PTR(-ENOMEM);
1505761876c8SJason Wang }
1506761876c8SJason Wang
15077df13219SJason Wang skb_reserve(skb, pad - delta);
1508761876c8SJason Wang skb_put(skb, len + delta);
150966ccbc9cSJason Wang get_page(alloc_frag->page);
151066ccbc9cSJason Wang alloc_frag->offset += buflen;
151166ccbc9cSJason Wang
1512761876c8SJason Wang if (xdp_xmit) {
1513761876c8SJason Wang skb->dev = tun->dev;
1514761876c8SJason Wang generic_xdp_tx(skb, xdp_prog);
1515761876c8SJason Wang rcu_read_unlock();
1516761876c8SJason Wang return NULL;
1517761876c8SJason Wang }
1518761876c8SJason Wang
1519761876c8SJason Wang rcu_read_unlock();
1520761876c8SJason Wang
152166ccbc9cSJason Wang return skb;
1522761876c8SJason Wang
1523761876c8SJason Wang err_redirect:
1524761876c8SJason Wang put_page(alloc_frag->page);
1525761876c8SJason Wang err_xdp:
1526761876c8SJason Wang rcu_read_unlock();
1527761876c8SJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped);
1528761876c8SJason Wang return NULL;
152966ccbc9cSJason Wang }
153066ccbc9cSJason Wang
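/* One packet arrives per write(2)/writev(2)/sendmsg(2) on the
 * character device.  As a minimal userspace sketch (illustrative
 * only; tun_fd, pkt and pkt_len are placeholders and all error
 * handling is omitted), a frame for an IFF_TAP | IFF_NO_PI |
 * IFF_VNET_HDR device would be fed in as:
 *
 *	struct virtio_net_hdr h = { 0 };
 *	struct iovec iov[2] = {
 *		{ .iov_base = &h,  .iov_len = sizeof(h) },
 *		{ .iov_base = pkt, .iov_len = pkt_len },
 *	};
 *	writev(tun_fd, iov, 2);
 *
 * tun_get_user() below parses exactly this layout: an optional
 * struct tun_pi (absent with IFF_NO_PI), an optional vnet header
 * (vnet_hdr_sz bytes with IFF_VNET_HDR), then the packet payload.
 */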
15311da177e4SLinus Torvalds /* Get packet from user space buffer */
153254f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1533f5ff53b4SAl Viro void *msg_control, struct iov_iter *from,
15345503fcecSJason Wang int noblock, bool more)
15351da177e4SLinus Torvalds {
153609640e63SHarvey Harrison struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
15371da177e4SLinus Torvalds struct sk_buff *skb;
1538f5ff53b4SAl Viro size_t total_len = iov_iter_count(from);
1539eaea34b2SPaolo Abeni size_t len = total_len, align = tun->align, linear;
1540f43798c2SRusty Russell struct virtio_net_hdr gso = { 0 };
1541608b9977SPaolo Abeni struct tun_pcpu_stats *stats;
154296f8d9ecSJason Wang int good_linear;
15430690899bSMichael S. Tsirkin int copylen;
15440690899bSMichael S. Tsirkin bool zerocopy = false;
15450690899bSMichael S. Tsirkin int err;
154649974420SEric Dumazet u32 rxhash;
15471cfe6e93SJason Wang int skb_xdp = 1;
154890e33d45SPetar Penkov bool frags = tun_napi_frags_enabled(tun);
15491da177e4SLinus Torvalds
15501bd4978aSEric Dumazet if (!(tun->dev->flags & IFF_UP))
15511bd4978aSEric Dumazet return -EIO;
15521bd4978aSEric Dumazet
155340630b82SMichael S. Tsirkin if (!(tun->flags & IFF_NO_PI)) {
155415718ea0SDan Carpenter if (len < sizeof(pi))
15551da177e4SLinus Torvalds return -EINVAL;
155615718ea0SDan Carpenter len -= sizeof(pi);
15571da177e4SLinus Torvalds
1558cbbd26b8SAl Viro if (!copy_from_iter_full(&pi, sizeof(pi), from))
15591da177e4SLinus Torvalds return -EFAULT;
15601da177e4SLinus Torvalds }
15611da177e4SLinus Torvalds
156240630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR) {
1563e1edab87SWillem de Bruijn int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1564e1edab87SWillem de Bruijn
1565e1edab87SWillem de Bruijn if (len < vnet_hdr_sz)
1566f43798c2SRusty Russell return -EINVAL;
1567e1edab87SWillem de Bruijn len -= vnet_hdr_sz;
1568f43798c2SRusty Russell
1569cbbd26b8SAl Viro if (!copy_from_iter_full(&gso, sizeof(gso), from))
1570f43798c2SRusty Russell return -EFAULT;
1571f43798c2SRusty Russell
15724909122fSHerbert Xu if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
157356f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
157456f0dcc5SMichael S. Tsirkin gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
15754909122fSHerbert Xu
157656f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > len)
1577f43798c2SRusty Russell return -EINVAL;
1578e1edab87SWillem de Bruijn iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1579f43798c2SRusty Russell }
1580f43798c2SRusty Russell
158140630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1582a504b86eSstephen hemminger align += NET_IP_ALIGN;
15830eca93bcSHerbert Xu if (unlikely(len < ETH_HLEN ||
158456f0dcc5SMichael S. Tsirkin (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1585e01bf1c8SRusty Russell return -EINVAL;
1586e01bf1c8SRusty Russell }
15871da177e4SLinus Torvalds
158896f8d9ecSJason Wang good_linear = SKB_MAX_HEAD(align);
158996f8d9ecSJason Wang
159088529176SJason Wang if (msg_control) {
1591f5ff53b4SAl Viro struct iov_iter i = *from;
1592f5ff53b4SAl Viro
159388529176SJason Wang /* There are 256 bytes to be copied into the skb, so there is
159488529176SJason Wang * enough room for expanding the skb head later if needed.
15950690899bSMichael S. Tsirkin * The rest of the buffer is mapped from userspace.
15960690899bSMichael S. Tsirkin */
159756f0dcc5SMichael S. Tsirkin copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
159896f8d9ecSJason Wang if (copylen > good_linear)
159996f8d9ecSJason Wang copylen = good_linear;
16003dd5c330SJason Wang linear = copylen;
1601f5ff53b4SAl Viro iov_iter_advance(&i, copylen);
1602f5ff53b4SAl Viro if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
160388529176SJason Wang zerocopy = true;
160488529176SJason Wang }
160588529176SJason Wang
160690e33d45SPetar Penkov if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
16071cfe6e93SJason Wang /* Packets that are not easy to process here (e.g. GSO or
16081cfe6e93SJason Wang * jumbo packets) go through the generic XDP routine after
16091cfe6e93SJason Wang * the skb has been created.
16101cfe6e93SJason Wang */
16111cfe6e93SJason Wang skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
161266ccbc9cSJason Wang if (IS_ERR(skb)) {
161366ccbc9cSJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped);
161466ccbc9cSJason Wang return PTR_ERR(skb);
161566ccbc9cSJason Wang }
1616761876c8SJason Wang if (!skb)
1617761876c8SJason Wang return total_len;
161866ccbc9cSJason Wang } else {
161988529176SJason Wang if (!zerocopy) {
16200690899bSMichael S. Tsirkin copylen = len;
162156f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
162296f8d9ecSJason Wang linear = good_linear;
162396f8d9ecSJason Wang else
162456f0dcc5SMichael S. Tsirkin linear = tun16_to_cpu(tun, gso.hdr_len);
16253dd5c330SJason Wang }
16260690899bSMichael S. Tsirkin
162790e33d45SPetar Penkov if (frags) {
162890e33d45SPetar Penkov mutex_lock(&tfile->napi_mutex);
162990e33d45SPetar Penkov skb = tun_napi_alloc_frags(tfile, copylen, from);
163090e33d45SPetar Penkov /* tun_napi_alloc_frags() enforces a layout for the skb.
163190e33d45SPetar Penkov * If zerocopy is enabled, then this layout will be
163290e33d45SPetar Penkov * overwritten by zerocopy_sg_from_iter().
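 * Clearing zerocopy just below keeps that frag layout intact
 * for napi_gro_frags() further down.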
163390e33d45SPetar Penkov */ 163490e33d45SPetar Penkov zerocopy = false; 163590e33d45SPetar Penkov } else { 163690e33d45SPetar Penkov skb = tun_alloc_skb(tfile, align, copylen, linear, 163790e33d45SPetar Penkov noblock); 163890e33d45SPetar Penkov } 163990e33d45SPetar Penkov 164033dccbb0SHerbert Xu if (IS_ERR(skb)) { 164133dccbb0SHerbert Xu if (PTR_ERR(skb) != -EAGAIN) 1642608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 164390e33d45SPetar Penkov if (frags) 164490e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 164533dccbb0SHerbert Xu return PTR_ERR(skb); 16461da177e4SLinus Torvalds } 16471da177e4SLinus Torvalds 16480690899bSMichael S. Tsirkin if (zerocopy) 1649f5ff53b4SAl Viro err = zerocopy_sg_from_iter(skb, from); 1650af1cc7a2SJason Wang else 1651f5ff53b4SAl Viro err = skb_copy_datagram_from_iter(skb, 0, from, len); 16520690899bSMichael S. Tsirkin 16530690899bSMichael S. Tsirkin if (err) { 1654608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 16558f22757eSDave Jones kfree_skb(skb); 165690e33d45SPetar Penkov if (frags) { 165790e33d45SPetar Penkov tfile->napi.skb = NULL; 165890e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 165990e33d45SPetar Penkov } 166090e33d45SPetar Penkov 16611da177e4SLinus Torvalds return -EFAULT; 16628f22757eSDave Jones } 166366ccbc9cSJason Wang } 16641da177e4SLinus Torvalds 16653e9e40e7SJarno Rajahalme if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { 1666df10db98SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_frame_errors); 1667df10db98SPaolo Abeni kfree_skb(skb); 166890e33d45SPetar Penkov if (frags) { 166990e33d45SPetar Penkov tfile->napi.skb = NULL; 167090e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 167190e33d45SPetar Penkov } 167290e33d45SPetar Penkov 1673df10db98SPaolo Abeni return -EINVAL; 1674df10db98SPaolo Abeni } 1675df10db98SPaolo Abeni 16761da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 167740630b82SMichael S. Tsirkin case IFF_TUN: 167840630b82SMichael S. Tsirkin if (tun->flags & IFF_NO_PI) { 16792580c4c1SAlexander Potapenko u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; 16802580c4c1SAlexander Potapenko 16812580c4c1SAlexander Potapenko switch (ip_version) { 16822580c4c1SAlexander Potapenko case 4: 1683f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IP); 1684f09f7ee2SAng Way Chuang break; 16852580c4c1SAlexander Potapenko case 6: 1686f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IPV6); 1687f09f7ee2SAng Way Chuang break; 1688f09f7ee2SAng Way Chuang default: 1689608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 1690f09f7ee2SAng Way Chuang kfree_skb(skb); 1691f09f7ee2SAng Way Chuang return -EINVAL; 1692f09f7ee2SAng Way Chuang } 1693f09f7ee2SAng Way Chuang } 1694f09f7ee2SAng Way Chuang 1695459a98edSArnaldo Carvalho de Melo skb_reset_mac_header(skb); 16961da177e4SLinus Torvalds skb->protocol = pi.proto; 16974c13eb66SArnaldo Carvalho de Melo skb->dev = tun->dev; 16981da177e4SLinus Torvalds break; 169940630b82SMichael S. Tsirkin case IFF_TAP: 170090e33d45SPetar Penkov if (!frags) 17011da177e4SLinus Torvalds skb->protocol = eth_type_trans(skb, tun->dev); 17021da177e4SLinus Torvalds break; 17036403eab1SJoe Perches } 17041da177e4SLinus Torvalds 17050690899bSMichael S. Tsirkin /* copy skb_ubuf_info for callback when skb has no error */ 17060690899bSMichael S. Tsirkin if (zerocopy) { 17070690899bSMichael S. Tsirkin skb_shinfo(skb)->destructor_arg = msg_control; 17080690899bSMichael S. 
Tsirkin skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1709c9af6db4SPravin B Shelar skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1710af1cc7a2SJason Wang } else if (msg_control) { 1711af1cc7a2SJason Wang struct ubuf_info *uarg = msg_control; 1712af1cc7a2SJason Wang uarg->callback(uarg, false); 17130690899bSMichael S. Tsirkin } 17140690899bSMichael S. Tsirkin 171572f65107SVlad Yasevich skb_reset_network_header(skb); 171640893fd0SJason Wang skb_probe_transport_header(skb, 0); 171738502af7SJason Wang 17181cfe6e93SJason Wang if (skb_xdp) { 1719761876c8SJason Wang struct bpf_prog *xdp_prog; 1720761876c8SJason Wang int ret; 1721761876c8SJason Wang 1722761876c8SJason Wang rcu_read_lock(); 1723761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 1724761876c8SJason Wang if (xdp_prog) { 1725761876c8SJason Wang ret = do_xdp_generic(xdp_prog, skb); 1726761876c8SJason Wang if (ret != XDP_PASS) { 1727761876c8SJason Wang rcu_read_unlock(); 1728761876c8SJason Wang return total_len; 1729761876c8SJason Wang } 1730761876c8SJason Wang } 1731761876c8SJason Wang rcu_read_unlock(); 1732761876c8SJason Wang } 1733761876c8SJason Wang 1734feec084aSJason Wang rxhash = __skb_get_hash_symmetric(skb); 173594317099SPetar Penkov 173690e33d45SPetar Penkov if (frags) { 173790e33d45SPetar Penkov /* Exercise flow dissector code path. */ 173890e33d45SPetar Penkov u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb)); 173990e33d45SPetar Penkov 1740*010f245bSEric Dumazet if (unlikely(headlen > skb_headlen(skb))) { 174190e33d45SPetar Penkov this_cpu_inc(tun->pcpu_stats->rx_dropped); 174290e33d45SPetar Penkov napi_free_frags(&tfile->napi); 174390e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 174490e33d45SPetar Penkov WARN_ON(1); 174590e33d45SPetar Penkov return -ENOMEM; 174690e33d45SPetar Penkov } 174790e33d45SPetar Penkov 174890e33d45SPetar Penkov local_bh_disable(); 174990e33d45SPetar Penkov napi_gro_frags(&tfile->napi); 175090e33d45SPetar Penkov local_bh_enable(); 175190e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 175290e33d45SPetar Penkov } else if (tun->flags & IFF_NAPI) { 175394317099SPetar Penkov struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 175494317099SPetar Penkov int queue_len; 175594317099SPetar Penkov 175694317099SPetar Penkov spin_lock_bh(&queue->lock); 175794317099SPetar Penkov __skb_queue_tail(queue, skb); 175894317099SPetar Penkov queue_len = skb_queue_len(queue); 175994317099SPetar Penkov spin_unlock(&queue->lock); 176094317099SPetar Penkov 176194317099SPetar Penkov if (!more || queue_len > NAPI_POLL_WEIGHT) 176294317099SPetar Penkov napi_schedule(&tfile->napi); 176394317099SPetar Penkov 176494317099SPetar Penkov local_bh_enable(); 176594317099SPetar Penkov } else if (!IS_ENABLED(CONFIG_4KSTACKS)) { 17665503fcecSJason Wang tun_rx_batched(tun, tfile, skb, more); 176794317099SPetar Penkov } else { 17681da177e4SLinus Torvalds netif_rx_ni(skb); 176994317099SPetar Penkov } 17701da177e4SLinus Torvalds 1771608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats); 1772608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp); 1773608b9977SPaolo Abeni stats->rx_packets++; 1774608b9977SPaolo Abeni stats->rx_bytes += len; 1775608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp); 1776608b9977SPaolo Abeni put_cpu_ptr(stats); 17771da177e4SLinus Torvalds 17789e85722dSJason Wang tun_flow_update(tun, rxhash, tfile); 17790690899bSMichael S. 
Tsirkin return total_len;
17801da177e4SLinus Torvalds }
17811da177e4SLinus Torvalds
1782f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
17831da177e4SLinus Torvalds {
178433dccbb0SHerbert Xu struct file *file = iocb->ki_filp;
178554f968d6SJason Wang struct tun_file *tfile = file->private_data;
17869484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile);
1787631ab46bSEric W. Biederman ssize_t result;
17881da177e4SLinus Torvalds
17891da177e4SLinus Torvalds if (!tun)
17901da177e4SLinus Torvalds return -EBADFD;
17911da177e4SLinus Torvalds
17925503fcecSJason Wang result = tun_get_user(tun, tfile, NULL, from,
17935503fcecSJason Wang file->f_flags & O_NONBLOCK, false);
1794631ab46bSEric W. Biederman
1795631ab46bSEric W. Biederman tun_put(tun);
1796631ab46bSEric W. Biederman return result;
17971da177e4SLinus Torvalds }
17981da177e4SLinus Torvalds
17991da177e4SLinus Torvalds /* Put packet to the user space buffer */
18006f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun,
180154f968d6SJason Wang struct tun_file *tfile,
18021da177e4SLinus Torvalds struct sk_buff *skb,
1803e0b46d0eSHerbert Xu struct iov_iter *iter)
18041da177e4SLinus Torvalds {
18051da177e4SLinus Torvalds struct tun_pi pi = { 0, skb->protocol };
1806608b9977SPaolo Abeni struct tun_pcpu_stats *stats;
1807e0b46d0eSHerbert Xu ssize_t total;
18088c847d25SJason Wang int vlan_offset = 0;
1809a8f9bfdfSHerbert Xu int vlan_hlen = 0;
18102eb783c4SHerbert Xu int vnet_hdr_sz = 0;
1811a8f9bfdfSHerbert Xu
1812df8a39deSJiri Pirko if (skb_vlan_tag_present(skb))
1813a8f9bfdfSHerbert Xu vlan_hlen = VLAN_HLEN;
18141da177e4SLinus Torvalds
181540630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR)
1816e1edab87SWillem de Bruijn vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
18171da177e4SLinus Torvalds
1818e0b46d0eSHerbert Xu total = skb->len + vlan_hlen + vnet_hdr_sz;
1819e0b46d0eSHerbert Xu
182040630b82SMichael S. Tsirkin if (!(tun->flags & IFF_NO_PI)) {
1821e0b46d0eSHerbert Xu if (iov_iter_count(iter) < sizeof(pi))
18221da177e4SLinus Torvalds return -EINVAL;
18231da177e4SLinus Torvalds
1824e0b46d0eSHerbert Xu total += sizeof(pi);
1825e0b46d0eSHerbert Xu if (iov_iter_count(iter) < total) {
18261da177e4SLinus Torvalds /* Packet will be stripped (truncated to fit) */
18271da177e4SLinus Torvalds pi.flags |= TUN_PKT_STRIP;
18281da177e4SLinus Torvalds }
18291da177e4SLinus Torvalds
1830e0b46d0eSHerbert Xu if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
18311da177e4SLinus Torvalds return -EFAULT;
18321da177e4SLinus Torvalds }
18331da177e4SLinus Torvalds
18342eb783c4SHerbert Xu if (vnet_hdr_sz) {
18359403cd7cSJarno Rajahalme struct virtio_net_hdr gso;
183634166093SMike Rapoport
1837e0b46d0eSHerbert Xu if (iov_iter_count(iter) < vnet_hdr_sz)
1838f43798c2SRusty Russell return -EINVAL;
1839f43798c2SRusty Russell
18403e9e40e7SJarno Rajahalme if (virtio_net_hdr_from_skb(skb, &gso,
18416391a448SJason Wang tun_is_little_endian(tun), true)) {
1842f43798c2SRusty Russell struct skb_shared_info *sinfo = skb_shinfo(skb);
18436b8a66eeSJoe Perches pr_err("unexpected GSO type: "
1844ef3db4a5SMichael S. Tsirkin "0x%x, gso_size %d, hdr_len %d\n",
184556f0dcc5SMichael S. Tsirkin sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
184656f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.hdr_len));
1847ef3db4a5SMichael S. Tsirkin print_hex_dump(KERN_ERR, "tun: ",
1848ef3db4a5SMichael S. Tsirkin DUMP_PREFIX_NONE,
1849ef3db4a5SMichael S. Tsirkin 16, 1, skb->head,
185056f0dcc5SMichael S.
Tsirkin min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); 1851ef3db4a5SMichael S. Tsirkin WARN_ON_ONCE(1); 1852ef3db4a5SMichael S. Tsirkin return -EINVAL; 1853ef3db4a5SMichael S. Tsirkin } 1854f43798c2SRusty Russell 1855e0b46d0eSHerbert Xu if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) 1856f43798c2SRusty Russell return -EFAULT; 18578c847d25SJason Wang 18588c847d25SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 1859f43798c2SRusty Russell } 1860f43798c2SRusty Russell 1861a8f9bfdfSHerbert Xu if (vlan_hlen) { 1862e0b46d0eSHerbert Xu int ret; 18636680ec68SJason Wang struct { 18646680ec68SJason Wang __be16 h_vlan_proto; 18656680ec68SJason Wang __be16 h_vlan_TCI; 18666680ec68SJason Wang } veth; 18671da177e4SLinus Torvalds 18686680ec68SJason Wang veth.h_vlan_proto = skb->vlan_proto; 1869df8a39deSJiri Pirko veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); 18701da177e4SLinus Torvalds 18716680ec68SJason Wang vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 18726680ec68SJason Wang 1873e0b46d0eSHerbert Xu ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); 1874e0b46d0eSHerbert Xu if (ret || !iov_iter_count(iter)) 18756680ec68SJason Wang goto done; 18766680ec68SJason Wang 1877e0b46d0eSHerbert Xu ret = copy_to_iter(&veth, sizeof(veth), iter); 1878e0b46d0eSHerbert Xu if (ret != sizeof(veth) || !iov_iter_count(iter)) 18796680ec68SJason Wang goto done; 18806680ec68SJason Wang } 18816680ec68SJason Wang 1882e0b46d0eSHerbert Xu skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); 18836680ec68SJason Wang 18846680ec68SJason Wang done: 1885608b9977SPaolo Abeni /* caller is in process context, */ 1886608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats); 1887608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp); 1888608b9977SPaolo Abeni stats->tx_packets++; 1889608b9977SPaolo Abeni stats->tx_bytes += skb->len + vlan_hlen; 1890608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp); 1891608b9977SPaolo Abeni put_cpu_ptr(tun->pcpu_stats); 18921da177e4SLinus Torvalds 18931da177e4SLinus Torvalds return total; 18941da177e4SLinus Torvalds } 18951da177e4SLinus Torvalds 18961576d986SJason Wang static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock, 18971576d986SJason Wang int *err) 18981576d986SJason Wang { 18991576d986SJason Wang DECLARE_WAITQUEUE(wait, current); 19001576d986SJason Wang struct sk_buff *skb = NULL; 1901f48cc6b2SJason Wang int error = 0; 19021576d986SJason Wang 19031576d986SJason Wang skb = skb_array_consume(&tfile->tx_array); 19041576d986SJason Wang if (skb) 19051576d986SJason Wang goto out; 19061576d986SJason Wang if (noblock) { 1907f48cc6b2SJason Wang error = -EAGAIN; 19081576d986SJason Wang goto out; 19091576d986SJason Wang } 19101576d986SJason Wang 19111576d986SJason Wang add_wait_queue(&tfile->wq.wait, &wait); 19121576d986SJason Wang current->state = TASK_INTERRUPTIBLE; 19131576d986SJason Wang 19141576d986SJason Wang while (1) { 19151576d986SJason Wang skb = skb_array_consume(&tfile->tx_array); 19161576d986SJason Wang if (skb) 19171576d986SJason Wang break; 19181576d986SJason Wang if (signal_pending(current)) { 1919f48cc6b2SJason Wang error = -ERESTARTSYS; 19201576d986SJason Wang break; 19211576d986SJason Wang } 19221576d986SJason Wang if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { 1923f48cc6b2SJason Wang error = -EFAULT; 19241576d986SJason Wang break; 19251576d986SJason Wang } 19261576d986SJason Wang 19271576d986SJason Wang schedule(); 19281576d986SJason Wang } 19291576d986SJason Wang 19301576d986SJason Wang 
current->state = TASK_RUNNING; 19311576d986SJason Wang remove_wait_queue(&tfile->wq.wait, &wait); 19321576d986SJason Wang 19331576d986SJason Wang out: 1934f48cc6b2SJason Wang *err = error; 19351576d986SJason Wang return skb; 19361576d986SJason Wang } 19371576d986SJason Wang 193854f968d6SJason Wang static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, 19399b067034SAl Viro struct iov_iter *to, 1940ac77cfd4SJason Wang int noblock, struct sk_buff *skb) 19411da177e4SLinus Torvalds { 19429b067034SAl Viro ssize_t ret; 19431576d986SJason Wang int err; 19441da177e4SLinus Torvalds 19453872baf6SRami Rosen tun_debug(KERN_INFO, tun, "tun_do_read\n"); 19461da177e4SLinus Torvalds 19479b067034SAl Viro if (!iov_iter_count(to)) 19489b067034SAl Viro return 0; 19491da177e4SLinus Torvalds 1950ac77cfd4SJason Wang if (!skb) { 19511576d986SJason Wang /* Read frames from ring */ 19521576d986SJason Wang skb = tun_ring_recv(tfile, noblock, &err); 1953e0b46d0eSHerbert Xu if (!skb) 1954957f094fSAlex Gartrell return err; 1955ac77cfd4SJason Wang } 1956e0b46d0eSHerbert Xu 19579b067034SAl Viro ret = tun_put_user(tun, tfile, skb, to); 1958f51a5e82SJason Wang if (unlikely(ret < 0)) 19591da177e4SLinus Torvalds kfree_skb(skb); 1960f51a5e82SJason Wang else 1961f51a5e82SJason Wang consume_skb(skb); 19621da177e4SLinus Torvalds 196305c2828cSMichael S. Tsirkin return ret; 196405c2828cSMichael S. Tsirkin } 196505c2828cSMichael S. Tsirkin 19669b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) 196705c2828cSMichael S. Tsirkin { 196805c2828cSMichael S. Tsirkin struct file *file = iocb->ki_filp; 196905c2828cSMichael S. Tsirkin struct tun_file *tfile = file->private_data; 19709484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 19719b067034SAl Viro ssize_t len = iov_iter_count(to), ret; 197205c2828cSMichael S. Tsirkin 197305c2828cSMichael S. Tsirkin if (!tun) 197405c2828cSMichael S. Tsirkin return -EBADFD; 1975ac77cfd4SJason Wang ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL); 197642404c09SDavid S. Miller ret = min_t(ssize_t, ret, len); 1977d0b7da8aSZhi Yong Wu if (ret > 0) 1978d0b7da8aSZhi Yong Wu iocb->ki_pos = ret; 1979631ab46bSEric W. Biederman tun_put(tun); 19801da177e4SLinus Torvalds return ret; 19811da177e4SLinus Torvalds } 19821da177e4SLinus Torvalds 198396442e42SJason Wang static void tun_free_netdev(struct net_device *dev) 198496442e42SJason Wang { 198596442e42SJason Wang struct tun_struct *tun = netdev_priv(dev); 198696442e42SJason Wang 19874008e97fSJason Wang BUG_ON(!(list_empty(&tun->disabled))); 1988608b9977SPaolo Abeni free_percpu(tun->pcpu_stats); 198996442e42SJason Wang tun_flow_uninit(tun); 19905dbbaf2dSPaul Moore security_tun_dev_free_security(tun->security); 199196442e42SJason Wang } 199296442e42SJason Wang 19931da177e4SLinus Torvalds static void tun_setup(struct net_device *dev) 19941da177e4SLinus Torvalds { 19951da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 19961da177e4SLinus Torvalds 19970625c883SEric W. Biederman tun->owner = INVALID_UID; 19980625c883SEric W. Biederman tun->group = INVALID_GID; 19991da177e4SLinus Torvalds 20001da177e4SLinus Torvalds dev->ethtool_ops = &tun_ethtool_ops; 2001cf124db5SDavid S. Miller dev->needs_free_netdev = true; 2002cf124db5SDavid S. 
Miller dev->priv_destructor = tun_free_netdev; 2003016adb72SJason Wang /* We prefer our own queue length */ 2004016adb72SJason Wang dev->tx_queue_len = TUN_READQ_SIZE; 20051da177e4SLinus Torvalds } 20061da177e4SLinus Torvalds 2007f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting tun or tap 2008f019a7a5SEric W. Biederman * device with netlink. 2009f019a7a5SEric W. Biederman */ 2010a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[], 2011a8b8a889SMatthias Schiffer struct netlink_ext_ack *extack) 2012f019a7a5SEric W. Biederman { 2013f019a7a5SEric W. Biederman return -EINVAL; 2014f019a7a5SEric W. Biederman } 2015f019a7a5SEric W. Biederman 2016f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = { 2017f019a7a5SEric W. Biederman .kind = DRV_NAME, 2018f019a7a5SEric W. Biederman .priv_size = sizeof(struct tun_struct), 2019f019a7a5SEric W. Biederman .setup = tun_setup, 2020f019a7a5SEric W. Biederman .validate = tun_validate, 2021f019a7a5SEric W. Biederman }; 2022f019a7a5SEric W. Biederman 202333dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk) 202433dccbb0SHerbert Xu { 202554f968d6SJason Wang struct tun_file *tfile; 202643815482SEric Dumazet wait_queue_head_t *wqueue; 202733dccbb0SHerbert Xu 202833dccbb0SHerbert Xu if (!sock_writeable(sk)) 202933dccbb0SHerbert Xu return; 203033dccbb0SHerbert Xu 20319cd3e072SEric Dumazet if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) 203233dccbb0SHerbert Xu return; 203333dccbb0SHerbert Xu 203443815482SEric Dumazet wqueue = sk_sleep(sk); 203543815482SEric Dumazet if (wqueue && waitqueue_active(wqueue)) 203643815482SEric Dumazet wake_up_interruptible_sync_poll(wqueue, POLLOUT | 203705c2828cSMichael S. Tsirkin POLLWRNORM | POLLWRBAND); 2038c722c625SHerbert Xu 203954f968d6SJason Wang tfile = container_of(sk, struct tun_file, sk); 204054f968d6SJason Wang kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); 204133dccbb0SHerbert Xu } 204233dccbb0SHerbert Xu 20431b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) 204405c2828cSMichael S. Tsirkin { 204554f968d6SJason Wang int ret; 204654f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 20479484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 204854f968d6SJason Wang 204954f968d6SJason Wang if (!tun) 205054f968d6SJason Wang return -EBADFD; 2051f5ff53b4SAl Viro 2052c0371da6SAl Viro ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, 20535503fcecSJason Wang m->msg_flags & MSG_DONTWAIT, 20545503fcecSJason Wang m->msg_flags & MSG_MORE); 205554f968d6SJason Wang tun_put(tun); 205654f968d6SJason Wang return ret; 205705c2828cSMichael S. Tsirkin } 205805c2828cSMichael S. Tsirkin 20591b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, 206005c2828cSMichael S. Tsirkin int flags) 206105c2828cSMichael S. Tsirkin { 206254f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 20639484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 206405c2828cSMichael S. 
Tsirkin int ret; 206554f968d6SJason Wang 206654f968d6SJason Wang if (!tun) 206754f968d6SJason Wang return -EBADFD; 206854f968d6SJason Wang 2069eda29772SRichard Cochran if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { 20703811ae76SGao feng ret = -EINVAL; 20713811ae76SGao feng goto out; 20723811ae76SGao feng } 2073eda29772SRichard Cochran if (flags & MSG_ERRQUEUE) { 2074eda29772SRichard Cochran ret = sock_recv_errqueue(sock->sk, m, total_len, 2075eda29772SRichard Cochran SOL_PACKET, TUN_TX_TIMESTAMP); 2076eda29772SRichard Cochran goto out; 2077eda29772SRichard Cochran } 2078ac77cfd4SJason Wang ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, 2079ac77cfd4SJason Wang m->msg_control); 208087897931SAlex Gartrell if (ret > (ssize_t)total_len) { 208142404c09SDavid S. Miller m->msg_flags |= MSG_TRUNC; 208242404c09SDavid S. Miller ret = flags & MSG_TRUNC ? ret : total_len; 208342404c09SDavid S. Miller } 20843811ae76SGao feng out: 208554f968d6SJason Wang tun_put(tun); 208605c2828cSMichael S. Tsirkin return ret; 208705c2828cSMichael S. Tsirkin } 208805c2828cSMichael S. Tsirkin 20891576d986SJason Wang static int tun_peek_len(struct socket *sock) 20901576d986SJason Wang { 20911576d986SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 20921576d986SJason Wang struct tun_struct *tun; 20931576d986SJason Wang int ret = 0; 20941576d986SJason Wang 20959484dc74Syuan linyu tun = tun_get(tfile); 20961576d986SJason Wang if (!tun) 20971576d986SJason Wang return 0; 20981576d986SJason Wang 20991576d986SJason Wang ret = skb_array_peek_len(&tfile->tx_array); 21001576d986SJason Wang tun_put(tun); 21011576d986SJason Wang 21021576d986SJason Wang return ret; 21031576d986SJason Wang } 21041576d986SJason Wang 210505c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */ 210605c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = { 21071576d986SJason Wang .peek_len = tun_peek_len, 210805c2828cSMichael S. Tsirkin .sendmsg = tun_sendmsg, 210905c2828cSMichael S. Tsirkin .recvmsg = tun_recvmsg, 211005c2828cSMichael S. Tsirkin }; 211105c2828cSMichael S. Tsirkin 211233dccbb0SHerbert Xu static struct proto tun_proto = { 211333dccbb0SHerbert Xu .name = "tun", 211433dccbb0SHerbert Xu .owner = THIS_MODULE, 211554f968d6SJason Wang .obj_size = sizeof(struct tun_file), 211633dccbb0SHerbert Xu }; 2117f019a7a5SEric W. Biederman 2118980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun) 2119980c9e8cSDavid Woodhouse { 2120031f5e03SMichael S. Tsirkin return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); 2121980c9e8cSDavid Woodhouse } 2122980c9e8cSDavid Woodhouse 2123980c9e8cSDavid Woodhouse static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr, 2124980c9e8cSDavid Woodhouse char *buf) 2125980c9e8cSDavid Woodhouse { 2126980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2127980c9e8cSDavid Woodhouse return sprintf(buf, "0x%x\n", tun_flags(tun)); 2128980c9e8cSDavid Woodhouse } 2129980c9e8cSDavid Woodhouse 2130980c9e8cSDavid Woodhouse static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, 2131980c9e8cSDavid Woodhouse char *buf) 2132980c9e8cSDavid Woodhouse { 2133980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 21340625c883SEric W. Biederman return uid_valid(tun->owner)? 21350625c883SEric W. Biederman sprintf(buf, "%u\n", 21360625c883SEric W. 
Biederman from_kuid_munged(current_user_ns(), tun->owner)): 21370625c883SEric W. Biederman sprintf(buf, "-1\n"); 2138980c9e8cSDavid Woodhouse } 2139980c9e8cSDavid Woodhouse 2140980c9e8cSDavid Woodhouse static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, 2141980c9e8cSDavid Woodhouse char *buf) 2142980c9e8cSDavid Woodhouse { 2143980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 21440625c883SEric W. Biederman return gid_valid(tun->group) ? 21450625c883SEric W. Biederman sprintf(buf, "%u\n", 21460625c883SEric W. Biederman from_kgid_munged(current_user_ns(), tun->group)): 21470625c883SEric W. Biederman sprintf(buf, "-1\n"); 2148980c9e8cSDavid Woodhouse } 2149980c9e8cSDavid Woodhouse 2150980c9e8cSDavid Woodhouse static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); 2151980c9e8cSDavid Woodhouse static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL); 2152980c9e8cSDavid Woodhouse static DEVICE_ATTR(group, 0444, tun_show_group, NULL); 2153980c9e8cSDavid Woodhouse 2154c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = { 2155c4d33e24STakashi Iwai &dev_attr_tun_flags.attr, 2156c4d33e24STakashi Iwai &dev_attr_owner.attr, 2157c4d33e24STakashi Iwai &dev_attr_group.attr, 2158c4d33e24STakashi Iwai NULL 2159c4d33e24STakashi Iwai }; 2160c4d33e24STakashi Iwai 2161c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = { 2162c4d33e24STakashi Iwai .attrs = tun_dev_attrs 2163c4d33e24STakashi Iwai }; 2164c4d33e24STakashi Iwai 2165d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 21661da177e4SLinus Torvalds { 21671da177e4SLinus Torvalds struct tun_struct *tun; 216854f968d6SJason Wang struct tun_file *tfile = file->private_data; 21691da177e4SLinus Torvalds struct net_device *dev; 21701da177e4SLinus Torvalds int err; 21711da177e4SLinus Torvalds 21727c0c3b1aSJason Wang if (tfile->detached) 21737c0c3b1aSJason Wang return -EINVAL; 21747c0c3b1aSJason Wang 217590e33d45SPetar Penkov if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { 217690e33d45SPetar Penkov if (!capable(CAP_NET_ADMIN)) 217790e33d45SPetar Penkov return -EPERM; 217890e33d45SPetar Penkov 217990e33d45SPetar Penkov if (!(ifr->ifr_flags & IFF_NAPI) || 218090e33d45SPetar Penkov (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) 218190e33d45SPetar Penkov return -EINVAL; 218290e33d45SPetar Penkov } 218390e33d45SPetar Penkov 218474a3e5a7SEric W. Biederman dev = __dev_get_by_name(net, ifr->ifr_name); 218574a3e5a7SEric W. Biederman if (dev) { 2186f85ba780SDavid Woodhouse if (ifr->ifr_flags & IFF_TUN_EXCL) 2187f85ba780SDavid Woodhouse return -EBUSY; 218874a3e5a7SEric W. Biederman if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) 218974a3e5a7SEric W. Biederman tun = netdev_priv(dev); 219074a3e5a7SEric W. Biederman else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) 219174a3e5a7SEric W. Biederman tun = netdev_priv(dev); 219274a3e5a7SEric W. Biederman else 219374a3e5a7SEric W. Biederman return -EINVAL; 219474a3e5a7SEric W. Biederman 21958e6d91aeSJason Wang if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != 219640630b82SMichael S. 
Tsirkin !!(tun->flags & IFF_MULTI_QUEUE))
21978e6d91aeSJason Wang return -EINVAL;
21988e6d91aeSJason Wang
2199cde8b15fSJason Wang if (tun_not_capable(tun))
22002b980dbdSPaul Moore return -EPERM;
22015dbbaf2dSPaul Moore err = security_tun_dev_open(tun->security);
22022b980dbdSPaul Moore if (err < 0)
22032b980dbdSPaul Moore return err;
22042b980dbdSPaul Moore
220594317099SPetar Penkov err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
220694317099SPetar Penkov ifr->ifr_flags & IFF_NAPI);
2207a7385ba2SEric W. Biederman if (err < 0)
2208a7385ba2SEric W. Biederman return err;
22094008e97fSJason Wang
221040630b82SMichael S. Tsirkin if (tun->flags & IFF_MULTI_QUEUE &&
2211e8dbad66SJason Wang (tun->numqueues + tun->numdisabled > 1)) {
2212e8dbad66SJason Wang /* One or more queues have already been attached, so there
2213e8dbad66SJason Wang * is no need to initialize the device again.
2214e8dbad66SJason Wang */
2215e8dbad66SJason Wang return 0;
2216e8dbad66SJason Wang }
221786a264abSDavid Howells }
22181da177e4SLinus Torvalds else {
22191da177e4SLinus Torvalds char *name;
22201da177e4SLinus Torvalds unsigned long flags = 0;
2221edfb6a14SJason Wang int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2222edfb6a14SJason Wang MAX_TAP_QUEUES : 1;
22231da177e4SLinus Torvalds
2224c260b772SEric W. Biederman if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2225ca6bb5d7SDavid Woodhouse return -EPERM;
22262b980dbdSPaul Moore err = security_tun_dev_create();
22272b980dbdSPaul Moore if (err < 0)
22282b980dbdSPaul Moore return err;
2229ca6bb5d7SDavid Woodhouse
22301da177e4SLinus Torvalds /* Set dev type */
22311da177e4SLinus Torvalds if (ifr->ifr_flags & IFF_TUN) {
22321da177e4SLinus Torvalds /* TUN device */
223340630b82SMichael S. Tsirkin flags |= IFF_TUN;
22341da177e4SLinus Torvalds name = "tun%d";
22351da177e4SLinus Torvalds } else if (ifr->ifr_flags & IFF_TAP) {
22361da177e4SLinus Torvalds /* TAP device */
223740630b82SMichael S. Tsirkin flags |= IFF_TAP;
22381da177e4SLinus Torvalds name = "tap%d";
22391da177e4SLinus Torvalds } else
224036989b90SKusanagi Kouichi return -EINVAL;
22411da177e4SLinus Torvalds
22421da177e4SLinus Torvalds if (*ifr->ifr_name)
22431da177e4SLinus Torvalds name = ifr->ifr_name;
22441da177e4SLinus Torvalds
2245c8d68e6bSJason Wang dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2246c835a677STom Gundersen NET_NAME_UNKNOWN, tun_setup, queues,
2247c835a677STom Gundersen queues);
2248edfb6a14SJason Wang
22491da177e4SLinus Torvalds if (!dev)
22501da177e4SLinus Torvalds return -ENOMEM;
22511da177e4SLinus Torvalds
2252fc54c658SPavel Emelyanov dev_net_set(dev, net);
2253f019a7a5SEric W. Biederman dev->rtnl_link_ops = &tun_link_ops;
2254fb7589a1SPavel Emelyanov dev->ifindex = tfile->ifindex;
2255c4d33e24STakashi Iwai dev->sysfs_groups[0] = &tun_attr_group;
2256758e43b7SStephen Hemminger
22571da177e4SLinus Torvalds tun = netdev_priv(dev);
22581da177e4SLinus Torvalds tun->dev = dev;
22591da177e4SLinus Torvalds tun->flags = flags;
2260f271b2ccSMax Krasnyansky tun->txflt.count = 0;
2261d9d52b51SMichael S.
Tsirkin tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); 22621da177e4SLinus Torvalds 2263eaea34b2SPaolo Abeni tun->align = NET_SKB_PAD; 226454f968d6SJason Wang tun->filter_attached = false; 226554f968d6SJason Wang tun->sndbuf = tfile->socket.sk->sk_sndbuf; 22665503fcecSJason Wang tun->rx_batched = 0; 226733dccbb0SHerbert Xu 2268608b9977SPaolo Abeni tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); 2269608b9977SPaolo Abeni if (!tun->pcpu_stats) { 2270608b9977SPaolo Abeni err = -ENOMEM; 2271608b9977SPaolo Abeni goto err_free_dev; 2272608b9977SPaolo Abeni } 2273608b9977SPaolo Abeni 227496442e42SJason Wang spin_lock_init(&tun->lock); 227596442e42SJason Wang 22765dbbaf2dSPaul Moore err = security_tun_dev_alloc_security(&tun->security); 22775dbbaf2dSPaul Moore if (err < 0) 2278608b9977SPaolo Abeni goto err_free_stat; 22792b980dbdSPaul Moore 22801da177e4SLinus Torvalds tun_net_init(dev); 2281944a1376SPavel Emelyanov tun_flow_init(tun); 228296442e42SJason Wang 228388255375SMichał Mirosław dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | 22846680ec68SJason Wang TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | 22856680ec68SJason Wang NETIF_F_HW_VLAN_STAG_TX; 22862a2bbf17SPaolo Abeni dev->features = dev->hw_features | NETIF_F_LLTX; 22876671b224SFernando Luis Vazquez Cao dev->vlan_features = dev->features & 22886671b224SFernando Luis Vazquez Cao ~(NETIF_F_HW_VLAN_CTAG_TX | 22896671b224SFernando Luis Vazquez Cao NETIF_F_HW_VLAN_STAG_TX); 229088255375SMichał Mirosław 22914008e97fSJason Wang INIT_LIST_HEAD(&tun->disabled); 229294317099SPetar Penkov err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI); 2293eb0fb363SJason Wang if (err < 0) 2294662ca437SJason Wang goto err_free_flow; 2295eb0fb363SJason Wang 22961da177e4SLinus Torvalds err = register_netdevice(tun->dev); 22971da177e4SLinus Torvalds if (err < 0) 2298662ca437SJason Wang goto err_detach; 2299af668b3cSMichael S. Tsirkin } 2300980c9e8cSDavid Woodhouse 2301eb0fb363SJason Wang netif_carrier_on(tun->dev); 23021da177e4SLinus Torvalds 23036b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_set_iff\n"); 23041da177e4SLinus Torvalds 2305031f5e03SMichael S. Tsirkin tun->flags = (tun->flags & ~TUN_FEATURES) | 2306031f5e03SMichael S. Tsirkin (ifr->ifr_flags & TUN_FEATURES); 2307c8d68e6bSJason Wang 2308e35259a9SMax Krasnyansky /* Make sure persistent devices do not get stuck in 2309e35259a9SMax Krasnyansky * xoff state. 
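 * (A previous opener may have left the tx queues stopped; waking
 * them here lets traffic flow again once the device is reattached.)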
2310e35259a9SMax Krasnyansky */ 2311e35259a9SMax Krasnyansky if (netif_running(tun->dev)) 2312c8d68e6bSJason Wang netif_tx_wake_all_queues(tun->dev); 2313e35259a9SMax Krasnyansky 23141da177e4SLinus Torvalds strcpy(ifr->ifr_name, tun->dev->name); 23151da177e4SLinus Torvalds return 0; 23161da177e4SLinus Torvalds 2317662ca437SJason Wang err_detach: 2318662ca437SJason Wang tun_detach_all(dev); 2319ff244c6bSEric Dumazet /* register_netdevice() already called tun_free_netdev() */ 2320ff244c6bSEric Dumazet goto err_free_dev; 2321ff244c6bSEric Dumazet 2322662ca437SJason Wang err_free_flow: 2323662ca437SJason Wang tun_flow_uninit(tun); 2324662ca437SJason Wang security_tun_dev_free_security(tun->security); 2325608b9977SPaolo Abeni err_free_stat: 2326608b9977SPaolo Abeni free_percpu(tun->pcpu_stats); 23271da177e4SLinus Torvalds err_free_dev: 23281da177e4SLinus Torvalds free_netdev(dev); 23291da177e4SLinus Torvalds return err; 23301da177e4SLinus Torvalds } 23311da177e4SLinus Torvalds 23329ce99cf6SRami Rosen static void tun_get_iff(struct net *net, struct tun_struct *tun, 2333876bfd4dSHerbert Xu struct ifreq *ifr) 2334e3b99556SMark McLoughlin { 23356b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_get_iff\n"); 2336e3b99556SMark McLoughlin 2337e3b99556SMark McLoughlin strcpy(ifr->ifr_name, tun->dev->name); 2338e3b99556SMark McLoughlin 2339980c9e8cSDavid Woodhouse ifr->ifr_flags = tun_flags(tun); 2340e3b99556SMark McLoughlin 2341e3b99556SMark McLoughlin } 2342e3b99556SMark McLoughlin 23435228ddc9SRusty Russell /* This is like a cut-down ethtool ops, except done via tun fd so no 23445228ddc9SRusty Russell * privs required. */ 234588255375SMichał Mirosław static int set_offload(struct tun_struct *tun, unsigned long arg) 23465228ddc9SRusty Russell { 2347c8f44affSMichał Mirosław netdev_features_t features = 0; 23485228ddc9SRusty Russell 23495228ddc9SRusty Russell if (arg & TUN_F_CSUM) { 235088255375SMichał Mirosław features |= NETIF_F_HW_CSUM; 23515228ddc9SRusty Russell arg &= ~TUN_F_CSUM; 23525228ddc9SRusty Russell 23535228ddc9SRusty Russell if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { 23545228ddc9SRusty Russell if (arg & TUN_F_TSO_ECN) { 23555228ddc9SRusty Russell features |= NETIF_F_TSO_ECN; 23565228ddc9SRusty Russell arg &= ~TUN_F_TSO_ECN; 23575228ddc9SRusty Russell } 23585228ddc9SRusty Russell if (arg & TUN_F_TSO4) 23595228ddc9SRusty Russell features |= NETIF_F_TSO; 23605228ddc9SRusty Russell if (arg & TUN_F_TSO6) 23615228ddc9SRusty Russell features |= NETIF_F_TSO6; 23625228ddc9SRusty Russell arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 23635228ddc9SRusty Russell } 23645228ddc9SRusty Russell } 23655228ddc9SRusty Russell 23665228ddc9SRusty Russell /* This gives the user a way to test for new features in future by 23675228ddc9SRusty Russell * trying to set them. 
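 * Any bits still set in arg at this point are unknown to this
 * kernel, so they are rejected below rather than silently ignored.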
*/ 23685228ddc9SRusty Russell if (arg) 23695228ddc9SRusty Russell return -EINVAL; 23705228ddc9SRusty Russell 237188255375SMichał Mirosław tun->set_features = features; 237209050957SYaroslav Isakov tun->dev->wanted_features &= ~TUN_USER_FEATURES; 237309050957SYaroslav Isakov tun->dev->wanted_features |= features; 237488255375SMichał Mirosław netdev_update_features(tun->dev); 23755228ddc9SRusty Russell 23765228ddc9SRusty Russell return 0; 23775228ddc9SRusty Russell } 23785228ddc9SRusty Russell 2379c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n) 2380c8d68e6bSJason Wang { 2381c8d68e6bSJason Wang int i; 2382c8d68e6bSJason Wang struct tun_file *tfile; 2383c8d68e6bSJason Wang 2384c8d68e6bSJason Wang for (i = 0; i < n; i++) { 2385b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 23868ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 23878ced425eSHannes Frederic Sowa sk_detach_filter(tfile->socket.sk); 23888ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2389c8d68e6bSJason Wang } 2390c8d68e6bSJason Wang 2391c8d68e6bSJason Wang tun->filter_attached = false; 2392c8d68e6bSJason Wang } 2393c8d68e6bSJason Wang 2394c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun) 2395c8d68e6bSJason Wang { 2396c8d68e6bSJason Wang int i, ret = 0; 2397c8d68e6bSJason Wang struct tun_file *tfile; 2398c8d68e6bSJason Wang 2399c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2400b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 24018ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 24028ced425eSHannes Frederic Sowa ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); 24038ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2404c8d68e6bSJason Wang if (ret) { 2405c8d68e6bSJason Wang tun_detach_filter(tun, i); 2406c8d68e6bSJason Wang return ret; 2407c8d68e6bSJason Wang } 2408c8d68e6bSJason Wang } 2409c8d68e6bSJason Wang 2410c8d68e6bSJason Wang tun->filter_attached = true; 2411c8d68e6bSJason Wang return ret; 2412c8d68e6bSJason Wang } 2413c8d68e6bSJason Wang 2414c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun) 2415c8d68e6bSJason Wang { 2416c8d68e6bSJason Wang struct tun_file *tfile; 2417c8d68e6bSJason Wang int i; 2418c8d68e6bSJason Wang 2419c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2420b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 2421c8d68e6bSJason Wang tfile->socket.sk->sk_sndbuf = tun->sndbuf; 2422c8d68e6bSJason Wang } 2423c8d68e6bSJason Wang } 2424c8d68e6bSJason Wang 2425cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr) 2426cde8b15fSJason Wang { 2427cde8b15fSJason Wang struct tun_file *tfile = file->private_data; 2428cde8b15fSJason Wang struct tun_struct *tun; 2429cde8b15fSJason Wang int ret = 0; 2430cde8b15fSJason Wang 2431cde8b15fSJason Wang rtnl_lock(); 2432cde8b15fSJason Wang 2433cde8b15fSJason Wang if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { 24344008e97fSJason Wang tun = tfile->detached; 24355dbbaf2dSPaul Moore if (!tun) { 2436cde8b15fSJason Wang ret = -EINVAL; 24375dbbaf2dSPaul Moore goto unlock; 24385dbbaf2dSPaul Moore } 24395dbbaf2dSPaul Moore ret = security_tun_dev_attach_queue(tun->security); 24405dbbaf2dSPaul Moore if (ret < 0) 24415dbbaf2dSPaul Moore goto unlock; 244294317099SPetar Penkov ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI); 24434008e97fSJason Wang } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 2444b8deabd3SJason Wang tun = rtnl_dereference(tfile->tun); 244540630b82SMichael S. 
Tsirkin if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) 24464008e97fSJason Wang ret = -EINVAL; 2447cde8b15fSJason Wang else 24484008e97fSJason Wang __tun_detach(tfile, false); 24494008e97fSJason Wang } else 2450cde8b15fSJason Wang ret = -EINVAL; 2451cde8b15fSJason Wang 24525dbbaf2dSPaul Moore unlock: 2453cde8b15fSJason Wang rtnl_unlock(); 2454cde8b15fSJason Wang return ret; 2455cde8b15fSJason Wang } 2456cde8b15fSJason Wang 245750857e2aSArnd Bergmann static long __tun_chr_ioctl(struct file *file, unsigned int cmd, 245850857e2aSArnd Bergmann unsigned long arg, int ifreq_len) 24591da177e4SLinus Torvalds { 246036b50babSEric W. Biederman struct tun_file *tfile = file->private_data; 2461631ab46bSEric W. Biederman struct tun_struct *tun; 24621da177e4SLinus Torvalds void __user* argp = (void __user*)arg; 24631da177e4SLinus Torvalds struct ifreq ifr; 24640625c883SEric W. Biederman kuid_t owner; 24650625c883SEric W. Biederman kgid_t group; 246633dccbb0SHerbert Xu int sndbuf; 2467d9d52b51SMichael S. Tsirkin int vnet_hdr_sz; 2468fb7589a1SPavel Emelyanov unsigned int ifindex; 24691cf8e410SMichael S. Tsirkin int le; 2470f271b2ccSMax Krasnyansky int ret; 24711da177e4SLinus Torvalds 247220861f26SGao Feng if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == SOCK_IOC_TYPE) { 247350857e2aSArnd Bergmann if (copy_from_user(&ifr, argp, ifreq_len)) 24741da177e4SLinus Torvalds return -EFAULT; 24758bbb1813SDavid S. Miller } else { 2476a117dacdSMathias Krause memset(&ifr, 0, sizeof(ifr)); 24778bbb1813SDavid S. Miller } 2478631ab46bSEric W. Biederman if (cmd == TUNGETFEATURES) { 2479631ab46bSEric W. Biederman /* Currently this just means: "what IFF flags are valid?". 2480631ab46bSEric W. Biederman * This is needed because we never checked for invalid flags on 2481031f5e03SMichael S. Tsirkin * TUNSETIFF. 2482031f5e03SMichael S. Tsirkin */ 2483031f5e03SMichael S. Tsirkin return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES, 2484631ab46bSEric W. Biederman (unsigned int __user*)argp); 2485cde8b15fSJason Wang } else if (cmd == TUNSETQUEUE) 2486cde8b15fSJason Wang return tun_set_queue(file, &ifr); 2487631ab46bSEric W. Biederman 2488c8d68e6bSJason Wang ret = 0; 2489876bfd4dSHerbert Xu rtnl_lock(); 2490876bfd4dSHerbert Xu 24919484dc74Syuan linyu tun = tun_get(tfile); 24920f16bc13SGao Feng if (cmd == TUNSETIFF) { 24930f16bc13SGao Feng ret = -EEXIST; 24940f16bc13SGao Feng if (tun) 24950f16bc13SGao Feng goto unlock; 24960f16bc13SGao Feng 24971da177e4SLinus Torvalds ifr.ifr_name[IFNAMSIZ-1] = '\0'; 24981da177e4SLinus Torvalds 2499140e807dSEric W. 
Biederman ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr); 25001da177e4SLinus Torvalds 2501876bfd4dSHerbert Xu if (ret) 2502876bfd4dSHerbert Xu goto unlock; 25031da177e4SLinus Torvalds 250450857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 2505876bfd4dSHerbert Xu ret = -EFAULT; 2506876bfd4dSHerbert Xu goto unlock; 25071da177e4SLinus Torvalds } 2508fb7589a1SPavel Emelyanov if (cmd == TUNSETIFINDEX) { 2509fb7589a1SPavel Emelyanov ret = -EPERM; 2510fb7589a1SPavel Emelyanov if (tun) 2511fb7589a1SPavel Emelyanov goto unlock; 2512fb7589a1SPavel Emelyanov 2513fb7589a1SPavel Emelyanov ret = -EFAULT; 2514fb7589a1SPavel Emelyanov if (copy_from_user(&ifindex, argp, sizeof(ifindex))) 2515fb7589a1SPavel Emelyanov goto unlock; 2516fb7589a1SPavel Emelyanov 2517fb7589a1SPavel Emelyanov ret = 0; 2518fb7589a1SPavel Emelyanov tfile->ifindex = ifindex; 2519fb7589a1SPavel Emelyanov goto unlock; 2520fb7589a1SPavel Emelyanov } 25211da177e4SLinus Torvalds 2522876bfd4dSHerbert Xu ret = -EBADFD; 25231da177e4SLinus Torvalds if (!tun) 2524876bfd4dSHerbert Xu goto unlock; 25251da177e4SLinus Torvalds 25261e588338SJason Wang tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd); 25271da177e4SLinus Torvalds 2528631ab46bSEric W. Biederman ret = 0; 25291da177e4SLinus Torvalds switch (cmd) { 2530e3b99556SMark McLoughlin case TUNGETIFF: 25319ce99cf6SRami Rosen tun_get_iff(current->nsproxy->net_ns, tun, &ifr); 2532e3b99556SMark McLoughlin 25333d407a80SPavel Emelyanov if (tfile->detached) 25343d407a80SPavel Emelyanov ifr.ifr_flags |= IFF_DETACH_QUEUE; 2535849c9b6fSPavel Emelyanov if (!tfile->socket.sk->sk_filter) 2536849c9b6fSPavel Emelyanov ifr.ifr_flags |= IFF_NOFILTER; 25373d407a80SPavel Emelyanov 253850857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 2539631ab46bSEric W. Biederman ret = -EFAULT; 2540e3b99556SMark McLoughlin break; 2541e3b99556SMark McLoughlin 25421da177e4SLinus Torvalds case TUNSETNOCSUM: 25431da177e4SLinus Torvalds /* Disable/Enable checksum */ 25441da177e4SLinus Torvalds 254588255375SMichał Mirosław /* [unimplemented] */ 254688255375SMichał Mirosław tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n", 25476b8a66eeSJoe Perches arg ? "disabled" : "enabled"); 25481da177e4SLinus Torvalds break; 25491da177e4SLinus Torvalds 25501da177e4SLinus Torvalds case TUNSETPERSIST: 255154f968d6SJason Wang /* Disable/Enable persist mode. Keep an extra reference to the 255254f968d6SJason Wang * module to prevent the module being unprobed. 255354f968d6SJason Wang */ 255440630b82SMichael S. Tsirkin if (arg && !(tun->flags & IFF_PERSIST)) { 255540630b82SMichael S. Tsirkin tun->flags |= IFF_PERSIST; 255654f968d6SJason Wang __module_get(THIS_MODULE); 2557dd38bd85SJason Wang } 255840630b82SMichael S. Tsirkin if (!arg && (tun->flags & IFF_PERSIST)) { 255940630b82SMichael S. Tsirkin tun->flags &= ~IFF_PERSIST; 256054f968d6SJason Wang module_put(THIS_MODULE); 256154f968d6SJason Wang } 25621da177e4SLinus Torvalds 25636b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "persist %s\n", 25646b8a66eeSJoe Perches arg ? "enabled" : "disabled"); 25651da177e4SLinus Torvalds break; 25661da177e4SLinus Torvalds 25671da177e4SLinus Torvalds case TUNSETOWNER: 25681da177e4SLinus Torvalds /* Set owner of the device */ 25690625c883SEric W. Biederman owner = make_kuid(current_user_ns(), arg); 25700625c883SEric W. Biederman if (!uid_valid(owner)) { 25710625c883SEric W. Biederman ret = -EINVAL; 25720625c883SEric W. Biederman break; 25730625c883SEric W. Biederman } 25740625c883SEric W. 
Biederman tun->owner = owner; 25751e588338SJason Wang tun_debug(KERN_INFO, tun, "owner set to %u\n", 25760625c883SEric W. Biederman from_kuid(&init_user_ns, tun->owner)); 25771da177e4SLinus Torvalds break; 25781da177e4SLinus Torvalds 25798c644623SGuido Guenther case TUNSETGROUP: 25808c644623SGuido Guenther /* Set group of the device */ 25810625c883SEric W. Biederman group = make_kgid(current_user_ns(), arg); 25820625c883SEric W. Biederman if (!gid_valid(group)) { 25830625c883SEric W. Biederman ret = -EINVAL; 25840625c883SEric W. Biederman break; 25850625c883SEric W. Biederman } 25860625c883SEric W. Biederman tun->group = group; 25871e588338SJason Wang tun_debug(KERN_INFO, tun, "group set to %u\n", 25880625c883SEric W. Biederman from_kgid(&init_user_ns, tun->group)); 25898c644623SGuido Guenther break; 25908c644623SGuido Guenther 2591ff4cc3acSMike Kershaw case TUNSETLINK: 2592ff4cc3acSMike Kershaw /* Only allow setting the type when the interface is down */ 2593ff4cc3acSMike Kershaw if (tun->dev->flags & IFF_UP) { 25946b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, 25956b8a66eeSJoe Perches "Linktype set failed because interface is up\n"); 259648abfe05SDavid S. Miller ret = -EBUSY; 2597ff4cc3acSMike Kershaw } else { 2598ff4cc3acSMike Kershaw tun->dev->type = (int) arg; 25996b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "linktype set to %d\n", 26006b8a66eeSJoe Perches tun->dev->type); 260148abfe05SDavid S. Miller ret = 0; 2602ff4cc3acSMike Kershaw } 2603631ab46bSEric W. Biederman break; 2604ff4cc3acSMike Kershaw 26051da177e4SLinus Torvalds #ifdef TUN_DEBUG 26061da177e4SLinus Torvalds case TUNSETDEBUG: 26071da177e4SLinus Torvalds tun->debug = arg; 26081da177e4SLinus Torvalds break; 26091da177e4SLinus Torvalds #endif 26105228ddc9SRusty Russell case TUNSETOFFLOAD: 261188255375SMichał Mirosław ret = set_offload(tun, arg); 2612631ab46bSEric W. Biederman break; 26135228ddc9SRusty Russell 2614f271b2ccSMax Krasnyansky case TUNSETTXFILTER: 2615f271b2ccSMax Krasnyansky /* Can be set only for TAPs */ 2616631ab46bSEric W. Biederman ret = -EINVAL; 261740630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 2618631ab46bSEric W. Biederman break; 2619c0e5a8c2SHarvey Harrison ret = update_filter(&tun->txflt, (void __user *)arg); 2620631ab46bSEric W. Biederman break; 26211da177e4SLinus Torvalds 26221da177e4SLinus Torvalds case SIOCGIFHWADDR: 2623b595076aSUwe Kleine-König /* Get hw address */ 2624f271b2ccSMax Krasnyansky memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); 2625f271b2ccSMax Krasnyansky ifr.ifr_hwaddr.sa_family = tun->dev->type; 262650857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 2627631ab46bSEric W. Biederman ret = -EFAULT; 2628631ab46bSEric W. Biederman break; 26291da177e4SLinus Torvalds 26301da177e4SLinus Torvalds case SIOCSIFHWADDR: 2631f271b2ccSMax Krasnyansky /* Set hw address */ 26326b8a66eeSJoe Perches tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n", 26336b8a66eeSJoe Perches ifr.ifr_hwaddr.sa_data); 263440102371SKim B. Heino 263540102371SKim B. Heino ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 2636631ab46bSEric W. 
Biederman break; 263733dccbb0SHerbert Xu 263833dccbb0SHerbert Xu case TUNGETSNDBUF: 263954f968d6SJason Wang sndbuf = tfile->socket.sk->sk_sndbuf; 264033dccbb0SHerbert Xu if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) 264133dccbb0SHerbert Xu ret = -EFAULT; 264233dccbb0SHerbert Xu break; 264333dccbb0SHerbert Xu 264433dccbb0SHerbert Xu case TUNSETSNDBUF: 264533dccbb0SHerbert Xu if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { 264633dccbb0SHerbert Xu ret = -EFAULT; 264733dccbb0SHerbert Xu break; 264833dccbb0SHerbert Xu } 264933dccbb0SHerbert Xu 2650c8d68e6bSJason Wang tun->sndbuf = sndbuf; 2651c8d68e6bSJason Wang tun_set_sndbuf(tun); 265233dccbb0SHerbert Xu break; 265333dccbb0SHerbert Xu 2654d9d52b51SMichael S. Tsirkin case TUNGETVNETHDRSZ: 2655d9d52b51SMichael S. Tsirkin vnet_hdr_sz = tun->vnet_hdr_sz; 2656d9d52b51SMichael S. Tsirkin if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) 2657d9d52b51SMichael S. Tsirkin ret = -EFAULT; 2658d9d52b51SMichael S. Tsirkin break; 2659d9d52b51SMichael S. Tsirkin 2660d9d52b51SMichael S. Tsirkin case TUNSETVNETHDRSZ: 2661d9d52b51SMichael S. Tsirkin if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { 2662d9d52b51SMichael S. Tsirkin ret = -EFAULT; 2663d9d52b51SMichael S. Tsirkin break; 2664d9d52b51SMichael S. Tsirkin } 2665d9d52b51SMichael S. Tsirkin if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { 2666d9d52b51SMichael S. Tsirkin ret = -EINVAL; 2667d9d52b51SMichael S. Tsirkin break; 2668d9d52b51SMichael S. Tsirkin } 2669d9d52b51SMichael S. Tsirkin 2670d9d52b51SMichael S. Tsirkin tun->vnet_hdr_sz = vnet_hdr_sz; 2671d9d52b51SMichael S. Tsirkin break; 2672d9d52b51SMichael S. Tsirkin 26731cf8e410SMichael S. Tsirkin case TUNGETVNETLE: 26741cf8e410SMichael S. Tsirkin le = !!(tun->flags & TUN_VNET_LE); 26751cf8e410SMichael S. Tsirkin if (put_user(le, (int __user *)argp)) 26761cf8e410SMichael S. Tsirkin ret = -EFAULT; 26771cf8e410SMichael S. Tsirkin break; 26781cf8e410SMichael S. Tsirkin 26791cf8e410SMichael S. Tsirkin case TUNSETVNETLE: 26801cf8e410SMichael S. Tsirkin if (get_user(le, (int __user *)argp)) { 26811cf8e410SMichael S. Tsirkin ret = -EFAULT; 26821cf8e410SMichael S. Tsirkin break; 26831cf8e410SMichael S. Tsirkin } 26841cf8e410SMichael S. Tsirkin if (le) 26851cf8e410SMichael S. Tsirkin tun->flags |= TUN_VNET_LE; 26861cf8e410SMichael S. Tsirkin else 26871cf8e410SMichael S. Tsirkin tun->flags &= ~TUN_VNET_LE; 26881cf8e410SMichael S. Tsirkin break; 26891cf8e410SMichael S. Tsirkin 26908b8e658bSGreg Kurz case TUNGETVNETBE: 26918b8e658bSGreg Kurz ret = tun_get_vnet_be(tun, argp); 26928b8e658bSGreg Kurz break; 26938b8e658bSGreg Kurz 26948b8e658bSGreg Kurz case TUNSETVNETBE: 26958b8e658bSGreg Kurz ret = tun_set_vnet_be(tun, argp); 26968b8e658bSGreg Kurz break; 26978b8e658bSGreg Kurz 269899405162SMichael S. Tsirkin case TUNATTACHFILTER: 269999405162SMichael S. Tsirkin /* Can be set only for TAPs */ 270099405162SMichael S. Tsirkin ret = -EINVAL; 270140630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 270299405162SMichael S. Tsirkin break; 270399405162SMichael S. Tsirkin ret = -EFAULT; 270454f968d6SJason Wang if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) 270599405162SMichael S. Tsirkin break; 270699405162SMichael S. Tsirkin 2707c8d68e6bSJason Wang ret = tun_attach_filter(tun); 270899405162SMichael S. Tsirkin break; 270999405162SMichael S. Tsirkin 271099405162SMichael S. Tsirkin case TUNDETACHFILTER: 271199405162SMichael S. Tsirkin /* Can be set only for TAPs */ 271299405162SMichael S. 
Tsirkin ret = -EINVAL; 271340630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 271499405162SMichael S. Tsirkin break; 2715c8d68e6bSJason Wang ret = 0; 2716c8d68e6bSJason Wang tun_detach_filter(tun, tun->numqueues); 271799405162SMichael S. Tsirkin break; 271899405162SMichael S. Tsirkin 271976975e9cSPavel Emelyanov case TUNGETFILTER: 272076975e9cSPavel Emelyanov ret = -EINVAL; 272140630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 272276975e9cSPavel Emelyanov break; 272376975e9cSPavel Emelyanov ret = -EFAULT; 272476975e9cSPavel Emelyanov if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) 272576975e9cSPavel Emelyanov break; 272676975e9cSPavel Emelyanov ret = 0; 272776975e9cSPavel Emelyanov break; 272876975e9cSPavel Emelyanov 27291da177e4SLinus Torvalds default: 2730631ab46bSEric W. Biederman ret = -EINVAL; 2731631ab46bSEric W. Biederman break; 2732ee289b64SJoe Perches } 27331da177e4SLinus Torvalds 2734876bfd4dSHerbert Xu unlock: 2735876bfd4dSHerbert Xu rtnl_unlock(); 2736876bfd4dSHerbert Xu if (tun) 2737631ab46bSEric W. Biederman tun_put(tun); 2738631ab46bSEric W. Biederman return ret; 27391da177e4SLinus Torvalds } 27401da177e4SLinus Torvalds 274150857e2aSArnd Bergmann static long tun_chr_ioctl(struct file *file, 274250857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 274350857e2aSArnd Bergmann { 274450857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); 274550857e2aSArnd Bergmann } 274650857e2aSArnd Bergmann 274750857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 274850857e2aSArnd Bergmann static long tun_chr_compat_ioctl(struct file *file, 274950857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 275050857e2aSArnd Bergmann { 275150857e2aSArnd Bergmann switch (cmd) { 275250857e2aSArnd Bergmann case TUNSETIFF: 275350857e2aSArnd Bergmann case TUNGETIFF: 275450857e2aSArnd Bergmann case TUNSETTXFILTER: 275550857e2aSArnd Bergmann case TUNGETSNDBUF: 275650857e2aSArnd Bergmann case TUNSETSNDBUF: 275750857e2aSArnd Bergmann case SIOCGIFHWADDR: 275850857e2aSArnd Bergmann case SIOCSIFHWADDR: 275950857e2aSArnd Bergmann arg = (unsigned long)compat_ptr(arg); 276050857e2aSArnd Bergmann break; 276150857e2aSArnd Bergmann default: 276250857e2aSArnd Bergmann arg = (compat_ulong_t)arg; 276350857e2aSArnd Bergmann break; 276450857e2aSArnd Bergmann } 276550857e2aSArnd Bergmann 276650857e2aSArnd Bergmann /* 276750857e2aSArnd Bergmann * compat_ifreq is shorter than ifreq, so we must not access beyond 276850857e2aSArnd Bergmann * the end of that structure. All fields that are used in this 276950857e2aSArnd Bergmann * driver are compatible though, we don't need to convert the 277050857e2aSArnd Bergmann * contents. 
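 * Passing sizeof(struct compat_ifreq) as ifreq_len below keeps the
 * copy_from_user()/copy_to_user() calls in __tun_chr_ioctl() within the
 * smaller 32-bit layout.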
277150857e2aSArnd Bergmann */ 277250857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); 277350857e2aSArnd Bergmann } 277450857e2aSArnd Bergmann #endif /* CONFIG_COMPAT */ 277550857e2aSArnd Bergmann 27761da177e4SLinus Torvalds static int tun_chr_fasync(int fd, struct file *file, int on) 27771da177e4SLinus Torvalds { 277854f968d6SJason Wang struct tun_file *tfile = file->private_data; 27791da177e4SLinus Torvalds int ret; 27801da177e4SLinus Torvalds 278154f968d6SJason Wang if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) 27829d319522SJonathan Corbet goto out; 27831da177e4SLinus Torvalds 27841da177e4SLinus Torvalds if (on) { 2785e0b93eddSJeff Layton __f_setown(file, task_pid(current), PIDTYPE_PID, 0); 278654f968d6SJason Wang tfile->flags |= TUN_FASYNC; 27871da177e4SLinus Torvalds } else 278854f968d6SJason Wang tfile->flags &= ~TUN_FASYNC; 27899d319522SJonathan Corbet ret = 0; 27909d319522SJonathan Corbet out: 27919d319522SJonathan Corbet return ret; 27921da177e4SLinus Torvalds } 27931da177e4SLinus Torvalds 27941da177e4SLinus Torvalds static int tun_chr_open(struct inode *inode, struct file * file) 27951da177e4SLinus Torvalds { 2796140e807dSEric W. Biederman struct net *net = current->nsproxy->net_ns; 2797631ab46bSEric W. Biederman struct tun_file *tfile; 2798deed49fbSThomas Gleixner 27996b8a66eeSJoe Perches DBG1(KERN_INFO, "tunX: tun_chr_open\n"); 2800631ab46bSEric W. Biederman 2801140e807dSEric W. Biederman tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, 280211aa9c28SEric W. Biederman &tun_proto, 0); 2803631ab46bSEric W. Biederman if (!tfile) 2804631ab46bSEric W. Biederman return -ENOMEM; 2805c956674bSMonam Agarwal RCU_INIT_POINTER(tfile->tun, NULL); 280654f968d6SJason Wang tfile->flags = 0; 2807fb7589a1SPavel Emelyanov tfile->ifindex = 0; 280854f968d6SJason Wang 280954f968d6SJason Wang init_waitqueue_head(&tfile->wq.wait); 28109e641bdcSXi Wang RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq); 281154f968d6SJason Wang 281254f968d6SJason Wang tfile->socket.file = file; 281354f968d6SJason Wang tfile->socket.ops = &tun_socket_ops; 281454f968d6SJason Wang 281554f968d6SJason Wang sock_init_data(&tfile->socket, &tfile->sk); 281654f968d6SJason Wang 281754f968d6SJason Wang tfile->sk.sk_write_space = tun_sock_write_space; 281854f968d6SJason Wang tfile->sk.sk_sndbuf = INT_MAX; 281954f968d6SJason Wang 2820631ab46bSEric W. Biederman file->private_data = tfile; 28214008e97fSJason Wang INIT_LIST_HEAD(&tfile->next); 282254f968d6SJason Wang 282319a6afb2SJason Wang sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); 282419a6afb2SJason Wang 28251da177e4SLinus Torvalds return 0; 28261da177e4SLinus Torvalds } 28271da177e4SLinus Torvalds 28281da177e4SLinus Torvalds static int tun_chr_close(struct inode *inode, struct file *file) 28291da177e4SLinus Torvalds { 2830631ab46bSEric W. 
Biederman struct tun_file *tfile = file->private_data; 28311da177e4SLinus Torvalds 2832c8d68e6bSJason Wang tun_detach(tfile, true); 28331da177e4SLinus Torvalds 28341da177e4SLinus Torvalds return 0; 28351da177e4SLinus Torvalds } 28361da177e4SLinus Torvalds 283793e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 28389484dc74Syuan linyu static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) 283993e14b6dSMasatake YAMATO { 28409484dc74Syuan linyu struct tun_file *tfile = file->private_data; 284193e14b6dSMasatake YAMATO struct tun_struct *tun; 284293e14b6dSMasatake YAMATO struct ifreq ifr; 284393e14b6dSMasatake YAMATO 284493e14b6dSMasatake YAMATO memset(&ifr, 0, sizeof(ifr)); 284593e14b6dSMasatake YAMATO 284693e14b6dSMasatake YAMATO rtnl_lock(); 28479484dc74Syuan linyu tun = tun_get(tfile); 284893e14b6dSMasatake YAMATO if (tun) 284993e14b6dSMasatake YAMATO tun_get_iff(current->nsproxy->net_ns, tun, &ifr); 285093e14b6dSMasatake YAMATO rtnl_unlock(); 285193e14b6dSMasatake YAMATO 285293e14b6dSMasatake YAMATO if (tun) 285393e14b6dSMasatake YAMATO tun_put(tun); 285493e14b6dSMasatake YAMATO 2855a3816ab0SJoe Perches seq_printf(m, "iff:\t%s\n", ifr.ifr_name); 285693e14b6dSMasatake YAMATO } 285793e14b6dSMasatake YAMATO #endif 285893e14b6dSMasatake YAMATO 2859d54b1fdbSArjan van de Ven static const struct file_operations tun_fops = { 28601da177e4SLinus Torvalds .owner = THIS_MODULE, 28611da177e4SLinus Torvalds .llseek = no_llseek, 28629b067034SAl Viro .read_iter = tun_chr_read_iter, 2863f5ff53b4SAl Viro .write_iter = tun_chr_write_iter, 28641da177e4SLinus Torvalds .poll = tun_chr_poll, 2865876bfd4dSHerbert Xu .unlocked_ioctl = tun_chr_ioctl, 286650857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 286750857e2aSArnd Bergmann .compat_ioctl = tun_chr_compat_ioctl, 286850857e2aSArnd Bergmann #endif 28691da177e4SLinus Torvalds .open = tun_chr_open, 28701da177e4SLinus Torvalds .release = tun_chr_close, 287193e14b6dSMasatake YAMATO .fasync = tun_chr_fasync, 287293e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 287393e14b6dSMasatake YAMATO .show_fdinfo = tun_chr_show_fdinfo, 287493e14b6dSMasatake YAMATO #endif 28751da177e4SLinus Torvalds }; 28761da177e4SLinus Torvalds 28771da177e4SLinus Torvalds static struct miscdevice tun_miscdev = { 28781da177e4SLinus Torvalds .minor = TUN_MINOR, 28791da177e4SLinus Torvalds .name = "tun", 2880e454cea2SKay Sievers .nodename = "net/tun", 28811da177e4SLinus Torvalds .fops = &tun_fops, 28821da177e4SLinus Torvalds }; 28831da177e4SLinus Torvalds 28841da177e4SLinus Torvalds /* ethtool interface */ 28851da177e4SLinus Torvalds 288629ccc49dSPhilippe Reynes static int tun_get_link_ksettings(struct net_device *dev, 288729ccc49dSPhilippe Reynes struct ethtool_link_ksettings *cmd) 28881da177e4SLinus Torvalds { 288929ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, supported); 289029ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, advertising); 289129ccc49dSPhilippe Reynes cmd->base.speed = SPEED_10; 289229ccc49dSPhilippe Reynes cmd->base.duplex = DUPLEX_FULL; 289329ccc49dSPhilippe Reynes cmd->base.port = PORT_TP; 289429ccc49dSPhilippe Reynes cmd->base.phy_address = 0; 289529ccc49dSPhilippe Reynes cmd->base.autoneg = AUTONEG_DISABLE; 28961da177e4SLinus Torvalds return 0; 28971da177e4SLinus Torvalds } 28981da177e4SLinus Torvalds 28991da177e4SLinus Torvalds static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 29001da177e4SLinus Torvalds { 29011da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 29021da177e4SLinus 
Torvalds 290333a5ba14SRick Jones strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 290433a5ba14SRick Jones strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 29051da177e4SLinus Torvalds 29061da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 290740630b82SMichael S. Tsirkin case IFF_TUN: 290833a5ba14SRick Jones strlcpy(info->bus_info, "tun", sizeof(info->bus_info)); 29091da177e4SLinus Torvalds break; 291040630b82SMichael S. Tsirkin case IFF_TAP: 291133a5ba14SRick Jones strlcpy(info->bus_info, "tap", sizeof(info->bus_info)); 29121da177e4SLinus Torvalds break; 29131da177e4SLinus Torvalds } 29141da177e4SLinus Torvalds } 29151da177e4SLinus Torvalds 29161da177e4SLinus Torvalds static u32 tun_get_msglevel(struct net_device *dev) 29171da177e4SLinus Torvalds { 29181da177e4SLinus Torvalds #ifdef TUN_DEBUG 29191da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 29201da177e4SLinus Torvalds return tun->debug; 29211da177e4SLinus Torvalds #else 29221da177e4SLinus Torvalds return -EOPNOTSUPP; 29231da177e4SLinus Torvalds #endif 29241da177e4SLinus Torvalds } 29251da177e4SLinus Torvalds 29261da177e4SLinus Torvalds static void tun_set_msglevel(struct net_device *dev, u32 value) 29271da177e4SLinus Torvalds { 29281da177e4SLinus Torvalds #ifdef TUN_DEBUG 29291da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 29301da177e4SLinus Torvalds tun->debug = value; 29311da177e4SLinus Torvalds #endif 29321da177e4SLinus Torvalds } 29331da177e4SLinus Torvalds 29345503fcecSJason Wang static int tun_get_coalesce(struct net_device *dev, 29355503fcecSJason Wang struct ethtool_coalesce *ec) 29365503fcecSJason Wang { 29375503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 29385503fcecSJason Wang 29395503fcecSJason Wang ec->rx_max_coalesced_frames = tun->rx_batched; 29405503fcecSJason Wang 29415503fcecSJason Wang return 0; 29425503fcecSJason Wang } 29435503fcecSJason Wang 29445503fcecSJason Wang static int tun_set_coalesce(struct net_device *dev, 29455503fcecSJason Wang struct ethtool_coalesce *ec) 29465503fcecSJason Wang { 29475503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 29485503fcecSJason Wang 29495503fcecSJason Wang if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) 29505503fcecSJason Wang tun->rx_batched = NAPI_POLL_WEIGHT; 29515503fcecSJason Wang else 29525503fcecSJason Wang tun->rx_batched = ec->rx_max_coalesced_frames; 29535503fcecSJason Wang 29545503fcecSJason Wang return 0; 29555503fcecSJason Wang } 29565503fcecSJason Wang 29577282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops = { 29581da177e4SLinus Torvalds .get_drvinfo = tun_get_drvinfo, 29591da177e4SLinus Torvalds .get_msglevel = tun_get_msglevel, 29601da177e4SLinus Torvalds .set_msglevel = tun_set_msglevel, 2961bee31369SNolan Leake .get_link = ethtool_op_get_link, 2962eda29772SRichard Cochran .get_ts_info = ethtool_op_get_ts_info, 29635503fcecSJason Wang .get_coalesce = tun_get_coalesce, 29645503fcecSJason Wang .set_coalesce = tun_set_coalesce, 296529ccc49dSPhilippe Reynes .get_link_ksettings = tun_get_link_ksettings, 29661da177e4SLinus Torvalds }; 29671da177e4SLinus Torvalds 29681576d986SJason Wang static int tun_queue_resize(struct tun_struct *tun) 29691576d986SJason Wang { 29701576d986SJason Wang struct net_device *dev = tun->dev; 29711576d986SJason Wang struct tun_file *tfile; 29721576d986SJason Wang struct skb_array **arrays; 29731576d986SJason Wang int n = tun->numqueues + tun->numdisabled; 29741576d986SJason Wang int ret, i; 29751576d986SJason Wang 
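	/* Collect the skb_array of every queue (the active tfiles plus any
	 * detached ones on tun->disabled) so that all per-queue rings can be
	 * resized together to the new dev->tx_queue_len.
	 */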
297612039046Sstephen hemminger arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL); 29771576d986SJason Wang if (!arrays) 29781576d986SJason Wang return -ENOMEM; 29791576d986SJason Wang 29801576d986SJason Wang for (i = 0; i < tun->numqueues; i++) { 29811576d986SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 29821576d986SJason Wang arrays[i] = &tfile->tx_array; 29831576d986SJason Wang } 29841576d986SJason Wang list_for_each_entry(tfile, &tun->disabled, next) 29851576d986SJason Wang arrays[i++] = &tfile->tx_array; 29861576d986SJason Wang 29871576d986SJason Wang ret = skb_array_resize_multiple(arrays, n, 29881576d986SJason Wang dev->tx_queue_len, GFP_KERNEL); 29891576d986SJason Wang 29901576d986SJason Wang kfree(arrays); 29911576d986SJason Wang return ret; 29921576d986SJason Wang } 29931576d986SJason Wang 29941576d986SJason Wang static int tun_device_event(struct notifier_block *unused, 29951576d986SJason Wang unsigned long event, void *ptr) 29961576d986SJason Wang { 29971576d986SJason Wang struct net_device *dev = netdev_notifier_info_to_dev(ptr); 29981576d986SJason Wang struct tun_struct *tun = netdev_priv(dev); 29991576d986SJason Wang 300086dfb4acSCraig Gallek if (dev->rtnl_link_ops != &tun_link_ops) 300186dfb4acSCraig Gallek return NOTIFY_DONE; 300286dfb4acSCraig Gallek 30031576d986SJason Wang switch (event) { 30041576d986SJason Wang case NETDEV_CHANGE_TX_QUEUE_LEN: 30051576d986SJason Wang if (tun_queue_resize(tun)) 30061576d986SJason Wang return NOTIFY_BAD; 30071576d986SJason Wang break; 30081576d986SJason Wang default: 30091576d986SJason Wang break; 30101576d986SJason Wang } 30111576d986SJason Wang 30121576d986SJason Wang return NOTIFY_DONE; 30131576d986SJason Wang } 30141576d986SJason Wang 30151576d986SJason Wang static struct notifier_block tun_notifier_block __read_mostly = { 30161576d986SJason Wang .notifier_call = tun_device_event, 30171576d986SJason Wang }; 301879d17604SPavel Emelyanov 30191da177e4SLinus Torvalds static int __init tun_init(void) 30201da177e4SLinus Torvalds { 30211da177e4SLinus Torvalds int ret = 0; 30221da177e4SLinus Torvalds 30236b8a66eeSJoe Perches pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 30241da177e4SLinus Torvalds 3025f019a7a5SEric W. Biederman ret = rtnl_link_register(&tun_link_ops); 302679d17604SPavel Emelyanov if (ret) { 30276b8a66eeSJoe Perches pr_err("Can't register link_ops\n"); 3028f019a7a5SEric W. Biederman goto err_linkops; 302979d17604SPavel Emelyanov } 303079d17604SPavel Emelyanov 30311da177e4SLinus Torvalds ret = misc_register(&tun_miscdev); 303279d17604SPavel Emelyanov if (ret) { 30336b8a66eeSJoe Perches pr_err("Can't register misc device %d\n", TUN_MINOR); 303479d17604SPavel Emelyanov goto err_misc; 303579d17604SPavel Emelyanov } 30361576d986SJason Wang 30375edfbd3cSTonghao Zhang ret = register_netdevice_notifier(&tun_notifier_block); 30385edfbd3cSTonghao Zhang if (ret) { 30395edfbd3cSTonghao Zhang pr_err("Can't register netdevice notifier\n"); 30405edfbd3cSTonghao Zhang goto err_notifier; 30415edfbd3cSTonghao Zhang } 30425edfbd3cSTonghao Zhang 304379d17604SPavel Emelyanov return 0; 30445edfbd3cSTonghao Zhang 30455edfbd3cSTonghao Zhang err_notifier: 30465edfbd3cSTonghao Zhang misc_deregister(&tun_miscdev); 304779d17604SPavel Emelyanov err_misc: 3048f019a7a5SEric W. Biederman rtnl_link_unregister(&tun_link_ops); 3049f019a7a5SEric W. 
Biederman err_linkops: 30501da177e4SLinus Torvalds return ret; 30511da177e4SLinus Torvalds } 30521da177e4SLinus Torvalds 30531da177e4SLinus Torvalds static void tun_cleanup(void) 30541da177e4SLinus Torvalds { 30551da177e4SLinus Torvalds misc_deregister(&tun_miscdev); 3056f019a7a5SEric W. Biederman rtnl_link_unregister(&tun_link_ops); 30571576d986SJason Wang unregister_netdevice_notifier(&tun_notifier_block); 30581da177e4SLinus Torvalds } 30591da177e4SLinus Torvalds 306005c2828cSMichael S. Tsirkin /* Get an underlying socket object from tun file. Returns error unless file is 306105c2828cSMichael S. Tsirkin * attached to a device. The returned object works like a packet socket, it 306205c2828cSMichael S. Tsirkin * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for 306305c2828cSMichael S. Tsirkin * holding a reference to the file for as long as the socket is in use. */ 306405c2828cSMichael S. Tsirkin struct socket *tun_get_socket(struct file *file) 306505c2828cSMichael S. Tsirkin { 30666e914fc7SJason Wang struct tun_file *tfile; 306705c2828cSMichael S. Tsirkin if (file->f_op != &tun_fops) 306805c2828cSMichael S. Tsirkin return ERR_PTR(-EINVAL); 30696e914fc7SJason Wang tfile = file->private_data; 30706e914fc7SJason Wang if (!tfile) 307105c2828cSMichael S. Tsirkin return ERR_PTR(-EBADFD); 307254f968d6SJason Wang return &tfile->socket; 307305c2828cSMichael S. Tsirkin } 307405c2828cSMichael S. Tsirkin EXPORT_SYMBOL_GPL(tun_get_socket); 307505c2828cSMichael S. Tsirkin 307683339c6bSJason Wang struct skb_array *tun_get_skb_array(struct file *file) 307783339c6bSJason Wang { 307883339c6bSJason Wang struct tun_file *tfile; 307983339c6bSJason Wang 308083339c6bSJason Wang if (file->f_op != &tun_fops) 308183339c6bSJason Wang return ERR_PTR(-EINVAL); 308283339c6bSJason Wang tfile = file->private_data; 308383339c6bSJason Wang if (!tfile) 308483339c6bSJason Wang return ERR_PTR(-EBADFD); 308583339c6bSJason Wang return &tfile->tx_array; 308683339c6bSJason Wang } 308783339c6bSJason Wang EXPORT_SYMBOL_GPL(tun_get_skb_array); 308883339c6bSJason Wang 30891da177e4SLinus Torvalds module_init(tun_init); 30901da177e4SLinus Torvalds module_exit(tun_cleanup); 30911da177e4SLinus Torvalds MODULE_DESCRIPTION(DRV_DESCRIPTION); 30921da177e4SLinus Torvalds MODULE_AUTHOR(DRV_COPYRIGHT); 30931da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 30941da177e4SLinus Torvalds MODULE_ALIAS_MISCDEV(TUN_MINOR); 3095578454ffSKay Sievers MODULE_ALIAS("devname:net/tun"); 3096
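/*
 * Illustrative userspace sketch of the character-device interface handled by
 * __tun_chr_ioctl() above. This is not part of the driver: error handling is
 * trimmed and the tun_alloc() helper name is only an example.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	int tun_alloc(char *name)	// name: buffer of at least IFNAMSIZ
 *	{
 *		struct ifreq ifr;
 *		int fd = open("/dev/net/tun", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TAP | IFF_NO_PI;	// or IFF_TUN
 *		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
 *		if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		strcpy(name, ifr.ifr_name);	// kernel may have picked the name
 *		return fd;			// read()/write() now carry frames
 *	}
 *
 * With IFF_MULTI_QUEUE set in ifr_flags, further descriptors can be bound to
 * the same device in the same way and individually enabled or disabled via
 * TUNSETQUEUE with IFF_ATTACH_QUEUE or IFF_DETACH_QUEUE, as implemented in
 * tun_set_queue().
 */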