/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>

/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (tun->debug)						\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (debug == 2)						\
		printk(level fmt, ##args);			\
} while (0)
#else
#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (0)							\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (0)							\
		printk(level fmt, ##args);			\
} while (0)
#endif
#define TUN_HEADROOM 256
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE	0x80000000
#define TUN_VNET_BE	0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int	count;    /* Number of addrs. Zero means disabled */
	u32		mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and
 * tap_filter) needed to serve as one transmit queue for the tuntap device.
 * The sock_fprog and tap_filter are kept in tun_struct since they are used
 * to filter for the netdevice, not for a specific queue (at least I didn't
 * see the requirement for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to
 * the other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct skb_array tx_array;
};
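/* Illustration of the RCU coupling described above (editor's sketch, not
 * driver code): readers follow the usual pattern, exactly as tun_get()
 * further down does:
 *
 *	rcu_read_lock();
 *	tun = rcu_dereference(tfile->tun);
 *	if (tun)
 *		dev_hold(tun->dev);	// pin the netdev before unlocking
 *	rcu_read_unlock();
 *
 * Writers publish or clear the pointer with rcu_assign_pointer() /
 * RCU_INIT_POINTER() under rtnl_lock (see tun_attach()/__tun_detach()).
 */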
struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated;
};

#define TUN_NUM_FLOW_ENTRIES 1024

struct tun_steering_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the sockets were moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when the file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int		numqueues;
	unsigned int		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_steering_prog __rcu *steering_prog;
};
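/* Locking summary for tun_struct (as used by the code below): the tfiles[]
 * array and the disabled list are modified only under rtnl_lock and read
 * under RCU; tun->lock is a spinlock protecting the flow table and
 * flow_count; napi_mutex protects tfile->napi on the IFF_NAPI_FRAGS path.
 */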
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en)
{
	tfile->napi_enabled = napi_en;
	if (napi_en) {
		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
		mutex_init(&tfile->napi_mutex);
	}
}
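/* NAPI contract note (editor's summary of the helpers above):
 * tun_napi_receive() drains sk_write_queue under its lock into a private
 * list, then runs GRO without the lock; anything left over beyond the
 * budget is spliced back. tun_napi_poll() may call napi_complete_done()
 * only when it consumed less than the budget, which is exactly what the
 * early-return shape above guarantees.
 */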
static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_struct *tun)
{
	return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
}

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}
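/* Why these helpers exist (illustrative): a legacy virtio-net header written
 * by a big-endian guest stores its u16 fields as __virtio16 in guest byte
 * order, so the data paths later in this file convert fields such as
 * hdr_len and gso_size with tun16_to_cpu()/cpu_to_tun16() rather than
 * le16_to_cpu(), letting TUN_VNET_LE / TUN_VNET_BE decide the byte order
 * per device.
 */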
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & 0x3ff;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}
static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}
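/* Note on the GC pass above: an entry expires once it has not been updated
 * for ageing_time jiffies. The timer is re-armed only while live entries
 * remain, at the earliest upcoming expiry (rounded up), so an idle flow
 * table costs no periodic timer wakeups.
 */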
static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	if (!rxhash)
		return;
	else
		head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	/* There is a small possibility of out-of-order delivery while a
	 * flow is switching queues; it is not worth optimizing for. */
	if (tun->numqueues == 1 || tfile->detached)
		goto unlock;

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

unlock:
	rcu_read_unlock();
}

/**
 * Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}
/* We try to identify a flow through its rxhash first. The reason that
 * we do not check the rxq no. is that some cards (e.g. 82599) choose
 * the rxq based on the txq where the last packet of the flow came in.
 * As the userspace application moves between processors, we may get a
 * different rxq no. here. If we cannot get an rxhash, we fall back to
 * the rxq no. and hope it helps.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	if (txq) {
		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
		if (e) {
			tun_flow_save_rps_rxhash(e, txq);
			txq = e->queue_index;
		} else
			/* use multiply and shift instead of expensive divide */
			txq = ((u64)txq * numqueues) >> 32;
	} else if (likely(skb_rx_queue_recorded(skb))) {
		txq = skb_get_rx_queue(skb);
		while (unlikely(txq >= numqueues))
			txq -= numqueues;
	}

	return txq;
}

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_steering_prog *prog;
	u16 ret = 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % tun->numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}
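/* Worked example for the multiply-and-shift above (illustrative numbers):
 * with numqueues = 4 and txq = 0x80000000, a hash in the upper half of the
 * 32-bit range, ((u64)0x80000000 * 4) >> 32 == 2. The hash is thus scaled
 * into [0, numqueues) without a division, at the cost of a split that is
 * uneven by at most one slot when numqueues is not a power of two.
 */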
static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

static void tun_queue_purge(struct tun_file *tfile)
{
	struct sk_buff *skb;

	while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
		kfree_skb(skb);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}
static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tun, tfile);
		tun_napi_del(tun, tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			skb_array_cleanup(&tfile->tx_array);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	rtnl_lock();
	__tun_detach(tfile, clean);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tun, tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tun, tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}
static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to the persistent device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (!err)
			goto out;
	}

	if (!tfile->detached &&
	    skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi);
	}

	tun_set_real_num_queues(tun);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}
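/* Typical caller pattern for the pair above (illustrative; tun_chr_poll()
 * further down uses exactly this shape):
 *
 *	struct tun_struct *tun = tun_get(tfile);
 *
 *	if (!tun)
 *		return POLLERR;		// fd not attached to a device
 *	...
 *	tun_put(tun);
 *
 * The dev_hold() taken inside the RCU section keeps the netdev alive after
 * rcu_read_unlock(), until the matching tun_put().
 */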
/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks, which is
	 * perfectly safe: we disable it first, so in the worst case
	 * we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}
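/* Userspace view of update_filter() (illustrative sketch; in mainline it is
 * reached through the TUNSETTXFILTER ioctl, whose handler lies outside this
 * excerpt). The variable-length layout matches struct tun_filter from
 * <linux/if_tun.h>:
 *
 *	struct {
 *		struct tun_filter uf;
 *		__u8 addrs[2][ETH_ALEN];
 *	} req = {
 *		.uf = { .flags = 0, .count = 2 },
 *		// addrs[0] becomes an exact-match entry; a multicast
 *		// addrs[1] would land in the 64-bit hash mask instead.
 *	};
 *	ioctl(tap_fd, TUNSETTXFILTER, &req);
 */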
/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}
/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	netif_tx_start_all_queues(dev);

	for (i = 0; i < tun->numqueues; i++) {
		struct tun_file *tfile;

		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_write_space(tfile->socket.sk);
	}

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		if (rxhash) {
			struct tun_flow_entry *e;
			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
					  rxhash);
			if (e)
				tun_flow_save_rps_rxhash(e, rxhash);
		}
	}
#endif
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	u32 numqueues = 0;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);
	numqueues = READ_ONCE(tun->numqueues);

	/* Drop packet if interface is not attached */
	if (txq >= numqueues)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);
	/* Drop if the filter does not like it.
	 * This is a no-op if the filter is disabled.
	 * Filter can be enabled only for TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time.
	 */
	skb_orphan(skb);

	nf_reset(skb);

	if (skb_array_produce(&tfile->tx_array, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tun_poll_controller(struct net_device *dev)
{
	/*
	 * Tun only receives frames when:
	 * 1) the char device endpoint gets data from user space
	 * 2) the tun socket gets a sendmsg call from user space
	 * If NAPI is not enabled, since both of those are synchronous
	 * operations, we are guaranteed never to have pending data when we
	 * poll for it so there is nothing to do here but return.
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole.
	 * If NAPI is enabled, however, we need to schedule polling for all
	 * queues unless we are using napi_gro_frags(), which we call in
	 * process context and not in NAPI context.
	 */
	struct tun_struct *tun = netdev_priv(dev);

	if (tun->flags & IFF_NAPI) {
		struct tun_file *tfile;
		int i;

		if (tun_napi_frags_enabled(tun))
			return;

		rcu_read_lock();
		for (i = 0; i < tun->numqueues; i++) {
			tfile = rcu_dereference(tun->tfiles[i]);
			if (tfile->napi_enabled)
				napi_schedule(&tfile->napi);
		}
		rcu_read_unlock();
	}
	return;
}
#endif

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}

static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_pcpu_stats *p;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(tun->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets	= p->rx_packets;
			rxbytes		= p->rx_bytes;
			txpackets	= p->tx_packets;
			txbytes		= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;

		/* u32 counters */
		rx_dropped	+= p->rx_dropped;
		rx_frame_errors	+= p->rx_frame_errors;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped  = rx_dropped;
	stats->rx_frame_errors = rx_frame_errors;
	stats->tx_dropped = tx_dropped;
}
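/* Reader-side note for the loop above: u64_stats_fetch_begin()/_retry()
 * implement a seqcount on 32-bit kernels, so the four u64 counters are
 * re-read if a writer updated them concurrently; on 64-bit kernels the
 * pair compiles away. The u32 drop/error counters tolerate rare races and
 * are summed without that protection.
 */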
static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct bpf_prog *old_prog;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}

static u32 tun_xdp_query(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;

	xdp_prog = rtnl_dereference(tun->xdp_prog);
	if (xdp_prog)
		return xdp_prog->aux->id;

	return 0;
}

static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return tun_xdp_set(dev, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = tun_xdp_query(dev);
		xdp->prog_attached = !!xdp->prog_id;
		return 0;
	default:
		return -EINVAL;
	}
}
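/* Note on tun_xdp_set() above (editor's reading): the old program is
 * released with bpf_prog_put() immediately after the rcu_assign_pointer()
 * swap. That is safe here because bpf_prog_put() defers the final free past
 * an RCU grace period, covering the rcu_dereference(tun->xdp_prog) readers
 * elsewhere in this file.
 */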
Biederman .ndo_uninit = tun_net_uninit, 1207758e43b7SStephen Hemminger .ndo_open = tun_net_open, 1208758e43b7SStephen Hemminger .ndo_stop = tun_net_close, 120900829823SStephen Hemminger .ndo_start_xmit = tun_net_xmit, 121088255375SMichał Mirosław .ndo_fix_features = tun_net_fix_features, 1211afc4b13dSJiri Pirko .ndo_set_rx_mode = tun_net_mclist, 1212758e43b7SStephen Hemminger .ndo_set_mac_address = eth_mac_addr, 1213758e43b7SStephen Hemminger .ndo_validate_addr = eth_validate_addr, 1214c8d68e6bSJason Wang .ndo_select_queue = tun_select_queue, 1215bebd097aSNeil Horman #ifdef CONFIG_NET_POLL_CONTROLLER 1216bebd097aSNeil Horman .ndo_poll_controller = tun_poll_controller, 1217bebd097aSNeil Horman #endif 12185e52796aSToshiaki Makita .ndo_features_check = passthru_features_check, 1219eaea34b2SPaolo Abeni .ndo_set_rx_headroom = tun_set_headroom, 1220608b9977SPaolo Abeni .ndo_get_stats64 = tun_net_get_stats64, 1221f4e63525SJakub Kicinski .ndo_bpf = tun_xdp, 1222758e43b7SStephen Hemminger }; 1223758e43b7SStephen Hemminger 1224944a1376SPavel Emelyanov static void tun_flow_init(struct tun_struct *tun) 122596442e42SJason Wang { 122696442e42SJason Wang int i; 122796442e42SJason Wang 122896442e42SJason Wang for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) 122996442e42SJason Wang INIT_HLIST_HEAD(&tun->flows[i]); 123096442e42SJason Wang 123196442e42SJason Wang tun->ageing_time = TUN_FLOW_EXPIRE; 1232e99e88a9SKees Cook timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0); 1233e99e88a9SKees Cook mod_timer(&tun->flow_gc_timer, 1234e99e88a9SKees Cook round_jiffies_up(jiffies + tun->ageing_time)); 123596442e42SJason Wang } 123696442e42SJason Wang 123796442e42SJason Wang static void tun_flow_uninit(struct tun_struct *tun) 123896442e42SJason Wang { 123996442e42SJason Wang del_timer_sync(&tun->flow_gc_timer); 124096442e42SJason Wang tun_flow_flush(tun); 124196442e42SJason Wang } 124296442e42SJason Wang 124391572088SJarod Wilson #define MIN_MTU 68 124491572088SJarod Wilson #define MAX_MTU 65535 124591572088SJarod Wilson 12461da177e4SLinus Torvalds /* Initialize net device. */ 12471da177e4SLinus Torvalds static void tun_net_init(struct net_device *dev) 12481da177e4SLinus Torvalds { 12491da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 12501da177e4SLinus Torvalds 12511da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 125240630b82SMichael S. Tsirkin case IFF_TUN: 1253758e43b7SStephen Hemminger dev->netdev_ops = &tun_netdev_ops; 1254758e43b7SStephen Hemminger 12551da177e4SLinus Torvalds /* Point-to-Point TUN Device */ 12561da177e4SLinus Torvalds dev->hard_header_len = 0; 12571da177e4SLinus Torvalds dev->addr_len = 0; 12581da177e4SLinus Torvalds dev->mtu = 1500; 12591da177e4SLinus Torvalds 12601da177e4SLinus Torvalds /* Zero header length */ 12611da177e4SLinus Torvalds dev->type = ARPHRD_NONE; 12621da177e4SLinus Torvalds dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 12631da177e4SLinus Torvalds break; 12641da177e4SLinus Torvalds 126540630b82SMichael S. 
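/* Both arms of this switch correspond to what userspace asked for
 * in TUNSETIFF. A minimal userspace sketch (not part of this
 * driver; name and error handling are illustrative only):
 *
 *	int fd = open("/dev/net/tun", O_RDWR);
 *	struct ifreq ifr = { 0 };
 *
 *	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	// L3: raw IP packets
 *	// or: ifr.ifr_flags = IFF_TAP | IFF_NO_PI;  L2: Ethernet frames
 *	strncpy(ifr.ifr_name, "tun%d", IFNAMSIZ);
 *	ioctl(fd, TUNSETIFF, (void *)&ifr);
 *
 * On success each read()/write() on fd carries one frame, with no
 * tun_pi header because of IFF_NO_PI.
 */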
Tsirkin case IFF_TAP: 12667a0a9608SKusanagi Kouichi dev->netdev_ops = &tap_netdev_ops; 12671da177e4SLinus Torvalds /* Ethernet TAP Device */ 12681da177e4SLinus Torvalds ether_setup(dev); 1269550fd08cSNeil Horman dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1270a676847bSstephen hemminger dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 127136226a8dSBrian Braunstein 1272f2cedb63SDanny Kukawka eth_hw_addr_random(dev); 127336226a8dSBrian Braunstein 12741da177e4SLinus Torvalds break; 12751da177e4SLinus Torvalds } 127691572088SJarod Wilson 127791572088SJarod Wilson dev->min_mtu = MIN_MTU; 127891572088SJarod Wilson dev->max_mtu = MAX_MTU - dev->hard_header_len; 12791da177e4SLinus Torvalds } 12801da177e4SLinus Torvalds 12811da177e4SLinus Torvalds /* Character device part */ 12821da177e4SLinus Torvalds 12831da177e4SLinus Torvalds /* Poll */ 12841da177e4SLinus Torvalds static unsigned int tun_chr_poll(struct file *file, poll_table *wait) 12851da177e4SLinus Torvalds { 1286b2430de3SEric W. Biederman struct tun_file *tfile = file->private_data; 12879484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 12883c8a9c63SMariusz Kozlowski struct sock *sk; 128933dccbb0SHerbert Xu unsigned int mask = 0; 12901da177e4SLinus Torvalds 12911da177e4SLinus Torvalds if (!tun) 1292eac9e902SEric W. Biederman return POLLERR; 12931da177e4SLinus Torvalds 129454f968d6SJason Wang sk = tfile->socket.sk; 12953c8a9c63SMariusz Kozlowski 12966b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_chr_poll\n"); 12971da177e4SLinus Torvalds 12989e641bdcSXi Wang poll_wait(file, sk_sleep(sk), wait); 12991da177e4SLinus Torvalds 13001576d986SJason Wang if (!skb_array_empty(&tfile->tx_array)) 13011da177e4SLinus Torvalds mask |= POLLIN | POLLRDNORM; 13021da177e4SLinus Torvalds 1303b20e2d54SHannes Frederic Sowa if (tun->dev->flags & IFF_UP && 1304b20e2d54SHannes Frederic Sowa (sock_writeable(sk) || 13059cd3e072SEric Dumazet (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 1306b20e2d54SHannes Frederic Sowa sock_writeable(sk)))) 130733dccbb0SHerbert Xu mask |= POLLOUT | POLLWRNORM; 130833dccbb0SHerbert Xu 1309c70f1829SEric W. Biederman if (tun->dev->reg_state != NETREG_REGISTERED) 1310c70f1829SEric W. Biederman mask = POLLERR; 1311c70f1829SEric W. Biederman 1312631ab46bSEric W. 
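/* The writability test above is intentionally two-staged:
 * sock_writeable() is re-checked after atomically setting
 * SOCKWQ_ASYNC_NOSPACE, so a wakeup racing with the first check
 * cannot be lost; tun_sock_write_space() clears the bit and wakes
 * the queue once transmit buffers drain.
 */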
Biederman tun_put(tun); 13131da177e4SLinus Torvalds return mask; 13141da177e4SLinus Torvalds } 13151da177e4SLinus Torvalds 131690e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, 131790e33d45SPetar Penkov size_t len, 131890e33d45SPetar Penkov const struct iov_iter *it) 131990e33d45SPetar Penkov { 132090e33d45SPetar Penkov struct sk_buff *skb; 132190e33d45SPetar Penkov size_t linear; 132290e33d45SPetar Penkov int err; 132390e33d45SPetar Penkov int i; 132490e33d45SPetar Penkov 132590e33d45SPetar Penkov if (it->nr_segs > MAX_SKB_FRAGS + 1) 132690e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 132790e33d45SPetar Penkov 132890e33d45SPetar Penkov local_bh_disable(); 132990e33d45SPetar Penkov skb = napi_get_frags(&tfile->napi); 133090e33d45SPetar Penkov local_bh_enable(); 133190e33d45SPetar Penkov if (!skb) 133290e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 133390e33d45SPetar Penkov 133490e33d45SPetar Penkov linear = iov_iter_single_seg_count(it); 133590e33d45SPetar Penkov err = __skb_grow(skb, linear); 133690e33d45SPetar Penkov if (err) 133790e33d45SPetar Penkov goto free; 133890e33d45SPetar Penkov 133990e33d45SPetar Penkov skb->len = len; 134090e33d45SPetar Penkov skb->data_len = len - linear; 134190e33d45SPetar Penkov skb->truesize += skb->data_len; 134290e33d45SPetar Penkov 134390e33d45SPetar Penkov for (i = 1; i < it->nr_segs; i++) { 134490e33d45SPetar Penkov size_t fragsz = it->iov[i].iov_len; 134590e33d45SPetar Penkov unsigned long offset; 134690e33d45SPetar Penkov struct page *page; 134790e33d45SPetar Penkov void *data; 134890e33d45SPetar Penkov 134990e33d45SPetar Penkov if (fragsz == 0 || fragsz > PAGE_SIZE) { 135090e33d45SPetar Penkov err = -EINVAL; 135190e33d45SPetar Penkov goto free; 135290e33d45SPetar Penkov } 135390e33d45SPetar Penkov 135490e33d45SPetar Penkov local_bh_disable(); 135590e33d45SPetar Penkov data = napi_alloc_frag(fragsz); 135690e33d45SPetar Penkov local_bh_enable(); 135790e33d45SPetar Penkov if (!data) { 135890e33d45SPetar Penkov err = -ENOMEM; 135990e33d45SPetar Penkov goto free; 136090e33d45SPetar Penkov } 136190e33d45SPetar Penkov 136290e33d45SPetar Penkov page = virt_to_head_page(data); 136390e33d45SPetar Penkov offset = data - page_address(page); 136490e33d45SPetar Penkov skb_fill_page_desc(skb, i - 1, page, offset, fragsz); 136590e33d45SPetar Penkov } 136690e33d45SPetar Penkov 136790e33d45SPetar Penkov return skb; 136890e33d45SPetar Penkov free: 136990e33d45SPetar Penkov /* frees skb and all frags allocated with napi_alloc_frag() */ 137090e33d45SPetar Penkov napi_free_frags(&tfile->napi); 137190e33d45SPetar Penkov return ERR_PTR(err); 137290e33d45SPetar Penkov } 137390e33d45SPetar Penkov 1374f42157cbSRusty Russell /* prepad is the amount to reserve at front. len is length after that. 1375f42157cbSRusty Russell * linear is a hint as to how much to copy (usually headers). */ 137654f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, 137733dccbb0SHerbert Xu size_t prepad, size_t len, 137833dccbb0SHerbert Xu size_t linear, int noblock) 1379f42157cbSRusty Russell { 138054f968d6SJason Wang struct sock *sk = tfile->socket.sk; 1381f42157cbSRusty Russell struct sk_buff *skb; 138233dccbb0SHerbert Xu int err; 1383f42157cbSRusty Russell 1384f42157cbSRusty Russell /* Under a page? Don't bother with paged skb. 
*/ 13850eca93bcSHerbert Xu if (prepad + len < PAGE_SIZE || !linear) 138633dccbb0SHerbert Xu linear = len; 1387f42157cbSRusty Russell 138833dccbb0SHerbert Xu skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 138928d64271SEric Dumazet &err, 0); 1390f42157cbSRusty Russell if (!skb) 139133dccbb0SHerbert Xu return ERR_PTR(err); 1392f42157cbSRusty Russell 1393f42157cbSRusty Russell skb_reserve(skb, prepad); 1394f42157cbSRusty Russell skb_put(skb, linear); 139533dccbb0SHerbert Xu skb->data_len = len - linear; 139633dccbb0SHerbert Xu skb->len += len - linear; 1397f42157cbSRusty Russell 1398f42157cbSRusty Russell return skb; 1399f42157cbSRusty Russell } 1400f42157cbSRusty Russell 14015503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, 14025503fcecSJason Wang struct sk_buff *skb, int more) 14035503fcecSJason Wang { 14045503fcecSJason Wang struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 14055503fcecSJason Wang struct sk_buff_head process_queue; 14065503fcecSJason Wang u32 rx_batched = tun->rx_batched; 14075503fcecSJason Wang bool rcv = false; 14085503fcecSJason Wang 14095503fcecSJason Wang if (!rx_batched || (!more && skb_queue_empty(queue))) { 14105503fcecSJason Wang local_bh_disable(); 14115503fcecSJason Wang netif_receive_skb(skb); 14125503fcecSJason Wang local_bh_enable(); 14135503fcecSJason Wang return; 14145503fcecSJason Wang } 14155503fcecSJason Wang 14165503fcecSJason Wang spin_lock(&queue->lock); 14175503fcecSJason Wang if (!more || skb_queue_len(queue) == rx_batched) { 14185503fcecSJason Wang __skb_queue_head_init(&process_queue); 14195503fcecSJason Wang skb_queue_splice_tail_init(queue, &process_queue); 14205503fcecSJason Wang rcv = true; 14215503fcecSJason Wang } else { 14225503fcecSJason Wang __skb_queue_tail(queue, skb); 14235503fcecSJason Wang } 14245503fcecSJason Wang spin_unlock(&queue->lock); 14255503fcecSJason Wang 14265503fcecSJason Wang if (rcv) { 14275503fcecSJason Wang struct sk_buff *nskb; 14285503fcecSJason Wang 14295503fcecSJason Wang local_bh_disable(); 14305503fcecSJason Wang while ((nskb = __skb_dequeue(&process_queue))) 14315503fcecSJason Wang netif_receive_skb(nskb); 14325503fcecSJason Wang netif_receive_skb(skb); 14335503fcecSJason Wang local_bh_enable(); 14345503fcecSJason Wang } 14355503fcecSJason Wang } 14365503fcecSJason Wang 143766ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, 143866ccbc9cSJason Wang int len, int noblock, bool zerocopy) 143966ccbc9cSJason Wang { 144066ccbc9cSJason Wang if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 144166ccbc9cSJason Wang return false; 144266ccbc9cSJason Wang 144366ccbc9cSJason Wang if (tfile->socket.sk->sk_sndbuf != INT_MAX) 144466ccbc9cSJason Wang return false; 144566ccbc9cSJason Wang 144666ccbc9cSJason Wang if (!noblock) 144766ccbc9cSJason Wang return false; 144866ccbc9cSJason Wang 144966ccbc9cSJason Wang if (zerocopy) 145066ccbc9cSJason Wang return false; 145166ccbc9cSJason Wang 145266ccbc9cSJason Wang if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + 145366ccbc9cSJason Wang SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) 145466ccbc9cSJason Wang return false; 145566ccbc9cSJason Wang 145666ccbc9cSJason Wang return true; 145766ccbc9cSJason Wang } 145866ccbc9cSJason Wang 1459761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun, 1460761876c8SJason Wang struct tun_file *tfile, 146166ccbc9cSJason Wang struct iov_iter *from, 1462761876c8SJason Wang struct virtio_net_hdr *hdr, 
14631cfe6e93SJason Wang int len, int *skb_xdp) 146466ccbc9cSJason Wang { 14650bbd7dadSEric Dumazet struct page_frag *alloc_frag = ¤t->task_frag; 146666ccbc9cSJason Wang struct sk_buff *skb; 1467761876c8SJason Wang struct bpf_prog *xdp_prog; 14687df13219SJason Wang int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1469761876c8SJason Wang unsigned int delta = 0; 147066ccbc9cSJason Wang char *buf; 147166ccbc9cSJason Wang size_t copied; 1472761876c8SJason Wang bool xdp_xmit = false; 14737df13219SJason Wang int err, pad = TUN_RX_PAD; 14747df13219SJason Wang 14757df13219SJason Wang rcu_read_lock(); 14767df13219SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 14777df13219SJason Wang if (xdp_prog) 14787df13219SJason Wang pad += TUN_HEADROOM; 14797df13219SJason Wang buflen += SKB_DATA_ALIGN(len + pad); 14807df13219SJason Wang rcu_read_unlock(); 148166ccbc9cSJason Wang 148263b9ab65SJason Wang alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES); 148366ccbc9cSJason Wang if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) 148466ccbc9cSJason Wang return ERR_PTR(-ENOMEM); 148566ccbc9cSJason Wang 148666ccbc9cSJason Wang buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 148766ccbc9cSJason Wang copied = copy_page_from_iter(alloc_frag->page, 14887df13219SJason Wang alloc_frag->offset + pad, 148966ccbc9cSJason Wang len, from); 149066ccbc9cSJason Wang if (copied != len) 149166ccbc9cSJason Wang return ERR_PTR(-EFAULT); 149266ccbc9cSJason Wang 14937df13219SJason Wang /* There's a small window that XDP may be set after the check 14947df13219SJason Wang * of xdp_prog above, this should be rare and for simplicity 14957df13219SJason Wang * we do XDP on skb in case the headroom is not enough. 14967df13219SJason Wang */ 14977df13219SJason Wang if (hdr->gso_type || !xdp_prog) 14981cfe6e93SJason Wang *skb_xdp = 1; 1499761876c8SJason Wang else 15001cfe6e93SJason Wang *skb_xdp = 0; 150166ccbc9cSJason Wang 1502761876c8SJason Wang rcu_read_lock(); 1503761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 15041cfe6e93SJason Wang if (xdp_prog && !*skb_xdp) { 1505761876c8SJason Wang struct xdp_buff xdp; 1506761876c8SJason Wang void *orig_data; 1507761876c8SJason Wang u32 act; 1508761876c8SJason Wang 1509761876c8SJason Wang xdp.data_hard_start = buf; 15107df13219SJason Wang xdp.data = buf + pad; 1511de8f3a83SDaniel Borkmann xdp_set_data_meta_invalid(&xdp); 1512761876c8SJason Wang xdp.data_end = xdp.data + len; 1513761876c8SJason Wang orig_data = xdp.data; 1514761876c8SJason Wang act = bpf_prog_run_xdp(xdp_prog, &xdp); 1515761876c8SJason Wang 1516761876c8SJason Wang switch (act) { 1517761876c8SJason Wang case XDP_REDIRECT: 1518761876c8SJason Wang get_page(alloc_frag->page); 1519761876c8SJason Wang alloc_frag->offset += buflen; 1520761876c8SJason Wang err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); 1521761876c8SJason Wang if (err) 1522761876c8SJason Wang goto err_redirect; 1523654d5738SXin Long rcu_read_unlock(); 1524761876c8SJason Wang return NULL; 1525761876c8SJason Wang case XDP_TX: 1526761876c8SJason Wang xdp_xmit = true; 1527761876c8SJason Wang /* fall through */ 1528761876c8SJason Wang case XDP_PASS: 1529761876c8SJason Wang delta = orig_data - xdp.data; 1530761876c8SJason Wang break; 1531761876c8SJason Wang default: 1532761876c8SJason Wang bpf_warn_invalid_xdp_action(act); 1533761876c8SJason Wang /* fall through */ 1534761876c8SJason Wang case XDP_ABORTED: 1535761876c8SJason Wang trace_xdp_exception(tun->dev, xdp_prog, act); 1536761876c8SJason 
Wang /* fall through */ 1537761876c8SJason Wang case XDP_DROP: 1538761876c8SJason Wang goto err_xdp; 1539761876c8SJason Wang } 1540761876c8SJason Wang } 1541761876c8SJason Wang 1542761876c8SJason Wang skb = build_skb(buf, buflen); 1543761876c8SJason Wang if (!skb) { 1544761876c8SJason Wang rcu_read_unlock(); 1545761876c8SJason Wang return ERR_PTR(-ENOMEM); 1546761876c8SJason Wang } 1547761876c8SJason Wang 15487df13219SJason Wang skb_reserve(skb, pad - delta); 1549761876c8SJason Wang skb_put(skb, len + delta); 155066ccbc9cSJason Wang get_page(alloc_frag->page); 155166ccbc9cSJason Wang alloc_frag->offset += buflen; 155266ccbc9cSJason Wang 1553761876c8SJason Wang if (xdp_xmit) { 1554761876c8SJason Wang skb->dev = tun->dev; 1555761876c8SJason Wang generic_xdp_tx(skb, xdp_prog); 1556654d5738SXin Long rcu_read_unlock(); 1557761876c8SJason Wang return NULL; 1558761876c8SJason Wang } 1559761876c8SJason Wang 1560761876c8SJason Wang rcu_read_unlock(); 1561761876c8SJason Wang 156266ccbc9cSJason Wang return skb; 1563761876c8SJason Wang 1564761876c8SJason Wang err_redirect: 1565761876c8SJason Wang put_page(alloc_frag->page); 1566761876c8SJason Wang err_xdp: 1567761876c8SJason Wang rcu_read_unlock(); 1568761876c8SJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped); 1569761876c8SJason Wang return NULL; 157066ccbc9cSJason Wang } 157166ccbc9cSJason Wang 15721da177e4SLinus Torvalds /* Get packet from user space buffer */ 157354f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, 1574f5ff53b4SAl Viro void *msg_control, struct iov_iter *from, 15755503fcecSJason Wang int noblock, bool more) 15761da177e4SLinus Torvalds { 157709640e63SHarvey Harrison struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; 15781da177e4SLinus Torvalds struct sk_buff *skb; 1579f5ff53b4SAl Viro size_t total_len = iov_iter_count(from); 1580eaea34b2SPaolo Abeni size_t len = total_len, align = tun->align, linear; 1581f43798c2SRusty Russell struct virtio_net_hdr gso = { 0 }; 1582608b9977SPaolo Abeni struct tun_pcpu_stats *stats; 158396f8d9ecSJason Wang int good_linear; 15840690899bSMichael S. Tsirkin int copylen; 15850690899bSMichael S. Tsirkin bool zerocopy = false; 15860690899bSMichael S. Tsirkin int err; 1587*96f84061SJason Wang u32 rxhash = 0; 15881cfe6e93SJason Wang int skb_xdp = 1; 158990e33d45SPetar Penkov bool frags = tun_napi_frags_enabled(tun); 15901da177e4SLinus Torvalds 15911bd4978aSEric Dumazet if (!(tun->dev->flags & IFF_UP)) 15921bd4978aSEric Dumazet return -EIO; 15931bd4978aSEric Dumazet 159440630b82SMichael S. Tsirkin if (!(tun->flags & IFF_NO_PI)) { 159515718ea0SDan Carpenter if (len < sizeof(pi)) 15961da177e4SLinus Torvalds return -EINVAL; 159715718ea0SDan Carpenter len -= sizeof(pi); 15981da177e4SLinus Torvalds 1599cbbd26b8SAl Viro if (!copy_from_iter_full(&pi, sizeof(pi), from)) 16001da177e4SLinus Torvalds return -EFAULT; 16011da177e4SLinus Torvalds } 16021da177e4SLinus Torvalds 160340630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR) { 1604e1edab87SWillem de Bruijn int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 1605e1edab87SWillem de Bruijn 1606e1edab87SWillem de Bruijn if (len < vnet_hdr_sz) 1607f43798c2SRusty Russell return -EINVAL; 1608e1edab87SWillem de Bruijn len -= vnet_hdr_sz; 1609f43798c2SRusty Russell 1610cbbd26b8SAl Viro if (!copy_from_iter_full(&gso, sizeof(gso), from)) 1611f43798c2SRusty Russell return -EFAULT; 1612f43798c2SRusty Russell 16134909122fSHerbert Xu if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 161456f0dcc5SMichael S. 
Tsirkin tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len)) 161556f0dcc5SMichael S. Tsirkin gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2); 16164909122fSHerbert Xu 161756f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > len) 1618f43798c2SRusty Russell return -EINVAL; 1619e1edab87SWillem de Bruijn iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); 1620f43798c2SRusty Russell } 1621f43798c2SRusty Russell 162240630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { 1623a504b86eSstephen hemminger align += NET_IP_ALIGN; 16240eca93bcSHerbert Xu if (unlikely(len < ETH_HLEN || 162556f0dcc5SMichael S. Tsirkin (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN))) 1626e01bf1c8SRusty Russell return -EINVAL; 1627e01bf1c8SRusty Russell } 16281da177e4SLinus Torvalds 162996f8d9ecSJason Wang good_linear = SKB_MAX_HEAD(align); 163096f8d9ecSJason Wang 163188529176SJason Wang if (msg_control) { 1632f5ff53b4SAl Viro struct iov_iter i = *from; 1633f5ff53b4SAl Viro 163488529176SJason Wang /* There are 256 bytes to be copied in skb, so there is 163588529176SJason Wang * enough room for skb expand head in case it is used. 16360690899bSMichael S. Tsirkin * The rest of the buffer is mapped from userspace. 16370690899bSMichael S. Tsirkin */ 163856f0dcc5SMichael S. Tsirkin copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN; 163996f8d9ecSJason Wang if (copylen > good_linear) 164096f8d9ecSJason Wang copylen = good_linear; 16413dd5c330SJason Wang linear = copylen; 1642f5ff53b4SAl Viro iov_iter_advance(&i, copylen); 1643f5ff53b4SAl Viro if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) 164488529176SJason Wang zerocopy = true; 164588529176SJason Wang } 164688529176SJason Wang 164790e33d45SPetar Penkov if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { 16481cfe6e93SJason Wang /* For packets that are not easy to process 16491cfe6e93SJason Wang * (e.g. GSO or jumbo packets), we run XDP after 16501cfe6e93SJason Wang * the skb has been created, via the generic XDP routine. 16511cfe6e93SJason Wang */ 16521cfe6e93SJason Wang skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); 165366ccbc9cSJason Wang if (IS_ERR(skb)) { 165466ccbc9cSJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped); 165566ccbc9cSJason Wang return PTR_ERR(skb); 165666ccbc9cSJason Wang } 1657761876c8SJason Wang if (!skb) 1658761876c8SJason Wang return total_len; 165966ccbc9cSJason Wang } else { 166088529176SJason Wang if (!zerocopy) { 16610690899bSMichael S. Tsirkin copylen = len; 166256f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) 166396f8d9ecSJason Wang linear = good_linear; 166496f8d9ecSJason Wang else 166556f0dcc5SMichael S. Tsirkin linear = tun16_to_cpu(tun, gso.hdr_len); 16663dd5c330SJason Wang } 16670690899bSMichael S. Tsirkin 166890e33d45SPetar Penkov if (frags) { 166990e33d45SPetar Penkov mutex_lock(&tfile->napi_mutex); 167090e33d45SPetar Penkov skb = tun_napi_alloc_frags(tfile, copylen, from); 167190e33d45SPetar Penkov /* tun_napi_alloc_frags() enforces a layout for the skb. 167290e33d45SPetar Penkov * If zerocopy is enabled, then this layout will be 167390e33d45SPetar Penkov * overwritten by zerocopy_sg_from_iter(). 
167490e33d45SPetar Penkov */ 167590e33d45SPetar Penkov zerocopy = false; 167690e33d45SPetar Penkov } else { 167790e33d45SPetar Penkov skb = tun_alloc_skb(tfile, align, copylen, linear, 167890e33d45SPetar Penkov noblock); 167990e33d45SPetar Penkov } 168090e33d45SPetar Penkov 168133dccbb0SHerbert Xu if (IS_ERR(skb)) { 168233dccbb0SHerbert Xu if (PTR_ERR(skb) != -EAGAIN) 1683608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 168490e33d45SPetar Penkov if (frags) 168590e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 168633dccbb0SHerbert Xu return PTR_ERR(skb); 16871da177e4SLinus Torvalds } 16881da177e4SLinus Torvalds 16890690899bSMichael S. Tsirkin if (zerocopy) 1690f5ff53b4SAl Viro err = zerocopy_sg_from_iter(skb, from); 1691af1cc7a2SJason Wang else 1692f5ff53b4SAl Viro err = skb_copy_datagram_from_iter(skb, 0, from, len); 16930690899bSMichael S. Tsirkin 16940690899bSMichael S. Tsirkin if (err) { 1695608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 16968f22757eSDave Jones kfree_skb(skb); 169790e33d45SPetar Penkov if (frags) { 169890e33d45SPetar Penkov tfile->napi.skb = NULL; 169990e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 170090e33d45SPetar Penkov } 170190e33d45SPetar Penkov 17021da177e4SLinus Torvalds return -EFAULT; 17038f22757eSDave Jones } 170466ccbc9cSJason Wang } 17051da177e4SLinus Torvalds 17063e9e40e7SJarno Rajahalme if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { 1707df10db98SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_frame_errors); 1708df10db98SPaolo Abeni kfree_skb(skb); 170990e33d45SPetar Penkov if (frags) { 171090e33d45SPetar Penkov tfile->napi.skb = NULL; 171190e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 171290e33d45SPetar Penkov } 171390e33d45SPetar Penkov 1714df10db98SPaolo Abeni return -EINVAL; 1715df10db98SPaolo Abeni } 1716df10db98SPaolo Abeni 17171da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 171840630b82SMichael S. Tsirkin case IFF_TUN: 171940630b82SMichael S. Tsirkin if (tun->flags & IFF_NO_PI) { 17202580c4c1SAlexander Potapenko u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; 17212580c4c1SAlexander Potapenko 17222580c4c1SAlexander Potapenko switch (ip_version) { 17232580c4c1SAlexander Potapenko case 4: 1724f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IP); 1725f09f7ee2SAng Way Chuang break; 17262580c4c1SAlexander Potapenko case 6: 1727f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IPV6); 1728f09f7ee2SAng Way Chuang break; 1729f09f7ee2SAng Way Chuang default: 1730608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 1731f09f7ee2SAng Way Chuang kfree_skb(skb); 1732f09f7ee2SAng Way Chuang return -EINVAL; 1733f09f7ee2SAng Way Chuang } 1734f09f7ee2SAng Way Chuang } 1735f09f7ee2SAng Way Chuang 1736459a98edSArnaldo Carvalho de Melo skb_reset_mac_header(skb); 17371da177e4SLinus Torvalds skb->protocol = pi.proto; 17384c13eb66SArnaldo Carvalho de Melo skb->dev = tun->dev; 17391da177e4SLinus Torvalds break; 174040630b82SMichael S. Tsirkin case IFF_TAP: 174190e33d45SPetar Penkov if (!frags) 17421da177e4SLinus Torvalds skb->protocol = eth_type_trans(skb, tun->dev); 17431da177e4SLinus Torvalds break; 17446403eab1SJoe Perches } 17451da177e4SLinus Torvalds 17460690899bSMichael S. Tsirkin /* copy skb_ubuf_info for callback when skb has no error */ 17470690899bSMichael S. Tsirkin if (zerocopy) { 17480690899bSMichael S. Tsirkin skb_shinfo(skb)->destructor_arg = msg_control; 17490690899bSMichael S. 
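/* destructor_arg now carries the caller's ubuf_info, whose callback
 * fires when the last reference to these frags is dropped. In the
 * copied fallback below, the callback is invoked immediately with
 * zerocopy_success == false, telling the sender (vhost-net in
 * practice) that the data was copied and its buffers may be reused
 * at once.
 */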
Tsirkin skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1750c9af6db4SPravin B Shelar skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1751af1cc7a2SJason Wang } else if (msg_control) { 1752af1cc7a2SJason Wang struct ubuf_info *uarg = msg_control; 1753af1cc7a2SJason Wang uarg->callback(uarg, false); 17540690899bSMichael S. Tsirkin } 17550690899bSMichael S. Tsirkin 175672f65107SVlad Yasevich skb_reset_network_header(skb); 175740893fd0SJason Wang skb_probe_transport_header(skb, 0); 175838502af7SJason Wang 17591cfe6e93SJason Wang if (skb_xdp) { 1760761876c8SJason Wang struct bpf_prog *xdp_prog; 1761761876c8SJason Wang int ret; 1762761876c8SJason Wang 1763761876c8SJason Wang rcu_read_lock(); 1764761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 1765761876c8SJason Wang if (xdp_prog) { 1766761876c8SJason Wang ret = do_xdp_generic(xdp_prog, skb); 1767761876c8SJason Wang if (ret != XDP_PASS) { 1768761876c8SJason Wang rcu_read_unlock(); 1769761876c8SJason Wang return total_len; 1770761876c8SJason Wang } 1771761876c8SJason Wang } 1772761876c8SJason Wang rcu_read_unlock(); 1773761876c8SJason Wang } 1774761876c8SJason Wang 1775*96f84061SJason Wang rcu_read_lock(); 1776*96f84061SJason Wang if (!rcu_dereference(tun->steering_prog)) 1777feec084aSJason Wang rxhash = __skb_get_hash_symmetric(skb); 1778*96f84061SJason Wang rcu_read_unlock(); 177994317099SPetar Penkov 178090e33d45SPetar Penkov if (frags) { 178190e33d45SPetar Penkov /* Exercise flow dissector code path. */ 178290e33d45SPetar Penkov u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb)); 178390e33d45SPetar Penkov 1784010f245bSEric Dumazet if (unlikely(headlen > skb_headlen(skb))) { 178590e33d45SPetar Penkov this_cpu_inc(tun->pcpu_stats->rx_dropped); 178690e33d45SPetar Penkov napi_free_frags(&tfile->napi); 178790e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 178890e33d45SPetar Penkov WARN_ON(1); 178990e33d45SPetar Penkov return -ENOMEM; 179090e33d45SPetar Penkov } 179190e33d45SPetar Penkov 179290e33d45SPetar Penkov local_bh_disable(); 179390e33d45SPetar Penkov napi_gro_frags(&tfile->napi); 179490e33d45SPetar Penkov local_bh_enable(); 179590e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 1796aec72f33SEric Dumazet } else if (tfile->napi_enabled) { 179794317099SPetar Penkov struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 179894317099SPetar Penkov int queue_len; 179994317099SPetar Penkov 180094317099SPetar Penkov spin_lock_bh(&queue->lock); 180194317099SPetar Penkov __skb_queue_tail(queue, skb); 180294317099SPetar Penkov queue_len = skb_queue_len(queue); 180394317099SPetar Penkov spin_unlock(&queue->lock); 180494317099SPetar Penkov 180594317099SPetar Penkov if (!more || queue_len > NAPI_POLL_WEIGHT) 180694317099SPetar Penkov napi_schedule(&tfile->napi); 180794317099SPetar Penkov 180894317099SPetar Penkov local_bh_enable(); 180994317099SPetar Penkov } else if (!IS_ENABLED(CONFIG_4KSTACKS)) { 18105503fcecSJason Wang tun_rx_batched(tun, tfile, skb, more); 181194317099SPetar Penkov } else { 18121da177e4SLinus Torvalds netif_rx_ni(skb); 181394317099SPetar Penkov } 18141da177e4SLinus Torvalds 1815608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats); 1816608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp); 1817608b9977SPaolo Abeni stats->rx_packets++; 1818608b9977SPaolo Abeni stats->rx_bytes += len; 1819608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp); 1820608b9977SPaolo Abeni put_cpu_ptr(stats); 18211da177e4SLinus Torvalds 1822*96f84061SJason Wang if (rxhash) 18239e85722dSJason Wang 
tun_flow_update(tun, rxhash, tfile); 1824*96f84061SJason Wang 18250690899bSMichael S. Tsirkin return total_len; 18261da177e4SLinus Torvalds } 18271da177e4SLinus Torvalds 1828f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) 18291da177e4SLinus Torvalds { 183033dccbb0SHerbert Xu struct file *file = iocb->ki_filp; 183154f968d6SJason Wang struct tun_file *tfile = file->private_data; 18329484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 1833631ab46bSEric W. Biederman ssize_t result; 18341da177e4SLinus Torvalds 18351da177e4SLinus Torvalds if (!tun) 18361da177e4SLinus Torvalds return -EBADFD; 18371da177e4SLinus Torvalds 18385503fcecSJason Wang result = tun_get_user(tun, tfile, NULL, from, 18395503fcecSJason Wang file->f_flags & O_NONBLOCK, false); 1840631ab46bSEric W. Biederman 1841631ab46bSEric W. Biederman tun_put(tun); 1842631ab46bSEric W. Biederman return result; 18431da177e4SLinus Torvalds } 18441da177e4SLinus Torvalds 18451da177e4SLinus Torvalds /* Put packet to the user space buffer */ 18466f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun, 184754f968d6SJason Wang struct tun_file *tfile, 18481da177e4SLinus Torvalds struct sk_buff *skb, 1849e0b46d0eSHerbert Xu struct iov_iter *iter) 18501da177e4SLinus Torvalds { 18511da177e4SLinus Torvalds struct tun_pi pi = { 0, skb->protocol }; 1852608b9977SPaolo Abeni struct tun_pcpu_stats *stats; 1853e0b46d0eSHerbert Xu ssize_t total; 18548c847d25SJason Wang int vlan_offset = 0; 1855a8f9bfdfSHerbert Xu int vlan_hlen = 0; 18562eb783c4SHerbert Xu int vnet_hdr_sz = 0; 1857a8f9bfdfSHerbert Xu 1858df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) 1859a8f9bfdfSHerbert Xu vlan_hlen = VLAN_HLEN; 18601da177e4SLinus Torvalds 186140630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR) 1862e1edab87SWillem de Bruijn vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 18631da177e4SLinus Torvalds 1864e0b46d0eSHerbert Xu total = skb->len + vlan_hlen + vnet_hdr_sz; 1865e0b46d0eSHerbert Xu 186640630b82SMichael S. Tsirkin if (!(tun->flags & IFF_NO_PI)) { 1867e0b46d0eSHerbert Xu if (iov_iter_count(iter) < sizeof(pi)) 18681da177e4SLinus Torvalds return -EINVAL; 18691da177e4SLinus Torvalds 1870e0b46d0eSHerbert Xu total += sizeof(pi); 1871e0b46d0eSHerbert Xu if (iov_iter_count(iter) < total) { 18721da177e4SLinus Torvalds /* Packet will be stripped */ 18731da177e4SLinus Torvalds pi.flags |= TUN_PKT_STRIP; 18741da177e4SLinus Torvalds } 18751da177e4SLinus Torvalds 1876e0b46d0eSHerbert Xu if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi)) 18771da177e4SLinus Torvalds return -EFAULT; 18781da177e4SLinus Torvalds } 18791da177e4SLinus Torvalds 18802eb783c4SHerbert Xu if (vnet_hdr_sz) { 18819403cd7cSJarno Rajahalme struct virtio_net_hdr gso; 188234166093SMike Rapoport 1883e0b46d0eSHerbert Xu if (iov_iter_count(iter) < vnet_hdr_sz) 1884f43798c2SRusty Russell return -EINVAL; 1885f43798c2SRusty Russell 18863e9e40e7SJarno Rajahalme if (virtio_net_hdr_from_skb(skb, &gso, 18876391a448SJason Wang tun_is_little_endian(tun), true)) { 1888f43798c2SRusty Russell struct skb_shared_info *sinfo = skb_shinfo(skb); 18896b8a66eeSJoe Perches pr_err("unexpected GSO type: " 1890ef3db4a5SMichael S. Tsirkin "0x%x, gso_size %d, hdr_len %d\n", 189156f0dcc5SMichael S. Tsirkin sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), 189256f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.hdr_len)); 1893ef3db4a5SMichael S. Tsirkin print_hex_dump(KERN_ERR, "tun: ", 1894ef3db4a5SMichael S. Tsirkin DUMP_PREFIX_NONE, 1895ef3db4a5SMichael S. 
Tsirkin 16, 1, skb->head, 189656f0dcc5SMichael S. Tsirkin min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); 1897ef3db4a5SMichael S. Tsirkin WARN_ON_ONCE(1); 1898ef3db4a5SMichael S. Tsirkin return -EINVAL; 1899ef3db4a5SMichael S. Tsirkin } 1900f43798c2SRusty Russell 1901e0b46d0eSHerbert Xu if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) 1902f43798c2SRusty Russell return -EFAULT; 19038c847d25SJason Wang 19048c847d25SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 1905f43798c2SRusty Russell } 1906f43798c2SRusty Russell 1907a8f9bfdfSHerbert Xu if (vlan_hlen) { 1908e0b46d0eSHerbert Xu int ret; 19096680ec68SJason Wang struct { 19106680ec68SJason Wang __be16 h_vlan_proto; 19116680ec68SJason Wang __be16 h_vlan_TCI; 19126680ec68SJason Wang } veth; 19131da177e4SLinus Torvalds 19146680ec68SJason Wang veth.h_vlan_proto = skb->vlan_proto; 1915df8a39deSJiri Pirko veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); 19161da177e4SLinus Torvalds 19176680ec68SJason Wang vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 19186680ec68SJason Wang 1919e0b46d0eSHerbert Xu ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); 1920e0b46d0eSHerbert Xu if (ret || !iov_iter_count(iter)) 19216680ec68SJason Wang goto done; 19226680ec68SJason Wang 1923e0b46d0eSHerbert Xu ret = copy_to_iter(&veth, sizeof(veth), iter); 1924e0b46d0eSHerbert Xu if (ret != sizeof(veth) || !iov_iter_count(iter)) 19256680ec68SJason Wang goto done; 19266680ec68SJason Wang } 19276680ec68SJason Wang 1928e0b46d0eSHerbert Xu skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); 19296680ec68SJason Wang 19306680ec68SJason Wang done: 1931608b9977SPaolo Abeni /* caller is in process context, */ 1932608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats); 1933608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp); 1934608b9977SPaolo Abeni stats->tx_packets++; 1935608b9977SPaolo Abeni stats->tx_bytes += skb->len + vlan_hlen; 1936608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp); 1937608b9977SPaolo Abeni put_cpu_ptr(tun->pcpu_stats); 19381da177e4SLinus Torvalds 19391da177e4SLinus Torvalds return total; 19401da177e4SLinus Torvalds } 19411da177e4SLinus Torvalds 19421576d986SJason Wang static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock, 19431576d986SJason Wang int *err) 19441576d986SJason Wang { 19451576d986SJason Wang DECLARE_WAITQUEUE(wait, current); 19461576d986SJason Wang struct sk_buff *skb = NULL; 1947f48cc6b2SJason Wang int error = 0; 19481576d986SJason Wang 19491576d986SJason Wang skb = skb_array_consume(&tfile->tx_array); 19501576d986SJason Wang if (skb) 19511576d986SJason Wang goto out; 19521576d986SJason Wang if (noblock) { 1953f48cc6b2SJason Wang error = -EAGAIN; 19541576d986SJason Wang goto out; 19551576d986SJason Wang } 19561576d986SJason Wang 19571576d986SJason Wang add_wait_queue(&tfile->wq.wait, &wait); 19581576d986SJason Wang current->state = TASK_INTERRUPTIBLE; 19591576d986SJason Wang 19601576d986SJason Wang while (1) { 19611576d986SJason Wang skb = skb_array_consume(&tfile->tx_array); 19621576d986SJason Wang if (skb) 19631576d986SJason Wang break; 19641576d986SJason Wang if (signal_pending(current)) { 1965f48cc6b2SJason Wang error = -ERESTARTSYS; 19661576d986SJason Wang break; 19671576d986SJason Wang } 19681576d986SJason Wang if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { 1969f48cc6b2SJason Wang error = -EFAULT; 19701576d986SJason Wang break; 19711576d986SJason Wang } 19721576d986SJason Wang 19731576d986SJason Wang schedule(); 19741576d986SJason Wang 
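/* Standard lost-wakeup-safe sleep: the task state is set to
 * TASK_INTERRUPTIBLE before the ring is re-checked, so a producer
 * that fills the ring and wakes the queue between the check and
 * schedule() merely leaves this task runnable, and the loop
 * re-reads the ring on the next pass.
 */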
} 19751576d986SJason Wang 19761576d986SJason Wang current->state = TASK_RUNNING; 19771576d986SJason Wang remove_wait_queue(&tfile->wq.wait, &wait); 19781576d986SJason Wang 19791576d986SJason Wang out: 1980f48cc6b2SJason Wang *err = error; 19811576d986SJason Wang return skb; 19821576d986SJason Wang } 19831576d986SJason Wang 198454f968d6SJason Wang static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, 19859b067034SAl Viro struct iov_iter *to, 1986ac77cfd4SJason Wang int noblock, struct sk_buff *skb) 19871da177e4SLinus Torvalds { 19889b067034SAl Viro ssize_t ret; 19891576d986SJason Wang int err; 19901da177e4SLinus Torvalds 19913872baf6SRami Rosen tun_debug(KERN_INFO, tun, "tun_do_read\n"); 19921da177e4SLinus Torvalds 1993c33ee15bSWei Xu if (!iov_iter_count(to)) { 1994c33ee15bSWei Xu if (skb) 1995c33ee15bSWei Xu kfree_skb(skb); 19969b067034SAl Viro return 0; 1997c33ee15bSWei Xu } 19981da177e4SLinus Torvalds 1999ac77cfd4SJason Wang if (!skb) { 20001576d986SJason Wang /* Read frames from ring */ 20011576d986SJason Wang skb = tun_ring_recv(tfile, noblock, &err); 2002e0b46d0eSHerbert Xu if (!skb) 2003957f094fSAlex Gartrell return err; 2004ac77cfd4SJason Wang } 2005e0b46d0eSHerbert Xu 20069b067034SAl Viro ret = tun_put_user(tun, tfile, skb, to); 2007f51a5e82SJason Wang if (unlikely(ret < 0)) 20081da177e4SLinus Torvalds kfree_skb(skb); 2009f51a5e82SJason Wang else 2010f51a5e82SJason Wang consume_skb(skb); 20111da177e4SLinus Torvalds 201205c2828cSMichael S. Tsirkin return ret; 201305c2828cSMichael S. Tsirkin } 201405c2828cSMichael S. Tsirkin 20159b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) 201605c2828cSMichael S. Tsirkin { 201705c2828cSMichael S. Tsirkin struct file *file = iocb->ki_filp; 201805c2828cSMichael S. Tsirkin struct tun_file *tfile = file->private_data; 20199484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 20209b067034SAl Viro ssize_t len = iov_iter_count(to), ret; 202105c2828cSMichael S. Tsirkin 202205c2828cSMichael S. Tsirkin if (!tun) 202305c2828cSMichael S. Tsirkin return -EBADFD; 2024ac77cfd4SJason Wang ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL); 202542404c09SDavid S. Miller ret = min_t(ssize_t, ret, len); 2026d0b7da8aSZhi Yong Wu if (ret > 0) 2027d0b7da8aSZhi Yong Wu iocb->ki_pos = ret; 2028631ab46bSEric W. 
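/* Seen from userspace, each successful read() returns exactly one
 * frame. A minimal sketch of the consumer loop (buffer size
 * illustrative, error handling elided):
 *
 *	char buf[2048];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * With IFF_VNET_HDR set, a struct virtio_net_hdr of vnet_hdr_sz
 * bytes precedes the frame; if the buffer is too small the packet
 * is truncated and, when tun_pi is present, flagged TUN_PKT_STRIP.
 */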
Biederman tun_put(tun); 20291da177e4SLinus Torvalds return ret; 20301da177e4SLinus Torvalds } 20311da177e4SLinus Torvalds 2032*96f84061SJason Wang static void tun_steering_prog_free(struct rcu_head *rcu) 2033*96f84061SJason Wang { 2034*96f84061SJason Wang struct tun_steering_prog *prog = container_of(rcu, 2035*96f84061SJason Wang struct tun_steering_prog, rcu); 2036*96f84061SJason Wang 2037*96f84061SJason Wang bpf_prog_destroy(prog->prog); 2038*96f84061SJason Wang kfree(prog); 2039*96f84061SJason Wang } 2040*96f84061SJason Wang 2041*96f84061SJason Wang static int __tun_set_steering_ebpf(struct tun_struct *tun, 2042*96f84061SJason Wang struct bpf_prog *prog) 2043*96f84061SJason Wang { 2044*96f84061SJason Wang struct tun_steering_prog *old, *new = NULL; 2045*96f84061SJason Wang 2046*96f84061SJason Wang if (prog) { 2047*96f84061SJason Wang new = kmalloc(sizeof(*new), GFP_KERNEL); 2048*96f84061SJason Wang if (!new) 2049*96f84061SJason Wang return -ENOMEM; 2050*96f84061SJason Wang new->prog = prog; 2051*96f84061SJason Wang } 2052*96f84061SJason Wang 2053*96f84061SJason Wang old = rtnl_dereference(tun->steering_prog); 2054*96f84061SJason Wang rcu_assign_pointer(tun->steering_prog, new); 2055*96f84061SJason Wang 2056*96f84061SJason Wang if (old) 2057*96f84061SJason Wang call_rcu(&old->rcu, tun_steering_prog_free); 2058*96f84061SJason Wang 2059*96f84061SJason Wang return 0; 2060*96f84061SJason Wang } 2061*96f84061SJason Wang 206296442e42SJason Wang static void tun_free_netdev(struct net_device *dev) 206396442e42SJason Wang { 206496442e42SJason Wang struct tun_struct *tun = netdev_priv(dev); 206596442e42SJason Wang 20664008e97fSJason Wang BUG_ON(!(list_empty(&tun->disabled))); 2067608b9977SPaolo Abeni free_percpu(tun->pcpu_stats); 206896442e42SJason Wang tun_flow_uninit(tun); 20695dbbaf2dSPaul Moore security_tun_dev_free_security(tun->security); 2070*96f84061SJason Wang rtnl_lock(); 2071*96f84061SJason Wang __tun_set_steering_ebpf(tun, NULL); 2072*96f84061SJason Wang rtnl_unlock(); 207396442e42SJason Wang } 207496442e42SJason Wang 20751da177e4SLinus Torvalds static void tun_setup(struct net_device *dev) 20761da177e4SLinus Torvalds { 20771da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 20781da177e4SLinus Torvalds 20790625c883SEric W. Biederman tun->owner = INVALID_UID; 20800625c883SEric W. Biederman tun->group = INVALID_GID; 20811da177e4SLinus Torvalds 20821da177e4SLinus Torvalds dev->ethtool_ops = &tun_ethtool_ops; 2083cf124db5SDavid S. Miller dev->needs_free_netdev = true; 2084cf124db5SDavid S. Miller dev->priv_destructor = tun_free_netdev; 2085016adb72SJason Wang /* We prefer our own queue length */ 2086016adb72SJason Wang dev->tx_queue_len = TUN_READQ_SIZE; 20871da177e4SLinus Torvalds } 20881da177e4SLinus Torvalds 2089f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting tun or tap 2090f019a7a5SEric W. Biederman * device with netlink. 2091f019a7a5SEric W. Biederman */ 2092a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[], 2093a8b8a889SMatthias Schiffer struct netlink_ext_ack *extack) 2094f019a7a5SEric W. Biederman { 2095f019a7a5SEric W. Biederman return -EINVAL; 2096f019a7a5SEric W. Biederman } 2097f019a7a5SEric W. Biederman 2098f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = { 2099f019a7a5SEric W. Biederman .kind = DRV_NAME, 2100f019a7a5SEric W. Biederman .priv_size = sizeof(struct tun_struct), 2101f019a7a5SEric W. Biederman .setup = tun_setup, 2102f019a7a5SEric W. 
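/* tun_validate() failing unconditionally means "ip link add" cannot
 * create tun/tap devices over rtnetlink; the ops are registered
 * anyway so that "ip link del" can tear down a persistent device
 * that was created via TUNSETIFF.
 */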
Biederman .validate = tun_validate, 2103f019a7a5SEric W. Biederman }; 2104f019a7a5SEric W. Biederman 210533dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk) 210633dccbb0SHerbert Xu { 210754f968d6SJason Wang struct tun_file *tfile; 210843815482SEric Dumazet wait_queue_head_t *wqueue; 210933dccbb0SHerbert Xu 211033dccbb0SHerbert Xu if (!sock_writeable(sk)) 211133dccbb0SHerbert Xu return; 211233dccbb0SHerbert Xu 21139cd3e072SEric Dumazet if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) 211433dccbb0SHerbert Xu return; 211533dccbb0SHerbert Xu 211643815482SEric Dumazet wqueue = sk_sleep(sk); 211743815482SEric Dumazet if (wqueue && waitqueue_active(wqueue)) 211843815482SEric Dumazet wake_up_interruptible_sync_poll(wqueue, POLLOUT | 211905c2828cSMichael S. Tsirkin POLLWRNORM | POLLWRBAND); 2120c722c625SHerbert Xu 212154f968d6SJason Wang tfile = container_of(sk, struct tun_file, sk); 212254f968d6SJason Wang kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); 212333dccbb0SHerbert Xu } 212433dccbb0SHerbert Xu 21251b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) 212605c2828cSMichael S. Tsirkin { 212754f968d6SJason Wang int ret; 212854f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 21299484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 213054f968d6SJason Wang 213154f968d6SJason Wang if (!tun) 213254f968d6SJason Wang return -EBADFD; 2133f5ff53b4SAl Viro 2134c0371da6SAl Viro ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, 21355503fcecSJason Wang m->msg_flags & MSG_DONTWAIT, 21365503fcecSJason Wang m->msg_flags & MSG_MORE); 213754f968d6SJason Wang tun_put(tun); 213854f968d6SJason Wang return ret; 213905c2828cSMichael S. Tsirkin } 214005c2828cSMichael S. Tsirkin 21411b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, 214205c2828cSMichael S. Tsirkin int flags) 214305c2828cSMichael S. Tsirkin { 214454f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 21459484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 2146c33ee15bSWei Xu struct sk_buff *skb = m->msg_control; 214705c2828cSMichael S. Tsirkin int ret; 214854f968d6SJason Wang 2149c33ee15bSWei Xu if (!tun) { 2150c33ee15bSWei Xu ret = -EBADFD; 2151c33ee15bSWei Xu goto out_free_skb; 2152c33ee15bSWei Xu } 215354f968d6SJason Wang 2154eda29772SRichard Cochran if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { 21553811ae76SGao feng ret = -EINVAL; 2156c33ee15bSWei Xu goto out_put_tun; 21573811ae76SGao feng } 2158eda29772SRichard Cochran if (flags & MSG_ERRQUEUE) { 2159eda29772SRichard Cochran ret = sock_recv_errqueue(sock->sk, m, total_len, 2160eda29772SRichard Cochran SOL_PACKET, TUN_TX_TIMESTAMP); 2161eda29772SRichard Cochran goto out; 2162eda29772SRichard Cochran } 2163c33ee15bSWei Xu ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb); 216487897931SAlex Gartrell if (ret > (ssize_t)total_len) { 216542404c09SDavid S. Miller m->msg_flags |= MSG_TRUNC; 216642404c09SDavid S. Miller ret = flags & MSG_TRUNC ? ret : total_len; 216742404c09SDavid S. Miller } 21683811ae76SGao feng out: 216954f968d6SJason Wang tun_put(tun); 217005c2828cSMichael S. Tsirkin return ret; 2171c33ee15bSWei Xu 2172c33ee15bSWei Xu out_put_tun: 2173c33ee15bSWei Xu tun_put(tun); 2174c33ee15bSWei Xu out_free_skb: 2175c33ee15bSWei Xu if (skb) 2176c33ee15bSWei Xu kfree_skb(skb); 2177c33ee15bSWei Xu return ret; 217805c2828cSMichael S. 
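/* tun_recvmsg() is reached by in-kernel users (vhost-net) through
 * the pseudo-socket below: msg_control may hand in an skb the
 * caller already dequeued, MSG_DONTWAIT maps onto a non-blocking
 * read, and an oversized result is reported via MSG_TRUNC just as
 * a raw socket would.
 */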
Tsirkin } 217905c2828cSMichael S. Tsirkin 21801576d986SJason Wang static int tun_peek_len(struct socket *sock) 21811576d986SJason Wang { 21821576d986SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 21831576d986SJason Wang struct tun_struct *tun; 21841576d986SJason Wang int ret = 0; 21851576d986SJason Wang 21869484dc74Syuan linyu tun = tun_get(tfile); 21871576d986SJason Wang if (!tun) 21881576d986SJason Wang return 0; 21891576d986SJason Wang 21901576d986SJason Wang ret = skb_array_peek_len(&tfile->tx_array); 21911576d986SJason Wang tun_put(tun); 21921576d986SJason Wang 21931576d986SJason Wang return ret; 21941576d986SJason Wang } 21951576d986SJason Wang 219605c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */ 219705c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = { 21981576d986SJason Wang .peek_len = tun_peek_len, 219905c2828cSMichael S. Tsirkin .sendmsg = tun_sendmsg, 220005c2828cSMichael S. Tsirkin .recvmsg = tun_recvmsg, 220105c2828cSMichael S. Tsirkin }; 220205c2828cSMichael S. Tsirkin 220333dccbb0SHerbert Xu static struct proto tun_proto = { 220433dccbb0SHerbert Xu .name = "tun", 220533dccbb0SHerbert Xu .owner = THIS_MODULE, 220654f968d6SJason Wang .obj_size = sizeof(struct tun_file), 220733dccbb0SHerbert Xu }; 2208f019a7a5SEric W. Biederman 2209980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun) 2210980c9e8cSDavid Woodhouse { 2211031f5e03SMichael S. Tsirkin return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); 2212980c9e8cSDavid Woodhouse } 2213980c9e8cSDavid Woodhouse 2214980c9e8cSDavid Woodhouse static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr, 2215980c9e8cSDavid Woodhouse char *buf) 2216980c9e8cSDavid Woodhouse { 2217980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2218980c9e8cSDavid Woodhouse return sprintf(buf, "0x%x\n", tun_flags(tun)); 2219980c9e8cSDavid Woodhouse } 2220980c9e8cSDavid Woodhouse 2221980c9e8cSDavid Woodhouse static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, 2222980c9e8cSDavid Woodhouse char *buf) 2223980c9e8cSDavid Woodhouse { 2224980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 22250625c883SEric W. Biederman return uid_valid(tun->owner)? 22260625c883SEric W. Biederman sprintf(buf, "%u\n", 22270625c883SEric W. Biederman from_kuid_munged(current_user_ns(), tun->owner)): 22280625c883SEric W. Biederman sprintf(buf, "-1\n"); 2229980c9e8cSDavid Woodhouse } 2230980c9e8cSDavid Woodhouse 2231980c9e8cSDavid Woodhouse static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, 2232980c9e8cSDavid Woodhouse char *buf) 2233980c9e8cSDavid Woodhouse { 2234980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 22350625c883SEric W. Biederman return gid_valid(tun->group) ? 22360625c883SEric W. Biederman sprintf(buf, "%u\n", 22370625c883SEric W. Biederman from_kgid_munged(current_user_ns(), tun->group)): 22380625c883SEric W. 
Biederman sprintf(buf, "-1\n"); 2239980c9e8cSDavid Woodhouse } 2240980c9e8cSDavid Woodhouse 2241980c9e8cSDavid Woodhouse static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); 2242980c9e8cSDavid Woodhouse static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL); 2243980c9e8cSDavid Woodhouse static DEVICE_ATTR(group, 0444, tun_show_group, NULL); 2244980c9e8cSDavid Woodhouse 2245c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = { 2246c4d33e24STakashi Iwai &dev_attr_tun_flags.attr, 2247c4d33e24STakashi Iwai &dev_attr_owner.attr, 2248c4d33e24STakashi Iwai &dev_attr_group.attr, 2249c4d33e24STakashi Iwai NULL 2250c4d33e24STakashi Iwai }; 2251c4d33e24STakashi Iwai 2252c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = { 2253c4d33e24STakashi Iwai .attrs = tun_dev_attrs 2254c4d33e24STakashi Iwai }; 2255c4d33e24STakashi Iwai 2256d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 22571da177e4SLinus Torvalds { 22581da177e4SLinus Torvalds struct tun_struct *tun; 225954f968d6SJason Wang struct tun_file *tfile = file->private_data; 22601da177e4SLinus Torvalds struct net_device *dev; 22611da177e4SLinus Torvalds int err; 22621da177e4SLinus Torvalds 22637c0c3b1aSJason Wang if (tfile->detached) 22647c0c3b1aSJason Wang return -EINVAL; 22657c0c3b1aSJason Wang 226690e33d45SPetar Penkov if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { 226790e33d45SPetar Penkov if (!capable(CAP_NET_ADMIN)) 226890e33d45SPetar Penkov return -EPERM; 226990e33d45SPetar Penkov 227090e33d45SPetar Penkov if (!(ifr->ifr_flags & IFF_NAPI) || 227190e33d45SPetar Penkov (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) 227290e33d45SPetar Penkov return -EINVAL; 227390e33d45SPetar Penkov } 227490e33d45SPetar Penkov 227574a3e5a7SEric W. Biederman dev = __dev_get_by_name(net, ifr->ifr_name); 227674a3e5a7SEric W. Biederman if (dev) { 2277f85ba780SDavid Woodhouse if (ifr->ifr_flags & IFF_TUN_EXCL) 2278f85ba780SDavid Woodhouse return -EBUSY; 227974a3e5a7SEric W. Biederman if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) 228074a3e5a7SEric W. Biederman tun = netdev_priv(dev); 228174a3e5a7SEric W. Biederman else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) 228274a3e5a7SEric W. Biederman tun = netdev_priv(dev); 228374a3e5a7SEric W. Biederman else 228474a3e5a7SEric W. Biederman return -EINVAL; 228574a3e5a7SEric W. Biederman 22868e6d91aeSJason Wang if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != 228740630b82SMichael S. Tsirkin !!(tun->flags & IFF_MULTI_QUEUE)) 22888e6d91aeSJason Wang return -EINVAL; 22898e6d91aeSJason Wang 2290cde8b15fSJason Wang if (tun_not_capable(tun)) 22912b980dbdSPaul Moore return -EPERM; 22925dbbaf2dSPaul Moore err = security_tun_dev_open(tun->security); 22932b980dbdSPaul Moore if (err < 0) 22942b980dbdSPaul Moore return err; 22952b980dbdSPaul Moore 229694317099SPetar Penkov err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, 229794317099SPetar Penkov ifr->ifr_flags & IFF_NAPI); 2298a7385ba2SEric W. Biederman if (err < 0) 2299a7385ba2SEric W. Biederman return err; 23004008e97fSJason Wang 230140630b82SMichael S. Tsirkin if (tun->flags & IFF_MULTI_QUEUE && 2302e8dbad66SJason Wang (tun->numqueues + tun->numdisabled > 1)) { 2303e8dbad66SJason Wang /* One or more queues have already been attached; no need 2304e8dbad66SJason Wang * to initialize the device again. 
2305e8dbad66SJason Wang */ 2306e8dbad66SJason Wang return 0; 2307e8dbad66SJason Wang } 230886a264abSDavid Howells } 23091da177e4SLinus Torvalds else { 23101da177e4SLinus Torvalds char *name; 23111da177e4SLinus Torvalds unsigned long flags = 0; 2312edfb6a14SJason Wang int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? 2313edfb6a14SJason Wang MAX_TAP_QUEUES : 1; 23141da177e4SLinus Torvalds 2315c260b772SEric W. Biederman if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2316ca6bb5d7SDavid Woodhouse return -EPERM; 23172b980dbdSPaul Moore err = security_tun_dev_create(); 23182b980dbdSPaul Moore if (err < 0) 23192b980dbdSPaul Moore return err; 2320ca6bb5d7SDavid Woodhouse 23211da177e4SLinus Torvalds /* Set dev type */ 23221da177e4SLinus Torvalds if (ifr->ifr_flags & IFF_TUN) { 23231da177e4SLinus Torvalds /* TUN device */ 232440630b82SMichael S. Tsirkin flags |= IFF_TUN; 23251da177e4SLinus Torvalds name = "tun%d"; 23261da177e4SLinus Torvalds } else if (ifr->ifr_flags & IFF_TAP) { 23271da177e4SLinus Torvalds /* TAP device */ 232840630b82SMichael S. Tsirkin flags |= IFF_TAP; 23291da177e4SLinus Torvalds name = "tap%d"; 23301da177e4SLinus Torvalds } else 233136989b90SKusanagi Kouichi return -EINVAL; 23321da177e4SLinus Torvalds 23331da177e4SLinus Torvalds if (*ifr->ifr_name) 23341da177e4SLinus Torvalds name = ifr->ifr_name; 23351da177e4SLinus Torvalds 2336c8d68e6bSJason Wang dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, 2337c835a677STom Gundersen NET_NAME_UNKNOWN, tun_setup, queues, 2338c835a677STom Gundersen queues); 2339edfb6a14SJason Wang 23401da177e4SLinus Torvalds if (!dev) 23411da177e4SLinus Torvalds return -ENOMEM; 23420ad646c8SCong Wang err = dev_get_valid_name(net, dev, name); 23435c25f65fSJulien Gomes if (err < 0) 23440ad646c8SCong Wang goto err_free_dev; 23451da177e4SLinus Torvalds 2346fc54c658SPavel Emelyanov dev_net_set(dev, net); 2347f019a7a5SEric W. Biederman dev->rtnl_link_ops = &tun_link_ops; 2348fb7589a1SPavel Emelyanov dev->ifindex = tfile->ifindex; 2349c4d33e24STakashi Iwai dev->sysfs_groups[0] = &tun_attr_group; 2350758e43b7SStephen Hemminger 23511da177e4SLinus Torvalds tun = netdev_priv(dev); 23521da177e4SLinus Torvalds tun->dev = dev; 23531da177e4SLinus Torvalds tun->flags = flags; 2354f271b2ccSMax Krasnyansky tun->txflt.count = 0; 2355d9d52b51SMichael S. 
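/* The vnet header size set below is only a default; userspace that
 * negotiates a larger layout grows it with an ioctl, e.g.
 * (illustrative, error handling elided):
 *
 *	int sz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 *	ioctl(fd, TUNSETVNETHDRSZ, &sz);
 *
 * While IFF_VNET_HDR is set, every read()/write() must then carry
 * exactly that many bytes of header in front of each frame.
 */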
Tsirkin tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); 23561da177e4SLinus Torvalds 2357eaea34b2SPaolo Abeni tun->align = NET_SKB_PAD; 235854f968d6SJason Wang tun->filter_attached = false; 235954f968d6SJason Wang tun->sndbuf = tfile->socket.sk->sk_sndbuf; 23605503fcecSJason Wang tun->rx_batched = 0; 2361*96f84061SJason Wang RCU_INIT_POINTER(tun->steering_prog, NULL); 236233dccbb0SHerbert Xu 2363608b9977SPaolo Abeni tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); 2364608b9977SPaolo Abeni if (!tun->pcpu_stats) { 2365608b9977SPaolo Abeni err = -ENOMEM; 2366608b9977SPaolo Abeni goto err_free_dev; 2367608b9977SPaolo Abeni } 2368608b9977SPaolo Abeni 236996442e42SJason Wang spin_lock_init(&tun->lock); 237096442e42SJason Wang 23715dbbaf2dSPaul Moore err = security_tun_dev_alloc_security(&tun->security); 23725dbbaf2dSPaul Moore if (err < 0) 2373608b9977SPaolo Abeni goto err_free_stat; 23742b980dbdSPaul Moore 23751da177e4SLinus Torvalds tun_net_init(dev); 2376944a1376SPavel Emelyanov tun_flow_init(tun); 237796442e42SJason Wang 237888255375SMichał Mirosław dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | 23796680ec68SJason Wang TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | 23806680ec68SJason Wang NETIF_F_HW_VLAN_STAG_TX; 23812a2bbf17SPaolo Abeni dev->features = dev->hw_features | NETIF_F_LLTX; 23826671b224SFernando Luis Vazquez Cao dev->vlan_features = dev->features & 23836671b224SFernando Luis Vazquez Cao ~(NETIF_F_HW_VLAN_CTAG_TX | 23846671b224SFernando Luis Vazquez Cao NETIF_F_HW_VLAN_STAG_TX); 238588255375SMichał Mirosław 23864008e97fSJason Wang INIT_LIST_HEAD(&tun->disabled); 238794317099SPetar Penkov err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI); 2388eb0fb363SJason Wang if (err < 0) 2389662ca437SJason Wang goto err_free_flow; 2390eb0fb363SJason Wang 23911da177e4SLinus Torvalds err = register_netdevice(tun->dev); 23921da177e4SLinus Torvalds if (err < 0) 2393662ca437SJason Wang goto err_detach; 2394af668b3cSMichael S. Tsirkin } 2395980c9e8cSDavid Woodhouse 2396eb0fb363SJason Wang netif_carrier_on(tun->dev); 23971da177e4SLinus Torvalds 23986b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_set_iff\n"); 23991da177e4SLinus Torvalds 2400031f5e03SMichael S. Tsirkin tun->flags = (tun->flags & ~TUN_FEATURES) | 2401031f5e03SMichael S. Tsirkin (ifr->ifr_flags & TUN_FEATURES); 2402c8d68e6bSJason Wang 2403e35259a9SMax Krasnyansky /* Make sure persistent devices do not get stuck in 2404e35259a9SMax Krasnyansky * xoff state. 

static void tun_get_iff(struct net *net, struct tun_struct *tun,
			struct ifreq *ifr)
{
	tun_debug(KERN_INFO, tun, "tun_get_iff\n");

	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = tun_flags(tun);
}

/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required. */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	if (arg & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}

		arg &= ~TUN_F_UFO;
	}

	/* This gives the user a way to test for new features in future by
	 * trying to set them.
	 */
	if (arg)
		return -EINVAL;

	tun->set_features = features;
	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
	tun->dev->wanted_features |= features;
	netdev_update_features(tun->dev);

	return 0;
}
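
/* Example (userspace sketch): offload negotiation against set_offload()
 * above.  The TSO bits are only honoured together with TUN_F_CSUM, and
 * any unknown bit makes the ioctl fail with EINVAL, which is how
 * userspace probes for supported features:
 *
 *	unsigned long offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
 *
 *	if (ioctl(fd, TUNSETOFFLOAD, offload) < 0)
 *		;	// fall back, e.g. retry with TUN_F_CSUM only
 *
 * Note the argument is passed by value, not by pointer.
 */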

static void tun_detach_filter(struct tun_struct *tun, int n)
{
	int i;
	struct tun_file *tfile;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		sk_detach_filter(tfile->socket.sk);
		release_sock(tfile->socket.sk);
	}

	tun->filter_attached = false;
}

static int tun_attach_filter(struct tun_struct *tun)
{
	int i, ret = 0;
	struct tun_file *tfile;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (ret) {
			tun_detach_filter(tun, i);
			return ret;
		}
	}

	tun->filter_attached = true;
	return ret;
}

static void tun_set_sndbuf(struct tun_struct *tun)
{
	struct tun_file *tfile;
	int i;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
	}
}

static int tun_set_queue(struct file *file, struct ifreq *ifr)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	int ret = 0;

	rtnl_lock();

	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
		tun = tfile->detached;
		if (!tun) {
			ret = -EINVAL;
			goto unlock;
		}
		ret = security_tun_dev_attach_queue(tun->security);
		if (ret < 0)
			goto unlock;
		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
		tun = rtnl_dereference(tfile->tun);
		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
			ret = -EINVAL;
		else
			__tun_detach(tfile, false);
	} else
		ret = -EINVAL;

unlock:
	rtnl_unlock();
	return ret;
}
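
/* Example (userspace sketch): temporarily disabling one queue of a
 * multiqueue device via tun_set_queue() above.  fd is a descriptor that
 * was attached with TUNSETIFF and IFF_MULTI_QUEUE.
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;
 *	ioctl(fd, TUNSETQUEUE, &ifr);	// queue goes quiescent
 *	...
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	ioctl(fd, TUNSETQUEUE, &ifr);	// re-enable it
 */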

static int tun_set_steering_ebpf(struct tun_struct *tun, void __user *data)
{
	struct bpf_prog *prog;
	int fd;

	if (copy_from_user(&fd, data, sizeof(fd)))
		return -EFAULT;

	if (fd == -1) {
		prog = NULL;
	} else {
		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
		if (IS_ERR(prog))
			return PTR_ERR(prog);
	}

	return __tun_set_steering_ebpf(tun, prog);
}
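
/* Example (userspace sketch): installing a steering program through
 * tun_set_steering_ebpf() above.  The program must be of type
 * BPF_PROG_TYPE_SOCKET_FILTER; its return value is used by the driver to
 * pick a transmit queue.  Passing fd == -1 removes the program.
 * bpf_prog_load() stands in for whatever loader the application uses
 * (e.g. libbpf) and is assumed here, not provided by this driver.
 *
 *	int prog_fd = bpf_prog_load(...);	// BPF_PROG_TYPE_SOCKET_FILTER
 *
 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);	// note: pointer to the fd
 *	...
 *	prog_fd = -1;
 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);	// detach
 */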

static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg, int ifreq_len)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	void __user *argp = (void __user *)arg;
	struct ifreq ifr;
	kuid_t owner;
	kgid_t group;
	int sndbuf;
	int vnet_hdr_sz;
	unsigned int ifindex;
	int le;
	int ret;

	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == SOCK_IOC_TYPE) {
		if (copy_from_user(&ifr, argp, ifreq_len))
			return -EFAULT;
	} else {
		memset(&ifr, 0, sizeof(ifr));
	}
	if (cmd == TUNGETFEATURES) {
		/* Currently this just means: "what IFF flags are valid?".
		 * This is needed because we never checked for invalid flags on
		 * TUNSETIFF.
		 */
		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
				(unsigned int __user *)argp);
	} else if (cmd == TUNSETQUEUE)
		return tun_set_queue(file, &ifr);

	ret = 0;
	rtnl_lock();

	tun = tun_get(tfile);
	if (cmd == TUNSETIFF) {
		ret = -EEXIST;
		if (tun)
			goto unlock;

		ifr.ifr_name[IFNAMSIZ-1] = '\0';

		ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr);

		if (ret)
			goto unlock;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		goto unlock;
	}
	if (cmd == TUNSETIFINDEX) {
		ret = -EPERM;
		if (tun)
			goto unlock;

		ret = -EFAULT;
		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
			goto unlock;

		ret = 0;
		tfile->ifindex = ifindex;
		goto unlock;
	}

	ret = -EBADFD;
	if (!tun)
		goto unlock;

	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);

	ret = 0;
	switch (cmd) {
	case TUNGETIFF:
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);

		if (tfile->detached)
			ifr.ifr_flags |= IFF_DETACH_QUEUE;
		if (!tfile->socket.sk->sk_filter)
			ifr.ifr_flags |= IFF_NOFILTER;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case TUNSETNOCSUM:
		/* Disable/Enable checksum */

		/* [unimplemented] */
		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
			  arg ? "disabled" : "enabled");
		break;

	case TUNSETPERSIST:
		/* Disable/Enable persist mode. Keep an extra reference to the
		 * module to prevent the module from being unloaded.
		 */
		if (arg && !(tun->flags & IFF_PERSIST)) {
			tun->flags |= IFF_PERSIST;
			__module_get(THIS_MODULE);
		}
		if (!arg && (tun->flags & IFF_PERSIST)) {
			tun->flags &= ~IFF_PERSIST;
			module_put(THIS_MODULE);
		}

		tun_debug(KERN_INFO, tun, "persist %s\n",
			  arg ? "enabled" : "disabled");
		break;

	case TUNSETOWNER:
		/* Set owner of the device */
		owner = make_kuid(current_user_ns(), arg);
		if (!uid_valid(owner)) {
			ret = -EINVAL;
			break;
		}
		tun->owner = owner;
		tun_debug(KERN_INFO, tun, "owner set to %u\n",
			  from_kuid(&init_user_ns, tun->owner));
		break;

	case TUNSETGROUP:
		/* Set group of the device */
		group = make_kgid(current_user_ns(), arg);
		if (!gid_valid(group)) {
			ret = -EINVAL;
			break;
		}
		tun->group = group;
		tun_debug(KERN_INFO, tun, "group set to %u\n",
			  from_kgid(&init_user_ns, tun->group));
		break;

	case TUNSETLINK:
		/* Only allow setting the type when the interface is down */
		if (tun->dev->flags & IFF_UP) {
			tun_debug(KERN_INFO, tun,
				  "Linktype set failed because interface is up\n");
			ret = -EBUSY;
		} else {
			tun->dev->type = (int) arg;
			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
				  tun->dev->type);
			ret = 0;
		}
		break;

#ifdef TUN_DEBUG
	case TUNSETDEBUG:
		tun->debug = arg;
		break;
#endif
	case TUNSETOFFLOAD:
		ret = set_offload(tun, arg);
		break;

	case TUNSETTXFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = update_filter(&tun->txflt, (void __user *)arg);
		break;

	case SIOCGIFHWADDR:
		/* Get hw address */
		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
		ifr.ifr_hwaddr.sa_family = tun->dev->type;
		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case SIOCSIFHWADDR:
		/* Set hw address */
		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
			  ifr.ifr_hwaddr.sa_data);

		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
		break;

	case TUNGETSNDBUF:
		sndbuf = tfile->socket.sk->sk_sndbuf;
		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
			ret = -EFAULT;
		break;

	case TUNSETSNDBUF:
		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
			ret = -EFAULT;
			break;
		}
		if (sndbuf <= 0) {
			ret = -EINVAL;
			break;
		}

		tun->sndbuf = sndbuf;
		tun_set_sndbuf(tun);
		break;

	case TUNGETVNETHDRSZ:
		vnet_hdr_sz = tun->vnet_hdr_sz;
		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
			ret = -EFAULT;
		break;

	case TUNSETVNETHDRSZ:
		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
			ret = -EFAULT;
			break;
		}
		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
			ret = -EINVAL;
			break;
		}

		tun->vnet_hdr_sz = vnet_hdr_sz;
		break;

	case TUNGETVNETLE:
		le = !!(tun->flags & TUN_VNET_LE);
		if (put_user(le, (int __user *)argp))
			ret = -EFAULT;
		break;

	case TUNSETVNETLE:
		if (get_user(le, (int __user *)argp)) {
			ret = -EFAULT;
			break;
		}
		if (le)
			tun->flags |= TUN_VNET_LE;
		else
			tun->flags &= ~TUN_VNET_LE;
		break;

	case TUNGETVNETBE:
		ret = tun_get_vnet_be(tun, argp);
		break;

	case TUNSETVNETBE:
		ret = tun_set_vnet_be(tun, argp);
		break;

	case TUNATTACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
			break;

		ret = tun_attach_filter(tun);
		break;
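
	/* Example (userspace sketch): attaching a classic BPF filter to all
	 * queues via TUNATTACHFILTER above.  BPF_STMT comes from
	 * <linux/filter.h>; returning (unsigned)-1 accepts every packet.
	 *
	 *	struct sock_filter accept_all =
	 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff);
	 *	struct sock_fprog fprog = {
	 *		.len = 1,
	 *		.filter = &accept_all,
	 *	};
	 *
	 *	ioctl(fd, TUNATTACHFILTER, &fprog);	// TAP devices only
	 */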

	case TUNDETACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = 0;
		tun_detach_filter(tun, tun->numqueues);
		break;

	case TUNGETFILTER:
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
			break;
		ret = 0;
		break;

	case TUNSETSTEERINGEBPF:
		ret = tun_set_steering_ebpf(tun, argp);
		break;

	default:
		ret = -EINVAL;
		break;
	}

unlock:
	rtnl_unlock();
	if (tun)
		tun_put(tun);
	return ret;
}

static long tun_chr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
}
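
/* Example (userspace sketch): enabling virtio-net headers so offloaded
 * packets can carry GSO metadata.  IFF_VNET_HDR is requested at
 * TUNSETIFF time; the size set here must match what the reader expects.
 *
 *	int len = sizeof(struct virtio_net_hdr_mrg_rxbuf);	// 12 bytes
 *
 *	ioctl(fd, TUNSETVNETHDRSZ, &len);
 *	// every read()/write() on fd is now prefixed by that header
 */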

#ifdef CONFIG_COMPAT
static long tun_chr_compat_ioctl(struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case TUNSETIFF:
	case TUNGETIFF:
	case TUNSETTXFILTER:
	case TUNGETSNDBUF:
	case TUNSETSNDBUF:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
		arg = (unsigned long)compat_ptr(arg);
		break;
	default:
		arg = (compat_ulong_t)arg;
		break;
	}

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible though, we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */

static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_file *tfile = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &tfile->fasync);
	if (ret < 0)
		goto out;

	if (on) {
		__f_setown(file, task_pid(current), PIDTYPE_PID, 0);
		tfile->flags |= TUN_FASYNC;
	} else
		tfile->flags &= ~TUN_FASYNC;
	ret = 0;
out:
	return ret;
}
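
/* Example (userspace sketch): enabling SIGIO delivery through the fasync
 * hook above, using plain fcntl().  Note tun_chr_fasync() already directs
 * the signal at the calling task, so a separate F_SETOWN is optional.
 *
 *	int flags = fcntl(fd, F_GETFL);
 *
 *	fcntl(fd, F_SETFL, flags | O_ASYNC);
 *	// SIGIO now fires when packets become readable on fd
 */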

static int tun_chr_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tun_file *tfile;

	DBG1(KERN_INFO, "tunX: tun_chr_open\n");

	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto, 0);
	if (!tfile)
		return -ENOMEM;
	RCU_INIT_POINTER(tfile->tun, NULL);
	tfile->flags = 0;
	tfile->ifindex = 0;

	init_waitqueue_head(&tfile->wq.wait);
	RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data(&tfile->socket, &tfile->sk);

	tfile->sk.sk_write_space = tun_sock_write_space;
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	INIT_LIST_HEAD(&tfile->next);

	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);

	return 0;
}

static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;

	tun_detach(tfile, true);

	return 0;
}

#ifdef CONFIG_PROC_FS
static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));

	rtnl_lock();
	tun = tun_get(tfile);
	if (tun)
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
	rtnl_unlock();

	if (tun)
		tun_put(tun);

	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
}
#endif
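
/* With CONFIG_PROC_FS, the hook above exposes the attached interface name
 * to tools, e.g. (illustrative):
 *
 *	$ cat /proc/<pid>/fdinfo/<fd>
 *	...
 *	iff:	tap0
 */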

static const struct file_operations tun_fops = {
	.owner	= THIS_MODULE,
	.llseek = no_llseek,
	.read_iter  = tun_chr_read_iter,
	.write_iter = tun_chr_write_iter,
	.poll	= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open	= tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = tun_chr_show_fdinfo,
#endif
};

static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};

/* ethtool interface */

static int tun_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	cmd->base.speed		= SPEED_10;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.phy_address	= 0;
	cmd->base.autoneg	= AUTONEG_DISABLE;
	return 0;
}

static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
		break;
	case IFF_TAP:
		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
		break;
	}
}

static u32 tun_get_msglevel(struct net_device *dev)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	return tun->debug;
#else
	return -EOPNOTSUPP;
#endif
}

static void tun_set_msglevel(struct net_device *dev, u32 value)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	tun->debug = value;
#endif
}

static int tun_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	ec->rx_max_coalesced_frames = tun->rx_batched;

	return 0;
}

static int tun_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
		tun->rx_batched = NAPI_POLL_WEIGHT;
	else
		tun->rx_batched = ec->rx_max_coalesced_frames;

	return 0;
}
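
/* Example (illustrative): rx_batched above is driven by standard ethtool
 * coalescing and clamped to NAPI_POLL_WEIGHT:
 *
 *	$ ethtool -C tap0 rx-frames 32
 *
 * A value of 0 disables batching, so each packet written to the fd is
 * delivered to the stack immediately.
 */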

static const struct ethtool_ops tun_ethtool_ops = {
	.get_drvinfo	= tun_get_drvinfo,
	.get_msglevel	= tun_get_msglevel,
	.set_msglevel	= tun_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_coalesce   = tun_get_coalesce,
	.set_coalesce   = tun_set_coalesce,
	.get_link_ksettings = tun_get_link_ksettings,
};

static int tun_queue_resize(struct tun_struct *tun)
{
	struct net_device *dev = tun->dev;
	struct tun_file *tfile;
	struct skb_array **arrays;
	int n = tun->numqueues + tun->numdisabled;
	int ret, i;

	arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
	if (!arrays)
		return -ENOMEM;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		arrays[i] = &tfile->tx_array;
	}
	list_for_each_entry(tfile, &tun->disabled, next)
		arrays[i++] = &tfile->tx_array;

	ret = skb_array_resize_multiple(arrays, n,
					dev->tx_queue_len, GFP_KERNEL);

	kfree(arrays);
	return ret;
}

static int tun_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tun_struct *tun = netdev_priv(dev);

	if (dev->rtnl_link_ops != &tun_link_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		if (tun_queue_resize(tun))
			return NOTIFY_BAD;
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block tun_notifier_block __read_mostly = {
	.notifier_call	= tun_device_event,
};

static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}

	ret = register_netdevice_notifier(&tun_notifier_block);
	if (ret) {
		pr_err("Can't register netdevice notifier\n");
		goto err_notifier;
	}

	return 0;

err_notifier:
	misc_deregister(&tun_miscdev);
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}
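
/* Example (illustrative): the notifier above resizes every queue's ring
 * when the qdisc length of a tun/tap device changes, e.g.:
 *
 *	$ ip link set dev tap0 txqueuelen 5000
 */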

static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
	unregister_netdevice_notifier(&tun_notifier_block);
}

/* Get an underlying socket object from tun file.  Returns error unless file is
 * attached to a device.  The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);

struct skb_array *tun_get_skb_array(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->tx_array;
}
EXPORT_SYMBOL_GPL(tun_get_skb_array);

module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");