/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */
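/*
 * Illustrative sketch of how userspace typically talks to this driver,
 * assuming the standard /dev/net/tun character node and the TUNSETIFF
 * ioctl; the helper name tun_alloc() is made up for the example.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	static int tun_alloc(char *name)
 *	{
 *		struct ifreq ifr;
 *		int fd = open("/dev/net/tun", O_RDWR);
 *
 *		if (fd < 0)
 *			return fd;
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	// IFF_TAP for L2 frames
 *		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
 *		if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;	// read()/write() on fd now carry packets
 *	}
 */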
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (tun->debug)						\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (debug == 2)						\
		printk(level fmt, ##args);			\
} while (0)
#else
#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (0)							\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (0)							\
		printk(level fmt, ##args);			\
} while (0)
#endif

#define TUN_HEADROOM 256
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE 0x80000000
#define TUN_VNET_BE 0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and tap_filter)
 * to serve as one transmit queue for the tuntap device. The sock_fprog and
 * tap_filter are kept in tun_struct since they are used for filtering for the
 * netdevice, not for a specific queue (at least I didn't see the requirement
 * for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
	int xdp_pending_pkts;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated;
};

#define TUN_NUM_FLOW_ENTRIES 1024

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the sockets were moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when the file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int		numqueues;
	unsigned int		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

bool tun_is_xdp_buff(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}
EXPORT_SYMBOL(tun_is_xdp_buff);

void *tun_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_xdp_to_ptr);

void *tun_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_ptr_to_xdp);
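/*
 * A short sketch of how the three helpers above combine (an annotation, not
 * upstream documentation): entries queued on tfile->tx_ring are either
 * sk_buff or xdp_buff pointers.  Producers tag xdp_buff pointers by setting
 * the TUN_XDP_FLAG bit, which is safe because both allocations are at least
 * word aligned, so the low bit is otherwise always zero.  A consumer is then
 * expected to do something like:
 *
 *	void *ptr = ptr_ring_consume(&tfile->tx_ring);
 *
 *	if (tun_is_xdp_buff(ptr)) {
 *		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
 *		// handle an XDP buffer
 *	} else {
 *		struct sk_buff *skb = ptr;
 *		// handle a socket buffer
 *	}
 */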
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en)
{
	tfile->napi_enabled = napi_en;
	if (napi_en) {
		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
		mutex_init(&tfile->napi_mutex);
	}
}

static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_struct *tun)
{
	return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
}

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}
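/*
 * A brief summary of the endianness helpers above: virtio-net header fields
 * exchanged with userspace are __virtio16, so their byte order depends on
 * the per-device flags.  TUN_VNET_LE forces little-endian regardless of host
 * endianness, while TUN_VNET_BE (only available with
 * CONFIG_TUN_VNET_CROSS_LE) covers legacy cross-endian guests.  For example,
 * on a big-endian host with TUN_VNET_LE set, tun16_to_cpu(tun, val)
 * byte-swaps the value, whereas on a little-endian host it is a no-op.
 */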
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & 0x3ff;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	if (!rxhash)
		return;
	else
		head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	/* There is a small possibility of out-of-order delivery while a flow
	 * switches queues; not worth optimizing for. */
	if (tun->numqueues == 1 || tfile->detached)
		goto unlock;

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

unlock:
	rcu_read_unlock();
}

/**
 * Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash first. The reason that
 * we do not check the rxq no. is that some cards (e.g. the 82599) choose
 * the rxq based on the txq where the last packet of the flow went out. As
 * the userspace application moves between processors, we may get a
 * different rxq no. here. If we could not get an rxhash, then we would
 * hope the rxq no. may help here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	if (txq) {
		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
		if (e) {
			tun_flow_save_rps_rxhash(e, txq);
			txq = e->queue_index;
		} else
			/* use multiply and shift instead of expensive divide */
			txq = ((u64)txq * numqueues) >> 32;
	} else if (likely(skb_rx_queue_recorded(skb))) {
		txq = skb_get_rx_queue(skb);
		while (unlikely(txq >= numqueues))
			txq -= numqueues;
	}

	return txq;
}

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u16 ret = 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % tun->numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}
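/*
 * Illustrative arithmetic for the "multiply and shift" above: txq is a
 * 32-bit hash, so ((u64)txq * numqueues) >> 32 maps it roughly uniformly
 * onto [0, numqueues) without a divide.  For example, with numqueues = 4
 * and txq = 0x80000000 the product is 0x200000000 and the shift yields
 * queue 2.  The eBPF steering path instead reduces the program's return
 * value with "% tun->numqueues".
 */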
static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

static void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_buff(ptr)) {
		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);

		put_page(virt_to_head_page(xdp->data));
	} else {
		__skb_array_destroy_skb(ptr);
	}
}

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

static void tun_cleanup_tx_ring(struct tun_file *tfile)
{
	if (tfile->tx_ring.queue) {
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
	}
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tun, tfile);
		tun_napi_del(tun, tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		tun_cleanup_tx_ring(tfile);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	rtnl_lock();
	__tun_detach(tfile, clean);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tun, tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tun, tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
		tun_cleanup_tx_ring(tfile);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
		tun_cleanup_tx_ring(tfile);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}
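/*
 * In outline, the queue lifecycle handled by the helpers above and by
 * tun_attach() below: a tun_file is "attached" while it sits in
 * tun->tfiles[] and counts toward numqueues; __tun_detach(clean=false)
 * instead parks it on tun->disabled (tfile->detached pointing back at the
 * tun_struct) so a later attach can revive it via tun_enable_queue().  Only
 * a clean detach of the last queue of a non-persistent device unregisters
 * the netdevice.
 */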
static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to a persistent device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (!err)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index);
		if (err < 0)
			goto out;
		err = 0;
	}

	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi);
	}

	tun_set_real_num_queues(tun);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks, which is
	 * perfectly safe. We disable it first and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}
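/*
 * In outline, the TAP filter above works in two tiers: the first
 * FLT_EXACT_COUNT addresses supplied by userspace become exact
 * destination-MAC matches, and any remaining (multicast-only) addresses are
 * folded into a 64-bit hash bitmap, where the top six bits of ether_crc()
 * pick one of the 64 mask bits.  For example, adding 01:00:5e:00:00:01 sets
 * a single bit in filter->mask, and later multicast frames whose CRC lands
 * on that bit are accepted; unrelated multicasts can still slip through on
 * hash collisions, which is acceptable for this kind of imperfect filter.
 */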
/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	netif_tx_start_all_queues(dev);

	for (i = 0; i < tun->numqueues; i++) {
		struct tun_file *tfile;

		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_write_space(tfile->socket.sk);
	}

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		if (rxhash) {
			struct tun_flow_entry *e;
			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
					  rxhash);
			if (e)
				tun_flow_save_rps_rxhash(e, rxhash);
		}
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (txq >= tun->numqueues)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	len = run_ebpf_filter(tun, skb, len);

	/* Trim extra bytes since we may insert vlan proto & TCI
	 * in tun_put_user().
	 */
	len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0;
	if (len <= 0 || pskb_trim(skb, len))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time.
	 */
	skb_orphan(skb);

	nf_reset(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tun_poll_controller(struct net_device *dev)
{
	/*
	 * Tun only receives frames when:
	 * 1) the char device endpoint gets data from user space
	 * 2) the tun socket gets a sendmsg call from user space
	 * If NAPI is not enabled, since both of those are synchronous
	 * operations, we are guaranteed never to have pending data when we poll
	 * for it so there is nothing to do here but return.
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole
	 * If NAPI is enabled, however, we need to schedule polling for all
	 * queues unless we are using napi_gro_frags(), which we call in
	 * process context and not in NAPI context.
	 */
	struct tun_struct *tun = netdev_priv(dev);

	if (tun->flags & IFF_NAPI) {
		struct tun_file *tfile;
		int i;

		if (tun_napi_frags_enabled(tun))
			return;

		rcu_read_lock();
		for (i = 0; i < tun->numqueues; i++) {
			tfile = rcu_dereference(tun->tfiles[i]);
			if (tfile->napi_enabled)
				napi_schedule(&tfile->napi);
		}
		rcu_read_unlock();
	}
	return;
}
#endif

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}

static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_pcpu_stats *p;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(tun->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets	= p->rx_packets;
			rxbytes		= p->rx_bytes;
			txpackets	= p->tx_packets;
			txbytes		= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;

		/* u32 counters */
		rx_dropped	+= p->rx_dropped;
		rx_frame_errors	+= p->rx_frame_errors;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped  = rx_dropped;
	stats->rx_frame_errors = rx_frame_errors;
	stats->tx_dropped = tx_dropped;
}
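/*
 * A brief note on the aggregation above: each CPU owns a tun_pcpu_stats
 * instance, so the hot paths (e.g. this_cpu_inc(...->tx_dropped) in
 * tun_net_xmit()) never contend on a shared counter.  The
 * u64_stats_fetch_begin()/_retry() pair re-reads a CPU's 64-bit counters if
 * a writer updated them mid-read, which only matters on 32-bit hosts where
 * a u64 load is not atomic.
 */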
txbytes; 1211608b9977SPaolo Abeni unsigned int start; 1212608b9977SPaolo Abeni 1213608b9977SPaolo Abeni p = per_cpu_ptr(tun->pcpu_stats, i); 1214608b9977SPaolo Abeni do { 1215608b9977SPaolo Abeni start = u64_stats_fetch_begin(&p->syncp); 1216608b9977SPaolo Abeni rxpackets = p->rx_packets; 1217608b9977SPaolo Abeni rxbytes = p->rx_bytes; 1218608b9977SPaolo Abeni txpackets = p->tx_packets; 1219608b9977SPaolo Abeni txbytes = p->tx_bytes; 1220608b9977SPaolo Abeni } while (u64_stats_fetch_retry(&p->syncp, start)); 1221608b9977SPaolo Abeni 1222608b9977SPaolo Abeni stats->rx_packets += rxpackets; 1223608b9977SPaolo Abeni stats->rx_bytes += rxbytes; 1224608b9977SPaolo Abeni stats->tx_packets += txpackets; 1225608b9977SPaolo Abeni stats->tx_bytes += txbytes; 1226608b9977SPaolo Abeni 1227608b9977SPaolo Abeni /* u32 counters */ 1228608b9977SPaolo Abeni rx_dropped += p->rx_dropped; 1229608b9977SPaolo Abeni rx_frame_errors += p->rx_frame_errors; 1230608b9977SPaolo Abeni tx_dropped += p->tx_dropped; 1231608b9977SPaolo Abeni } 1232608b9977SPaolo Abeni stats->rx_dropped = rx_dropped; 1233608b9977SPaolo Abeni stats->rx_frame_errors = rx_frame_errors; 1234608b9977SPaolo Abeni stats->tx_dropped = tx_dropped; 1235608b9977SPaolo Abeni } 1236608b9977SPaolo Abeni 1237761876c8SJason Wang static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, 1238761876c8SJason Wang struct netlink_ext_ack *extack) 1239761876c8SJason Wang { 1240761876c8SJason Wang struct tun_struct *tun = netdev_priv(dev); 1241761876c8SJason Wang struct bpf_prog *old_prog; 1242761876c8SJason Wang 1243761876c8SJason Wang old_prog = rtnl_dereference(tun->xdp_prog); 1244761876c8SJason Wang rcu_assign_pointer(tun->xdp_prog, prog); 1245761876c8SJason Wang if (old_prog) 1246761876c8SJason Wang bpf_prog_put(old_prog); 1247761876c8SJason Wang 1248761876c8SJason Wang return 0; 1249761876c8SJason Wang } 1250761876c8SJason Wang 1251761876c8SJason Wang static u32 tun_xdp_query(struct net_device *dev) 1252761876c8SJason Wang { 1253761876c8SJason Wang struct tun_struct *tun = netdev_priv(dev); 1254761876c8SJason Wang const struct bpf_prog *xdp_prog; 1255761876c8SJason Wang 1256761876c8SJason Wang xdp_prog = rtnl_dereference(tun->xdp_prog); 1257761876c8SJason Wang if (xdp_prog) 1258761876c8SJason Wang return xdp_prog->aux->id; 1259761876c8SJason Wang 1260761876c8SJason Wang return 0; 1261761876c8SJason Wang } 1262761876c8SJason Wang 1263f4e63525SJakub Kicinski static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) 1264761876c8SJason Wang { 1265761876c8SJason Wang switch (xdp->command) { 1266761876c8SJason Wang case XDP_SETUP_PROG: 1267761876c8SJason Wang return tun_xdp_set(dev, xdp->prog, xdp->extack); 1268761876c8SJason Wang case XDP_QUERY_PROG: 1269761876c8SJason Wang xdp->prog_id = tun_xdp_query(dev); 1270761876c8SJason Wang xdp->prog_attached = !!xdp->prog_id; 1271761876c8SJason Wang return 0; 1272761876c8SJason Wang default: 1273761876c8SJason Wang return -EINVAL; 1274761876c8SJason Wang } 1275761876c8SJason Wang } 1276761876c8SJason Wang 1277758e43b7SStephen Hemminger static const struct net_device_ops tun_netdev_ops = { 1278c70f1829SEric W. 
Biederman .ndo_uninit = tun_net_uninit, 1279758e43b7SStephen Hemminger .ndo_open = tun_net_open, 1280758e43b7SStephen Hemminger .ndo_stop = tun_net_close, 128100829823SStephen Hemminger .ndo_start_xmit = tun_net_xmit, 128288255375SMichał Mirosław .ndo_fix_features = tun_net_fix_features, 1283c8d68e6bSJason Wang .ndo_select_queue = tun_select_queue, 1284bebd097aSNeil Horman #ifdef CONFIG_NET_POLL_CONTROLLER 1285bebd097aSNeil Horman .ndo_poll_controller = tun_poll_controller, 1286bebd097aSNeil Horman #endif 1287eaea34b2SPaolo Abeni .ndo_set_rx_headroom = tun_set_headroom, 1288608b9977SPaolo Abeni .ndo_get_stats64 = tun_net_get_stats64, 1289758e43b7SStephen Hemminger }; 1290758e43b7SStephen Hemminger 1291fc72d1d5SJason Wang static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) 1292fc72d1d5SJason Wang { 1293fc72d1d5SJason Wang struct tun_struct *tun = netdev_priv(dev); 1294fc72d1d5SJason Wang struct xdp_buff *buff = xdp->data_hard_start; 1295fc72d1d5SJason Wang int headroom = xdp->data - xdp->data_hard_start; 1296fc72d1d5SJason Wang struct tun_file *tfile; 1297fc72d1d5SJason Wang u32 numqueues; 1298fc72d1d5SJason Wang int ret = 0; 1299fc72d1d5SJason Wang 1300fc72d1d5SJason Wang /* Assure headroom is available and buff is properly aligned */ 1301fc72d1d5SJason Wang if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp))) 1302fc72d1d5SJason Wang return -ENOSPC; 1303fc72d1d5SJason Wang 1304fc72d1d5SJason Wang *buff = *xdp; 1305fc72d1d5SJason Wang 1306fc72d1d5SJason Wang rcu_read_lock(); 1307fc72d1d5SJason Wang 1308fc72d1d5SJason Wang numqueues = READ_ONCE(tun->numqueues); 1309fc72d1d5SJason Wang if (!numqueues) { 1310fc72d1d5SJason Wang ret = -ENOSPC; 1311fc72d1d5SJason Wang goto out; 1312fc72d1d5SJason Wang } 1313fc72d1d5SJason Wang 1314fc72d1d5SJason Wang tfile = rcu_dereference(tun->tfiles[smp_processor_id() % 1315fc72d1d5SJason Wang numqueues]); 1316fc72d1d5SJason Wang /* Encode the XDP flag into lowest bit for consumer to differ 1317fc72d1d5SJason Wang * XDP buffer from sk_buff. 
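 * The helpers used below and in the read path -- tun_xdp_to_ptr(),
 * tun_ptr_to_xdp() and tun_is_xdp_buff() -- are defined elsewhere in
 * this driver.  Conceptually they are plain pointer tagging; a minimal
 * sketch of the idiom, assuming a single low tag bit (TUN_XDP_FLAG is
 * the name the driver uses; the value shown is illustrative):
 *
 *	#define TUN_XDP_FLAG	0x1UL
 *
 *	static void *tun_xdp_to_ptr(void *ptr)
 *	{
 *		return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
 *	}
 *
 *	static bool tun_is_xdp_buff(void *ptr)
 *	{
 *		return (unsigned long)ptr & TUN_XDP_FLAG;
 *	}
 *
 *	static void *tun_ptr_to_xdp(void *ptr)
 *	{
 *		return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
 *	}
 *
 * Tagging bit 0 is safe because both sk_buff and the xdp_buff copy
 * stored at data_hard_start come from allocations that are at least
 * word aligned, so a valid pointer never has that bit set.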
1318fc72d1d5SJason Wang */ 1319fc72d1d5SJason Wang if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) { 1320fc72d1d5SJason Wang this_cpu_inc(tun->pcpu_stats->tx_dropped); 1321fc72d1d5SJason Wang ret = -ENOSPC; 1322fc72d1d5SJason Wang } 1323fc72d1d5SJason Wang 1324fc72d1d5SJason Wang out: 1325fc72d1d5SJason Wang rcu_read_unlock(); 1326fc72d1d5SJason Wang return ret; 1327fc72d1d5SJason Wang } 1328fc72d1d5SJason Wang 1329fc72d1d5SJason Wang static void tun_xdp_flush(struct net_device *dev) 1330fc72d1d5SJason Wang { 1331fc72d1d5SJason Wang struct tun_struct *tun = netdev_priv(dev); 1332fc72d1d5SJason Wang struct tun_file *tfile; 1333fc72d1d5SJason Wang u32 numqueues; 1334fc72d1d5SJason Wang 1335fc72d1d5SJason Wang rcu_read_lock(); 1336fc72d1d5SJason Wang 1337fc72d1d5SJason Wang numqueues = READ_ONCE(tun->numqueues); 1338fc72d1d5SJason Wang if (!numqueues) 1339fc72d1d5SJason Wang goto out; 1340fc72d1d5SJason Wang 1341fc72d1d5SJason Wang tfile = rcu_dereference(tun->tfiles[smp_processor_id() % 1342fc72d1d5SJason Wang numqueues]); 1343fc72d1d5SJason Wang /* Notify and wake up reader process */ 1344fc72d1d5SJason Wang if (tfile->flags & TUN_FASYNC) 1345fc72d1d5SJason Wang kill_fasync(&tfile->fasync, SIGIO, POLL_IN); 1346fc72d1d5SJason Wang tfile->socket.sk->sk_data_ready(tfile->socket.sk); 1347fc72d1d5SJason Wang 1348fc72d1d5SJason Wang out: 1349fc72d1d5SJason Wang rcu_read_unlock(); 1350fc72d1d5SJason Wang } 1351fc72d1d5SJason Wang 1352758e43b7SStephen Hemminger static const struct net_device_ops tap_netdev_ops = { 1353c70f1829SEric W. Biederman .ndo_uninit = tun_net_uninit, 1354758e43b7SStephen Hemminger .ndo_open = tun_net_open, 1355758e43b7SStephen Hemminger .ndo_stop = tun_net_close, 135600829823SStephen Hemminger .ndo_start_xmit = tun_net_xmit, 135788255375SMichał Mirosław .ndo_fix_features = tun_net_fix_features, 1358afc4b13dSJiri Pirko .ndo_set_rx_mode = tun_net_mclist, 1359758e43b7SStephen Hemminger .ndo_set_mac_address = eth_mac_addr, 1360758e43b7SStephen Hemminger .ndo_validate_addr = eth_validate_addr, 1361c8d68e6bSJason Wang .ndo_select_queue = tun_select_queue, 1362bebd097aSNeil Horman #ifdef CONFIG_NET_POLL_CONTROLLER 1363bebd097aSNeil Horman .ndo_poll_controller = tun_poll_controller, 1364bebd097aSNeil Horman #endif 13655e52796aSToshiaki Makita .ndo_features_check = passthru_features_check, 1366eaea34b2SPaolo Abeni .ndo_set_rx_headroom = tun_set_headroom, 1367608b9977SPaolo Abeni .ndo_get_stats64 = tun_net_get_stats64, 1368f4e63525SJakub Kicinski .ndo_bpf = tun_xdp, 1369fc72d1d5SJason Wang .ndo_xdp_xmit = tun_xdp_xmit, 1370fc72d1d5SJason Wang .ndo_xdp_flush = tun_xdp_flush, 1371758e43b7SStephen Hemminger }; 1372758e43b7SStephen Hemminger 1373944a1376SPavel Emelyanov static void tun_flow_init(struct tun_struct *tun) 137496442e42SJason Wang { 137596442e42SJason Wang int i; 137696442e42SJason Wang 137796442e42SJason Wang for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) 137896442e42SJason Wang INIT_HLIST_HEAD(&tun->flows[i]); 137996442e42SJason Wang 138096442e42SJason Wang tun->ageing_time = TUN_FLOW_EXPIRE; 1381e99e88a9SKees Cook timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0); 1382e99e88a9SKees Cook mod_timer(&tun->flow_gc_timer, 1383e99e88a9SKees Cook round_jiffies_up(jiffies + tun->ageing_time)); 138496442e42SJason Wang } 138596442e42SJason Wang 138696442e42SJason Wang static void tun_flow_uninit(struct tun_struct *tun) 138796442e42SJason Wang { 138896442e42SJason Wang del_timer_sync(&tun->flow_gc_timer); 138996442e42SJason Wang tun_flow_flush(tun); 139096442e42SJason 
Wang } 139196442e42SJason Wang 139291572088SJarod Wilson #define MIN_MTU 68 139391572088SJarod Wilson #define MAX_MTU 65535 139491572088SJarod Wilson 13951da177e4SLinus Torvalds /* Initialize net device. */ 13961da177e4SLinus Torvalds static void tun_net_init(struct net_device *dev) 13971da177e4SLinus Torvalds { 13981da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 13991da177e4SLinus Torvalds 14001da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 140140630b82SMichael S. Tsirkin case IFF_TUN: 1402758e43b7SStephen Hemminger dev->netdev_ops = &tun_netdev_ops; 1403758e43b7SStephen Hemminger 14041da177e4SLinus Torvalds /* Point-to-Point TUN Device */ 14051da177e4SLinus Torvalds dev->hard_header_len = 0; 14061da177e4SLinus Torvalds dev->addr_len = 0; 14071da177e4SLinus Torvalds dev->mtu = 1500; 14081da177e4SLinus Torvalds 14091da177e4SLinus Torvalds /* Zero header length */ 14101da177e4SLinus Torvalds dev->type = ARPHRD_NONE; 14111da177e4SLinus Torvalds dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 14121da177e4SLinus Torvalds break; 14131da177e4SLinus Torvalds 141440630b82SMichael S. Tsirkin case IFF_TAP: 14157a0a9608SKusanagi Kouichi dev->netdev_ops = &tap_netdev_ops; 14161da177e4SLinus Torvalds /* Ethernet TAP Device */ 14171da177e4SLinus Torvalds ether_setup(dev); 1418550fd08cSNeil Horman dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1419a676847bSstephen hemminger dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 142036226a8dSBrian Braunstein 1421f2cedb63SDanny Kukawka eth_hw_addr_random(dev); 142236226a8dSBrian Braunstein 14231da177e4SLinus Torvalds break; 14241da177e4SLinus Torvalds } 142591572088SJarod Wilson 142691572088SJarod Wilson dev->min_mtu = MIN_MTU; 142791572088SJarod Wilson dev->max_mtu = MAX_MTU - dev->hard_header_len; 14281da177e4SLinus Torvalds } 14291da177e4SLinus Torvalds 14301da177e4SLinus Torvalds /* Character device part */ 14311da177e4SLinus Torvalds 14321da177e4SLinus Torvalds /* Poll */ 1433afc9a42bSAl Viro static __poll_t tun_chr_poll(struct file *file, poll_table *wait) 14341da177e4SLinus Torvalds { 1435b2430de3SEric W. Biederman struct tun_file *tfile = file->private_data; 14369484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 14373c8a9c63SMariusz Kozlowski struct sock *sk; 1438afc9a42bSAl Viro __poll_t mask = 0; 14391da177e4SLinus Torvalds 14401da177e4SLinus Torvalds if (!tun) 1441a9a08845SLinus Torvalds return EPOLLERR; 14421da177e4SLinus Torvalds 144354f968d6SJason Wang sk = tfile->socket.sk; 14443c8a9c63SMariusz Kozlowski 14456b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_chr_poll\n"); 14461da177e4SLinus Torvalds 14479e641bdcSXi Wang poll_wait(file, sk_sleep(sk), wait); 14481da177e4SLinus Torvalds 14495990a305SJason Wang if (!ptr_ring_empty(&tfile->tx_ring)) 1450a9a08845SLinus Torvalds mask |= EPOLLIN | EPOLLRDNORM; 14511da177e4SLinus Torvalds 1452b20e2d54SHannes Frederic Sowa if (tun->dev->flags & IFF_UP && 1453b20e2d54SHannes Frederic Sowa (sock_writeable(sk) || 14549cd3e072SEric Dumazet (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 1455b20e2d54SHannes Frederic Sowa sock_writeable(sk)))) 1456a9a08845SLinus Torvalds mask |= EPOLLOUT | EPOLLWRNORM; 145733dccbb0SHerbert Xu 1458c70f1829SEric W. Biederman if (tun->dev->reg_state != NETREG_REGISTERED) 1459a9a08845SLinus Torvalds mask = EPOLLERR; 1460c70f1829SEric W. Biederman 1461631ab46bSEric W. 
Biederman tun_put(tun); 14621da177e4SLinus Torvalds return mask; 14631da177e4SLinus Torvalds } 14641da177e4SLinus Torvalds 146590e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, 146690e33d45SPetar Penkov size_t len, 146790e33d45SPetar Penkov const struct iov_iter *it) 146890e33d45SPetar Penkov { 146990e33d45SPetar Penkov struct sk_buff *skb; 147090e33d45SPetar Penkov size_t linear; 147190e33d45SPetar Penkov int err; 147290e33d45SPetar Penkov int i; 147390e33d45SPetar Penkov 147490e33d45SPetar Penkov if (it->nr_segs > MAX_SKB_FRAGS + 1) 147590e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 147690e33d45SPetar Penkov 147790e33d45SPetar Penkov local_bh_disable(); 147890e33d45SPetar Penkov skb = napi_get_frags(&tfile->napi); 147990e33d45SPetar Penkov local_bh_enable(); 148090e33d45SPetar Penkov if (!skb) 148190e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 148290e33d45SPetar Penkov 148390e33d45SPetar Penkov linear = iov_iter_single_seg_count(it); 148490e33d45SPetar Penkov err = __skb_grow(skb, linear); 148590e33d45SPetar Penkov if (err) 148690e33d45SPetar Penkov goto free; 148790e33d45SPetar Penkov 148890e33d45SPetar Penkov skb->len = len; 148990e33d45SPetar Penkov skb->data_len = len - linear; 149090e33d45SPetar Penkov skb->truesize += skb->data_len; 149190e33d45SPetar Penkov 149290e33d45SPetar Penkov for (i = 1; i < it->nr_segs; i++) { 149390e33d45SPetar Penkov size_t fragsz = it->iov[i].iov_len; 149490e33d45SPetar Penkov unsigned long offset; 149590e33d45SPetar Penkov struct page *page; 149690e33d45SPetar Penkov void *data; 149790e33d45SPetar Penkov 149890e33d45SPetar Penkov if (fragsz == 0 || fragsz > PAGE_SIZE) { 149990e33d45SPetar Penkov err = -EINVAL; 150090e33d45SPetar Penkov goto free; 150190e33d45SPetar Penkov } 150290e33d45SPetar Penkov 150390e33d45SPetar Penkov local_bh_disable(); 150490e33d45SPetar Penkov data = napi_alloc_frag(fragsz); 150590e33d45SPetar Penkov local_bh_enable(); 150690e33d45SPetar Penkov if (!data) { 150790e33d45SPetar Penkov err = -ENOMEM; 150890e33d45SPetar Penkov goto free; 150990e33d45SPetar Penkov } 151090e33d45SPetar Penkov 151190e33d45SPetar Penkov page = virt_to_head_page(data); 151290e33d45SPetar Penkov offset = data - page_address(page); 151390e33d45SPetar Penkov skb_fill_page_desc(skb, i - 1, page, offset, fragsz); 151490e33d45SPetar Penkov } 151590e33d45SPetar Penkov 151690e33d45SPetar Penkov return skb; 151790e33d45SPetar Penkov free: 151890e33d45SPetar Penkov /* frees skb and all frags allocated with napi_alloc_frag() */ 151990e33d45SPetar Penkov napi_free_frags(&tfile->napi); 152090e33d45SPetar Penkov return ERR_PTR(err); 152190e33d45SPetar Penkov } 152290e33d45SPetar Penkov 1523f42157cbSRusty Russell /* prepad is the amount to reserve at front. len is length after that. 1524f42157cbSRusty Russell * linear is a hint as to how much to copy (usually headers). */ 152554f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, 152633dccbb0SHerbert Xu size_t prepad, size_t len, 152733dccbb0SHerbert Xu size_t linear, int noblock) 1528f42157cbSRusty Russell { 152954f968d6SJason Wang struct sock *sk = tfile->socket.sk; 1530f42157cbSRusty Russell struct sk_buff *skb; 153133dccbb0SHerbert Xu int err; 1532f42157cbSRusty Russell 1533f42157cbSRusty Russell /* Under a page? Don't bother with paged skb. 
*/ 15340eca93bcSHerbert Xu if (prepad + len < PAGE_SIZE || !linear) 153533dccbb0SHerbert Xu linear = len; 1536f42157cbSRusty Russell 153733dccbb0SHerbert Xu skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 153828d64271SEric Dumazet &err, 0); 1539f42157cbSRusty Russell if (!skb) 154033dccbb0SHerbert Xu return ERR_PTR(err); 1541f42157cbSRusty Russell 1542f42157cbSRusty Russell skb_reserve(skb, prepad); 1543f42157cbSRusty Russell skb_put(skb, linear); 154433dccbb0SHerbert Xu skb->data_len = len - linear; 154533dccbb0SHerbert Xu skb->len += len - linear; 1546f42157cbSRusty Russell 1547f42157cbSRusty Russell return skb; 1548f42157cbSRusty Russell } 1549f42157cbSRusty Russell 15505503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, 15515503fcecSJason Wang struct sk_buff *skb, int more) 15525503fcecSJason Wang { 15535503fcecSJason Wang struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 15545503fcecSJason Wang struct sk_buff_head process_queue; 15555503fcecSJason Wang u32 rx_batched = tun->rx_batched; 15565503fcecSJason Wang bool rcv = false; 15575503fcecSJason Wang 15585503fcecSJason Wang if (!rx_batched || (!more && skb_queue_empty(queue))) { 15595503fcecSJason Wang local_bh_disable(); 15605503fcecSJason Wang netif_receive_skb(skb); 15615503fcecSJason Wang local_bh_enable(); 15625503fcecSJason Wang return; 15635503fcecSJason Wang } 15645503fcecSJason Wang 15655503fcecSJason Wang spin_lock(&queue->lock); 15665503fcecSJason Wang if (!more || skb_queue_len(queue) == rx_batched) { 15675503fcecSJason Wang __skb_queue_head_init(&process_queue); 15685503fcecSJason Wang skb_queue_splice_tail_init(queue, &process_queue); 15695503fcecSJason Wang rcv = true; 15705503fcecSJason Wang } else { 15715503fcecSJason Wang __skb_queue_tail(queue, skb); 15725503fcecSJason Wang } 15735503fcecSJason Wang spin_unlock(&queue->lock); 15745503fcecSJason Wang 15755503fcecSJason Wang if (rcv) { 15765503fcecSJason Wang struct sk_buff *nskb; 15775503fcecSJason Wang 15785503fcecSJason Wang local_bh_disable(); 15795503fcecSJason Wang while ((nskb = __skb_dequeue(&process_queue))) 15805503fcecSJason Wang netif_receive_skb(nskb); 15815503fcecSJason Wang netif_receive_skb(skb); 15825503fcecSJason Wang local_bh_enable(); 15835503fcecSJason Wang } 15845503fcecSJason Wang } 15855503fcecSJason Wang 158666ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, 158766ccbc9cSJason Wang int len, int noblock, bool zerocopy) 158866ccbc9cSJason Wang { 158966ccbc9cSJason Wang if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 159066ccbc9cSJason Wang return false; 159166ccbc9cSJason Wang 159266ccbc9cSJason Wang if (tfile->socket.sk->sk_sndbuf != INT_MAX) 159366ccbc9cSJason Wang return false; 159466ccbc9cSJason Wang 159566ccbc9cSJason Wang if (!noblock) 159666ccbc9cSJason Wang return false; 159766ccbc9cSJason Wang 159866ccbc9cSJason Wang if (zerocopy) 159966ccbc9cSJason Wang return false; 160066ccbc9cSJason Wang 160166ccbc9cSJason Wang if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + 160266ccbc9cSJason Wang SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) 160366ccbc9cSJason Wang return false; 160466ccbc9cSJason Wang 160566ccbc9cSJason Wang return true; 160666ccbc9cSJason Wang } 160766ccbc9cSJason Wang 1608761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun, 1609761876c8SJason Wang struct tun_file *tfile, 161066ccbc9cSJason Wang struct iov_iter *from, 1611761876c8SJason Wang struct virtio_net_hdr *hdr, 
16121cfe6e93SJason Wang int len, int *skb_xdp) 161366ccbc9cSJason Wang { 16140bbd7dadSEric Dumazet struct page_frag *alloc_frag = ¤t->task_frag; 161566ccbc9cSJason Wang struct sk_buff *skb; 1616761876c8SJason Wang struct bpf_prog *xdp_prog; 16177df13219SJason Wang int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1618761876c8SJason Wang unsigned int delta = 0; 161966ccbc9cSJason Wang char *buf; 162066ccbc9cSJason Wang size_t copied; 1621761876c8SJason Wang bool xdp_xmit = false; 16227df13219SJason Wang int err, pad = TUN_RX_PAD; 16237df13219SJason Wang 16247df13219SJason Wang rcu_read_lock(); 16257df13219SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 16267df13219SJason Wang if (xdp_prog) 16277df13219SJason Wang pad += TUN_HEADROOM; 16287df13219SJason Wang buflen += SKB_DATA_ALIGN(len + pad); 16297df13219SJason Wang rcu_read_unlock(); 163066ccbc9cSJason Wang 163163b9ab65SJason Wang alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES); 163266ccbc9cSJason Wang if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) 163366ccbc9cSJason Wang return ERR_PTR(-ENOMEM); 163466ccbc9cSJason Wang 163566ccbc9cSJason Wang buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 163666ccbc9cSJason Wang copied = copy_page_from_iter(alloc_frag->page, 16377df13219SJason Wang alloc_frag->offset + pad, 163866ccbc9cSJason Wang len, from); 163966ccbc9cSJason Wang if (copied != len) 164066ccbc9cSJason Wang return ERR_PTR(-EFAULT); 164166ccbc9cSJason Wang 16427df13219SJason Wang /* There's a small window that XDP may be set after the check 16437df13219SJason Wang * of xdp_prog above, this should be rare and for simplicity 16447df13219SJason Wang * we do XDP on skb in case the headroom is not enough. 16457df13219SJason Wang */ 16467df13219SJason Wang if (hdr->gso_type || !xdp_prog) 16471cfe6e93SJason Wang *skb_xdp = 1; 1648761876c8SJason Wang else 16491cfe6e93SJason Wang *skb_xdp = 0; 165066ccbc9cSJason Wang 1651761876c8SJason Wang rcu_read_lock(); 1652761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 16531cfe6e93SJason Wang if (xdp_prog && !*skb_xdp) { 1654761876c8SJason Wang struct xdp_buff xdp; 1655761876c8SJason Wang void *orig_data; 1656761876c8SJason Wang u32 act; 1657761876c8SJason Wang 1658761876c8SJason Wang xdp.data_hard_start = buf; 16597df13219SJason Wang xdp.data = buf + pad; 1660de8f3a83SDaniel Borkmann xdp_set_data_meta_invalid(&xdp); 1661761876c8SJason Wang xdp.data_end = xdp.data + len; 16628bf5c4eeSJesper Dangaard Brouer xdp.rxq = &tfile->xdp_rxq; 1663761876c8SJason Wang orig_data = xdp.data; 1664761876c8SJason Wang act = bpf_prog_run_xdp(xdp_prog, &xdp); 1665761876c8SJason Wang 1666761876c8SJason Wang switch (act) { 1667761876c8SJason Wang case XDP_REDIRECT: 1668761876c8SJason Wang get_page(alloc_frag->page); 1669761876c8SJason Wang alloc_frag->offset += buflen; 1670762c330dSJason Wang ++tfile->xdp_pending_pkts; 1671761876c8SJason Wang err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); 1672761876c8SJason Wang if (err) 1673761876c8SJason Wang goto err_redirect; 1674654d5738SXin Long rcu_read_unlock(); 1675761876c8SJason Wang return NULL; 1676761876c8SJason Wang case XDP_TX: 1677761876c8SJason Wang xdp_xmit = true; 1678761876c8SJason Wang /* fall through */ 1679761876c8SJason Wang case XDP_PASS: 1680761876c8SJason Wang delta = orig_data - xdp.data; 1681761876c8SJason Wang break; 1682761876c8SJason Wang default: 1683761876c8SJason Wang bpf_warn_invalid_xdp_action(act); 1684761876c8SJason Wang /* fall through */ 1685761876c8SJason 
Wang case XDP_ABORTED: 1686761876c8SJason Wang trace_xdp_exception(tun->dev, xdp_prog, act); 1687761876c8SJason Wang /* fall through */ 1688761876c8SJason Wang case XDP_DROP: 1689761876c8SJason Wang goto err_xdp; 1690761876c8SJason Wang } 1691761876c8SJason Wang } 1692761876c8SJason Wang 1693761876c8SJason Wang skb = build_skb(buf, buflen); 1694761876c8SJason Wang if (!skb) { 1695761876c8SJason Wang rcu_read_unlock(); 1696761876c8SJason Wang return ERR_PTR(-ENOMEM); 1697761876c8SJason Wang } 1698761876c8SJason Wang 16997df13219SJason Wang skb_reserve(skb, pad - delta); 1700761876c8SJason Wang skb_put(skb, len + delta); 170166ccbc9cSJason Wang get_page(alloc_frag->page); 170266ccbc9cSJason Wang alloc_frag->offset += buflen; 170366ccbc9cSJason Wang 1704761876c8SJason Wang if (xdp_xmit) { 1705761876c8SJason Wang skb->dev = tun->dev; 1706761876c8SJason Wang generic_xdp_tx(skb, xdp_prog); 1707654d5738SXin Long rcu_read_unlock(); 1708761876c8SJason Wang return NULL; 1709761876c8SJason Wang } 1710761876c8SJason Wang 1711761876c8SJason Wang rcu_read_unlock(); 1712761876c8SJason Wang 171366ccbc9cSJason Wang return skb; 1714761876c8SJason Wang 1715761876c8SJason Wang err_redirect: 1716761876c8SJason Wang put_page(alloc_frag->page); 1717761876c8SJason Wang err_xdp: 1718761876c8SJason Wang rcu_read_unlock(); 1719761876c8SJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped); 1720761876c8SJason Wang return NULL; 172166ccbc9cSJason Wang } 172266ccbc9cSJason Wang 17231da177e4SLinus Torvalds /* Get packet from user space buffer */ 172454f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, 1725f5ff53b4SAl Viro void *msg_control, struct iov_iter *from, 17265503fcecSJason Wang int noblock, bool more) 17271da177e4SLinus Torvalds { 172809640e63SHarvey Harrison struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; 17291da177e4SLinus Torvalds struct sk_buff *skb; 1730f5ff53b4SAl Viro size_t total_len = iov_iter_count(from); 1731eaea34b2SPaolo Abeni size_t len = total_len, align = tun->align, linear; 1732f43798c2SRusty Russell struct virtio_net_hdr gso = { 0 }; 1733608b9977SPaolo Abeni struct tun_pcpu_stats *stats; 173496f8d9ecSJason Wang int good_linear; 17350690899bSMichael S. Tsirkin int copylen; 17360690899bSMichael S. Tsirkin bool zerocopy = false; 17370690899bSMichael S. Tsirkin int err; 173896f84061SJason Wang u32 rxhash = 0; 17391cfe6e93SJason Wang int skb_xdp = 1; 174090e33d45SPetar Penkov bool frags = tun_napi_frags_enabled(tun); 17411da177e4SLinus Torvalds 17421bd4978aSEric Dumazet if (!(tun->dev->flags & IFF_UP)) 17431bd4978aSEric Dumazet return -EIO; 17441bd4978aSEric Dumazet 174540630b82SMichael S. Tsirkin if (!(tun->flags & IFF_NO_PI)) { 174615718ea0SDan Carpenter if (len < sizeof(pi)) 17471da177e4SLinus Torvalds return -EINVAL; 174815718ea0SDan Carpenter len -= sizeof(pi); 17491da177e4SLinus Torvalds 1750cbbd26b8SAl Viro if (!copy_from_iter_full(&pi, sizeof(pi), from)) 17511da177e4SLinus Torvalds return -EFAULT; 17521da177e4SLinus Torvalds } 17531da177e4SLinus Torvalds 175440630b82SMichael S. 
Tsirkin if (tun->flags & IFF_VNET_HDR) { 1755e1edab87SWillem de Bruijn int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 1756e1edab87SWillem de Bruijn 1757e1edab87SWillem de Bruijn if (len < vnet_hdr_sz) 1758f43798c2SRusty Russell return -EINVAL; 1759e1edab87SWillem de Bruijn len -= vnet_hdr_sz; 1760f43798c2SRusty Russell 1761cbbd26b8SAl Viro if (!copy_from_iter_full(&gso, sizeof(gso), from)) 1762f43798c2SRusty Russell return -EFAULT; 1763f43798c2SRusty Russell 17644909122fSHerbert Xu if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 176556f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len)) 176656f0dcc5SMichael S. Tsirkin gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2); 17674909122fSHerbert Xu 176856f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > len) 1769f43798c2SRusty Russell return -EINVAL; 1770e1edab87SWillem de Bruijn iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); 1771f43798c2SRusty Russell } 1772f43798c2SRusty Russell 177340630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { 1774a504b86eSstephen hemminger align += NET_IP_ALIGN; 17750eca93bcSHerbert Xu if (unlikely(len < ETH_HLEN || 177656f0dcc5SMichael S. Tsirkin (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN))) 1777e01bf1c8SRusty Russell return -EINVAL; 1778e01bf1c8SRusty Russell } 17791da177e4SLinus Torvalds 178096f8d9ecSJason Wang good_linear = SKB_MAX_HEAD(align); 178196f8d9ecSJason Wang 178288529176SJason Wang if (msg_control) { 1783f5ff53b4SAl Viro struct iov_iter i = *from; 1784f5ff53b4SAl Viro 178588529176SJason Wang /* There are 256 bytes to be copied in skb, so there is 178688529176SJason Wang * enough room for skb expand head in case it is used. 17870690899bSMichael S. Tsirkin * The rest of the buffer is mapped from userspace. 17880690899bSMichael S. Tsirkin */ 178956f0dcc5SMichael S. Tsirkin copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN; 179096f8d9ecSJason Wang if (copylen > good_linear) 179196f8d9ecSJason Wang copylen = good_linear; 17923dd5c330SJason Wang linear = copylen; 1793f5ff53b4SAl Viro iov_iter_advance(&i, copylen); 1794f5ff53b4SAl Viro if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) 179588529176SJason Wang zerocopy = true; 179688529176SJason Wang } 179788529176SJason Wang 179890e33d45SPetar Penkov if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { 17991cfe6e93SJason Wang /* For the packet that is not easy to be processed 18001cfe6e93SJason Wang * (e.g gso or jumbo packet), we will do it at after 18011cfe6e93SJason Wang * skb was created with generic XDP routine. 18021cfe6e93SJason Wang */ 18031cfe6e93SJason Wang skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); 180466ccbc9cSJason Wang if (IS_ERR(skb)) { 180566ccbc9cSJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped); 180666ccbc9cSJason Wang return PTR_ERR(skb); 180766ccbc9cSJason Wang } 1808761876c8SJason Wang if (!skb) 1809761876c8SJason Wang return total_len; 181066ccbc9cSJason Wang } else { 181188529176SJason Wang if (!zerocopy) { 18120690899bSMichael S. Tsirkin copylen = len; 181356f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) 181496f8d9ecSJason Wang linear = good_linear; 181596f8d9ecSJason Wang else 181656f0dcc5SMichael S. Tsirkin linear = tun16_to_cpu(tun, gso.hdr_len); 18173dd5c330SJason Wang } 18180690899bSMichael S. 
Tsirkin 181990e33d45SPetar Penkov if (frags) { 182090e33d45SPetar Penkov mutex_lock(&tfile->napi_mutex); 182190e33d45SPetar Penkov skb = tun_napi_alloc_frags(tfile, copylen, from); 182290e33d45SPetar Penkov /* tun_napi_alloc_frags() enforces a layout for the skb. 182390e33d45SPetar Penkov * If zerocopy is enabled, then this layout will be 182490e33d45SPetar Penkov * overwritten by zerocopy_sg_from_iter(). 182590e33d45SPetar Penkov */ 182690e33d45SPetar Penkov zerocopy = false; 182790e33d45SPetar Penkov } else { 182890e33d45SPetar Penkov skb = tun_alloc_skb(tfile, align, copylen, linear, 182990e33d45SPetar Penkov noblock); 183090e33d45SPetar Penkov } 183190e33d45SPetar Penkov 183233dccbb0SHerbert Xu if (IS_ERR(skb)) { 183333dccbb0SHerbert Xu if (PTR_ERR(skb) != -EAGAIN) 1834608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 183590e33d45SPetar Penkov if (frags) 183690e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 183733dccbb0SHerbert Xu return PTR_ERR(skb); 18381da177e4SLinus Torvalds } 18391da177e4SLinus Torvalds 18400690899bSMichael S. Tsirkin if (zerocopy) 1841f5ff53b4SAl Viro err = zerocopy_sg_from_iter(skb, from); 1842af1cc7a2SJason Wang else 1843f5ff53b4SAl Viro err = skb_copy_datagram_from_iter(skb, 0, from, len); 18440690899bSMichael S. Tsirkin 18450690899bSMichael S. Tsirkin if (err) { 1846608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 18478f22757eSDave Jones kfree_skb(skb); 184890e33d45SPetar Penkov if (frags) { 184990e33d45SPetar Penkov tfile->napi.skb = NULL; 185090e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 185190e33d45SPetar Penkov } 185290e33d45SPetar Penkov 18531da177e4SLinus Torvalds return -EFAULT; 18548f22757eSDave Jones } 185566ccbc9cSJason Wang } 18561da177e4SLinus Torvalds 18573e9e40e7SJarno Rajahalme if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { 1858df10db98SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_frame_errors); 1859df10db98SPaolo Abeni kfree_skb(skb); 186090e33d45SPetar Penkov if (frags) { 186190e33d45SPetar Penkov tfile->napi.skb = NULL; 186290e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 186390e33d45SPetar Penkov } 186490e33d45SPetar Penkov 1865df10db98SPaolo Abeni return -EINVAL; 1866df10db98SPaolo Abeni } 1867df10db98SPaolo Abeni 18681da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 186940630b82SMichael S. Tsirkin case IFF_TUN: 187040630b82SMichael S. Tsirkin if (tun->flags & IFF_NO_PI) { 18712580c4c1SAlexander Potapenko u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; 18722580c4c1SAlexander Potapenko 18732580c4c1SAlexander Potapenko switch (ip_version) { 18742580c4c1SAlexander Potapenko case 4: 1875f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IP); 1876f09f7ee2SAng Way Chuang break; 18772580c4c1SAlexander Potapenko case 6: 1878f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IPV6); 1879f09f7ee2SAng Way Chuang break; 1880f09f7ee2SAng Way Chuang default: 1881608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 1882f09f7ee2SAng Way Chuang kfree_skb(skb); 1883f09f7ee2SAng Way Chuang return -EINVAL; 1884f09f7ee2SAng Way Chuang } 1885f09f7ee2SAng Way Chuang } 1886f09f7ee2SAng Way Chuang 1887459a98edSArnaldo Carvalho de Melo skb_reset_mac_header(skb); 18881da177e4SLinus Torvalds skb->protocol = pi.proto; 18894c13eb66SArnaldo Carvalho de Melo skb->dev = tun->dev; 18901da177e4SLinus Torvalds break; 189140630b82SMichael S. 
Tsirkin case IFF_TAP: 189290e33d45SPetar Penkov if (!frags) 18931da177e4SLinus Torvalds skb->protocol = eth_type_trans(skb, tun->dev); 18941da177e4SLinus Torvalds break; 18956403eab1SJoe Perches } 18961da177e4SLinus Torvalds 18970690899bSMichael S. Tsirkin /* copy skb_ubuf_info for callback when skb has no error */ 18980690899bSMichael S. Tsirkin if (zerocopy) { 18990690899bSMichael S. Tsirkin skb_shinfo(skb)->destructor_arg = msg_control; 19000690899bSMichael S. Tsirkin skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1901c9af6db4SPravin B Shelar skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1902af1cc7a2SJason Wang } else if (msg_control) { 1903af1cc7a2SJason Wang struct ubuf_info *uarg = msg_control; 1904af1cc7a2SJason Wang uarg->callback(uarg, false); 19050690899bSMichael S. Tsirkin } 19060690899bSMichael S. Tsirkin 190772f65107SVlad Yasevich skb_reset_network_header(skb); 190840893fd0SJason Wang skb_probe_transport_header(skb, 0); 190938502af7SJason Wang 19101cfe6e93SJason Wang if (skb_xdp) { 1911761876c8SJason Wang struct bpf_prog *xdp_prog; 1912761876c8SJason Wang int ret; 1913761876c8SJason Wang 1914761876c8SJason Wang rcu_read_lock(); 1915761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 1916761876c8SJason Wang if (xdp_prog) { 1917761876c8SJason Wang ret = do_xdp_generic(xdp_prog, skb); 1918761876c8SJason Wang if (ret != XDP_PASS) { 1919761876c8SJason Wang rcu_read_unlock(); 1920761876c8SJason Wang return total_len; 1921761876c8SJason Wang } 1922761876c8SJason Wang } 1923761876c8SJason Wang rcu_read_unlock(); 1924761876c8SJason Wang } 1925761876c8SJason Wang 192696f84061SJason Wang rcu_read_lock(); 192796f84061SJason Wang if (!rcu_dereference(tun->steering_prog)) 1928feec084aSJason Wang rxhash = __skb_get_hash_symmetric(skb); 192996f84061SJason Wang rcu_read_unlock(); 193094317099SPetar Penkov 193190e33d45SPetar Penkov if (frags) { 193290e33d45SPetar Penkov /* Exercise flow dissector code path. 
*/ 193390e33d45SPetar Penkov u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb)); 193490e33d45SPetar Penkov 1935010f245bSEric Dumazet if (unlikely(headlen > skb_headlen(skb))) { 193690e33d45SPetar Penkov this_cpu_inc(tun->pcpu_stats->rx_dropped); 193790e33d45SPetar Penkov napi_free_frags(&tfile->napi); 193890e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 193990e33d45SPetar Penkov WARN_ON(1); 194090e33d45SPetar Penkov return -ENOMEM; 194190e33d45SPetar Penkov } 194290e33d45SPetar Penkov 194390e33d45SPetar Penkov local_bh_disable(); 194490e33d45SPetar Penkov napi_gro_frags(&tfile->napi); 194590e33d45SPetar Penkov local_bh_enable(); 194690e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 1947aec72f33SEric Dumazet } else if (tfile->napi_enabled) { 194894317099SPetar Penkov struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 194994317099SPetar Penkov int queue_len; 195094317099SPetar Penkov 195194317099SPetar Penkov spin_lock_bh(&queue->lock); 195294317099SPetar Penkov __skb_queue_tail(queue, skb); 195394317099SPetar Penkov queue_len = skb_queue_len(queue); 195494317099SPetar Penkov spin_unlock(&queue->lock); 195594317099SPetar Penkov 195694317099SPetar Penkov if (!more || queue_len > NAPI_POLL_WEIGHT) 195794317099SPetar Penkov napi_schedule(&tfile->napi); 195894317099SPetar Penkov 195994317099SPetar Penkov local_bh_enable(); 196094317099SPetar Penkov } else if (!IS_ENABLED(CONFIG_4KSTACKS)) { 19615503fcecSJason Wang tun_rx_batched(tun, tfile, skb, more); 196294317099SPetar Penkov } else { 19631da177e4SLinus Torvalds netif_rx_ni(skb); 196494317099SPetar Penkov } 19651da177e4SLinus Torvalds 1966608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats); 1967608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp); 1968608b9977SPaolo Abeni stats->rx_packets++; 1969608b9977SPaolo Abeni stats->rx_bytes += len; 1970608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp); 1971608b9977SPaolo Abeni put_cpu_ptr(stats); 19721da177e4SLinus Torvalds 197396f84061SJason Wang if (rxhash) 19749e85722dSJason Wang tun_flow_update(tun, rxhash, tfile); 197596f84061SJason Wang 19760690899bSMichael S. Tsirkin return total_len; 19771da177e4SLinus Torvalds } 19781da177e4SLinus Torvalds 1979f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) 19801da177e4SLinus Torvalds { 198133dccbb0SHerbert Xu struct file *file = iocb->ki_filp; 198254f968d6SJason Wang struct tun_file *tfile = file->private_data; 19839484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 1984631ab46bSEric W. Biederman ssize_t result; 19851da177e4SLinus Torvalds 19861da177e4SLinus Torvalds if (!tun) 19871da177e4SLinus Torvalds return -EBADFD; 19881da177e4SLinus Torvalds 19895503fcecSJason Wang result = tun_get_user(tun, tfile, NULL, from, 19905503fcecSJason Wang file->f_flags & O_NONBLOCK, false); 1991631ab46bSEric W. Biederman 1992762c330dSJason Wang if (tfile->xdp_pending_pkts) { 1993762c330dSJason Wang tfile->xdp_pending_pkts = 0; 1994762c330dSJason Wang xdp_do_flush_map(); 1995762c330dSJason Wang } 1996762c330dSJason Wang 1997631ab46bSEric W. Biederman tun_put(tun); 1998631ab46bSEric W. 
Biederman return result; 19991da177e4SLinus Torvalds } 20001da177e4SLinus Torvalds 2001fc72d1d5SJason Wang static ssize_t tun_put_user_xdp(struct tun_struct *tun, 2002fc72d1d5SJason Wang struct tun_file *tfile, 2003fc72d1d5SJason Wang struct xdp_buff *xdp, 2004fc72d1d5SJason Wang struct iov_iter *iter) 2005fc72d1d5SJason Wang { 2006fc72d1d5SJason Wang int vnet_hdr_sz = 0; 2007fc72d1d5SJason Wang size_t size = xdp->data_end - xdp->data; 2008fc72d1d5SJason Wang struct tun_pcpu_stats *stats; 2009fc72d1d5SJason Wang size_t ret; 2010fc72d1d5SJason Wang 2011fc72d1d5SJason Wang if (tun->flags & IFF_VNET_HDR) { 2012fc72d1d5SJason Wang struct virtio_net_hdr gso = { 0 }; 2013fc72d1d5SJason Wang 2014fc72d1d5SJason Wang vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 2015fc72d1d5SJason Wang if (unlikely(iov_iter_count(iter) < vnet_hdr_sz)) 2016fc72d1d5SJason Wang return -EINVAL; 2017fc72d1d5SJason Wang if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) != 2018fc72d1d5SJason Wang sizeof(gso))) 2019fc72d1d5SJason Wang return -EFAULT; 2020fc72d1d5SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 2021fc72d1d5SJason Wang } 2022fc72d1d5SJason Wang 2023fc72d1d5SJason Wang ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz; 2024fc72d1d5SJason Wang 2025fc72d1d5SJason Wang stats = get_cpu_ptr(tun->pcpu_stats); 2026fc72d1d5SJason Wang u64_stats_update_begin(&stats->syncp); 2027fc72d1d5SJason Wang stats->tx_packets++; 2028fc72d1d5SJason Wang stats->tx_bytes += ret; 2029fc72d1d5SJason Wang u64_stats_update_end(&stats->syncp); 2030fc72d1d5SJason Wang put_cpu_ptr(tun->pcpu_stats); 2031fc72d1d5SJason Wang 2032fc72d1d5SJason Wang return ret; 2033fc72d1d5SJason Wang } 2034fc72d1d5SJason Wang 20351da177e4SLinus Torvalds /* Put packet to the user space buffer */ 20366f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun, 203754f968d6SJason Wang struct tun_file *tfile, 20381da177e4SLinus Torvalds struct sk_buff *skb, 2039e0b46d0eSHerbert Xu struct iov_iter *iter) 20401da177e4SLinus Torvalds { 20411da177e4SLinus Torvalds struct tun_pi pi = { 0, skb->protocol }; 2042608b9977SPaolo Abeni struct tun_pcpu_stats *stats; 2043e0b46d0eSHerbert Xu ssize_t total; 20448c847d25SJason Wang int vlan_offset = 0; 2045a8f9bfdfSHerbert Xu int vlan_hlen = 0; 20462eb783c4SHerbert Xu int vnet_hdr_sz = 0; 2047a8f9bfdfSHerbert Xu 2048df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) 2049a8f9bfdfSHerbert Xu vlan_hlen = VLAN_HLEN; 20501da177e4SLinus Torvalds 205140630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR) 2052e1edab87SWillem de Bruijn vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 20531da177e4SLinus Torvalds 2054e0b46d0eSHerbert Xu total = skb->len + vlan_hlen + vnet_hdr_sz; 2055e0b46d0eSHerbert Xu 205640630b82SMichael S. 
Tsirkin if (!(tun->flags & IFF_NO_PI)) { 2057e0b46d0eSHerbert Xu if (iov_iter_count(iter) < sizeof(pi)) 20581da177e4SLinus Torvalds return -EINVAL; 20591da177e4SLinus Torvalds 2060e0b46d0eSHerbert Xu total += sizeof(pi); 2061e0b46d0eSHerbert Xu if (iov_iter_count(iter) < total) { 20621da177e4SLinus Torvalds /* Packet will be striped */ 20631da177e4SLinus Torvalds pi.flags |= TUN_PKT_STRIP; 20641da177e4SLinus Torvalds } 20651da177e4SLinus Torvalds 2066e0b46d0eSHerbert Xu if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi)) 20671da177e4SLinus Torvalds return -EFAULT; 20681da177e4SLinus Torvalds } 20691da177e4SLinus Torvalds 20702eb783c4SHerbert Xu if (vnet_hdr_sz) { 20719403cd7cSJarno Rajahalme struct virtio_net_hdr gso; 207234166093SMike Rapoport 2073e0b46d0eSHerbert Xu if (iov_iter_count(iter) < vnet_hdr_sz) 2074f43798c2SRusty Russell return -EINVAL; 2075f43798c2SRusty Russell 20763e9e40e7SJarno Rajahalme if (virtio_net_hdr_from_skb(skb, &gso, 20776391a448SJason Wang tun_is_little_endian(tun), true)) { 2078f43798c2SRusty Russell struct skb_shared_info *sinfo = skb_shinfo(skb); 20796b8a66eeSJoe Perches pr_err("unexpected GSO type: " 2080ef3db4a5SMichael S. Tsirkin "0x%x, gso_size %d, hdr_len %d\n", 208156f0dcc5SMichael S. Tsirkin sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), 208256f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.hdr_len)); 2083ef3db4a5SMichael S. Tsirkin print_hex_dump(KERN_ERR, "tun: ", 2084ef3db4a5SMichael S. Tsirkin DUMP_PREFIX_NONE, 2085ef3db4a5SMichael S. Tsirkin 16, 1, skb->head, 208656f0dcc5SMichael S. Tsirkin min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); 2087ef3db4a5SMichael S. Tsirkin WARN_ON_ONCE(1); 2088ef3db4a5SMichael S. Tsirkin return -EINVAL; 2089ef3db4a5SMichael S. Tsirkin } 2090f43798c2SRusty Russell 2091e0b46d0eSHerbert Xu if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) 2092f43798c2SRusty Russell return -EFAULT; 20938c847d25SJason Wang 20948c847d25SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 2095f43798c2SRusty Russell } 2096f43798c2SRusty Russell 2097a8f9bfdfSHerbert Xu if (vlan_hlen) { 2098e0b46d0eSHerbert Xu int ret; 2099aff3d70aSJason Wang struct veth veth; 21001da177e4SLinus Torvalds 21016680ec68SJason Wang veth.h_vlan_proto = skb->vlan_proto; 2102df8a39deSJiri Pirko veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); 21031da177e4SLinus Torvalds 21046680ec68SJason Wang vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 21056680ec68SJason Wang 2106e0b46d0eSHerbert Xu ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); 2107e0b46d0eSHerbert Xu if (ret || !iov_iter_count(iter)) 21086680ec68SJason Wang goto done; 21096680ec68SJason Wang 2110e0b46d0eSHerbert Xu ret = copy_to_iter(&veth, sizeof(veth), iter); 2111e0b46d0eSHerbert Xu if (ret != sizeof(veth) || !iov_iter_count(iter)) 21126680ec68SJason Wang goto done; 21136680ec68SJason Wang } 21146680ec68SJason Wang 2115e0b46d0eSHerbert Xu skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); 21166680ec68SJason Wang 21176680ec68SJason Wang done: 2118608b9977SPaolo Abeni /* caller is in process context, */ 2119608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats); 2120608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp); 2121608b9977SPaolo Abeni stats->tx_packets++; 2122608b9977SPaolo Abeni stats->tx_bytes += skb->len + vlan_hlen; 2123608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp); 2124608b9977SPaolo Abeni put_cpu_ptr(tun->pcpu_stats); 21251da177e4SLinus Torvalds 21261da177e4SLinus Torvalds return total; 21271da177e4SLinus 
Torvalds } 21281da177e4SLinus Torvalds 2129fc72d1d5SJason Wang static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) 21301576d986SJason Wang { 21311576d986SJason Wang DECLARE_WAITQUEUE(wait, current); 2132fc72d1d5SJason Wang void *ptr = NULL; 2133f48cc6b2SJason Wang int error = 0; 21341576d986SJason Wang 2135fc72d1d5SJason Wang ptr = ptr_ring_consume(&tfile->tx_ring); 2136fc72d1d5SJason Wang if (ptr) 21371576d986SJason Wang goto out; 21381576d986SJason Wang if (noblock) { 2139f48cc6b2SJason Wang error = -EAGAIN; 21401576d986SJason Wang goto out; 21411576d986SJason Wang } 21421576d986SJason Wang 21431576d986SJason Wang add_wait_queue(&tfile->wq.wait, &wait); 21441576d986SJason Wang current->state = TASK_INTERRUPTIBLE; 21451576d986SJason Wang 21461576d986SJason Wang while (1) { 2147fc72d1d5SJason Wang ptr = ptr_ring_consume(&tfile->tx_ring); 2148fc72d1d5SJason Wang if (ptr) 21491576d986SJason Wang break; 21501576d986SJason Wang if (signal_pending(current)) { 2151f48cc6b2SJason Wang error = -ERESTARTSYS; 21521576d986SJason Wang break; 21531576d986SJason Wang } 21541576d986SJason Wang if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { 2155f48cc6b2SJason Wang error = -EFAULT; 21561576d986SJason Wang break; 21571576d986SJason Wang } 21581576d986SJason Wang 21591576d986SJason Wang schedule(); 21601576d986SJason Wang } 21611576d986SJason Wang 21621576d986SJason Wang current->state = TASK_RUNNING; 21631576d986SJason Wang remove_wait_queue(&tfile->wq.wait, &wait); 21641576d986SJason Wang 21651576d986SJason Wang out: 2166f48cc6b2SJason Wang *err = error; 2167fc72d1d5SJason Wang return ptr; 21681576d986SJason Wang } 21691576d986SJason Wang 217054f968d6SJason Wang static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, 21719b067034SAl Viro struct iov_iter *to, 2172fc72d1d5SJason Wang int noblock, void *ptr) 21731da177e4SLinus Torvalds { 21749b067034SAl Viro ssize_t ret; 21751576d986SJason Wang int err; 21761da177e4SLinus Torvalds 21773872baf6SRami Rosen tun_debug(KERN_INFO, tun, "tun_do_read\n"); 21781da177e4SLinus Torvalds 2179c33ee15bSWei Xu if (!iov_iter_count(to)) { 2180fc72d1d5SJason Wang tun_ptr_free(ptr); 21819b067034SAl Viro return 0; 2182c33ee15bSWei Xu } 21831da177e4SLinus Torvalds 2184fc72d1d5SJason Wang if (!ptr) { 21851576d986SJason Wang /* Read frames from ring */ 2186fc72d1d5SJason Wang ptr = tun_ring_recv(tfile, noblock, &err); 2187fc72d1d5SJason Wang if (!ptr) 2188957f094fSAlex Gartrell return err; 2189ac77cfd4SJason Wang } 2190e0b46d0eSHerbert Xu 2191fc72d1d5SJason Wang if (tun_is_xdp_buff(ptr)) { 2192fc72d1d5SJason Wang struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); 2193fc72d1d5SJason Wang 2194fc72d1d5SJason Wang ret = tun_put_user_xdp(tun, tfile, xdp, to); 2195fc72d1d5SJason Wang put_page(virt_to_head_page(xdp->data)); 2196fc72d1d5SJason Wang } else { 2197fc72d1d5SJason Wang struct sk_buff *skb = ptr; 2198fc72d1d5SJason Wang 21999b067034SAl Viro ret = tun_put_user(tun, tfile, skb, to); 2200f51a5e82SJason Wang if (unlikely(ret < 0)) 22011da177e4SLinus Torvalds kfree_skb(skb); 2202f51a5e82SJason Wang else 2203f51a5e82SJason Wang consume_skb(skb); 2204fc72d1d5SJason Wang } 22051da177e4SLinus Torvalds 220605c2828cSMichael S. Tsirkin return ret; 220705c2828cSMichael S. Tsirkin } 220805c2828cSMichael S. Tsirkin 22099b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) 221005c2828cSMichael S. Tsirkin { 221105c2828cSMichael S. Tsirkin struct file *file = iocb->ki_filp; 221205c2828cSMichael S. 
Tsirkin struct tun_file *tfile = file->private_data; 22139484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 22149b067034SAl Viro ssize_t len = iov_iter_count(to), ret; 221505c2828cSMichael S. Tsirkin 221605c2828cSMichael S. Tsirkin if (!tun) 221705c2828cSMichael S. Tsirkin return -EBADFD; 2218ac77cfd4SJason Wang ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL); 221942404c09SDavid S. Miller ret = min_t(ssize_t, ret, len); 2220d0b7da8aSZhi Yong Wu if (ret > 0) 2221d0b7da8aSZhi Yong Wu iocb->ki_pos = ret; 2222631ab46bSEric W. Biederman tun_put(tun); 22231da177e4SLinus Torvalds return ret; 22241da177e4SLinus Torvalds } 22251da177e4SLinus Torvalds 2226cd5681d7SJason Wang static void tun_prog_free(struct rcu_head *rcu) 222796f84061SJason Wang { 2228cd5681d7SJason Wang struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu); 222996f84061SJason Wang 223096f84061SJason Wang bpf_prog_destroy(prog->prog); 223196f84061SJason Wang kfree(prog); 223296f84061SJason Wang } 223396f84061SJason Wang 22349d6474e4SJason Wang static int __tun_set_ebpf(struct tun_struct *tun, 22359d6474e4SJason Wang struct tun_prog __rcu **prog_p, 223696f84061SJason Wang struct bpf_prog *prog) 223796f84061SJason Wang { 2238cd5681d7SJason Wang struct tun_prog *old, *new = NULL; 223996f84061SJason Wang 224096f84061SJason Wang if (prog) { 224196f84061SJason Wang new = kmalloc(sizeof(*new), GFP_KERNEL); 224296f84061SJason Wang if (!new) 224396f84061SJason Wang return -ENOMEM; 224496f84061SJason Wang new->prog = prog; 224596f84061SJason Wang } 224696f84061SJason Wang 2247124da8f6SJason Wang spin_lock_bh(&tun->lock); 2248cd5681d7SJason Wang old = rcu_dereference_protected(*prog_p, 2249124da8f6SJason Wang lockdep_is_held(&tun->lock)); 2250cd5681d7SJason Wang rcu_assign_pointer(*prog_p, new); 2251124da8f6SJason Wang spin_unlock_bh(&tun->lock); 225296f84061SJason Wang 225396f84061SJason Wang if (old) 2254cd5681d7SJason Wang call_rcu(&old->rcu, tun_prog_free); 225596f84061SJason Wang 225696f84061SJason Wang return 0; 225796f84061SJason Wang } 225896f84061SJason Wang 225996442e42SJason Wang static void tun_free_netdev(struct net_device *dev) 226096442e42SJason Wang { 226196442e42SJason Wang struct tun_struct *tun = netdev_priv(dev); 226296442e42SJason Wang 22634008e97fSJason Wang BUG_ON(!(list_empty(&tun->disabled))); 2264608b9977SPaolo Abeni free_percpu(tun->pcpu_stats); 226596442e42SJason Wang tun_flow_uninit(tun); 22665dbbaf2dSPaul Moore security_tun_dev_free_security(tun->security); 2267cd5681d7SJason Wang __tun_set_ebpf(tun, &tun->steering_prog, NULL); 2268aff3d70aSJason Wang __tun_set_ebpf(tun, &tun->filter_prog, NULL); 226996442e42SJason Wang } 227096442e42SJason Wang 22711da177e4SLinus Torvalds static void tun_setup(struct net_device *dev) 22721da177e4SLinus Torvalds { 22731da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 22741da177e4SLinus Torvalds 22750625c883SEric W. Biederman tun->owner = INVALID_UID; 22760625c883SEric W. Biederman tun->group = INVALID_GID; 22771da177e4SLinus Torvalds 22781da177e4SLinus Torvalds dev->ethtool_ops = &tun_ethtool_ops; 2279cf124db5SDavid S. Miller dev->needs_free_netdev = true; 2280cf124db5SDavid S. Miller dev->priv_destructor = tun_free_netdev; 2281016adb72SJason Wang /* We prefer our own queue length */ 2282016adb72SJason Wang dev->tx_queue_len = TUN_READQ_SIZE; 22831da177e4SLinus Torvalds } 22841da177e4SLinus Torvalds 2285f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting tun or tap 2286f019a7a5SEric W. 
Biederman * device with netlink. 2287f019a7a5SEric W. Biederman */ 2288a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[], 2289a8b8a889SMatthias Schiffer struct netlink_ext_ack *extack) 2290f019a7a5SEric W. Biederman { 2291f019a7a5SEric W. Biederman return -EINVAL; 2292f019a7a5SEric W. Biederman } 2293f019a7a5SEric W. Biederman 2294*1ec010e7SSabrina Dubroca static size_t tun_get_size(const struct net_device *dev) 2295*1ec010e7SSabrina Dubroca { 2296*1ec010e7SSabrina Dubroca BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t)); 2297*1ec010e7SSabrina Dubroca BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t)); 2298*1ec010e7SSabrina Dubroca 2299*1ec010e7SSabrina Dubroca return nla_total_size(sizeof(uid_t)) + /* OWNER */ 2300*1ec010e7SSabrina Dubroca nla_total_size(sizeof(gid_t)) + /* GROUP */ 2301*1ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* TYPE */ 2302*1ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* PI */ 2303*1ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* VNET_HDR */ 2304*1ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* PERSIST */ 2305*1ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */ 2306*1ec010e7SSabrina Dubroca nla_total_size(sizeof(u32)) + /* NUM_QUEUES */ 2307*1ec010e7SSabrina Dubroca nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */ 2308*1ec010e7SSabrina Dubroca 0; 2309*1ec010e7SSabrina Dubroca } 2310*1ec010e7SSabrina Dubroca 2311*1ec010e7SSabrina Dubroca static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev) 2312*1ec010e7SSabrina Dubroca { 2313*1ec010e7SSabrina Dubroca struct tun_struct *tun = netdev_priv(dev); 2314*1ec010e7SSabrina Dubroca 2315*1ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK)) 2316*1ec010e7SSabrina Dubroca goto nla_put_failure; 2317*1ec010e7SSabrina Dubroca if (uid_valid(tun->owner) && 2318*1ec010e7SSabrina Dubroca nla_put_u32(skb, IFLA_TUN_OWNER, 2319*1ec010e7SSabrina Dubroca from_kuid_munged(current_user_ns(), tun->owner))) 2320*1ec010e7SSabrina Dubroca goto nla_put_failure; 2321*1ec010e7SSabrina Dubroca if (gid_valid(tun->group) && 2322*1ec010e7SSabrina Dubroca nla_put_u32(skb, IFLA_TUN_GROUP, 2323*1ec010e7SSabrina Dubroca from_kgid_munged(current_user_ns(), tun->group))) 2324*1ec010e7SSabrina Dubroca goto nla_put_failure; 2325*1ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI))) 2326*1ec010e7SSabrina Dubroca goto nla_put_failure; 2327*1ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR))) 2328*1ec010e7SSabrina Dubroca goto nla_put_failure; 2329*1ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST))) 2330*1ec010e7SSabrina Dubroca goto nla_put_failure; 2331*1ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE, 2332*1ec010e7SSabrina Dubroca !!(tun->flags & IFF_MULTI_QUEUE))) 2333*1ec010e7SSabrina Dubroca goto nla_put_failure; 2334*1ec010e7SSabrina Dubroca if (tun->flags & IFF_MULTI_QUEUE) { 2335*1ec010e7SSabrina Dubroca if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues)) 2336*1ec010e7SSabrina Dubroca goto nla_put_failure; 2337*1ec010e7SSabrina Dubroca if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES, 2338*1ec010e7SSabrina Dubroca tun->numdisabled)) 2339*1ec010e7SSabrina Dubroca goto nla_put_failure; 2340*1ec010e7SSabrina Dubroca } 2341*1ec010e7SSabrina Dubroca 2342*1ec010e7SSabrina Dubroca return 0; 2343*1ec010e7SSabrina Dubroca 2344*1ec010e7SSabrina Dubroca nla_put_failure: 
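	/* One of the nla_put*() calls above ran out of skb tailroom.
	 * tun_get_size() reserves room for every attribute emitted
	 * here, so this path is not expected in practice; -EMSGSIZE
	 * just tells the rtnetlink core that the message did not fit.
	 */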
2345*1ec010e7SSabrina Dubroca return -EMSGSIZE; 2346*1ec010e7SSabrina Dubroca } 2347*1ec010e7SSabrina Dubroca 2348f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = { 2349f019a7a5SEric W. Biederman .kind = DRV_NAME, 2350f019a7a5SEric W. Biederman .priv_size = sizeof(struct tun_struct), 2351f019a7a5SEric W. Biederman .setup = tun_setup, 2352f019a7a5SEric W. Biederman .validate = tun_validate, 2353*1ec010e7SSabrina Dubroca .get_size = tun_get_size, 2354*1ec010e7SSabrina Dubroca .fill_info = tun_fill_info, 2355f019a7a5SEric W. Biederman }; 2356f019a7a5SEric W. Biederman 235733dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk) 235833dccbb0SHerbert Xu { 235954f968d6SJason Wang struct tun_file *tfile; 236043815482SEric Dumazet wait_queue_head_t *wqueue; 236133dccbb0SHerbert Xu 236233dccbb0SHerbert Xu if (!sock_writeable(sk)) 236333dccbb0SHerbert Xu return; 236433dccbb0SHerbert Xu 23659cd3e072SEric Dumazet if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) 236633dccbb0SHerbert Xu return; 236733dccbb0SHerbert Xu 236843815482SEric Dumazet wqueue = sk_sleep(sk); 236943815482SEric Dumazet if (wqueue && waitqueue_active(wqueue)) 2370a9a08845SLinus Torvalds wake_up_interruptible_sync_poll(wqueue, EPOLLOUT | 2371a9a08845SLinus Torvalds EPOLLWRNORM | EPOLLWRBAND); 2372c722c625SHerbert Xu 237354f968d6SJason Wang tfile = container_of(sk, struct tun_file, sk); 237454f968d6SJason Wang kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); 237533dccbb0SHerbert Xu } 237633dccbb0SHerbert Xu 23771b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) 237805c2828cSMichael S. Tsirkin { 237954f968d6SJason Wang int ret; 238054f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 23819484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 238254f968d6SJason Wang 238354f968d6SJason Wang if (!tun) 238454f968d6SJason Wang return -EBADFD; 2385f5ff53b4SAl Viro 2386c0371da6SAl Viro ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, 23875503fcecSJason Wang m->msg_flags & MSG_DONTWAIT, 23885503fcecSJason Wang m->msg_flags & MSG_MORE); 2389762c330dSJason Wang 2390762c330dSJason Wang if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT || 2391762c330dSJason Wang !(m->msg_flags & MSG_MORE)) { 2392762c330dSJason Wang tfile->xdp_pending_pkts = 0; 2393762c330dSJason Wang xdp_do_flush_map(); 2394762c330dSJason Wang } 2395762c330dSJason Wang 239654f968d6SJason Wang tun_put(tun); 239754f968d6SJason Wang return ret; 239805c2828cSMichael S. Tsirkin } 239905c2828cSMichael S. Tsirkin 24001b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, 240105c2828cSMichael S. Tsirkin int flags) 240205c2828cSMichael S. Tsirkin { 240354f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 24049484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 2405fc72d1d5SJason Wang void *ptr = m->msg_control; 240605c2828cSMichael S. 
Tsirkin int ret; 240754f968d6SJason Wang 2408c33ee15bSWei Xu if (!tun) { 2409c33ee15bSWei Xu ret = -EBADFD; 2410fc72d1d5SJason Wang goto out_free; 2411c33ee15bSWei Xu } 241254f968d6SJason Wang 2413eda29772SRichard Cochran if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { 24143811ae76SGao feng ret = -EINVAL; 2415c33ee15bSWei Xu goto out_put_tun; 24163811ae76SGao feng } 2417eda29772SRichard Cochran if (flags & MSG_ERRQUEUE) { 2418eda29772SRichard Cochran ret = sock_recv_errqueue(sock->sk, m, total_len, 2419eda29772SRichard Cochran SOL_PACKET, TUN_TX_TIMESTAMP); 2420eda29772SRichard Cochran goto out; 2421eda29772SRichard Cochran } 2422fc72d1d5SJason Wang ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr); 242387897931SAlex Gartrell if (ret > (ssize_t)total_len) { 242442404c09SDavid S. Miller m->msg_flags |= MSG_TRUNC; 242542404c09SDavid S. Miller ret = flags & MSG_TRUNC ? ret : total_len; 242642404c09SDavid S. Miller } 24273811ae76SGao feng out: 242854f968d6SJason Wang tun_put(tun); 242905c2828cSMichael S. Tsirkin return ret; 2430c33ee15bSWei Xu 2431c33ee15bSWei Xu out_put_tun: 2432c33ee15bSWei Xu tun_put(tun); 2433fc72d1d5SJason Wang out_free: 2434fc72d1d5SJason Wang tun_ptr_free(ptr); 2435c33ee15bSWei Xu return ret; 243605c2828cSMichael S. Tsirkin } 243705c2828cSMichael S. Tsirkin 2438fc72d1d5SJason Wang static int tun_ptr_peek_len(void *ptr) 2439fc72d1d5SJason Wang { 2440fc72d1d5SJason Wang if (likely(ptr)) { 2441fc72d1d5SJason Wang if (tun_is_xdp_buff(ptr)) { 2442fc72d1d5SJason Wang struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); 2443fc72d1d5SJason Wang 2444fc72d1d5SJason Wang return xdp->data_end - xdp->data; 2445fc72d1d5SJason Wang } 2446fc72d1d5SJason Wang return __skb_array_len_with_tag(ptr); 2447fc72d1d5SJason Wang } else { 2448fc72d1d5SJason Wang return 0; 2449fc72d1d5SJason Wang } 2450fc72d1d5SJason Wang } 2451fc72d1d5SJason Wang 24521576d986SJason Wang static int tun_peek_len(struct socket *sock) 24531576d986SJason Wang { 24541576d986SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 24551576d986SJason Wang struct tun_struct *tun; 24561576d986SJason Wang int ret = 0; 24571576d986SJason Wang 24589484dc74Syuan linyu tun = tun_get(tfile); 24591576d986SJason Wang if (!tun) 24601576d986SJason Wang return 0; 24611576d986SJason Wang 2462fc72d1d5SJason Wang ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len); 24631576d986SJason Wang tun_put(tun); 24641576d986SJason Wang 24651576d986SJason Wang return ret; 24661576d986SJason Wang } 24671576d986SJason Wang 246805c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */ 246905c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = { 24701576d986SJason Wang .peek_len = tun_peek_len, 247105c2828cSMichael S. Tsirkin .sendmsg = tun_sendmsg, 247205c2828cSMichael S. Tsirkin .recvmsg = tun_recvmsg, 247305c2828cSMichael S. Tsirkin }; 247405c2828cSMichael S. Tsirkin 247533dccbb0SHerbert Xu static struct proto tun_proto = { 247633dccbb0SHerbert Xu .name = "tun", 247733dccbb0SHerbert Xu .owner = THIS_MODULE, 247854f968d6SJason Wang .obj_size = sizeof(struct tun_file), 247933dccbb0SHerbert Xu }; 2480f019a7a5SEric W. Biederman 2481980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun) 2482980c9e8cSDavid Woodhouse { 2483031f5e03SMichael S. 
Tsirkin return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); 2484980c9e8cSDavid Woodhouse } 2485980c9e8cSDavid Woodhouse 2486980c9e8cSDavid Woodhouse static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr, 2487980c9e8cSDavid Woodhouse char *buf) 2488980c9e8cSDavid Woodhouse { 2489980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2490980c9e8cSDavid Woodhouse return sprintf(buf, "0x%x\n", tun_flags(tun)); 2491980c9e8cSDavid Woodhouse } 2492980c9e8cSDavid Woodhouse 2493980c9e8cSDavid Woodhouse static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, 2494980c9e8cSDavid Woodhouse char *buf) 2495980c9e8cSDavid Woodhouse { 2496980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 24970625c883SEric W. Biederman return uid_valid(tun->owner)? 24980625c883SEric W. Biederman sprintf(buf, "%u\n", 24990625c883SEric W. Biederman from_kuid_munged(current_user_ns(), tun->owner)): 25000625c883SEric W. Biederman sprintf(buf, "-1\n"); 2501980c9e8cSDavid Woodhouse } 2502980c9e8cSDavid Woodhouse 2503980c9e8cSDavid Woodhouse static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, 2504980c9e8cSDavid Woodhouse char *buf) 2505980c9e8cSDavid Woodhouse { 2506980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 25070625c883SEric W. Biederman return gid_valid(tun->group) ? 25080625c883SEric W. Biederman sprintf(buf, "%u\n", 25090625c883SEric W. Biederman from_kgid_munged(current_user_ns(), tun->group)): 25100625c883SEric W. Biederman sprintf(buf, "-1\n"); 2511980c9e8cSDavid Woodhouse } 2512980c9e8cSDavid Woodhouse 2513980c9e8cSDavid Woodhouse static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); 2514980c9e8cSDavid Woodhouse static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL); 2515980c9e8cSDavid Woodhouse static DEVICE_ATTR(group, 0444, tun_show_group, NULL); 2516980c9e8cSDavid Woodhouse 2517c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = { 2518c4d33e24STakashi Iwai &dev_attr_tun_flags.attr, 2519c4d33e24STakashi Iwai &dev_attr_owner.attr, 2520c4d33e24STakashi Iwai &dev_attr_group.attr, 2521c4d33e24STakashi Iwai NULL 2522c4d33e24STakashi Iwai }; 2523c4d33e24STakashi Iwai 2524c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = { 2525c4d33e24STakashi Iwai .attrs = tun_dev_attrs 2526c4d33e24STakashi Iwai }; 2527c4d33e24STakashi Iwai 2528d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 25291da177e4SLinus Torvalds { 25301da177e4SLinus Torvalds struct tun_struct *tun; 253154f968d6SJason Wang struct tun_file *tfile = file->private_data; 25321da177e4SLinus Torvalds struct net_device *dev; 25331da177e4SLinus Torvalds int err; 25341da177e4SLinus Torvalds 25357c0c3b1aSJason Wang if (tfile->detached) 25367c0c3b1aSJason Wang return -EINVAL; 25377c0c3b1aSJason Wang 253890e33d45SPetar Penkov if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { 253990e33d45SPetar Penkov if (!capable(CAP_NET_ADMIN)) 254090e33d45SPetar Penkov return -EPERM; 254190e33d45SPetar Penkov 254290e33d45SPetar Penkov if (!(ifr->ifr_flags & IFF_NAPI) || 254390e33d45SPetar Penkov (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) 254490e33d45SPetar Penkov return -EINVAL; 254590e33d45SPetar Penkov } 254690e33d45SPetar Penkov 254774a3e5a7SEric W. Biederman dev = __dev_get_by_name(net, ifr->ifr_name); 254874a3e5a7SEric W. 
Biederman if (dev) { 2549f85ba780SDavid Woodhouse if (ifr->ifr_flags & IFF_TUN_EXCL) 2550f85ba780SDavid Woodhouse return -EBUSY; 255174a3e5a7SEric W. Biederman if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) 255274a3e5a7SEric W. Biederman tun = netdev_priv(dev); 255374a3e5a7SEric W. Biederman else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) 255474a3e5a7SEric W. Biederman tun = netdev_priv(dev); 255574a3e5a7SEric W. Biederman else 255674a3e5a7SEric W. Biederman return -EINVAL; 255774a3e5a7SEric W. Biederman 25588e6d91aeSJason Wang if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != 255940630b82SMichael S. Tsirkin !!(tun->flags & IFF_MULTI_QUEUE)) 25608e6d91aeSJason Wang return -EINVAL; 25618e6d91aeSJason Wang 2562cde8b15fSJason Wang if (tun_not_capable(tun)) 25632b980dbdSPaul Moore return -EPERM; 25645dbbaf2dSPaul Moore err = security_tun_dev_open(tun->security); 25652b980dbdSPaul Moore if (err < 0) 25662b980dbdSPaul Moore return err; 25672b980dbdSPaul Moore 256894317099SPetar Penkov err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, 256994317099SPetar Penkov ifr->ifr_flags & IFF_NAPI); 2570a7385ba2SEric W. Biederman if (err < 0) 2571a7385ba2SEric W. Biederman return err; 25724008e97fSJason Wang 257340630b82SMichael S. Tsirkin if (tun->flags & IFF_MULTI_QUEUE && 2574e8dbad66SJason Wang (tun->numqueues + tun->numdisabled > 1)) { 2575e8dbad66SJason Wang /* One or more queue has already been attached, no need 2576e8dbad66SJason Wang * to initialize the device again. 2577e8dbad66SJason Wang */ 2578e8dbad66SJason Wang return 0; 2579e8dbad66SJason Wang } 258086a264abSDavid Howells } 25811da177e4SLinus Torvalds else { 25821da177e4SLinus Torvalds char *name; 25831da177e4SLinus Torvalds unsigned long flags = 0; 2584edfb6a14SJason Wang int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? 2585edfb6a14SJason Wang MAX_TAP_QUEUES : 1; 25861da177e4SLinus Torvalds 2587c260b772SEric W. Biederman if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2588ca6bb5d7SDavid Woodhouse return -EPERM; 25892b980dbdSPaul Moore err = security_tun_dev_create(); 25902b980dbdSPaul Moore if (err < 0) 25912b980dbdSPaul Moore return err; 2592ca6bb5d7SDavid Woodhouse 25931da177e4SLinus Torvalds /* Set dev type */ 25941da177e4SLinus Torvalds if (ifr->ifr_flags & IFF_TUN) { 25951da177e4SLinus Torvalds /* TUN device */ 259640630b82SMichael S. Tsirkin flags |= IFF_TUN; 25971da177e4SLinus Torvalds name = "tun%d"; 25981da177e4SLinus Torvalds } else if (ifr->ifr_flags & IFF_TAP) { 25991da177e4SLinus Torvalds /* TAP device */ 260040630b82SMichael S. Tsirkin flags |= IFF_TAP; 26011da177e4SLinus Torvalds name = "tap%d"; 26021da177e4SLinus Torvalds } else 260336989b90SKusanagi Kouichi return -EINVAL; 26041da177e4SLinus Torvalds 26051da177e4SLinus Torvalds if (*ifr->ifr_name) 26061da177e4SLinus Torvalds name = ifr->ifr_name; 26071da177e4SLinus Torvalds 2608c8d68e6bSJason Wang dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, 2609c835a677STom Gundersen NET_NAME_UNKNOWN, tun_setup, queues, 2610c835a677STom Gundersen queues); 2611edfb6a14SJason Wang 26121da177e4SLinus Torvalds if (!dev) 26131da177e4SLinus Torvalds return -ENOMEM; 26140ad646c8SCong Wang err = dev_get_valid_name(net, dev, name); 26155c25f65fSJulien Gomes if (err < 0) 26160ad646c8SCong Wang goto err_free_dev; 26171da177e4SLinus Torvalds 2618fc54c658SPavel Emelyanov dev_net_set(dev, net); 2619f019a7a5SEric W. 
Biederman dev->rtnl_link_ops = &tun_link_ops; 2620fb7589a1SPavel Emelyanov dev->ifindex = tfile->ifindex; 2621c4d33e24STakashi Iwai dev->sysfs_groups[0] = &tun_attr_group; 2622758e43b7SStephen Hemminger 26231da177e4SLinus Torvalds tun = netdev_priv(dev); 26241da177e4SLinus Torvalds tun->dev = dev; 26251da177e4SLinus Torvalds tun->flags = flags; 2626f271b2ccSMax Krasnyansky tun->txflt.count = 0; 2627d9d52b51SMichael S. Tsirkin tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); 26281da177e4SLinus Torvalds 2629eaea34b2SPaolo Abeni tun->align = NET_SKB_PAD; 263054f968d6SJason Wang tun->filter_attached = false; 263154f968d6SJason Wang tun->sndbuf = tfile->socket.sk->sk_sndbuf; 26325503fcecSJason Wang tun->rx_batched = 0; 263396f84061SJason Wang RCU_INIT_POINTER(tun->steering_prog, NULL); 263433dccbb0SHerbert Xu 2635608b9977SPaolo Abeni tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); 2636608b9977SPaolo Abeni if (!tun->pcpu_stats) { 2637608b9977SPaolo Abeni err = -ENOMEM; 2638608b9977SPaolo Abeni goto err_free_dev; 2639608b9977SPaolo Abeni } 2640608b9977SPaolo Abeni 264196442e42SJason Wang spin_lock_init(&tun->lock); 264296442e42SJason Wang 26435dbbaf2dSPaul Moore err = security_tun_dev_alloc_security(&tun->security); 26445dbbaf2dSPaul Moore if (err < 0) 2645608b9977SPaolo Abeni goto err_free_stat; 26462b980dbdSPaul Moore 26471da177e4SLinus Torvalds tun_net_init(dev); 2648944a1376SPavel Emelyanov tun_flow_init(tun); 264996442e42SJason Wang 265088255375SMichał Mirosław dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | 26516680ec68SJason Wang TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | 26526680ec68SJason Wang NETIF_F_HW_VLAN_STAG_TX; 26532a2bbf17SPaolo Abeni dev->features = dev->hw_features | NETIF_F_LLTX; 26546671b224SFernando Luis Vazquez Cao dev->vlan_features = dev->features & 26556671b224SFernando Luis Vazquez Cao ~(NETIF_F_HW_VLAN_CTAG_TX | 26566671b224SFernando Luis Vazquez Cao NETIF_F_HW_VLAN_STAG_TX); 265788255375SMichał Mirosław 26584008e97fSJason Wang INIT_LIST_HEAD(&tun->disabled); 265994317099SPetar Penkov err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI); 2660eb0fb363SJason Wang if (err < 0) 2661662ca437SJason Wang goto err_free_flow; 2662eb0fb363SJason Wang 26631da177e4SLinus Torvalds err = register_netdevice(tun->dev); 26641da177e4SLinus Torvalds if (err < 0) 2665662ca437SJason Wang goto err_detach; 2666af668b3cSMichael S. Tsirkin } 2667980c9e8cSDavid Woodhouse 2668eb0fb363SJason Wang netif_carrier_on(tun->dev); 26691da177e4SLinus Torvalds 26706b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_set_iff\n"); 26711da177e4SLinus Torvalds 2672031f5e03SMichael S. Tsirkin tun->flags = (tun->flags & ~TUN_FEATURES) | 2673031f5e03SMichael S. Tsirkin (ifr->ifr_flags & TUN_FEATURES); 2674c8d68e6bSJason Wang 2675e35259a9SMax Krasnyansky /* Make sure persistent devices do not get stuck in 2676e35259a9SMax Krasnyansky * xoff state. 
2677e35259a9SMax Krasnyansky */ 2678e35259a9SMax Krasnyansky if (netif_running(tun->dev)) 2679c8d68e6bSJason Wang netif_tx_wake_all_queues(tun->dev); 2680e35259a9SMax Krasnyansky 26811da177e4SLinus Torvalds strcpy(ifr->ifr_name, tun->dev->name); 26821da177e4SLinus Torvalds return 0; 26831da177e4SLinus Torvalds 2684662ca437SJason Wang err_detach: 2685662ca437SJason Wang tun_detach_all(dev); 2686ff244c6bSEric Dumazet /* register_netdevice() already called tun_free_netdev() */ 2687ff244c6bSEric Dumazet goto err_free_dev; 2688ff244c6bSEric Dumazet 2689662ca437SJason Wang err_free_flow: 2690662ca437SJason Wang tun_flow_uninit(tun); 2691662ca437SJason Wang security_tun_dev_free_security(tun->security); 2692608b9977SPaolo Abeni err_free_stat: 2693608b9977SPaolo Abeni free_percpu(tun->pcpu_stats); 26941da177e4SLinus Torvalds err_free_dev: 26951da177e4SLinus Torvalds free_netdev(dev); 26961da177e4SLinus Torvalds return err; 26971da177e4SLinus Torvalds } 26981da177e4SLinus Torvalds 26999ce99cf6SRami Rosen static void tun_get_iff(struct net *net, struct tun_struct *tun, 2700876bfd4dSHerbert Xu struct ifreq *ifr) 2701e3b99556SMark McLoughlin { 27026b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_get_iff\n"); 2703e3b99556SMark McLoughlin 2704e3b99556SMark McLoughlin strcpy(ifr->ifr_name, tun->dev->name); 2705e3b99556SMark McLoughlin 2706980c9e8cSDavid Woodhouse ifr->ifr_flags = tun_flags(tun); 2707e3b99556SMark McLoughlin 2708e3b99556SMark McLoughlin } 2709e3b99556SMark McLoughlin 27105228ddc9SRusty Russell /* This is like a cut-down ethtool ops, except done via tun fd so no 27115228ddc9SRusty Russell * privs required. */ 271288255375SMichał Mirosław static int set_offload(struct tun_struct *tun, unsigned long arg) 27135228ddc9SRusty Russell { 2714c8f44affSMichał Mirosław netdev_features_t features = 0; 27155228ddc9SRusty Russell 27165228ddc9SRusty Russell if (arg & TUN_F_CSUM) { 271788255375SMichał Mirosław features |= NETIF_F_HW_CSUM; 27185228ddc9SRusty Russell arg &= ~TUN_F_CSUM; 27195228ddc9SRusty Russell 27205228ddc9SRusty Russell if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { 27215228ddc9SRusty Russell if (arg & TUN_F_TSO_ECN) { 27225228ddc9SRusty Russell features |= NETIF_F_TSO_ECN; 27235228ddc9SRusty Russell arg &= ~TUN_F_TSO_ECN; 27245228ddc9SRusty Russell } 27255228ddc9SRusty Russell if (arg & TUN_F_TSO4) 27265228ddc9SRusty Russell features |= NETIF_F_TSO; 27275228ddc9SRusty Russell if (arg & TUN_F_TSO6) 27285228ddc9SRusty Russell features |= NETIF_F_TSO6; 27295228ddc9SRusty Russell arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 27305228ddc9SRusty Russell } 27310c19f846SWillem de Bruijn 27320c19f846SWillem de Bruijn arg &= ~TUN_F_UFO; 27335228ddc9SRusty Russell } 27345228ddc9SRusty Russell 27355228ddc9SRusty Russell /* This gives the user a way to test for new features in future by 27365228ddc9SRusty Russell * trying to set them. 
*/ 27375228ddc9SRusty Russell if (arg) 27385228ddc9SRusty Russell return -EINVAL; 27395228ddc9SRusty Russell 274088255375SMichał Mirosław tun->set_features = features; 274109050957SYaroslav Isakov tun->dev->wanted_features &= ~TUN_USER_FEATURES; 274209050957SYaroslav Isakov tun->dev->wanted_features |= features; 274388255375SMichał Mirosław netdev_update_features(tun->dev); 27445228ddc9SRusty Russell 27455228ddc9SRusty Russell return 0; 27465228ddc9SRusty Russell } 27475228ddc9SRusty Russell 2748c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n) 2749c8d68e6bSJason Wang { 2750c8d68e6bSJason Wang int i; 2751c8d68e6bSJason Wang struct tun_file *tfile; 2752c8d68e6bSJason Wang 2753c8d68e6bSJason Wang for (i = 0; i < n; i++) { 2754b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 27558ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 27568ced425eSHannes Frederic Sowa sk_detach_filter(tfile->socket.sk); 27578ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2758c8d68e6bSJason Wang } 2759c8d68e6bSJason Wang 2760c8d68e6bSJason Wang tun->filter_attached = false; 2761c8d68e6bSJason Wang } 2762c8d68e6bSJason Wang 2763c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun) 2764c8d68e6bSJason Wang { 2765c8d68e6bSJason Wang int i, ret = 0; 2766c8d68e6bSJason Wang struct tun_file *tfile; 2767c8d68e6bSJason Wang 2768c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2769b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 27708ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 27718ced425eSHannes Frederic Sowa ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); 27728ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2773c8d68e6bSJason Wang if (ret) { 2774c8d68e6bSJason Wang tun_detach_filter(tun, i); 2775c8d68e6bSJason Wang return ret; 2776c8d68e6bSJason Wang } 2777c8d68e6bSJason Wang } 2778c8d68e6bSJason Wang 2779c8d68e6bSJason Wang tun->filter_attached = true; 2780c8d68e6bSJason Wang return ret; 2781c8d68e6bSJason Wang } 2782c8d68e6bSJason Wang 2783c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun) 2784c8d68e6bSJason Wang { 2785c8d68e6bSJason Wang struct tun_file *tfile; 2786c8d68e6bSJason Wang int i; 2787c8d68e6bSJason Wang 2788c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2789b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 2790c8d68e6bSJason Wang tfile->socket.sk->sk_sndbuf = tun->sndbuf; 2791c8d68e6bSJason Wang } 2792c8d68e6bSJason Wang } 2793c8d68e6bSJason Wang 2794cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr) 2795cde8b15fSJason Wang { 2796cde8b15fSJason Wang struct tun_file *tfile = file->private_data; 2797cde8b15fSJason Wang struct tun_struct *tun; 2798cde8b15fSJason Wang int ret = 0; 2799cde8b15fSJason Wang 2800cde8b15fSJason Wang rtnl_lock(); 2801cde8b15fSJason Wang 2802cde8b15fSJason Wang if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { 28034008e97fSJason Wang tun = tfile->detached; 28045dbbaf2dSPaul Moore if (!tun) { 2805cde8b15fSJason Wang ret = -EINVAL; 28065dbbaf2dSPaul Moore goto unlock; 28075dbbaf2dSPaul Moore } 28085dbbaf2dSPaul Moore ret = security_tun_dev_attach_queue(tun->security); 28095dbbaf2dSPaul Moore if (ret < 0) 28105dbbaf2dSPaul Moore goto unlock; 281194317099SPetar Penkov ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI); 28124008e97fSJason Wang } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 2813b8deabd3SJason Wang tun = rtnl_dereference(tfile->tun); 281440630b82SMichael S. 
Tsirkin if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) 28154008e97fSJason Wang ret = -EINVAL; 2816cde8b15fSJason Wang else 28174008e97fSJason Wang __tun_detach(tfile, false); 28184008e97fSJason Wang } else 2819cde8b15fSJason Wang ret = -EINVAL; 2820cde8b15fSJason Wang 28215dbbaf2dSPaul Moore unlock: 2822cde8b15fSJason Wang rtnl_unlock(); 2823cde8b15fSJason Wang return ret; 2824cde8b15fSJason Wang } 2825cde8b15fSJason Wang 2826cd5681d7SJason Wang static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p, 2827cd5681d7SJason Wang void __user *data) 282896f84061SJason Wang { 282996f84061SJason Wang struct bpf_prog *prog; 283096f84061SJason Wang int fd; 283196f84061SJason Wang 283296f84061SJason Wang if (copy_from_user(&fd, data, sizeof(fd))) 283396f84061SJason Wang return -EFAULT; 283496f84061SJason Wang 283596f84061SJason Wang if (fd == -1) { 283696f84061SJason Wang prog = NULL; 283796f84061SJason Wang } else { 283896f84061SJason Wang prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); 283996f84061SJason Wang if (IS_ERR(prog)) 284096f84061SJason Wang return PTR_ERR(prog); 284196f84061SJason Wang } 284296f84061SJason Wang 2843cd5681d7SJason Wang return __tun_set_ebpf(tun, prog_p, prog); 284496f84061SJason Wang } 284596f84061SJason Wang 284650857e2aSArnd Bergmann static long __tun_chr_ioctl(struct file *file, unsigned int cmd, 284750857e2aSArnd Bergmann unsigned long arg, int ifreq_len) 28481da177e4SLinus Torvalds { 284936b50babSEric W. Biederman struct tun_file *tfile = file->private_data; 2850631ab46bSEric W. Biederman struct tun_struct *tun; 28511da177e4SLinus Torvalds void __user* argp = (void __user*)arg; 28521da177e4SLinus Torvalds struct ifreq ifr; 2853f2780d6dSKirill Tkhai struct net *net; 28540625c883SEric W. Biederman kuid_t owner; 28550625c883SEric W. Biederman kgid_t group; 285633dccbb0SHerbert Xu int sndbuf; 2857d9d52b51SMichael S. Tsirkin int vnet_hdr_sz; 2858fb7589a1SPavel Emelyanov unsigned int ifindex; 28591cf8e410SMichael S. Tsirkin int le; 2860f271b2ccSMax Krasnyansky int ret; 28611da177e4SLinus Torvalds 2862f2780d6dSKirill Tkhai if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || 2863f2780d6dSKirill Tkhai (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) { 286450857e2aSArnd Bergmann if (copy_from_user(&ifr, argp, ifreq_len)) 28651da177e4SLinus Torvalds return -EFAULT; 28668bbb1813SDavid S. Miller } else { 2867a117dacdSMathias Krause memset(&ifr, 0, sizeof(ifr)); 28688bbb1813SDavid S. Miller } 2869631ab46bSEric W. Biederman if (cmd == TUNGETFEATURES) { 2870631ab46bSEric W. Biederman /* Currently this just means: "what IFF flags are valid?". 2871631ab46bSEric W. Biederman * This is needed because we never checked for invalid flags on 2872031f5e03SMichael S. Tsirkin * TUNSETIFF. 2873031f5e03SMichael S. Tsirkin */ 2874031f5e03SMichael S. Tsirkin return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES, 2875631ab46bSEric W. Biederman (unsigned int __user*)argp); 2876cde8b15fSJason Wang } else if (cmd == TUNSETQUEUE) 2877cde8b15fSJason Wang return tun_set_queue(file, &ifr); 2878631ab46bSEric W. 
Biederman 2879c8d68e6bSJason Wang ret = 0; 2880876bfd4dSHerbert Xu rtnl_lock(); 2881876bfd4dSHerbert Xu 28829484dc74Syuan linyu tun = tun_get(tfile); 2883f2780d6dSKirill Tkhai net = sock_net(&tfile->sk); 28840f16bc13SGao Feng if (cmd == TUNSETIFF) { 28850f16bc13SGao Feng ret = -EEXIST; 28860f16bc13SGao Feng if (tun) 28870f16bc13SGao Feng goto unlock; 28880f16bc13SGao Feng 28891da177e4SLinus Torvalds ifr.ifr_name[IFNAMSIZ-1] = '\0'; 28901da177e4SLinus Torvalds 2891f2780d6dSKirill Tkhai ret = tun_set_iff(net, file, &ifr); 28921da177e4SLinus Torvalds 2893876bfd4dSHerbert Xu if (ret) 2894876bfd4dSHerbert Xu goto unlock; 28951da177e4SLinus Torvalds 289650857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 2897876bfd4dSHerbert Xu ret = -EFAULT; 2898876bfd4dSHerbert Xu goto unlock; 28991da177e4SLinus Torvalds } 2900fb7589a1SPavel Emelyanov if (cmd == TUNSETIFINDEX) { 2901fb7589a1SPavel Emelyanov ret = -EPERM; 2902fb7589a1SPavel Emelyanov if (tun) 2903fb7589a1SPavel Emelyanov goto unlock; 2904fb7589a1SPavel Emelyanov 2905fb7589a1SPavel Emelyanov ret = -EFAULT; 2906fb7589a1SPavel Emelyanov if (copy_from_user(&ifindex, argp, sizeof(ifindex))) 2907fb7589a1SPavel Emelyanov goto unlock; 2908fb7589a1SPavel Emelyanov 2909fb7589a1SPavel Emelyanov ret = 0; 2910fb7589a1SPavel Emelyanov tfile->ifindex = ifindex; 2911fb7589a1SPavel Emelyanov goto unlock; 2912fb7589a1SPavel Emelyanov } 2913f2780d6dSKirill Tkhai if (cmd == SIOCGSKNS) { 2914f2780d6dSKirill Tkhai ret = -EPERM; 2915f2780d6dSKirill Tkhai if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2916f2780d6dSKirill Tkhai goto unlock; 2917f2780d6dSKirill Tkhai 2918f2780d6dSKirill Tkhai ret = open_related_ns(&net->ns, get_net_ns); 2919f2780d6dSKirill Tkhai goto unlock; 2920f2780d6dSKirill Tkhai } 29211da177e4SLinus Torvalds 2922876bfd4dSHerbert Xu ret = -EBADFD; 29231da177e4SLinus Torvalds if (!tun) 2924876bfd4dSHerbert Xu goto unlock; 29251da177e4SLinus Torvalds 29261e588338SJason Wang tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd); 29271da177e4SLinus Torvalds 2928631ab46bSEric W. Biederman ret = 0; 29291da177e4SLinus Torvalds switch (cmd) { 2930e3b99556SMark McLoughlin case TUNGETIFF: 29319ce99cf6SRami Rosen tun_get_iff(current->nsproxy->net_ns, tun, &ifr); 2932e3b99556SMark McLoughlin 29333d407a80SPavel Emelyanov if (tfile->detached) 29343d407a80SPavel Emelyanov ifr.ifr_flags |= IFF_DETACH_QUEUE; 2935849c9b6fSPavel Emelyanov if (!tfile->socket.sk->sk_filter) 2936849c9b6fSPavel Emelyanov ifr.ifr_flags |= IFF_NOFILTER; 29373d407a80SPavel Emelyanov 293850857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 2939631ab46bSEric W. Biederman ret = -EFAULT; 2940e3b99556SMark McLoughlin break; 2941e3b99556SMark McLoughlin 29421da177e4SLinus Torvalds case TUNSETNOCSUM: 29431da177e4SLinus Torvalds /* Disable/Enable checksum */ 29441da177e4SLinus Torvalds 294588255375SMichał Mirosław /* [unimplemented] */ 294688255375SMichał Mirosław tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n", 29476b8a66eeSJoe Perches arg ? "disabled" : "enabled"); 29481da177e4SLinus Torvalds break; 29491da177e4SLinus Torvalds 29501da177e4SLinus Torvalds case TUNSETPERSIST: 295154f968d6SJason Wang /* Disable/Enable persist mode. Keep an extra reference to the 295254f968d6SJason Wang * module to prevent the module being unprobed. 295354f968d6SJason Wang */ 295440630b82SMichael S. Tsirkin if (arg && !(tun->flags & IFF_PERSIST)) { 295540630b82SMichael S. 
Tsirkin tun->flags |= IFF_PERSIST; 295654f968d6SJason Wang __module_get(THIS_MODULE); 2957dd38bd85SJason Wang } 295840630b82SMichael S. Tsirkin if (!arg && (tun->flags & IFF_PERSIST)) { 295940630b82SMichael S. Tsirkin tun->flags &= ~IFF_PERSIST; 296054f968d6SJason Wang module_put(THIS_MODULE); 296154f968d6SJason Wang } 29621da177e4SLinus Torvalds 29636b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "persist %s\n", 29646b8a66eeSJoe Perches arg ? "enabled" : "disabled"); 29651da177e4SLinus Torvalds break; 29661da177e4SLinus Torvalds 29671da177e4SLinus Torvalds case TUNSETOWNER: 29681da177e4SLinus Torvalds /* Set owner of the device */ 29690625c883SEric W. Biederman owner = make_kuid(current_user_ns(), arg); 29700625c883SEric W. Biederman if (!uid_valid(owner)) { 29710625c883SEric W. Biederman ret = -EINVAL; 29720625c883SEric W. Biederman break; 29730625c883SEric W. Biederman } 29740625c883SEric W. Biederman tun->owner = owner; 29751e588338SJason Wang tun_debug(KERN_INFO, tun, "owner set to %u\n", 29760625c883SEric W. Biederman from_kuid(&init_user_ns, tun->owner)); 29771da177e4SLinus Torvalds break; 29781da177e4SLinus Torvalds 29798c644623SGuido Guenther case TUNSETGROUP: 29808c644623SGuido Guenther /* Set group of the device */ 29810625c883SEric W. Biederman group = make_kgid(current_user_ns(), arg); 29820625c883SEric W. Biederman if (!gid_valid(group)) { 29830625c883SEric W. Biederman ret = -EINVAL; 29840625c883SEric W. Biederman break; 29850625c883SEric W. Biederman } 29860625c883SEric W. Biederman tun->group = group; 29871e588338SJason Wang tun_debug(KERN_INFO, tun, "group set to %u\n", 29880625c883SEric W. Biederman from_kgid(&init_user_ns, tun->group)); 29898c644623SGuido Guenther break; 29908c644623SGuido Guenther 2991ff4cc3acSMike Kershaw case TUNSETLINK: 2992ff4cc3acSMike Kershaw /* Only allow setting the type when the interface is down */ 2993ff4cc3acSMike Kershaw if (tun->dev->flags & IFF_UP) { 29946b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, 29956b8a66eeSJoe Perches "Linktype set failed because interface is up\n"); 299648abfe05SDavid S. Miller ret = -EBUSY; 2997ff4cc3acSMike Kershaw } else { 2998ff4cc3acSMike Kershaw tun->dev->type = (int) arg; 29996b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "linktype set to %d\n", 30006b8a66eeSJoe Perches tun->dev->type); 300148abfe05SDavid S. Miller ret = 0; 3002ff4cc3acSMike Kershaw } 3003631ab46bSEric W. Biederman break; 3004ff4cc3acSMike Kershaw 30051da177e4SLinus Torvalds #ifdef TUN_DEBUG 30061da177e4SLinus Torvalds case TUNSETDEBUG: 30071da177e4SLinus Torvalds tun->debug = arg; 30081da177e4SLinus Torvalds break; 30091da177e4SLinus Torvalds #endif 30105228ddc9SRusty Russell case TUNSETOFFLOAD: 301188255375SMichał Mirosław ret = set_offload(tun, arg); 3012631ab46bSEric W. Biederman break; 30135228ddc9SRusty Russell 3014f271b2ccSMax Krasnyansky case TUNSETTXFILTER: 3015f271b2ccSMax Krasnyansky /* Can be set only for TAPs */ 3016631ab46bSEric W. Biederman ret = -EINVAL; 301740630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 3018631ab46bSEric W. Biederman break; 3019c0e5a8c2SHarvey Harrison ret = update_filter(&tun->txflt, (void __user *)arg); 3020631ab46bSEric W. 
Biederman break; 30211da177e4SLinus Torvalds 30221da177e4SLinus Torvalds case SIOCGIFHWADDR: 3023b595076aSUwe Kleine-König /* Get hw address */ 3024f271b2ccSMax Krasnyansky memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); 3025f271b2ccSMax Krasnyansky ifr.ifr_hwaddr.sa_family = tun->dev->type; 302650857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 3027631ab46bSEric W. Biederman ret = -EFAULT; 3028631ab46bSEric W. Biederman break; 30291da177e4SLinus Torvalds 30301da177e4SLinus Torvalds case SIOCSIFHWADDR: 3031f271b2ccSMax Krasnyansky /* Set hw address */ 30326b8a66eeSJoe Perches tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n", 30336b8a66eeSJoe Perches ifr.ifr_hwaddr.sa_data); 303440102371SKim B. Heino 303540102371SKim B. Heino ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 3036631ab46bSEric W. Biederman break; 303733dccbb0SHerbert Xu 303833dccbb0SHerbert Xu case TUNGETSNDBUF: 303954f968d6SJason Wang sndbuf = tfile->socket.sk->sk_sndbuf; 304033dccbb0SHerbert Xu if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) 304133dccbb0SHerbert Xu ret = -EFAULT; 304233dccbb0SHerbert Xu break; 304333dccbb0SHerbert Xu 304433dccbb0SHerbert Xu case TUNSETSNDBUF: 304533dccbb0SHerbert Xu if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { 304633dccbb0SHerbert Xu ret = -EFAULT; 304733dccbb0SHerbert Xu break; 304833dccbb0SHerbert Xu } 304993161922SCraig Gallek if (sndbuf <= 0) { 305093161922SCraig Gallek ret = -EINVAL; 305193161922SCraig Gallek break; 305293161922SCraig Gallek } 305333dccbb0SHerbert Xu 3054c8d68e6bSJason Wang tun->sndbuf = sndbuf; 3055c8d68e6bSJason Wang tun_set_sndbuf(tun); 305633dccbb0SHerbert Xu break; 305733dccbb0SHerbert Xu 3058d9d52b51SMichael S. Tsirkin case TUNGETVNETHDRSZ: 3059d9d52b51SMichael S. Tsirkin vnet_hdr_sz = tun->vnet_hdr_sz; 3060d9d52b51SMichael S. Tsirkin if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) 3061d9d52b51SMichael S. Tsirkin ret = -EFAULT; 3062d9d52b51SMichael S. Tsirkin break; 3063d9d52b51SMichael S. Tsirkin 3064d9d52b51SMichael S. Tsirkin case TUNSETVNETHDRSZ: 3065d9d52b51SMichael S. Tsirkin if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { 3066d9d52b51SMichael S. Tsirkin ret = -EFAULT; 3067d9d52b51SMichael S. Tsirkin break; 3068d9d52b51SMichael S. Tsirkin } 3069d9d52b51SMichael S. Tsirkin if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { 3070d9d52b51SMichael S. Tsirkin ret = -EINVAL; 3071d9d52b51SMichael S. Tsirkin break; 3072d9d52b51SMichael S. Tsirkin } 3073d9d52b51SMichael S. Tsirkin 3074d9d52b51SMichael S. Tsirkin tun->vnet_hdr_sz = vnet_hdr_sz; 3075d9d52b51SMichael S. Tsirkin break; 3076d9d52b51SMichael S. Tsirkin 30771cf8e410SMichael S. Tsirkin case TUNGETVNETLE: 30781cf8e410SMichael S. Tsirkin le = !!(tun->flags & TUN_VNET_LE); 30791cf8e410SMichael S. Tsirkin if (put_user(le, (int __user *)argp)) 30801cf8e410SMichael S. Tsirkin ret = -EFAULT; 30811cf8e410SMichael S. Tsirkin break; 30821cf8e410SMichael S. Tsirkin 30831cf8e410SMichael S. Tsirkin case TUNSETVNETLE: 30841cf8e410SMichael S. Tsirkin if (get_user(le, (int __user *)argp)) { 30851cf8e410SMichael S. Tsirkin ret = -EFAULT; 30861cf8e410SMichael S. Tsirkin break; 30871cf8e410SMichael S. Tsirkin } 30881cf8e410SMichael S. Tsirkin if (le) 30891cf8e410SMichael S. Tsirkin tun->flags |= TUN_VNET_LE; 30901cf8e410SMichael S. Tsirkin else 30911cf8e410SMichael S. Tsirkin tun->flags &= ~TUN_VNET_LE; 30921cf8e410SMichael S. Tsirkin break; 30931cf8e410SMichael S. 
Tsirkin 30948b8e658bSGreg Kurz case TUNGETVNETBE: 30958b8e658bSGreg Kurz ret = tun_get_vnet_be(tun, argp); 30968b8e658bSGreg Kurz break; 30978b8e658bSGreg Kurz 30988b8e658bSGreg Kurz case TUNSETVNETBE: 30998b8e658bSGreg Kurz ret = tun_set_vnet_be(tun, argp); 31008b8e658bSGreg Kurz break; 31018b8e658bSGreg Kurz 310299405162SMichael S. Tsirkin case TUNATTACHFILTER: 310399405162SMichael S. Tsirkin /* Can be set only for TAPs */ 310499405162SMichael S. Tsirkin ret = -EINVAL; 310540630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 310699405162SMichael S. Tsirkin break; 310799405162SMichael S. Tsirkin ret = -EFAULT; 310854f968d6SJason Wang if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) 310999405162SMichael S. Tsirkin break; 311099405162SMichael S. Tsirkin 3111c8d68e6bSJason Wang ret = tun_attach_filter(tun); 311299405162SMichael S. Tsirkin break; 311399405162SMichael S. Tsirkin 311499405162SMichael S. Tsirkin case TUNDETACHFILTER: 311599405162SMichael S. Tsirkin /* Can be set only for TAPs */ 311699405162SMichael S. Tsirkin ret = -EINVAL; 311740630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 311899405162SMichael S. Tsirkin break; 3119c8d68e6bSJason Wang ret = 0; 3120c8d68e6bSJason Wang tun_detach_filter(tun, tun->numqueues); 312199405162SMichael S. Tsirkin break; 312299405162SMichael S. Tsirkin 312376975e9cSPavel Emelyanov case TUNGETFILTER: 312476975e9cSPavel Emelyanov ret = -EINVAL; 312540630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 312676975e9cSPavel Emelyanov break; 312776975e9cSPavel Emelyanov ret = -EFAULT; 312876975e9cSPavel Emelyanov if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) 312976975e9cSPavel Emelyanov break; 313076975e9cSPavel Emelyanov ret = 0; 313176975e9cSPavel Emelyanov break; 313276975e9cSPavel Emelyanov 313396f84061SJason Wang case TUNSETSTEERINGEBPF: 3134cd5681d7SJason Wang ret = tun_set_ebpf(tun, &tun->steering_prog, argp); 313596f84061SJason Wang break; 313696f84061SJason Wang 3137aff3d70aSJason Wang case TUNSETFILTEREBPF: 3138aff3d70aSJason Wang ret = tun_set_ebpf(tun, &tun->filter_prog, argp); 3139aff3d70aSJason Wang break; 3140aff3d70aSJason Wang 31411da177e4SLinus Torvalds default: 3142631ab46bSEric W. Biederman ret = -EINVAL; 3143631ab46bSEric W. Biederman break; 3144ee289b64SJoe Perches } 31451da177e4SLinus Torvalds 3146876bfd4dSHerbert Xu unlock: 3147876bfd4dSHerbert Xu rtnl_unlock(); 3148876bfd4dSHerbert Xu if (tun) 3149631ab46bSEric W. Biederman tun_put(tun); 3150631ab46bSEric W. 
Biederman return ret; 31511da177e4SLinus Torvalds } 31521da177e4SLinus Torvalds 315350857e2aSArnd Bergmann static long tun_chr_ioctl(struct file *file, 315450857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 315550857e2aSArnd Bergmann { 315650857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); 315750857e2aSArnd Bergmann } 315850857e2aSArnd Bergmann 315950857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 316050857e2aSArnd Bergmann static long tun_chr_compat_ioctl(struct file *file, 316150857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 316250857e2aSArnd Bergmann { 316350857e2aSArnd Bergmann switch (cmd) { 316450857e2aSArnd Bergmann case TUNSETIFF: 316550857e2aSArnd Bergmann case TUNGETIFF: 316650857e2aSArnd Bergmann case TUNSETTXFILTER: 316750857e2aSArnd Bergmann case TUNGETSNDBUF: 316850857e2aSArnd Bergmann case TUNSETSNDBUF: 316950857e2aSArnd Bergmann case SIOCGIFHWADDR: 317050857e2aSArnd Bergmann case SIOCSIFHWADDR: 317150857e2aSArnd Bergmann arg = (unsigned long)compat_ptr(arg); 317250857e2aSArnd Bergmann break; 317350857e2aSArnd Bergmann default: 317450857e2aSArnd Bergmann arg = (compat_ulong_t)arg; 317550857e2aSArnd Bergmann break; 317650857e2aSArnd Bergmann } 317750857e2aSArnd Bergmann 317850857e2aSArnd Bergmann /* 317950857e2aSArnd Bergmann * compat_ifreq is shorter than ifreq, so we must not access beyond 318050857e2aSArnd Bergmann * the end of that structure. All fields that are used in this 318150857e2aSArnd Bergmann * driver are compatible though, we don't need to convert the 318250857e2aSArnd Bergmann * contents. 318350857e2aSArnd Bergmann */ 318450857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); 318550857e2aSArnd Bergmann } 318650857e2aSArnd Bergmann #endif /* CONFIG_COMPAT */ 318750857e2aSArnd Bergmann 31881da177e4SLinus Torvalds static int tun_chr_fasync(int fd, struct file *file, int on) 31891da177e4SLinus Torvalds { 319054f968d6SJason Wang struct tun_file *tfile = file->private_data; 31911da177e4SLinus Torvalds int ret; 31921da177e4SLinus Torvalds 319354f968d6SJason Wang if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) 31949d319522SJonathan Corbet goto out; 31951da177e4SLinus Torvalds 31961da177e4SLinus Torvalds if (on) { 3197e0b93eddSJeff Layton __f_setown(file, task_pid(current), PIDTYPE_PID, 0); 319854f968d6SJason Wang tfile->flags |= TUN_FASYNC; 31991da177e4SLinus Torvalds } else 320054f968d6SJason Wang tfile->flags &= ~TUN_FASYNC; 32019d319522SJonathan Corbet ret = 0; 32029d319522SJonathan Corbet out: 32039d319522SJonathan Corbet return ret; 32041da177e4SLinus Torvalds } 32051da177e4SLinus Torvalds 32061da177e4SLinus Torvalds static int tun_chr_open(struct inode *inode, struct file * file) 32071da177e4SLinus Torvalds { 3208140e807dSEric W. Biederman struct net *net = current->nsproxy->net_ns; 3209631ab46bSEric W. Biederman struct tun_file *tfile; 3210deed49fbSThomas Gleixner 32116b8a66eeSJoe Perches DBG1(KERN_INFO, "tunX: tun_chr_open\n"); 3212631ab46bSEric W. Biederman 3213140e807dSEric W. Biederman tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, 321411aa9c28SEric W. Biederman &tun_proto, 0); 3215631ab46bSEric W. Biederman if (!tfile) 3216631ab46bSEric W. 
Biederman return -ENOMEM; 3217c956674bSMonam Agarwal RCU_INIT_POINTER(tfile->tun, NULL); 321854f968d6SJason Wang tfile->flags = 0; 3219fb7589a1SPavel Emelyanov tfile->ifindex = 0; 322054f968d6SJason Wang 322154f968d6SJason Wang init_waitqueue_head(&tfile->wq.wait); 32229e641bdcSXi Wang RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq); 322354f968d6SJason Wang 322454f968d6SJason Wang tfile->socket.file = file; 322554f968d6SJason Wang tfile->socket.ops = &tun_socket_ops; 322654f968d6SJason Wang 322754f968d6SJason Wang sock_init_data(&tfile->socket, &tfile->sk); 322854f968d6SJason Wang 322954f968d6SJason Wang tfile->sk.sk_write_space = tun_sock_write_space; 323054f968d6SJason Wang tfile->sk.sk_sndbuf = INT_MAX; 323154f968d6SJason Wang 3232631ab46bSEric W. Biederman file->private_data = tfile; 32334008e97fSJason Wang INIT_LIST_HEAD(&tfile->next); 323454f968d6SJason Wang 323519a6afb2SJason Wang sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); 323619a6afb2SJason Wang 32378565d26bSDavid S. Miller memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); 3238762c330dSJason Wang tfile->xdp_pending_pkts = 0; 32394df0bfc7SCong Wang 32401da177e4SLinus Torvalds return 0; 32411da177e4SLinus Torvalds } 32421da177e4SLinus Torvalds 32431da177e4SLinus Torvalds static int tun_chr_close(struct inode *inode, struct file *file) 32441da177e4SLinus Torvalds { 3245631ab46bSEric W. Biederman struct tun_file *tfile = file->private_data; 32461da177e4SLinus Torvalds 3247c8d68e6bSJason Wang tun_detach(tfile, true); 32481da177e4SLinus Torvalds 32491da177e4SLinus Torvalds return 0; 32501da177e4SLinus Torvalds } 32511da177e4SLinus Torvalds 325293e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 32539484dc74Syuan linyu static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) 325493e14b6dSMasatake YAMATO { 32559484dc74Syuan linyu struct tun_file *tfile = file->private_data; 325693e14b6dSMasatake YAMATO struct tun_struct *tun; 325793e14b6dSMasatake YAMATO struct ifreq ifr; 325893e14b6dSMasatake YAMATO 325993e14b6dSMasatake YAMATO memset(&ifr, 0, sizeof(ifr)); 326093e14b6dSMasatake YAMATO 326193e14b6dSMasatake YAMATO rtnl_lock(); 32629484dc74Syuan linyu tun = tun_get(tfile); 326393e14b6dSMasatake YAMATO if (tun) 326493e14b6dSMasatake YAMATO tun_get_iff(current->nsproxy->net_ns, tun, &ifr); 326593e14b6dSMasatake YAMATO rtnl_unlock(); 326693e14b6dSMasatake YAMATO 326793e14b6dSMasatake YAMATO if (tun) 326893e14b6dSMasatake YAMATO tun_put(tun); 326993e14b6dSMasatake YAMATO 3270a3816ab0SJoe Perches seq_printf(m, "iff:\t%s\n", ifr.ifr_name); 327193e14b6dSMasatake YAMATO } 327293e14b6dSMasatake YAMATO #endif 327393e14b6dSMasatake YAMATO 3274d54b1fdbSArjan van de Ven static const struct file_operations tun_fops = { 32751da177e4SLinus Torvalds .owner = THIS_MODULE, 32761da177e4SLinus Torvalds .llseek = no_llseek, 32779b067034SAl Viro .read_iter = tun_chr_read_iter, 3278f5ff53b4SAl Viro .write_iter = tun_chr_write_iter, 32791da177e4SLinus Torvalds .poll = tun_chr_poll, 3280876bfd4dSHerbert Xu .unlocked_ioctl = tun_chr_ioctl, 328150857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 328250857e2aSArnd Bergmann .compat_ioctl = tun_chr_compat_ioctl, 328350857e2aSArnd Bergmann #endif 32841da177e4SLinus Torvalds .open = tun_chr_open, 32851da177e4SLinus Torvalds .release = tun_chr_close, 328693e14b6dSMasatake YAMATO .fasync = tun_chr_fasync, 328793e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 328893e14b6dSMasatake YAMATO .show_fdinfo = tun_chr_show_fdinfo, 328993e14b6dSMasatake YAMATO #endif 32901da177e4SLinus Torvalds }; 32911da177e4SLinus Torvalds 
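/*
 * Editor's note: a minimal userspace sketch (not part of the driver and not
 * built with it) showing how the character device registered below is
 * normally used.  Opening /dev/net/tun and issuing TUNSETIFF lands in
 * tun_chr_open() and tun_set_iff() above.  The helper name tun_alloc_example
 * and its error handling are hypothetical; only the open()/ioctl(TUNSETIFF)
 * sequence is the documented interface.
 */
#if 0	/* illustrative userspace code, excluded from the kernel build */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Create or attach to a tun device; dev may name it, or be "" for tun%d. */
static int tun_alloc_example(char *dev)
{
	struct ifreq ifr;
	int fd, err;

	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	/* raw IP frames, no packet info header */
	if (*dev)
		strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);

	err = ioctl(fd, TUNSETIFF, (void *)&ifr);
	if (err < 0) {
		close(fd);
		return err;
	}
	strcpy(dev, ifr.ifr_name);		/* kernel may have chosen e.g. "tun0" */
	return fd;				/* read()/write() now carry IP packets */
}
#endif
/*
 * The returned descriptor can additionally be made persistent with
 * ioctl(fd, TUNSETPERSIST, 1), which corresponds to the IFF_PERSIST
 * handling in __tun_chr_ioctl() above.
 */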
32921da177e4SLinus Torvalds static struct miscdevice tun_miscdev = { 32931da177e4SLinus Torvalds .minor = TUN_MINOR, 32941da177e4SLinus Torvalds .name = "tun", 3295e454cea2SKay Sievers .nodename = "net/tun", 32961da177e4SLinus Torvalds .fops = &tun_fops, 32971da177e4SLinus Torvalds }; 32981da177e4SLinus Torvalds 32991da177e4SLinus Torvalds /* ethtool interface */ 33001da177e4SLinus Torvalds 330129ccc49dSPhilippe Reynes static int tun_get_link_ksettings(struct net_device *dev, 330229ccc49dSPhilippe Reynes struct ethtool_link_ksettings *cmd) 33031da177e4SLinus Torvalds { 330429ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, supported); 330529ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, advertising); 330629ccc49dSPhilippe Reynes cmd->base.speed = SPEED_10; 330729ccc49dSPhilippe Reynes cmd->base.duplex = DUPLEX_FULL; 330829ccc49dSPhilippe Reynes cmd->base.port = PORT_TP; 330929ccc49dSPhilippe Reynes cmd->base.phy_address = 0; 331029ccc49dSPhilippe Reynes cmd->base.autoneg = AUTONEG_DISABLE; 33111da177e4SLinus Torvalds return 0; 33121da177e4SLinus Torvalds } 33131da177e4SLinus Torvalds 33141da177e4SLinus Torvalds static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 33151da177e4SLinus Torvalds { 33161da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 33171da177e4SLinus Torvalds 331833a5ba14SRick Jones strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 331933a5ba14SRick Jones strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 33201da177e4SLinus Torvalds 33211da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 332240630b82SMichael S. Tsirkin case IFF_TUN: 332333a5ba14SRick Jones strlcpy(info->bus_info, "tun", sizeof(info->bus_info)); 33241da177e4SLinus Torvalds break; 332540630b82SMichael S. 
Tsirkin case IFF_TAP: 332633a5ba14SRick Jones strlcpy(info->bus_info, "tap", sizeof(info->bus_info)); 33271da177e4SLinus Torvalds break; 33281da177e4SLinus Torvalds } 33291da177e4SLinus Torvalds } 33301da177e4SLinus Torvalds 33311da177e4SLinus Torvalds static u32 tun_get_msglevel(struct net_device *dev) 33321da177e4SLinus Torvalds { 33331da177e4SLinus Torvalds #ifdef TUN_DEBUG 33341da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 33351da177e4SLinus Torvalds return tun->debug; 33361da177e4SLinus Torvalds #else 33371da177e4SLinus Torvalds return -EOPNOTSUPP; 33381da177e4SLinus Torvalds #endif 33391da177e4SLinus Torvalds } 33401da177e4SLinus Torvalds 33411da177e4SLinus Torvalds static void tun_set_msglevel(struct net_device *dev, u32 value) 33421da177e4SLinus Torvalds { 33431da177e4SLinus Torvalds #ifdef TUN_DEBUG 33441da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 33451da177e4SLinus Torvalds tun->debug = value; 33461da177e4SLinus Torvalds #endif 33471da177e4SLinus Torvalds } 33481da177e4SLinus Torvalds 33495503fcecSJason Wang static int tun_get_coalesce(struct net_device *dev, 33505503fcecSJason Wang struct ethtool_coalesce *ec) 33515503fcecSJason Wang { 33525503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 33535503fcecSJason Wang 33545503fcecSJason Wang ec->rx_max_coalesced_frames = tun->rx_batched; 33555503fcecSJason Wang 33565503fcecSJason Wang return 0; 33575503fcecSJason Wang } 33585503fcecSJason Wang 33595503fcecSJason Wang static int tun_set_coalesce(struct net_device *dev, 33605503fcecSJason Wang struct ethtool_coalesce *ec) 33615503fcecSJason Wang { 33625503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 33635503fcecSJason Wang 33645503fcecSJason Wang if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) 33655503fcecSJason Wang tun->rx_batched = NAPI_POLL_WEIGHT; 33665503fcecSJason Wang else 33675503fcecSJason Wang tun->rx_batched = ec->rx_max_coalesced_frames; 33685503fcecSJason Wang 33695503fcecSJason Wang return 0; 33705503fcecSJason Wang } 33715503fcecSJason Wang 33727282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops = { 33731da177e4SLinus Torvalds .get_drvinfo = tun_get_drvinfo, 33741da177e4SLinus Torvalds .get_msglevel = tun_get_msglevel, 33751da177e4SLinus Torvalds .set_msglevel = tun_set_msglevel, 3376bee31369SNolan Leake .get_link = ethtool_op_get_link, 3377eda29772SRichard Cochran .get_ts_info = ethtool_op_get_ts_info, 33785503fcecSJason Wang .get_coalesce = tun_get_coalesce, 33795503fcecSJason Wang .set_coalesce = tun_set_coalesce, 338029ccc49dSPhilippe Reynes .get_link_ksettings = tun_get_link_ksettings, 33811da177e4SLinus Torvalds }; 33821da177e4SLinus Torvalds 33831576d986SJason Wang static int tun_queue_resize(struct tun_struct *tun) 33841576d986SJason Wang { 33851576d986SJason Wang struct net_device *dev = tun->dev; 33861576d986SJason Wang struct tun_file *tfile; 33875990a305SJason Wang struct ptr_ring **rings; 33881576d986SJason Wang int n = tun->numqueues + tun->numdisabled; 33891576d986SJason Wang int ret, i; 33901576d986SJason Wang 33915990a305SJason Wang rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); 33925990a305SJason Wang if (!rings) 33931576d986SJason Wang return -ENOMEM; 33941576d986SJason Wang 33951576d986SJason Wang for (i = 0; i < tun->numqueues; i++) { 33961576d986SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 33975990a305SJason Wang rings[i] = &tfile->tx_ring; 33981576d986SJason Wang } 33991576d986SJason Wang list_for_each_entry(tfile, &tun->disabled, next) 
34005990a305SJason Wang rings[i++] = &tfile->tx_ring; 34011576d986SJason Wang 34025990a305SJason Wang ret = ptr_ring_resize_multiple(rings, n, 34035990a305SJason Wang dev->tx_queue_len, GFP_KERNEL, 3404fc72d1d5SJason Wang tun_ptr_free); 34051576d986SJason Wang 34065990a305SJason Wang kfree(rings); 34071576d986SJason Wang return ret; 34081576d986SJason Wang } 34091576d986SJason Wang 34101576d986SJason Wang static int tun_device_event(struct notifier_block *unused, 34111576d986SJason Wang unsigned long event, void *ptr) 34121576d986SJason Wang { 34131576d986SJason Wang struct net_device *dev = netdev_notifier_info_to_dev(ptr); 34141576d986SJason Wang struct tun_struct *tun = netdev_priv(dev); 34151576d986SJason Wang 341686dfb4acSCraig Gallek if (dev->rtnl_link_ops != &tun_link_ops) 341786dfb4acSCraig Gallek return NOTIFY_DONE; 341886dfb4acSCraig Gallek 34191576d986SJason Wang switch (event) { 34201576d986SJason Wang case NETDEV_CHANGE_TX_QUEUE_LEN: 34211576d986SJason Wang if (tun_queue_resize(tun)) 34221576d986SJason Wang return NOTIFY_BAD; 34231576d986SJason Wang break; 34241576d986SJason Wang default: 34251576d986SJason Wang break; 34261576d986SJason Wang } 34271576d986SJason Wang 34281576d986SJason Wang return NOTIFY_DONE; 34291576d986SJason Wang } 34301576d986SJason Wang 34311576d986SJason Wang static struct notifier_block tun_notifier_block __read_mostly = { 34321576d986SJason Wang .notifier_call = tun_device_event, 34331576d986SJason Wang }; 343479d17604SPavel Emelyanov 34351da177e4SLinus Torvalds static int __init tun_init(void) 34361da177e4SLinus Torvalds { 34371da177e4SLinus Torvalds int ret = 0; 34381da177e4SLinus Torvalds 34396b8a66eeSJoe Perches pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 34401da177e4SLinus Torvalds 3441f019a7a5SEric W. Biederman ret = rtnl_link_register(&tun_link_ops); 344279d17604SPavel Emelyanov if (ret) { 34436b8a66eeSJoe Perches pr_err("Can't register link_ops\n"); 3444f019a7a5SEric W. Biederman goto err_linkops; 344579d17604SPavel Emelyanov } 344679d17604SPavel Emelyanov 34471da177e4SLinus Torvalds ret = misc_register(&tun_miscdev); 344879d17604SPavel Emelyanov if (ret) { 34496b8a66eeSJoe Perches pr_err("Can't register misc device %d\n", TUN_MINOR); 345079d17604SPavel Emelyanov goto err_misc; 345179d17604SPavel Emelyanov } 34521576d986SJason Wang 34535edfbd3cSTonghao Zhang ret = register_netdevice_notifier(&tun_notifier_block); 34545edfbd3cSTonghao Zhang if (ret) { 34555edfbd3cSTonghao Zhang pr_err("Can't register netdevice notifier\n"); 34565edfbd3cSTonghao Zhang goto err_notifier; 34575edfbd3cSTonghao Zhang } 34585edfbd3cSTonghao Zhang 345979d17604SPavel Emelyanov return 0; 34605edfbd3cSTonghao Zhang 34615edfbd3cSTonghao Zhang err_notifier: 34625edfbd3cSTonghao Zhang misc_deregister(&tun_miscdev); 346379d17604SPavel Emelyanov err_misc: 3464f019a7a5SEric W. Biederman rtnl_link_unregister(&tun_link_ops); 3465f019a7a5SEric W. Biederman err_linkops: 34661da177e4SLinus Torvalds return ret; 34671da177e4SLinus Torvalds } 34681da177e4SLinus Torvalds 34691da177e4SLinus Torvalds static void tun_cleanup(void) 34701da177e4SLinus Torvalds { 34711da177e4SLinus Torvalds misc_deregister(&tun_miscdev); 3472f019a7a5SEric W. Biederman rtnl_link_unregister(&tun_link_ops); 34731576d986SJason Wang unregister_netdevice_notifier(&tun_notifier_block); 34741da177e4SLinus Torvalds } 34751da177e4SLinus Torvalds 347605c2828cSMichael S. Tsirkin /* Get an underlying socket object from tun file. Returns error unless file is 347705c2828cSMichael S. Tsirkin * attached to a device. 
The returned object works like a packet socket, it 347805c2828cSMichael S. Tsirkin * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for 347905c2828cSMichael S. Tsirkin * holding a reference to the file for as long as the socket is in use. */ 348005c2828cSMichael S. Tsirkin struct socket *tun_get_socket(struct file *file) 348105c2828cSMichael S. Tsirkin { 34826e914fc7SJason Wang struct tun_file *tfile; 348305c2828cSMichael S. Tsirkin if (file->f_op != &tun_fops) 348405c2828cSMichael S. Tsirkin return ERR_PTR(-EINVAL); 34856e914fc7SJason Wang tfile = file->private_data; 34866e914fc7SJason Wang if (!tfile) 348705c2828cSMichael S. Tsirkin return ERR_PTR(-EBADFD); 348854f968d6SJason Wang return &tfile->socket; 348905c2828cSMichael S. Tsirkin } 349005c2828cSMichael S. Tsirkin EXPORT_SYMBOL_GPL(tun_get_socket); 349105c2828cSMichael S. Tsirkin 34925990a305SJason Wang struct ptr_ring *tun_get_tx_ring(struct file *file) 349383339c6bSJason Wang { 349483339c6bSJason Wang struct tun_file *tfile; 349583339c6bSJason Wang 349683339c6bSJason Wang if (file->f_op != &tun_fops) 349783339c6bSJason Wang return ERR_PTR(-EINVAL); 349883339c6bSJason Wang tfile = file->private_data; 349983339c6bSJason Wang if (!tfile) 350083339c6bSJason Wang return ERR_PTR(-EBADFD); 35015990a305SJason Wang return &tfile->tx_ring; 350283339c6bSJason Wang } 35035990a305SJason Wang EXPORT_SYMBOL_GPL(tun_get_tx_ring); 350483339c6bSJason Wang 35051da177e4SLinus Torvalds module_init(tun_init); 35061da177e4SLinus Torvalds module_exit(tun_cleanup); 35071da177e4SLinus Torvalds MODULE_DESCRIPTION(DRV_DESCRIPTION); 35081da177e4SLinus Torvalds MODULE_AUTHOR(DRV_COPYRIGHT); 35091da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 35101da177e4SLinus Torvalds MODULE_ALIAS_MISCDEV(TUN_MINOR); 3511578454ffSKay Sievers MODULE_ALIAS("devname:net/tun");
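/*
 * Editor's note: a second userspace sketch (not part of the driver)
 * illustrating the multiqueue paths: opening /dev/net/tun repeatedly with
 * the same name and IFF_MULTI_QUEUE attaches one queue per descriptor via
 * tun_set_iff()/tun_attach(), and TUNSETQUEUE with IFF_DETACH_QUEUE or
 * IFF_ATTACH_QUEUE toggles a queue through tun_set_queue() above.  The
 * *_example helpers are hypothetical; the ioctl sequence follows
 * Documentation/networking/tuntap.txt.
 */
#if 0	/* illustrative userspace code, excluded from the kernel build */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Open 'queues' descriptors, each backing one queue of the same tap device. */
static int tap_alloc_mq_example(const char *dev, int queues, int *fds)
{
	struct ifreq ifr;
	int fd, err, i;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
	strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);

	for (i = 0; i < queues; i++) {
		fd = open("/dev/net/tun", O_RDWR);
		if (fd < 0)
			goto err;
		err = ioctl(fd, TUNSETIFF, (void *)&ifr);
		if (err) {
			close(fd);
			goto err;
		}
		fds[i] = fd;
	}
	return 0;
err:
	while (--i >= 0)
		close(fds[i]);
	return -1;
}

/* Disable or re-enable the queue owned by fd without closing it. */
static int tap_set_queue_example(int fd, int enable)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = enable ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE;
	return ioctl(fd, TUNSETQUEUE, (void *)&ifr);
}
#endif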