1c942fddfSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later 21da177e4SLinus Torvalds /* 31da177e4SLinus Torvalds * TUN - Universal TUN/TAP device driver. 41da177e4SLinus Torvalds * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com> 51da177e4SLinus Torvalds * 61da177e4SLinus Torvalds * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $ 71da177e4SLinus Torvalds */ 81da177e4SLinus Torvalds 91da177e4SLinus Torvalds /* 101da177e4SLinus Torvalds * Changes: 111da177e4SLinus Torvalds * 12ff4cc3acSMike Kershaw * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14 13ff4cc3acSMike Kershaw * Add TUNSETLINK ioctl to set the link encapsulation 14ff4cc3acSMike Kershaw * 151da177e4SLinus Torvalds * Mark Smith <markzzzsmith@yahoo.com.au> 16344dc8edSJoe Perches * Use eth_random_addr() for tap MAC address. 171da177e4SLinus Torvalds * 181da177e4SLinus Torvalds * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20 191da177e4SLinus Torvalds * Fixes in packet dropping, queue length setting and queue wakeup. 201da177e4SLinus Torvalds * Increased default tx queue length. 211da177e4SLinus Torvalds * Added ethtool API. 221da177e4SLinus Torvalds * Minor cleanups 231da177e4SLinus Torvalds * 241da177e4SLinus Torvalds * Daniel Podlejski <underley@underley.eu.org> 251da177e4SLinus Torvalds * Modifications for 2.3.99-pre5 kernel. 261da177e4SLinus Torvalds */ 271da177e4SLinus Torvalds 286b8a66eeSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 296b8a66eeSJoe Perches 301da177e4SLinus Torvalds #define DRV_NAME "tun" 311da177e4SLinus Torvalds #define DRV_VERSION "1.6" 321da177e4SLinus Torvalds #define DRV_DESCRIPTION "Universal TUN/TAP device driver" 331da177e4SLinus Torvalds #define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>" 341da177e4SLinus Torvalds 351da177e4SLinus Torvalds #include <linux/module.h> 361da177e4SLinus Torvalds #include <linux/errno.h> 371da177e4SLinus Torvalds #include <linux/kernel.h> 38174cd4b1SIngo Molnar #include <linux/sched/signal.h> 391da177e4SLinus Torvalds #include <linux/major.h> 401da177e4SLinus Torvalds #include <linux/slab.h> 411da177e4SLinus Torvalds #include <linux/poll.h> 421da177e4SLinus Torvalds #include <linux/fcntl.h> 431da177e4SLinus Torvalds #include <linux/init.h> 441da177e4SLinus Torvalds #include <linux/skbuff.h> 451da177e4SLinus Torvalds #include <linux/netdevice.h> 461da177e4SLinus Torvalds #include <linux/etherdevice.h> 471da177e4SLinus Torvalds #include <linux/miscdevice.h> 481da177e4SLinus Torvalds #include <linux/ethtool.h> 491da177e4SLinus Torvalds #include <linux/rtnetlink.h> 5050857e2aSArnd Bergmann #include <linux/compat.h> 511da177e4SLinus Torvalds #include <linux/if.h> 521da177e4SLinus Torvalds #include <linux/if_arp.h> 531da177e4SLinus Torvalds #include <linux/if_ether.h> 541da177e4SLinus Torvalds #include <linux/if_tun.h> 556680ec68SJason Wang #include <linux/if_vlan.h> 561da177e4SLinus Torvalds #include <linux/crc32.h> 57d647a591SPavel Emelyanov #include <linux/nsproxy.h> 58f43798c2SRusty Russell #include <linux/virtio_net.h> 5999405162SMichael S. Tsirkin #include <linux/rcupdate.h> 60881d966bSEric W. Biederman #include <net/net_namespace.h> 6179d17604SPavel Emelyanov #include <net/netns/generic.h> 62f019a7a5SEric W. Biederman #include <net/rtnetlink.h> 6333dccbb0SHerbert Xu #include <net/sock.h> 64735fc405SJesper Dangaard Brouer #include <net/xdp.h> 65b9815eb1SJason A. Donenfeld #include <net/ip_tunnels.h> 6693e14b6dSMasatake YAMATO #include <linux/seq_file.h> 67e0b46d0eSHerbert Xu #include <linux/uio.h> 681576d986SJason Wang #include <linux/skb_array.h> 69761876c8SJason Wang #include <linux/bpf.h> 70761876c8SJason Wang #include <linux/bpf_trace.h> 7190e33d45SPetar Penkov #include <linux/mutex.h> 72cca8ea3bSPhillip Potter #include <linux/ieee802154.h> 73cca8ea3bSPhillip Potter #include <linux/if_ltalk.h> 74cca8ea3bSPhillip Potter #include <uapi/linux/if_fddi.h> 75cca8ea3bSPhillip Potter #include <uapi/linux/if_hippi.h> 76cca8ea3bSPhillip Potter #include <uapi/linux/if_fc.h> 77cca8ea3bSPhillip Potter #include <net/ax25.h> 78cca8ea3bSPhillip Potter #include <net/rose.h> 79cca8ea3bSPhillip Potter #include <net/6lowpan.h> 801da177e4SLinus Torvalds 817c0f6ba6SLinus Torvalds #include <linux/uaccess.h> 82f2780d6dSKirill Tkhai #include <linux/proc_fs.h> 831da177e4SLinus Torvalds 844e24f2ddSChas Williams static void tun_default_link_ksettings(struct net_device *dev, 854e24f2ddSChas Williams struct ethtool_link_ksettings *cmd); 864e24f2ddSChas Williams 877df13219SJason Wang #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) 8866ccbc9cSJason Wang 89031f5e03SMichael S. Tsirkin /* TUN device flags */ 90031f5e03SMichael S. Tsirkin 91031f5e03SMichael S. Tsirkin /* IFF_ATTACH_QUEUE is never stored in device flags, 92031f5e03SMichael S. Tsirkin * overload it to mean fasync when stored there. 93031f5e03SMichael S. Tsirkin */ 94031f5e03SMichael S. Tsirkin #define TUN_FASYNC IFF_ATTACH_QUEUE 951cf8e410SMichael S. Tsirkin /* High bits in flags field are unused. */ 961cf8e410SMichael S. Tsirkin #define TUN_VNET_LE 0x80000000 978b8e658bSGreg Kurz #define TUN_VNET_BE 0x40000000 98031f5e03SMichael S. Tsirkin 99031f5e03SMichael S. Tsirkin #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \ 10090e33d45SPetar Penkov IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS) 10190e33d45SPetar Penkov 1020690899bSMichael S. Tsirkin #define GOODCOPY_LEN 128 1030690899bSMichael S. Tsirkin 104f271b2ccSMax Krasnyansky #define FLT_EXACT_COUNT 8 105f271b2ccSMax Krasnyansky struct tap_filter { 106f271b2ccSMax Krasnyansky unsigned int count; /* Number of addrs. Zero means disabled */ 107f271b2ccSMax Krasnyansky u32 mask[2]; /* Mask of the hashed addrs */ 108f271b2ccSMax Krasnyansky unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; 109f271b2ccSMax Krasnyansky }; 110f271b2ccSMax Krasnyansky 111baf71c5cSPankaj Gupta /* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal 112baf71c5cSPankaj Gupta * to max number of VCPUs in guest. */ 113baf71c5cSPankaj Gupta #define MAX_TAP_QUEUES 256 114b8732fb7SJason Wang #define MAX_TAP_FLOWS 4096 115c8d68e6bSJason Wang 11696442e42SJason Wang #define TUN_FLOW_EXPIRE (3 * HZ) 11796442e42SJason Wang 11854f968d6SJason Wang /* A tun_file connects an open character device to a tuntap netdevice. It 11992d4ea6eSstephen hemminger * also contains all socket related structures (except sock_fprog and tap_filter) 12054f968d6SJason Wang * to serve as one transmit queue for tuntap device. The sock_fprog and 12154f968d6SJason Wang * tap_filter were kept in tun_struct since they were used for filtering for the 12236fe8c09SRami Rosen * netdevice not for a specific queue (at least I didn't see the requirement for 12354f968d6SJason Wang * this). 1246e914fc7SJason Wang * 1256e914fc7SJason Wang * RCU usage: 12636fe8c09SRami Rosen * The tun_file and tun_struct are loosely coupled, the pointer from one to the 1276e914fc7SJason Wang * other can only be read while rcu_read_lock or rtnl_lock is held. 12854f968d6SJason Wang */ 129631ab46bSEric W. Biederman struct tun_file { 13054f968d6SJason Wang struct sock sk; 13154f968d6SJason Wang struct socket socket; 1326e914fc7SJason Wang struct tun_struct __rcu *tun; 13354f968d6SJason Wang struct fasync_struct *fasync; 13454f968d6SJason Wang /* only used for fasnyc */ 13554f968d6SJason Wang unsigned int flags; 136fb7589a1SPavel Emelyanov union { 137c8d68e6bSJason Wang u16 queue_index; 138fb7589a1SPavel Emelyanov unsigned int ifindex; 139fb7589a1SPavel Emelyanov }; 14094317099SPetar Penkov struct napi_struct napi; 141aec72f33SEric Dumazet bool napi_enabled; 142af3fb24eSEric Dumazet bool napi_frags_enabled; 14390e33d45SPetar Penkov struct mutex napi_mutex; /* Protects access to the above napi */ 1444008e97fSJason Wang struct list_head next; 1454008e97fSJason Wang struct tun_struct *detached; 1465990a305SJason Wang struct ptr_ring tx_ring; 1478bf5c4eeSJesper Dangaard Brouer struct xdp_rxq_info xdp_rxq; 148631ab46bSEric W. Biederman }; 149631ab46bSEric W. Biederman 150f9e06c45SJason Wang struct tun_page { 151f9e06c45SJason Wang struct page *page; 152f9e06c45SJason Wang int count; 153f9e06c45SJason Wang }; 154f9e06c45SJason Wang 15596442e42SJason Wang struct tun_flow_entry { 15696442e42SJason Wang struct hlist_node hash_link; 15796442e42SJason Wang struct rcu_head rcu; 15896442e42SJason Wang struct tun_struct *tun; 15996442e42SJason Wang 16096442e42SJason Wang u32 rxhash; 1619bc88939STom Herbert u32 rps_rxhash; 16296442e42SJason Wang int queue_index; 16383b1bc12SLi RongQing unsigned long updated ____cacheline_aligned_in_smp; 16496442e42SJason Wang }; 16596442e42SJason Wang 16696442e42SJason Wang #define TUN_NUM_FLOW_ENTRIES 1024 167f13b5468SLi RongQing #define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1) 16896442e42SJason Wang 169cd5681d7SJason Wang struct tun_prog { 17096f84061SJason Wang struct rcu_head rcu; 17196f84061SJason Wang struct bpf_prog *prog; 17296f84061SJason Wang }; 17396f84061SJason Wang 17454f968d6SJason Wang /* Since the socket were moved to tun_file, to preserve the behavior of persist 17536fe8c09SRami Rosen * device, socket filter, sndbuf and vnet header size were restore when the 17654f968d6SJason Wang * file were attached to a persist device. 17754f968d6SJason Wang */ 17814daa021SRusty Russell struct tun_struct { 179c8d68e6bSJason Wang struct tun_file __rcu *tfiles[MAX_TAP_QUEUES]; 180c8d68e6bSJason Wang unsigned int numqueues; 181f271b2ccSMax Krasnyansky unsigned int flags; 1820625c883SEric W. Biederman kuid_t owner; 1830625c883SEric W. Biederman kgid_t group; 18414daa021SRusty Russell 18514daa021SRusty Russell struct net_device *dev; 186c8f44affSMichał Mirosław netdev_features_t set_features; 18788255375SMichał Mirosław #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ 188399e0827SAndrew Melnychenko NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4) 189d9d52b51SMichael S. Tsirkin 190eaea34b2SPaolo Abeni int align; 191d9d52b51SMichael S. Tsirkin int vnet_hdr_sz; 19254f968d6SJason Wang int sndbuf; 19354f968d6SJason Wang struct tap_filter txflt; 19454f968d6SJason Wang struct sock_fprog fprog; 19554f968d6SJason Wang /* protected by rtnl lock */ 19654f968d6SJason Wang bool filter_attached; 1973424170fSMichal Kubecek u32 msg_enable; 19896442e42SJason Wang spinlock_t lock; 19996442e42SJason Wang struct hlist_head flows[TUN_NUM_FLOW_ENTRIES]; 20096442e42SJason Wang struct timer_list flow_gc_timer; 20196442e42SJason Wang unsigned long ageing_time; 2024008e97fSJason Wang unsigned int numdisabled; 2034008e97fSJason Wang struct list_head disabled; 2045dbbaf2dSPaul Moore void *security; 205b8732fb7SJason Wang u32 flow_count; 2065503fcecSJason Wang u32 rx_batched; 207497a5757SHeiner Kallweit atomic_long_t rx_frame_errors; 208761876c8SJason Wang struct bpf_prog __rcu *xdp_prog; 209cd5681d7SJason Wang struct tun_prog __rcu *steering_prog; 210aff3d70aSJason Wang struct tun_prog __rcu *filter_prog; 2114e24f2ddSChas Williams struct ethtool_link_ksettings link_ksettings; 212158b515fSGeorge Kennedy /* init args */ 213158b515fSGeorge Kennedy struct file *file; 214158b515fSGeorge Kennedy struct ifreq *ifr; 21514daa021SRusty Russell }; 21614daa021SRusty Russell 217aff3d70aSJason Wang struct veth { 218aff3d70aSJason Wang __be16 h_vlan_proto; 219aff3d70aSJason Wang __be16 h_vlan_TCI; 2201da177e4SLinus Torvalds }; 2211da177e4SLinus Torvalds 222158b515fSGeorge Kennedy static void tun_flow_init(struct tun_struct *tun); 223158b515fSGeorge Kennedy static void tun_flow_uninit(struct tun_struct *tun); 224158b515fSGeorge Kennedy 22594317099SPetar Penkov static int tun_napi_receive(struct napi_struct *napi, int budget) 22694317099SPetar Penkov { 22794317099SPetar Penkov struct tun_file *tfile = container_of(napi, struct tun_file, napi); 22894317099SPetar Penkov struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 22994317099SPetar Penkov struct sk_buff_head process_queue; 23094317099SPetar Penkov struct sk_buff *skb; 23194317099SPetar Penkov int received = 0; 23294317099SPetar Penkov 23394317099SPetar Penkov __skb_queue_head_init(&process_queue); 23494317099SPetar Penkov 23594317099SPetar Penkov spin_lock(&queue->lock); 23694317099SPetar Penkov skb_queue_splice_tail_init(queue, &process_queue); 23794317099SPetar Penkov spin_unlock(&queue->lock); 23894317099SPetar Penkov 23994317099SPetar Penkov while (received < budget && (skb = __skb_dequeue(&process_queue))) { 24094317099SPetar Penkov napi_gro_receive(napi, skb); 24194317099SPetar Penkov ++received; 24294317099SPetar Penkov } 24394317099SPetar Penkov 24494317099SPetar Penkov if (!skb_queue_empty(&process_queue)) { 24594317099SPetar Penkov spin_lock(&queue->lock); 24694317099SPetar Penkov skb_queue_splice(&process_queue, queue); 24794317099SPetar Penkov spin_unlock(&queue->lock); 24894317099SPetar Penkov } 24994317099SPetar Penkov 25094317099SPetar Penkov return received; 25194317099SPetar Penkov } 25294317099SPetar Penkov 25394317099SPetar Penkov static int tun_napi_poll(struct napi_struct *napi, int budget) 25494317099SPetar Penkov { 25594317099SPetar Penkov unsigned int received; 25694317099SPetar Penkov 25794317099SPetar Penkov received = tun_napi_receive(napi, budget); 25894317099SPetar Penkov 25994317099SPetar Penkov if (received < budget) 26094317099SPetar Penkov napi_complete_done(napi, received); 26194317099SPetar Penkov 26294317099SPetar Penkov return received; 26394317099SPetar Penkov } 26494317099SPetar Penkov 26594317099SPetar Penkov static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, 266af3fb24eSEric Dumazet bool napi_en, bool napi_frags) 26794317099SPetar Penkov { 268aec72f33SEric Dumazet tfile->napi_enabled = napi_en; 269af3fb24eSEric Dumazet tfile->napi_frags_enabled = napi_en && napi_frags; 27094317099SPetar Penkov if (napi_en) { 27116d083e2SJakub Kicinski netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll); 27294317099SPetar Penkov napi_enable(&tfile->napi); 27394317099SPetar Penkov } 27494317099SPetar Penkov } 27594317099SPetar Penkov 276a8fc8cb5SJakub Kicinski static void tun_napi_enable(struct tun_file *tfile) 277a8fc8cb5SJakub Kicinski { 278a8fc8cb5SJakub Kicinski if (tfile->napi_enabled) 279a8fc8cb5SJakub Kicinski napi_enable(&tfile->napi); 280a8fc8cb5SJakub Kicinski } 281a8fc8cb5SJakub Kicinski 28206e55addSEric Dumazet static void tun_napi_disable(struct tun_file *tfile) 28394317099SPetar Penkov { 284aec72f33SEric Dumazet if (tfile->napi_enabled) 28594317099SPetar Penkov napi_disable(&tfile->napi); 28694317099SPetar Penkov } 28794317099SPetar Penkov 28806e55addSEric Dumazet static void tun_napi_del(struct tun_file *tfile) 28994317099SPetar Penkov { 290aec72f33SEric Dumazet if (tfile->napi_enabled) 29194317099SPetar Penkov netif_napi_del(&tfile->napi); 29294317099SPetar Penkov } 29394317099SPetar Penkov 294af3fb24eSEric Dumazet static bool tun_napi_frags_enabled(const struct tun_file *tfile) 29590e33d45SPetar Penkov { 296af3fb24eSEric Dumazet return tfile->napi_frags_enabled; 29790e33d45SPetar Penkov } 29890e33d45SPetar Penkov 2998b8e658bSGreg Kurz #ifdef CONFIG_TUN_VNET_CROSS_LE 3008b8e658bSGreg Kurz static inline bool tun_legacy_is_little_endian(struct tun_struct *tun) 3018b8e658bSGreg Kurz { 3028b8e658bSGreg Kurz return tun->flags & TUN_VNET_BE ? false : 3038b8e658bSGreg Kurz virtio_legacy_is_little_endian(); 3048b8e658bSGreg Kurz } 3058b8e658bSGreg Kurz 3068b8e658bSGreg Kurz static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp) 3078b8e658bSGreg Kurz { 3088b8e658bSGreg Kurz int be = !!(tun->flags & TUN_VNET_BE); 3098b8e658bSGreg Kurz 3108b8e658bSGreg Kurz if (put_user(be, argp)) 3118b8e658bSGreg Kurz return -EFAULT; 3128b8e658bSGreg Kurz 3138b8e658bSGreg Kurz return 0; 3148b8e658bSGreg Kurz } 3158b8e658bSGreg Kurz 3168b8e658bSGreg Kurz static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp) 3178b8e658bSGreg Kurz { 3188b8e658bSGreg Kurz int be; 3198b8e658bSGreg Kurz 3208b8e658bSGreg Kurz if (get_user(be, argp)) 3218b8e658bSGreg Kurz return -EFAULT; 3228b8e658bSGreg Kurz 3238b8e658bSGreg Kurz if (be) 3248b8e658bSGreg Kurz tun->flags |= TUN_VNET_BE; 3258b8e658bSGreg Kurz else 3268b8e658bSGreg Kurz tun->flags &= ~TUN_VNET_BE; 3278b8e658bSGreg Kurz 3288b8e658bSGreg Kurz return 0; 3298b8e658bSGreg Kurz } 3308b8e658bSGreg Kurz #else 3318b8e658bSGreg Kurz static inline bool tun_legacy_is_little_endian(struct tun_struct *tun) 3328b8e658bSGreg Kurz { 3338b8e658bSGreg Kurz return virtio_legacy_is_little_endian(); 3348b8e658bSGreg Kurz } 3358b8e658bSGreg Kurz 3368b8e658bSGreg Kurz static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp) 3378b8e658bSGreg Kurz { 3388b8e658bSGreg Kurz return -EINVAL; 3398b8e658bSGreg Kurz } 3408b8e658bSGreg Kurz 3418b8e658bSGreg Kurz static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp) 3428b8e658bSGreg Kurz { 3438b8e658bSGreg Kurz return -EINVAL; 3448b8e658bSGreg Kurz } 3458b8e658bSGreg Kurz #endif /* CONFIG_TUN_VNET_CROSS_LE */ 3468b8e658bSGreg Kurz 34725bd55bbSGreg Kurz static inline bool tun_is_little_endian(struct tun_struct *tun) 34825bd55bbSGreg Kurz { 3497d824109SGreg Kurz return tun->flags & TUN_VNET_LE || 3508b8e658bSGreg Kurz tun_legacy_is_little_endian(tun); 35125bd55bbSGreg Kurz } 35225bd55bbSGreg Kurz 35356f0dcc5SMichael S. Tsirkin static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val) 35456f0dcc5SMichael S. Tsirkin { 35525bd55bbSGreg Kurz return __virtio16_to_cpu(tun_is_little_endian(tun), val); 35656f0dcc5SMichael S. Tsirkin } 35756f0dcc5SMichael S. Tsirkin 35856f0dcc5SMichael S. Tsirkin static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val) 35956f0dcc5SMichael S. Tsirkin { 36025bd55bbSGreg Kurz return __cpu_to_virtio16(tun_is_little_endian(tun), val); 36156f0dcc5SMichael S. Tsirkin } 36256f0dcc5SMichael S. Tsirkin 36396442e42SJason Wang static inline u32 tun_hashfn(u32 rxhash) 36496442e42SJason Wang { 365f13b5468SLi RongQing return rxhash & TUN_MASK_FLOW_ENTRIES; 36696442e42SJason Wang } 36796442e42SJason Wang 36896442e42SJason Wang static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash) 36996442e42SJason Wang { 37096442e42SJason Wang struct tun_flow_entry *e; 37196442e42SJason Wang 372b67bfe0dSSasha Levin hlist_for_each_entry_rcu(e, head, hash_link) { 37396442e42SJason Wang if (e->rxhash == rxhash) 37496442e42SJason Wang return e; 37596442e42SJason Wang } 37696442e42SJason Wang return NULL; 37796442e42SJason Wang } 37896442e42SJason Wang 37996442e42SJason Wang static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun, 38096442e42SJason Wang struct hlist_head *head, 38196442e42SJason Wang u32 rxhash, u16 queue_index) 38296442e42SJason Wang { 3839fdc6befSEric Dumazet struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC); 3849fdc6befSEric Dumazet 38596442e42SJason Wang if (e) { 3863424170fSMichal Kubecek netif_info(tun, tx_queued, tun->dev, 3873424170fSMichal Kubecek "create flow: hash %u index %u\n", 38896442e42SJason Wang rxhash, queue_index); 38996442e42SJason Wang e->updated = jiffies; 39096442e42SJason Wang e->rxhash = rxhash; 3919bc88939STom Herbert e->rps_rxhash = 0; 39296442e42SJason Wang e->queue_index = queue_index; 39396442e42SJason Wang e->tun = tun; 39496442e42SJason Wang hlist_add_head_rcu(&e->hash_link, head); 395b8732fb7SJason Wang ++tun->flow_count; 39696442e42SJason Wang } 39796442e42SJason Wang return e; 39896442e42SJason Wang } 39996442e42SJason Wang 40096442e42SJason Wang static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e) 40196442e42SJason Wang { 4023424170fSMichal Kubecek netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n", 40396442e42SJason Wang e->rxhash, e->queue_index); 40496442e42SJason Wang hlist_del_rcu(&e->hash_link); 4059fdc6befSEric Dumazet kfree_rcu(e, rcu); 406b8732fb7SJason Wang --tun->flow_count; 40796442e42SJason Wang } 40896442e42SJason Wang 40996442e42SJason Wang static void tun_flow_flush(struct tun_struct *tun) 41096442e42SJason Wang { 41196442e42SJason Wang int i; 41296442e42SJason Wang 41396442e42SJason Wang spin_lock_bh(&tun->lock); 41496442e42SJason Wang for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { 41596442e42SJason Wang struct tun_flow_entry *e; 416b67bfe0dSSasha Levin struct hlist_node *n; 41796442e42SJason Wang 418b67bfe0dSSasha Levin hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) 41996442e42SJason Wang tun_flow_delete(tun, e); 42096442e42SJason Wang } 42196442e42SJason Wang spin_unlock_bh(&tun->lock); 42296442e42SJason Wang } 42396442e42SJason Wang 42496442e42SJason Wang static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index) 42596442e42SJason Wang { 42696442e42SJason Wang int i; 42796442e42SJason Wang 42896442e42SJason Wang spin_lock_bh(&tun->lock); 42996442e42SJason Wang for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { 43096442e42SJason Wang struct tun_flow_entry *e; 431b67bfe0dSSasha Levin struct hlist_node *n; 43296442e42SJason Wang 433b67bfe0dSSasha Levin hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) { 43496442e42SJason Wang if (e->queue_index == queue_index) 43596442e42SJason Wang tun_flow_delete(tun, e); 43696442e42SJason Wang } 43796442e42SJason Wang } 43896442e42SJason Wang spin_unlock_bh(&tun->lock); 43996442e42SJason Wang } 44096442e42SJason Wang 441e99e88a9SKees Cook static void tun_flow_cleanup(struct timer_list *t) 44296442e42SJason Wang { 443e99e88a9SKees Cook struct tun_struct *tun = from_timer(tun, t, flow_gc_timer); 44496442e42SJason Wang unsigned long delay = tun->ageing_time; 44596442e42SJason Wang unsigned long next_timer = jiffies + delay; 44696442e42SJason Wang unsigned long count = 0; 44796442e42SJason Wang int i; 44896442e42SJason Wang 4497dbfb4efSEric Dumazet spin_lock(&tun->lock); 45096442e42SJason Wang for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { 45196442e42SJason Wang struct tun_flow_entry *e; 452b67bfe0dSSasha Levin struct hlist_node *n; 45396442e42SJason Wang 454b67bfe0dSSasha Levin hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) { 45596442e42SJason Wang unsigned long this_timer; 45681d98fa4SEric Dumazet 45796442e42SJason Wang this_timer = e->updated + delay; 45881d98fa4SEric Dumazet if (time_before_eq(this_timer, jiffies)) { 45996442e42SJason Wang tun_flow_delete(tun, e); 46081d98fa4SEric Dumazet continue; 46181d98fa4SEric Dumazet } 46281d98fa4SEric Dumazet count++; 46381d98fa4SEric Dumazet if (time_before(this_timer, next_timer)) 46496442e42SJason Wang next_timer = this_timer; 46596442e42SJason Wang } 46696442e42SJason Wang } 46796442e42SJason Wang 46896442e42SJason Wang if (count) 46996442e42SJason Wang mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer)); 4707dbfb4efSEric Dumazet spin_unlock(&tun->lock); 47196442e42SJason Wang } 47296442e42SJason Wang 47349974420SEric Dumazet static void tun_flow_update(struct tun_struct *tun, u32 rxhash, 4749e85722dSJason Wang struct tun_file *tfile) 47596442e42SJason Wang { 47696442e42SJason Wang struct hlist_head *head; 47796442e42SJason Wang struct tun_flow_entry *e; 47896442e42SJason Wang unsigned long delay = tun->ageing_time; 4799e85722dSJason Wang u16 queue_index = tfile->queue_index; 48096442e42SJason Wang 48196442e42SJason Wang head = &tun->flows[tun_hashfn(rxhash)]; 48296442e42SJason Wang 48396442e42SJason Wang rcu_read_lock(); 48496442e42SJason Wang 48596442e42SJason Wang e = tun_flow_find(head, rxhash); 48696442e42SJason Wang if (likely(e)) { 48796442e42SJason Wang /* TODO: keep queueing to old queue until it's empty? */ 4884ffdd22eSEric Dumazet if (READ_ONCE(e->queue_index) != queue_index) 4894ffdd22eSEric Dumazet WRITE_ONCE(e->queue_index, queue_index); 49083b1bc12SLi RongQing if (e->updated != jiffies) 49196442e42SJason Wang e->updated = jiffies; 4929bc88939STom Herbert sock_rps_record_flow_hash(e->rps_rxhash); 49396442e42SJason Wang } else { 49496442e42SJason Wang spin_lock_bh(&tun->lock); 495b8732fb7SJason Wang if (!tun_flow_find(head, rxhash) && 496b8732fb7SJason Wang tun->flow_count < MAX_TAP_FLOWS) 49796442e42SJason Wang tun_flow_create(tun, head, rxhash, queue_index); 49896442e42SJason Wang 49996442e42SJason Wang if (!timer_pending(&tun->flow_gc_timer)) 50096442e42SJason Wang mod_timer(&tun->flow_gc_timer, 50196442e42SJason Wang round_jiffies_up(jiffies + delay)); 50296442e42SJason Wang spin_unlock_bh(&tun->lock); 50396442e42SJason Wang } 50496442e42SJason Wang 50596442e42SJason Wang rcu_read_unlock(); 50696442e42SJason Wang } 50796442e42SJason Wang 508516c512bSMichal Kubecek /* Save the hash received in the stack receive path and update the 5099bc88939STom Herbert * flow_hash table accordingly. 5109bc88939STom Herbert */ 5119bc88939STom Herbert static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash) 5129bc88939STom Herbert { 513567e4b79SEric Dumazet if (unlikely(e->rps_rxhash != hash)) 5149bc88939STom Herbert e->rps_rxhash = hash; 5159bc88939STom Herbert } 5169bc88939STom Herbert 5174b035271SWang Li /* We try to identify a flow through its rxhash. The reason that 51892d4ea6eSstephen hemminger * we do not check rxq no. is because some cards(e.g 82599), chooses 519c8d68e6bSJason Wang * the rxq based on the txq where the last packet of the flow comes. As 520c8d68e6bSJason Wang * the userspace application move between processors, we may get a 5214b035271SWang Li * different rxq no. here. 522c8d68e6bSJason Wang */ 52396f84061SJason Wang static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb) 524c8d68e6bSJason Wang { 52596442e42SJason Wang struct tun_flow_entry *e; 526c8d68e6bSJason Wang u32 txq = 0; 527c8d68e6bSJason Wang u32 numqueues = 0; 528c8d68e6bSJason Wang 5296aa7de05SMark Rutland numqueues = READ_ONCE(tun->numqueues); 530c8d68e6bSJason Wang 531feec084aSJason Wang txq = __skb_get_hash_symmetric(skb); 53296442e42SJason Wang e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); 5339bc88939STom Herbert if (e) { 5349bc88939STom Herbert tun_flow_save_rps_rxhash(e, txq); 535fbe4d456SZhi Yong Wu txq = e->queue_index; 5364b035271SWang Li } else { 537c8d68e6bSJason Wang /* use multiply and shift instead of expensive divide */ 538c8d68e6bSJason Wang txq = ((u64)txq * numqueues) >> 32; 539c8d68e6bSJason Wang } 540c8d68e6bSJason Wang 541c8d68e6bSJason Wang return txq; 542c8d68e6bSJason Wang } 543c8d68e6bSJason Wang 54496f84061SJason Wang static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb) 54596f84061SJason Wang { 546cd5681d7SJason Wang struct tun_prog *prog; 547a35d310fSJason Wang u32 numqueues; 54896f84061SJason Wang u16 ret = 0; 54996f84061SJason Wang 550a35d310fSJason Wang numqueues = READ_ONCE(tun->numqueues); 551a35d310fSJason Wang if (!numqueues) 552a35d310fSJason Wang return 0; 553a35d310fSJason Wang 55496f84061SJason Wang prog = rcu_dereference(tun->steering_prog); 55596f84061SJason Wang if (prog) 55696f84061SJason Wang ret = bpf_prog_run_clear_cb(prog->prog, skb); 55796f84061SJason Wang 558a35d310fSJason Wang return ret % numqueues; 55996f84061SJason Wang } 56096f84061SJason Wang 56196f84061SJason Wang static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, 562a350ecceSPaolo Abeni struct net_device *sb_dev) 56396f84061SJason Wang { 56496f84061SJason Wang struct tun_struct *tun = netdev_priv(dev); 56596f84061SJason Wang u16 ret; 56696f84061SJason Wang 56796f84061SJason Wang rcu_read_lock(); 56896f84061SJason Wang if (rcu_dereference(tun->steering_prog)) 56996f84061SJason Wang ret = tun_ebpf_select_queue(tun, skb); 57096f84061SJason Wang else 57196f84061SJason Wang ret = tun_automq_select_queue(tun, skb); 57296f84061SJason Wang rcu_read_unlock(); 57396f84061SJason Wang 57496f84061SJason Wang return ret; 57596f84061SJason Wang } 57696f84061SJason Wang 577cde8b15fSJason Wang static inline bool tun_not_capable(struct tun_struct *tun) 578cde8b15fSJason Wang { 579cde8b15fSJason Wang const struct cred *cred = current_cred(); 580c260b772SEric W. Biederman struct net *net = dev_net(tun->dev); 581cde8b15fSJason Wang 582cde8b15fSJason Wang return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) || 583cde8b15fSJason Wang (gid_valid(tun->group) && !in_egroup_p(tun->group))) && 584c260b772SEric W. Biederman !ns_capable(net->user_ns, CAP_NET_ADMIN); 585cde8b15fSJason Wang } 586cde8b15fSJason Wang 587c8d68e6bSJason Wang static void tun_set_real_num_queues(struct tun_struct *tun) 588c8d68e6bSJason Wang { 589c8d68e6bSJason Wang netif_set_real_num_tx_queues(tun->dev, tun->numqueues); 590c8d68e6bSJason Wang netif_set_real_num_rx_queues(tun->dev, tun->numqueues); 591c8d68e6bSJason Wang } 592c8d68e6bSJason Wang 5934008e97fSJason Wang static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile) 5944008e97fSJason Wang { 5954008e97fSJason Wang tfile->detached = tun; 5964008e97fSJason Wang list_add_tail(&tfile->next, &tun->disabled); 5974008e97fSJason Wang ++tun->numdisabled; 5984008e97fSJason Wang } 5994008e97fSJason Wang 600d32649d1SJason Wang static struct tun_struct *tun_enable_queue(struct tun_file *tfile) 6014008e97fSJason Wang { 6024008e97fSJason Wang struct tun_struct *tun = tfile->detached; 6034008e97fSJason Wang 6044008e97fSJason Wang tfile->detached = NULL; 6054008e97fSJason Wang list_del_init(&tfile->next); 6064008e97fSJason Wang --tun->numdisabled; 6074008e97fSJason Wang return tun; 6084008e97fSJason Wang } 6094008e97fSJason Wang 6103a403076SJason Wang void tun_ptr_free(void *ptr) 611fc72d1d5SJason Wang { 612fc72d1d5SJason Wang if (!ptr) 613fc72d1d5SJason Wang return; 6141ffcbc85SJesper Dangaard Brouer if (tun_is_xdp_frame(ptr)) { 6151ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); 616fc72d1d5SJason Wang 61703993094SJesper Dangaard Brouer xdp_return_frame(xdpf); 618fc72d1d5SJason Wang } else { 619fc72d1d5SJason Wang __skb_array_destroy_skb(ptr); 620fc72d1d5SJason Wang } 621fc72d1d5SJason Wang } 6223a403076SJason Wang EXPORT_SYMBOL_GPL(tun_ptr_free); 623fc72d1d5SJason Wang 6244bfb0513SJason Wang static void tun_queue_purge(struct tun_file *tfile) 6254bfb0513SJason Wang { 626fc72d1d5SJason Wang void *ptr; 6271576d986SJason Wang 628fc72d1d5SJason Wang while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL) 629fc72d1d5SJason Wang tun_ptr_free(ptr); 6301576d986SJason Wang 6315503fcecSJason Wang skb_queue_purge(&tfile->sk.sk_write_queue); 6324bfb0513SJason Wang skb_queue_purge(&tfile->sk.sk_error_queue); 6334bfb0513SJason Wang } 6344bfb0513SJason Wang 635c8d68e6bSJason Wang static void __tun_detach(struct tun_file *tfile, bool clean) 636c8d68e6bSJason Wang { 637c8d68e6bSJason Wang struct tun_file *ntfile; 638c8d68e6bSJason Wang struct tun_struct *tun; 639c8d68e6bSJason Wang 640b8deabd3SJason Wang tun = rtnl_dereference(tfile->tun); 641b8deabd3SJason Wang 64294317099SPetar Penkov if (tun && clean) { 643ff1fa208SJakub Kicinski if (!tfile->detached) 64406e55addSEric Dumazet tun_napi_disable(tfile); 64506e55addSEric Dumazet tun_napi_del(tfile); 64694317099SPetar Penkov } 64794317099SPetar Penkov 6489e85722dSJason Wang if (tun && !tfile->detached) { 649c8d68e6bSJason Wang u16 index = tfile->queue_index; 650c8d68e6bSJason Wang BUG_ON(index >= tun->numqueues); 651c8d68e6bSJason Wang 652c8d68e6bSJason Wang rcu_assign_pointer(tun->tfiles[index], 653c8d68e6bSJason Wang tun->tfiles[tun->numqueues - 1]); 654b8deabd3SJason Wang ntfile = rtnl_dereference(tun->tfiles[index]); 655c8d68e6bSJason Wang ntfile->queue_index = index; 6567322b217SYunjian Wang ntfile->xdp_rxq.queue_index = index; 6579871a9e4SJason Wang rcu_assign_pointer(tun->tfiles[tun->numqueues - 1], 6589871a9e4SJason Wang NULL); 659c8d68e6bSJason Wang 660c8d68e6bSJason Wang --tun->numqueues; 6619e85722dSJason Wang if (clean) { 662c956674bSMonam Agarwal RCU_INIT_POINTER(tfile->tun, NULL); 663c8d68e6bSJason Wang sock_put(&tfile->sk); 664a8fc8cb5SJakub Kicinski } else { 6654008e97fSJason Wang tun_disable_queue(tun, tfile); 666a8fc8cb5SJakub Kicinski tun_napi_disable(tfile); 667a8fc8cb5SJakub Kicinski } 668c8d68e6bSJason Wang 669c8d68e6bSJason Wang synchronize_net(); 67096442e42SJason Wang tun_flow_delete_by_queue(tun, tun->numqueues + 1); 671c8d68e6bSJason Wang /* Drop read queue */ 6724bfb0513SJason Wang tun_queue_purge(tfile); 673c8d68e6bSJason Wang tun_set_real_num_queues(tun); 674dd38bd85SJason Wang } else if (tfile->detached && clean) { 6754008e97fSJason Wang tun = tun_enable_queue(tfile); 676dd38bd85SJason Wang sock_put(&tfile->sk); 677dd38bd85SJason Wang } 678c8d68e6bSJason Wang 679c8d68e6bSJason Wang if (clean) { 680af668b3cSMichael S. Tsirkin if (tun && tun->numqueues == 0 && tun->numdisabled == 0) { 681af668b3cSMichael S. Tsirkin netif_carrier_off(tun->dev); 682af668b3cSMichael S. Tsirkin 68340630b82SMichael S. Tsirkin if (!(tun->flags & IFF_PERSIST) && 684af668b3cSMichael S. Tsirkin tun->dev->reg_state == NETREG_REGISTERED) 6854008e97fSJason Wang unregister_netdevice(tun->dev); 686af668b3cSMichael S. Tsirkin } 687b196d88aSJason Wang if (tun) 688b196d88aSJason Wang xdp_rxq_info_unreg(&tfile->xdp_rxq); 6897063efd3SJason Wang ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free); 690c8d68e6bSJason Wang } 691c8d68e6bSJason Wang } 692c8d68e6bSJason Wang 693c8d68e6bSJason Wang static void tun_detach(struct tun_file *tfile, bool clean) 694c8d68e6bSJason Wang { 69583c1f36fSSabrina Dubroca struct tun_struct *tun; 69683c1f36fSSabrina Dubroca struct net_device *dev; 69783c1f36fSSabrina Dubroca 698c8d68e6bSJason Wang rtnl_lock(); 69983c1f36fSSabrina Dubroca tun = rtnl_dereference(tfile->tun); 70083c1f36fSSabrina Dubroca dev = tun ? tun->dev : NULL; 701c8d68e6bSJason Wang __tun_detach(tfile, clean); 70283c1f36fSSabrina Dubroca if (dev) 70383c1f36fSSabrina Dubroca netdev_state_change(dev); 704c8d68e6bSJason Wang rtnl_unlock(); 7055daadc86SShigeru Yoshida 7065daadc86SShigeru Yoshida if (clean) 7075daadc86SShigeru Yoshida sock_put(&tfile->sk); 708c8d68e6bSJason Wang } 709c8d68e6bSJason Wang 710c8d68e6bSJason Wang static void tun_detach_all(struct net_device *dev) 711c8d68e6bSJason Wang { 712c8d68e6bSJason Wang struct tun_struct *tun = netdev_priv(dev); 7134008e97fSJason Wang struct tun_file *tfile, *tmp; 714c8d68e6bSJason Wang int i, n = tun->numqueues; 715c8d68e6bSJason Wang 716c8d68e6bSJason Wang for (i = 0; i < n; i++) { 717b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 718c8d68e6bSJason Wang BUG_ON(!tfile); 71906e55addSEric Dumazet tun_napi_disable(tfile); 720addf8fc4SJason Wang tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; 7219e641bdcSXi Wang tfile->socket.sk->sk_data_ready(tfile->socket.sk); 722c956674bSMonam Agarwal RCU_INIT_POINTER(tfile->tun, NULL); 723c8d68e6bSJason Wang --tun->numqueues; 724c8d68e6bSJason Wang } 7259e85722dSJason Wang list_for_each_entry(tfile, &tun->disabled, next) { 726addf8fc4SJason Wang tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; 7279e641bdcSXi Wang tfile->socket.sk->sk_data_ready(tfile->socket.sk); 728c956674bSMonam Agarwal RCU_INIT_POINTER(tfile->tun, NULL); 7299e85722dSJason Wang } 730c8d68e6bSJason Wang BUG_ON(tun->numqueues != 0); 731c8d68e6bSJason Wang 732c8d68e6bSJason Wang synchronize_net(); 733c8d68e6bSJason Wang for (i = 0; i < n; i++) { 734b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 73506e55addSEric Dumazet tun_napi_del(tfile); 736c8d68e6bSJason Wang /* Drop read queue */ 7374bfb0513SJason Wang tun_queue_purge(tfile); 738b196d88aSJason Wang xdp_rxq_info_unreg(&tfile->xdp_rxq); 739c8d68e6bSJason Wang sock_put(&tfile->sk); 740c8d68e6bSJason Wang } 7414008e97fSJason Wang list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { 7423b9bc84dSJakub Kicinski tun_napi_del(tfile); 7434008e97fSJason Wang tun_enable_queue(tfile); 7444bfb0513SJason Wang tun_queue_purge(tfile); 745b196d88aSJason Wang xdp_rxq_info_unreg(&tfile->xdp_rxq); 7464008e97fSJason Wang sock_put(&tfile->sk); 7474008e97fSJason Wang } 7484008e97fSJason Wang BUG_ON(tun->numdisabled != 0); 749dd38bd85SJason Wang 75040630b82SMichael S. Tsirkin if (tun->flags & IFF_PERSIST) 751dd38bd85SJason Wang module_put(THIS_MODULE); 752c8d68e6bSJason Wang } 753c8d68e6bSJason Wang 75494317099SPetar Penkov static int tun_attach(struct tun_struct *tun, struct file *file, 75577f22f92SYang Yingliang bool skip_filter, bool napi, bool napi_frags, 75677f22f92SYang Yingliang bool publish_tun) 757a7385ba2SEric W. Biederman { 758631ab46bSEric W. Biederman struct tun_file *tfile = file->private_data; 7591576d986SJason Wang struct net_device *dev = tun->dev; 76038231b7aSEric W. Biederman int err; 761a7385ba2SEric W. Biederman 7625dbbaf2dSPaul Moore err = security_tun_dev_attach(tfile->socket.sk, tun->security); 7635dbbaf2dSPaul Moore if (err < 0) 7645dbbaf2dSPaul Moore goto out; 7655dbbaf2dSPaul Moore 76638231b7aSEric W. Biederman err = -EINVAL; 7679e85722dSJason Wang if (rtnl_dereference(tfile->tun) && !tfile->detached) 76838231b7aSEric W. Biederman goto out; 76938231b7aSEric W. Biederman 77038231b7aSEric W. Biederman err = -EBUSY; 77140630b82SMichael S. Tsirkin if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1) 772c8d68e6bSJason Wang goto out; 773c8d68e6bSJason Wang 774c8d68e6bSJason Wang err = -E2BIG; 7754008e97fSJason Wang if (!tfile->detached && 7764008e97fSJason Wang tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES) 77738231b7aSEric W. Biederman goto out; 77838231b7aSEric W. Biederman 77938231b7aSEric W. Biederman err = 0; 78054f968d6SJason Wang 78192d4ea6eSstephen hemminger /* Re-attach the filter to persist device */ 782849c9b6fSPavel Emelyanov if (!skip_filter && (tun->filter_attached == true)) { 7838ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 7848ced425eSHannes Frederic Sowa err = sk_attach_filter(&tun->fprog, tfile->socket.sk); 7858ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 78654f968d6SJason Wang if (!err) 78754f968d6SJason Wang goto out; 78854f968d6SJason Wang } 7891576d986SJason Wang 7901576d986SJason Wang if (!tfile->detached && 791b196d88aSJason Wang ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len, 792b196d88aSJason Wang GFP_KERNEL, tun_ptr_free)) { 7931576d986SJason Wang err = -ENOMEM; 7941576d986SJason Wang goto out; 7951576d986SJason Wang } 7961576d986SJason Wang 797c8d68e6bSJason Wang tfile->queue_index = tun->numqueues; 798addf8fc4SJason Wang tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN; 7998bf5c4eeSJesper Dangaard Brouer 8008bf5c4eeSJesper Dangaard Brouer if (tfile->detached) { 8018bf5c4eeSJesper Dangaard Brouer /* Re-attach detached tfile, updating XDP queue_index */ 8028bf5c4eeSJesper Dangaard Brouer WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq)); 8038bf5c4eeSJesper Dangaard Brouer 8048bf5c4eeSJesper Dangaard Brouer if (tfile->xdp_rxq.queue_index != tfile->queue_index) 8058bf5c4eeSJesper Dangaard Brouer tfile->xdp_rxq.queue_index = tfile->queue_index; 8068bf5c4eeSJesper Dangaard Brouer } else { 8078bf5c4eeSJesper Dangaard Brouer /* Setup XDP RX-queue info, for new tfile getting attached */ 8088bf5c4eeSJesper Dangaard Brouer err = xdp_rxq_info_reg(&tfile->xdp_rxq, 809b02e5a0eSBjörn Töpel tun->dev, tfile->queue_index, 0); 8108bf5c4eeSJesper Dangaard Brouer if (err < 0) 8118bf5c4eeSJesper Dangaard Brouer goto out; 8128d5d8852SJesper Dangaard Brouer err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq, 8138d5d8852SJesper Dangaard Brouer MEM_TYPE_PAGE_SHARED, NULL); 8148d5d8852SJesper Dangaard Brouer if (err < 0) { 8158d5d8852SJesper Dangaard Brouer xdp_rxq_info_unreg(&tfile->xdp_rxq); 8168d5d8852SJesper Dangaard Brouer goto out; 8178d5d8852SJesper Dangaard Brouer } 8188bf5c4eeSJesper Dangaard Brouer err = 0; 8198bf5c4eeSJesper Dangaard Brouer } 8208bf5c4eeSJesper Dangaard Brouer 82194317099SPetar Penkov if (tfile->detached) { 8224008e97fSJason Wang tun_enable_queue(tfile); 823a8fc8cb5SJakub Kicinski tun_napi_enable(tfile); 82494317099SPetar Penkov } else { 8254008e97fSJason Wang sock_hold(&tfile->sk); 826af3fb24eSEric Dumazet tun_napi_init(tun, tfile, napi, napi_frags); 82794317099SPetar Penkov } 8284008e97fSJason Wang 829e4a2a304SJason Wang if (rtnl_dereference(tun->xdp_prog)) 830e4a2a304SJason Wang sock_set_flag(&tfile->sk, SOCK_XDP); 831e4a2a304SJason Wang 832c8d68e6bSJason Wang /* device is allowed to go away first, so no need to hold extra 833c8d68e6bSJason Wang * refcnt. 834c8d68e6bSJason Wang */ 835a7385ba2SEric W. Biederman 8360b7959b6SStanislav Fomichev /* Publish tfile->tun and tun->tfiles only after we've fully 8370b7959b6SStanislav Fomichev * initialized tfile; otherwise we risk using half-initialized 8380b7959b6SStanislav Fomichev * object. 8390b7959b6SStanislav Fomichev */ 84077f22f92SYang Yingliang if (publish_tun) 8410b7959b6SStanislav Fomichev rcu_assign_pointer(tfile->tun, tun); 8420b7959b6SStanislav Fomichev rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); 8430b7959b6SStanislav Fomichev tun->numqueues++; 8443a03cb84SGeorge Amanakis tun_set_real_num_queues(tun); 84538231b7aSEric W. Biederman out: 84638231b7aSEric W. Biederman return err; 847a7385ba2SEric W. Biederman } 848a7385ba2SEric W. Biederman 8499484dc74Syuan linyu static struct tun_struct *tun_get(struct tun_file *tfile) 850631ab46bSEric W. Biederman { 8516e914fc7SJason Wang struct tun_struct *tun; 852c70f1829SEric W. Biederman 8536e914fc7SJason Wang rcu_read_lock(); 8546e914fc7SJason Wang tun = rcu_dereference(tfile->tun); 8556e914fc7SJason Wang if (tun) 8566e914fc7SJason Wang dev_hold(tun->dev); 8576e914fc7SJason Wang rcu_read_unlock(); 858c70f1829SEric W. Biederman 859c70f1829SEric W. Biederman return tun; 860631ab46bSEric W. Biederman } 861631ab46bSEric W. Biederman 862631ab46bSEric W. Biederman static void tun_put(struct tun_struct *tun) 863631ab46bSEric W. Biederman { 8646e914fc7SJason Wang dev_put(tun->dev); 865631ab46bSEric W. Biederman } 866631ab46bSEric W. Biederman 8676b8a66eeSJoe Perches /* TAP filtering */ 868f271b2ccSMax Krasnyansky static void addr_hash_set(u32 *mask, const u8 *addr) 869f271b2ccSMax Krasnyansky { 870f271b2ccSMax Krasnyansky int n = ether_crc(ETH_ALEN, addr) >> 26; 871f271b2ccSMax Krasnyansky mask[n >> 5] |= (1 << (n & 31)); 872f271b2ccSMax Krasnyansky } 873f271b2ccSMax Krasnyansky 874f271b2ccSMax Krasnyansky static unsigned int addr_hash_test(const u32 *mask, const u8 *addr) 875f271b2ccSMax Krasnyansky { 876f271b2ccSMax Krasnyansky int n = ether_crc(ETH_ALEN, addr) >> 26; 877f271b2ccSMax Krasnyansky return mask[n >> 5] & (1 << (n & 31)); 878f271b2ccSMax Krasnyansky } 879f271b2ccSMax Krasnyansky 880f271b2ccSMax Krasnyansky static int update_filter(struct tap_filter *filter, void __user *arg) 881f271b2ccSMax Krasnyansky { 882f271b2ccSMax Krasnyansky struct { u8 u[ETH_ALEN]; } *addr; 883f271b2ccSMax Krasnyansky struct tun_filter uf; 884f271b2ccSMax Krasnyansky int err, alen, n, nexact; 885f271b2ccSMax Krasnyansky 886f271b2ccSMax Krasnyansky if (copy_from_user(&uf, arg, sizeof(uf))) 887f271b2ccSMax Krasnyansky return -EFAULT; 888f271b2ccSMax Krasnyansky 889f271b2ccSMax Krasnyansky if (!uf.count) { 890f271b2ccSMax Krasnyansky /* Disabled */ 891f271b2ccSMax Krasnyansky filter->count = 0; 892f271b2ccSMax Krasnyansky return 0; 893f271b2ccSMax Krasnyansky } 894f271b2ccSMax Krasnyansky 895f271b2ccSMax Krasnyansky alen = ETH_ALEN * uf.count; 89628e8190dSMarkus Elfring addr = memdup_user(arg + sizeof(uf), alen); 89728e8190dSMarkus Elfring if (IS_ERR(addr)) 89828e8190dSMarkus Elfring return PTR_ERR(addr); 899f271b2ccSMax Krasnyansky 900f271b2ccSMax Krasnyansky /* The filter is updated without holding any locks. Which is 901f271b2ccSMax Krasnyansky * perfectly safe. We disable it first and in the worst 902f271b2ccSMax Krasnyansky * case we'll accept a few undesired packets. */ 903f271b2ccSMax Krasnyansky filter->count = 0; 904f271b2ccSMax Krasnyansky wmb(); 905f271b2ccSMax Krasnyansky 906f271b2ccSMax Krasnyansky /* Use first set of addresses as an exact filter */ 907f271b2ccSMax Krasnyansky for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++) 908f271b2ccSMax Krasnyansky memcpy(filter->addr[n], addr[n].u, ETH_ALEN); 909f271b2ccSMax Krasnyansky 910f271b2ccSMax Krasnyansky nexact = n; 911f271b2ccSMax Krasnyansky 912cfbf84fcSAlex Williamson /* Remaining multicast addresses are hashed, 913cfbf84fcSAlex Williamson * unicast will leave the filter disabled. */ 914f271b2ccSMax Krasnyansky memset(filter->mask, 0, sizeof(filter->mask)); 915cfbf84fcSAlex Williamson for (; n < uf.count; n++) { 916cfbf84fcSAlex Williamson if (!is_multicast_ether_addr(addr[n].u)) { 917cfbf84fcSAlex Williamson err = 0; /* no filter */ 9183b8d2a69SMarkus Elfring goto free_addr; 919cfbf84fcSAlex Williamson } 920f271b2ccSMax Krasnyansky addr_hash_set(filter->mask, addr[n].u); 921cfbf84fcSAlex Williamson } 922f271b2ccSMax Krasnyansky 923f271b2ccSMax Krasnyansky /* For ALLMULTI just set the mask to all ones. 924f271b2ccSMax Krasnyansky * This overrides the mask populated above. */ 925f271b2ccSMax Krasnyansky if ((uf.flags & TUN_FLT_ALLMULTI)) 926f271b2ccSMax Krasnyansky memset(filter->mask, ~0, sizeof(filter->mask)); 927f271b2ccSMax Krasnyansky 928f271b2ccSMax Krasnyansky /* Now enable the filter */ 929f271b2ccSMax Krasnyansky wmb(); 930f271b2ccSMax Krasnyansky filter->count = nexact; 931f271b2ccSMax Krasnyansky 932f271b2ccSMax Krasnyansky /* Return the number of exact filters */ 933f271b2ccSMax Krasnyansky err = nexact; 9343b8d2a69SMarkus Elfring free_addr: 935f271b2ccSMax Krasnyansky kfree(addr); 936f271b2ccSMax Krasnyansky return err; 937f271b2ccSMax Krasnyansky } 938f271b2ccSMax Krasnyansky 939f271b2ccSMax Krasnyansky /* Returns: 0 - drop, !=0 - accept */ 940f271b2ccSMax Krasnyansky static int run_filter(struct tap_filter *filter, const struct sk_buff *skb) 941f271b2ccSMax Krasnyansky { 942f271b2ccSMax Krasnyansky /* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect 943f271b2ccSMax Krasnyansky * at this point. */ 944f271b2ccSMax Krasnyansky struct ethhdr *eh = (struct ethhdr *) skb->data; 945f271b2ccSMax Krasnyansky int i; 946f271b2ccSMax Krasnyansky 947f271b2ccSMax Krasnyansky /* Exact match */ 948f271b2ccSMax Krasnyansky for (i = 0; i < filter->count; i++) 9492e42e474SJoe Perches if (ether_addr_equal(eh->h_dest, filter->addr[i])) 950f271b2ccSMax Krasnyansky return 1; 951f271b2ccSMax Krasnyansky 952f271b2ccSMax Krasnyansky /* Inexact match (multicast only) */ 953f271b2ccSMax Krasnyansky if (is_multicast_ether_addr(eh->h_dest)) 954f271b2ccSMax Krasnyansky return addr_hash_test(filter->mask, eh->h_dest); 955f271b2ccSMax Krasnyansky 956f271b2ccSMax Krasnyansky return 0; 957f271b2ccSMax Krasnyansky } 958f271b2ccSMax Krasnyansky 959f271b2ccSMax Krasnyansky /* 960f271b2ccSMax Krasnyansky * Checks whether the packet is accepted or not. 961f271b2ccSMax Krasnyansky * Returns: 0 - drop, !=0 - accept 962f271b2ccSMax Krasnyansky */ 963f271b2ccSMax Krasnyansky static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) 964f271b2ccSMax Krasnyansky { 965f271b2ccSMax Krasnyansky if (!filter->count) 966f271b2ccSMax Krasnyansky return 1; 967f271b2ccSMax Krasnyansky 968f271b2ccSMax Krasnyansky return run_filter(filter, skb); 969f271b2ccSMax Krasnyansky } 970f271b2ccSMax Krasnyansky 9711da177e4SLinus Torvalds /* Network device part of the driver */ 9721da177e4SLinus Torvalds 9731da177e4SLinus Torvalds static const struct ethtool_ops tun_ethtool_ops; 9741da177e4SLinus Torvalds 975158b515fSGeorge Kennedy static int tun_net_init(struct net_device *dev) 976158b515fSGeorge Kennedy { 977158b515fSGeorge Kennedy struct tun_struct *tun = netdev_priv(dev); 978158b515fSGeorge Kennedy struct ifreq *ifr = tun->ifr; 979158b515fSGeorge Kennedy int err; 980158b515fSGeorge Kennedy 981158b515fSGeorge Kennedy dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 982158b515fSGeorge Kennedy if (!dev->tstats) 983158b515fSGeorge Kennedy return -ENOMEM; 984158b515fSGeorge Kennedy 985158b515fSGeorge Kennedy spin_lock_init(&tun->lock); 986158b515fSGeorge Kennedy 987158b515fSGeorge Kennedy err = security_tun_dev_alloc_security(&tun->security); 988158b515fSGeorge Kennedy if (err < 0) { 989158b515fSGeorge Kennedy free_percpu(dev->tstats); 990158b515fSGeorge Kennedy return err; 991158b515fSGeorge Kennedy } 992158b515fSGeorge Kennedy 993158b515fSGeorge Kennedy tun_flow_init(tun); 994158b515fSGeorge Kennedy 995158b515fSGeorge Kennedy dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | 996158b515fSGeorge Kennedy TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | 997158b515fSGeorge Kennedy NETIF_F_HW_VLAN_STAG_TX; 998158b515fSGeorge Kennedy dev->features = dev->hw_features | NETIF_F_LLTX; 999158b515fSGeorge Kennedy dev->vlan_features = dev->features & 1000158b515fSGeorge Kennedy ~(NETIF_F_HW_VLAN_CTAG_TX | 1001158b515fSGeorge Kennedy NETIF_F_HW_VLAN_STAG_TX); 1002158b515fSGeorge Kennedy 1003158b515fSGeorge Kennedy tun->flags = (tun->flags & ~TUN_FEATURES) | 1004158b515fSGeorge Kennedy (ifr->ifr_flags & TUN_FEATURES); 1005158b515fSGeorge Kennedy 1006158b515fSGeorge Kennedy INIT_LIST_HEAD(&tun->disabled); 1007158b515fSGeorge Kennedy err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI, 1008158b515fSGeorge Kennedy ifr->ifr_flags & IFF_NAPI_FRAGS, false); 1009158b515fSGeorge Kennedy if (err < 0) { 1010158b515fSGeorge Kennedy tun_flow_uninit(tun); 1011158b515fSGeorge Kennedy security_tun_dev_free_security(tun->security); 1012158b515fSGeorge Kennedy free_percpu(dev->tstats); 1013158b515fSGeorge Kennedy return err; 1014158b515fSGeorge Kennedy } 1015158b515fSGeorge Kennedy return 0; 1016158b515fSGeorge Kennedy } 1017158b515fSGeorge Kennedy 1018c70f1829SEric W. Biederman /* Net device detach from fd. */ 1019c70f1829SEric W. Biederman static void tun_net_uninit(struct net_device *dev) 1020c70f1829SEric W. Biederman { 1021c8d68e6bSJason Wang tun_detach_all(dev); 1022c70f1829SEric W. Biederman } 1023c70f1829SEric W. Biederman 10241da177e4SLinus Torvalds /* Net device open. */ 10251da177e4SLinus Torvalds static int tun_net_open(struct net_device *dev) 10261da177e4SLinus Torvalds { 1027c8d68e6bSJason Wang netif_tx_start_all_queues(dev); 1028b20e2d54SHannes Frederic Sowa 10291da177e4SLinus Torvalds return 0; 10301da177e4SLinus Torvalds } 10311da177e4SLinus Torvalds 10321da177e4SLinus Torvalds /* Net device close. */ 10331da177e4SLinus Torvalds static int tun_net_close(struct net_device *dev) 10341da177e4SLinus Torvalds { 1035c8d68e6bSJason Wang netif_tx_stop_all_queues(dev); 10361da177e4SLinus Torvalds return 0; 10371da177e4SLinus Torvalds } 10381da177e4SLinus Torvalds 10391da177e4SLinus Torvalds /* Net device start xmit */ 104096f84061SJason Wang static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb) 10411da177e4SLinus Torvalds { 10423df97ba8SJason Wang #ifdef CONFIG_RPS 1043dc05360fSEric Dumazet if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) { 10449bc88939STom Herbert /* Select queue was not called for the skbuff, so we extract the 10459bc88939STom Herbert * RPS hash and save it into the flow_table here. 10469bc88939STom Herbert */ 10474b035271SWang Li struct tun_flow_entry *e; 10489bc88939STom Herbert __u32 rxhash; 10499bc88939STom Herbert 1050feec084aSJason Wang rxhash = __skb_get_hash_symmetric(skb); 10514b035271SWang Li e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash); 10529bc88939STom Herbert if (e) 10539bc88939STom Herbert tun_flow_save_rps_rxhash(e, rxhash); 10549bc88939STom Herbert } 10553df97ba8SJason Wang #endif 105696f84061SJason Wang } 105796f84061SJason Wang 1058aff3d70aSJason Wang static unsigned int run_ebpf_filter(struct tun_struct *tun, 1059aff3d70aSJason Wang struct sk_buff *skb, 1060aff3d70aSJason Wang int len) 1061aff3d70aSJason Wang { 1062aff3d70aSJason Wang struct tun_prog *prog = rcu_dereference(tun->filter_prog); 1063aff3d70aSJason Wang 1064aff3d70aSJason Wang if (prog) 1065aff3d70aSJason Wang len = bpf_prog_run_clear_cb(prog->prog, skb); 1066aff3d70aSJason Wang 1067aff3d70aSJason Wang return len; 1068aff3d70aSJason Wang } 1069aff3d70aSJason Wang 107096f84061SJason Wang /* Net device start xmit */ 107196f84061SJason Wang static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) 107296f84061SJason Wang { 107396f84061SJason Wang struct tun_struct *tun = netdev_priv(dev); 10744b4f052eSDongli Zhang enum skb_drop_reason drop_reason; 107596f84061SJason Wang int txq = skb->queue_mapping; 1076a31d27fbSNicolas Dichtel struct netdev_queue *queue; 107796f84061SJason Wang struct tun_file *tfile; 1078aff3d70aSJason Wang int len = skb->len; 107996f84061SJason Wang 108096f84061SJason Wang rcu_read_lock(); 108196f84061SJason Wang tfile = rcu_dereference(tun->tfiles[txq]); 108296f84061SJason Wang 108396f84061SJason Wang /* Drop packet if interface is not attached */ 10844b4f052eSDongli Zhang if (!tfile) { 10854b4f052eSDongli Zhang drop_reason = SKB_DROP_REASON_DEV_READY; 108696f84061SJason Wang goto drop; 10874b4f052eSDongli Zhang } 108896f84061SJason Wang 108996f84061SJason Wang if (!rcu_dereference(tun->steering_prog)) 109096f84061SJason Wang tun_automq_xmit(tun, skb); 10919bc88939STom Herbert 10923424170fSMichal Kubecek netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len); 10936e914fc7SJason Wang 1094f271b2ccSMax Krasnyansky /* Drop if the filter does not like it. 1095f271b2ccSMax Krasnyansky * This is a noop if the filter is disabled. 1096f271b2ccSMax Krasnyansky * Filter can be enabled only for the TAP devices. */ 10974b4f052eSDongli Zhang if (!check_filter(&tun->txflt, skb)) { 10984b4f052eSDongli Zhang drop_reason = SKB_DROP_REASON_TAP_TXFILTER; 1099f271b2ccSMax Krasnyansky goto drop; 11004b4f052eSDongli Zhang } 1101f271b2ccSMax Krasnyansky 110254f968d6SJason Wang if (tfile->socket.sk->sk_filter && 11034b4f052eSDongli Zhang sk_filter(tfile->socket.sk, skb)) { 11044b4f052eSDongli Zhang drop_reason = SKB_DROP_REASON_SOCKET_FILTER; 110599405162SMichael S. Tsirkin goto drop; 11064b4f052eSDongli Zhang } 110799405162SMichael S. Tsirkin 1108aff3d70aSJason Wang len = run_ebpf_filter(tun, skb, len); 11094b4f052eSDongli Zhang if (len == 0) { 11104b4f052eSDongli Zhang drop_reason = SKB_DROP_REASON_TAP_FILTER; 111145a15d89SDongli Zhang goto drop; 11124b4f052eSDongli Zhang } 111345a15d89SDongli Zhang 11144b4f052eSDongli Zhang if (pskb_trim(skb, len)) { 11154b4f052eSDongli Zhang drop_reason = SKB_DROP_REASON_NOMEM; 1116aff3d70aSJason Wang goto drop; 11174b4f052eSDongli Zhang } 1118aff3d70aSJason Wang 11194b4f052eSDongli Zhang if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) { 11204b4f052eSDongli Zhang drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; 11217bf66305SJason Wang goto drop; 11224b4f052eSDongli Zhang } 11237bf66305SJason Wang 11247b996243SSoheil Hassas Yeganeh skb_tx_timestamp(skb); 1125eda29772SRichard Cochran 11260110d6f2SMichael S. Tsirkin /* Orphan the skb - required as we might hang on to it 11277bf66305SJason Wang * for indefinite time. 11287bf66305SJason Wang */ 11290110d6f2SMichael S. Tsirkin skb_orphan(skb); 11300110d6f2SMichael S. Tsirkin 1131895b5c9fSFlorian Westphal nf_reset_ct(skb); 1132f8af75f3SEric Dumazet 11334b4f052eSDongli Zhang if (ptr_ring_produce(&tfile->tx_ring, skb)) { 11344b4f052eSDongli Zhang drop_reason = SKB_DROP_REASON_FULL_RING; 11351576d986SJason Wang goto drop; 11364b4f052eSDongli Zhang } 11371da177e4SLinus Torvalds 1138a31d27fbSNicolas Dichtel /* NETIF_F_LLTX requires to do our own update of trans_start */ 1139a31d27fbSNicolas Dichtel queue = netdev_get_tx_queue(dev, txq); 1140968a1a5dSAntoine Tenart txq_trans_cond_update(queue); 1141a31d27fbSNicolas Dichtel 11421da177e4SLinus Torvalds /* Notify and wake up reader process */ 114354f968d6SJason Wang if (tfile->flags & TUN_FASYNC) 114454f968d6SJason Wang kill_fasync(&tfile->fasync, SIGIO, POLL_IN); 11459e641bdcSXi Wang tfile->socket.sk->sk_data_ready(tfile->socket.sk); 11466e914fc7SJason Wang 11476e914fc7SJason Wang rcu_read_unlock(); 11486ed10654SPatrick McHardy return NETDEV_TX_OK; 11491da177e4SLinus Torvalds 11501da177e4SLinus Torvalds drop: 1151625788b5SEric Dumazet dev_core_stats_tx_dropped_inc(dev); 1152149d36f7SMichael S. Tsirkin skb_tx_error(skb); 11534b4f052eSDongli Zhang kfree_skb_reason(skb, drop_reason); 11546e914fc7SJason Wang rcu_read_unlock(); 1155baeababbSJason Wang return NET_XMIT_DROP; 11561da177e4SLinus Torvalds } 11571da177e4SLinus Torvalds 1158f271b2ccSMax Krasnyansky static void tun_net_mclist(struct net_device *dev) 11591da177e4SLinus Torvalds { 1160f271b2ccSMax Krasnyansky /* 1161f271b2ccSMax Krasnyansky * This callback is supposed to deal with mc filter in 1162f271b2ccSMax Krasnyansky * _rx_ path and has nothing to do with the _tx_ path. 1163f271b2ccSMax Krasnyansky * In rx path we always accept everything userspace gives us. 1164f271b2ccSMax Krasnyansky */ 11651da177e4SLinus Torvalds } 11661da177e4SLinus Torvalds 1167c8f44affSMichał Mirosław static netdev_features_t tun_net_fix_features(struct net_device *dev, 1168c8f44affSMichał Mirosław netdev_features_t features) 116988255375SMichał Mirosław { 117088255375SMichał Mirosław struct tun_struct *tun = netdev_priv(dev); 117188255375SMichał Mirosław 117288255375SMichał Mirosław return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); 117388255375SMichał Mirosław } 1174eaea34b2SPaolo Abeni 1175eaea34b2SPaolo Abeni static void tun_set_headroom(struct net_device *dev, int new_hr) 1176eaea34b2SPaolo Abeni { 1177eaea34b2SPaolo Abeni struct tun_struct *tun = netdev_priv(dev); 1178eaea34b2SPaolo Abeni 1179eaea34b2SPaolo Abeni if (new_hr < NET_SKB_PAD) 1180eaea34b2SPaolo Abeni new_hr = NET_SKB_PAD; 1181eaea34b2SPaolo Abeni 1182eaea34b2SPaolo Abeni tun->align = new_hr; 1183eaea34b2SPaolo Abeni } 1184eaea34b2SPaolo Abeni 1185bc1f4470Sstephen hemminger static void 1186608b9977SPaolo Abeni tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 1187608b9977SPaolo Abeni { 1188608b9977SPaolo Abeni struct tun_struct *tun = netdev_priv(dev); 1189608b9977SPaolo Abeni 1190497a5757SHeiner Kallweit dev_get_tstats64(dev, stats); 1191608b9977SPaolo Abeni 1192497a5757SHeiner Kallweit stats->rx_frame_errors += 1193497a5757SHeiner Kallweit (unsigned long)atomic_long_read(&tun->rx_frame_errors); 1194608b9977SPaolo Abeni } 1195608b9977SPaolo Abeni 1196761876c8SJason Wang static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, 1197761876c8SJason Wang struct netlink_ext_ack *extack) 1198761876c8SJason Wang { 1199761876c8SJason Wang struct tun_struct *tun = netdev_priv(dev); 1200e4a2a304SJason Wang struct tun_file *tfile; 1201761876c8SJason Wang struct bpf_prog *old_prog; 1202e4a2a304SJason Wang int i; 1203761876c8SJason Wang 1204761876c8SJason Wang old_prog = rtnl_dereference(tun->xdp_prog); 1205761876c8SJason Wang rcu_assign_pointer(tun->xdp_prog, prog); 1206761876c8SJason Wang if (old_prog) 1207761876c8SJason Wang bpf_prog_put(old_prog); 1208761876c8SJason Wang 1209e4a2a304SJason Wang for (i = 0; i < tun->numqueues; i++) { 1210e4a2a304SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 1211e4a2a304SJason Wang if (prog) 1212e4a2a304SJason Wang sock_set_flag(&tfile->sk, SOCK_XDP); 1213e4a2a304SJason Wang else 1214e4a2a304SJason Wang sock_reset_flag(&tfile->sk, SOCK_XDP); 1215e4a2a304SJason Wang } 1216e4a2a304SJason Wang list_for_each_entry(tfile, &tun->disabled, next) { 1217e4a2a304SJason Wang if (prog) 1218e4a2a304SJason Wang sock_set_flag(&tfile->sk, SOCK_XDP); 1219e4a2a304SJason Wang else 1220e4a2a304SJason Wang sock_reset_flag(&tfile->sk, SOCK_XDP); 1221e4a2a304SJason Wang } 1222e4a2a304SJason Wang 1223761876c8SJason Wang return 0; 1224761876c8SJason Wang } 1225761876c8SJason Wang 1226f4e63525SJakub Kicinski static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) 1227761876c8SJason Wang { 1228761876c8SJason Wang switch (xdp->command) { 1229761876c8SJason Wang case XDP_SETUP_PROG: 1230761876c8SJason Wang return tun_xdp_set(dev, xdp->prog, xdp->extack); 1231761876c8SJason Wang default: 1232761876c8SJason Wang return -EINVAL; 1233761876c8SJason Wang } 1234761876c8SJason Wang } 1235761876c8SJason Wang 123626d31925SNicolas Dichtel static int tun_net_change_carrier(struct net_device *dev, bool new_carrier) 123726d31925SNicolas Dichtel { 123826d31925SNicolas Dichtel if (new_carrier) { 123926d31925SNicolas Dichtel struct tun_struct *tun = netdev_priv(dev); 124026d31925SNicolas Dichtel 124126d31925SNicolas Dichtel if (!tun->numqueues) 124226d31925SNicolas Dichtel return -EPERM; 124326d31925SNicolas Dichtel 124426d31925SNicolas Dichtel netif_carrier_on(dev); 124526d31925SNicolas Dichtel } else { 124626d31925SNicolas Dichtel netif_carrier_off(dev); 124726d31925SNicolas Dichtel } 124826d31925SNicolas Dichtel return 0; 124926d31925SNicolas Dichtel } 125026d31925SNicolas Dichtel 1251758e43b7SStephen Hemminger static const struct net_device_ops tun_netdev_ops = { 1252158b515fSGeorge Kennedy .ndo_init = tun_net_init, 1253c70f1829SEric W. Biederman .ndo_uninit = tun_net_uninit, 1254758e43b7SStephen Hemminger .ndo_open = tun_net_open, 1255758e43b7SStephen Hemminger .ndo_stop = tun_net_close, 125600829823SStephen Hemminger .ndo_start_xmit = tun_net_xmit, 125788255375SMichał Mirosław .ndo_fix_features = tun_net_fix_features, 1258c8d68e6bSJason Wang .ndo_select_queue = tun_select_queue, 1259eaea34b2SPaolo Abeni .ndo_set_rx_headroom = tun_set_headroom, 1260608b9977SPaolo Abeni .ndo_get_stats64 = tun_net_get_stats64, 126126d31925SNicolas Dichtel .ndo_change_carrier = tun_net_change_carrier, 1262758e43b7SStephen Hemminger }; 1263758e43b7SStephen Hemminger 12640c9d917bSJesper Dangaard Brouer static void __tun_xdp_flush_tfile(struct tun_file *tfile) 12650c9d917bSJesper Dangaard Brouer { 12660c9d917bSJesper Dangaard Brouer /* Notify and wake up reader process */ 12670c9d917bSJesper Dangaard Brouer if (tfile->flags & TUN_FASYNC) 12680c9d917bSJesper Dangaard Brouer kill_fasync(&tfile->fasync, SIGIO, POLL_IN); 12690c9d917bSJesper Dangaard Brouer tfile->socket.sk->sk_data_ready(tfile->socket.sk); 12700c9d917bSJesper Dangaard Brouer } 12710c9d917bSJesper Dangaard Brouer 127242b33468SJesper Dangaard Brouer static int tun_xdp_xmit(struct net_device *dev, int n, 127342b33468SJesper Dangaard Brouer struct xdp_frame **frames, u32 flags) 1274fc72d1d5SJason Wang { 1275fc72d1d5SJason Wang struct tun_struct *tun = netdev_priv(dev); 1276fc72d1d5SJason Wang struct tun_file *tfile; 1277fc72d1d5SJason Wang u32 numqueues; 1278fdc13979SLorenzo Bianconi int nxmit = 0; 1279735fc405SJesper Dangaard Brouer int i; 1280fc72d1d5SJason Wang 12810c9d917bSJesper Dangaard Brouer if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 128242b33468SJesper Dangaard Brouer return -EINVAL; 128342b33468SJesper Dangaard Brouer 1284fc72d1d5SJason Wang rcu_read_lock(); 1285fc72d1d5SJason Wang 12869871a9e4SJason Wang resample: 1287fc72d1d5SJason Wang numqueues = READ_ONCE(tun->numqueues); 1288fc72d1d5SJason Wang if (!numqueues) { 1289735fc405SJesper Dangaard Brouer rcu_read_unlock(); 1290735fc405SJesper Dangaard Brouer return -ENXIO; /* Caller will free/return all frames */ 1291fc72d1d5SJason Wang } 1292fc72d1d5SJason Wang 1293fc72d1d5SJason Wang tfile = rcu_dereference(tun->tfiles[smp_processor_id() % 1294fc72d1d5SJason Wang numqueues]); 12959871a9e4SJason Wang if (unlikely(!tfile)) 12969871a9e4SJason Wang goto resample; 1297735fc405SJesper Dangaard Brouer 1298735fc405SJesper Dangaard Brouer spin_lock(&tfile->tx_ring.producer_lock); 1299735fc405SJesper Dangaard Brouer for (i = 0; i < n; i++) { 1300735fc405SJesper Dangaard Brouer struct xdp_frame *xdp = frames[i]; 1301fc72d1d5SJason Wang /* Encode the XDP flag into lowest bit for consumer to differ 1302fc72d1d5SJason Wang * XDP buffer from sk_buff. 1303fc72d1d5SJason Wang */ 1304735fc405SJesper Dangaard Brouer void *frame = tun_xdp_to_ptr(xdp); 1305fc72d1d5SJason Wang 1306735fc405SJesper Dangaard Brouer if (__ptr_ring_produce(&tfile->tx_ring, frame)) { 1307625788b5SEric Dumazet dev_core_stats_tx_dropped_inc(dev); 1308fdc13979SLorenzo Bianconi break; 1309735fc405SJesper Dangaard Brouer } 1310fdc13979SLorenzo Bianconi nxmit++; 1311735fc405SJesper Dangaard Brouer } 1312735fc405SJesper Dangaard Brouer spin_unlock(&tfile->tx_ring.producer_lock); 1313735fc405SJesper Dangaard Brouer 13140c9d917bSJesper Dangaard Brouer if (flags & XDP_XMIT_FLUSH) 13150c9d917bSJesper Dangaard Brouer __tun_xdp_flush_tfile(tfile); 13160c9d917bSJesper Dangaard Brouer 1317fc72d1d5SJason Wang rcu_read_unlock(); 1318fdc13979SLorenzo Bianconi return nxmit; 1319fc72d1d5SJason Wang } 1320fc72d1d5SJason Wang 132144fa2dbdSJesper Dangaard Brouer static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) 132244fa2dbdSJesper Dangaard Brouer { 13231b698fa5SLorenzo Bianconi struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp); 1324fdc13979SLorenzo Bianconi int nxmit; 132544fa2dbdSJesper Dangaard Brouer 132644fa2dbdSJesper Dangaard Brouer if (unlikely(!frame)) 132744fa2dbdSJesper Dangaard Brouer return -EOVERFLOW; 132844fa2dbdSJesper Dangaard Brouer 1329fdc13979SLorenzo Bianconi nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH); 1330fdc13979SLorenzo Bianconi if (!nxmit) 1331fdc13979SLorenzo Bianconi xdp_return_frame_rx_napi(frame); 1332fdc13979SLorenzo Bianconi return nxmit; 1333fc72d1d5SJason Wang } 1334fc72d1d5SJason Wang 1335758e43b7SStephen Hemminger static const struct net_device_ops tap_netdev_ops = { 1336158b515fSGeorge Kennedy .ndo_init = tun_net_init, 1337c70f1829SEric W. Biederman .ndo_uninit = tun_net_uninit, 1338758e43b7SStephen Hemminger .ndo_open = tun_net_open, 1339758e43b7SStephen Hemminger .ndo_stop = tun_net_close, 134000829823SStephen Hemminger .ndo_start_xmit = tun_net_xmit, 134188255375SMichał Mirosław .ndo_fix_features = tun_net_fix_features, 1342afc4b13dSJiri Pirko .ndo_set_rx_mode = tun_net_mclist, 1343758e43b7SStephen Hemminger .ndo_set_mac_address = eth_mac_addr, 1344758e43b7SStephen Hemminger .ndo_validate_addr = eth_validate_addr, 1345c8d68e6bSJason Wang .ndo_select_queue = tun_select_queue, 13465e52796aSToshiaki Makita .ndo_features_check = passthru_features_check, 1347eaea34b2SPaolo Abeni .ndo_set_rx_headroom = tun_set_headroom, 1348497a5757SHeiner Kallweit .ndo_get_stats64 = dev_get_tstats64, 1349f4e63525SJakub Kicinski .ndo_bpf = tun_xdp, 1350fc72d1d5SJason Wang .ndo_xdp_xmit = tun_xdp_xmit, 135126d31925SNicolas Dichtel .ndo_change_carrier = tun_net_change_carrier, 1352758e43b7SStephen Hemminger }; 1353758e43b7SStephen Hemminger 1354944a1376SPavel Emelyanov static void tun_flow_init(struct tun_struct *tun) 135596442e42SJason Wang { 135696442e42SJason Wang int i; 135796442e42SJason Wang 135896442e42SJason Wang for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) 135996442e42SJason Wang INIT_HLIST_HEAD(&tun->flows[i]); 136096442e42SJason Wang 136196442e42SJason Wang tun->ageing_time = TUN_FLOW_EXPIRE; 1362e99e88a9SKees Cook timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0); 1363e99e88a9SKees Cook mod_timer(&tun->flow_gc_timer, 1364e99e88a9SKees Cook round_jiffies_up(jiffies + tun->ageing_time)); 136596442e42SJason Wang } 136696442e42SJason Wang 136796442e42SJason Wang static void tun_flow_uninit(struct tun_struct *tun) 136896442e42SJason Wang { 136996442e42SJason Wang del_timer_sync(&tun->flow_gc_timer); 137096442e42SJason Wang tun_flow_flush(tun); 137196442e42SJason Wang } 137296442e42SJason Wang 137391572088SJarod Wilson #define MIN_MTU 68 137491572088SJarod Wilson #define MAX_MTU 65535 137591572088SJarod Wilson 13761da177e4SLinus Torvalds /* Initialize net device. */ 1377158b515fSGeorge Kennedy static void tun_net_initialize(struct net_device *dev) 13781da177e4SLinus Torvalds { 13791da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 13801da177e4SLinus Torvalds 13811da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 138240630b82SMichael S. Tsirkin case IFF_TUN: 1383758e43b7SStephen Hemminger dev->netdev_ops = &tun_netdev_ops; 1384b9815eb1SJason A. Donenfeld dev->header_ops = &ip_tunnel_header_ops; 1385758e43b7SStephen Hemminger 13861da177e4SLinus Torvalds /* Point-to-Point TUN Device */ 13871da177e4SLinus Torvalds dev->hard_header_len = 0; 13881da177e4SLinus Torvalds dev->addr_len = 0; 13891da177e4SLinus Torvalds dev->mtu = 1500; 13901da177e4SLinus Torvalds 13911da177e4SLinus Torvalds /* Zero header length */ 13921da177e4SLinus Torvalds dev->type = ARPHRD_NONE; 13931da177e4SLinus Torvalds dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 13941da177e4SLinus Torvalds break; 13951da177e4SLinus Torvalds 139640630b82SMichael S. Tsirkin case IFF_TAP: 13977a0a9608SKusanagi Kouichi dev->netdev_ops = &tap_netdev_ops; 13981da177e4SLinus Torvalds /* Ethernet TAP Device */ 13991da177e4SLinus Torvalds ether_setup(dev); 1400550fd08cSNeil Horman dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1401a676847bSstephen hemminger dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 140236226a8dSBrian Braunstein 1403f2cedb63SDanny Kukawka eth_hw_addr_random(dev); 140436226a8dSBrian Braunstein 140566c0e13aSMarek Majtyka /* Currently tun does not support XDP, only tap does. */ 140666c0e13aSMarek Majtyka dev->xdp_features = NETDEV_XDP_ACT_BASIC | 140766c0e13aSMarek Majtyka NETDEV_XDP_ACT_REDIRECT | 140866c0e13aSMarek Majtyka NETDEV_XDP_ACT_NDO_XMIT; 140966c0e13aSMarek Majtyka 14101da177e4SLinus Torvalds break; 14111da177e4SLinus Torvalds } 141291572088SJarod Wilson 141391572088SJarod Wilson dev->min_mtu = MIN_MTU; 141491572088SJarod Wilson dev->max_mtu = MAX_MTU - dev->hard_header_len; 14151da177e4SLinus Torvalds } 14161da177e4SLinus Torvalds 14172f3ab622SJason Wang static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile) 14182f3ab622SJason Wang { 14192f3ab622SJason Wang struct sock *sk = tfile->socket.sk; 14202f3ab622SJason Wang 14212f3ab622SJason Wang return (tun->dev->flags & IFF_UP) && sock_writeable(sk); 14222f3ab622SJason Wang } 14232f3ab622SJason Wang 14241da177e4SLinus Torvalds /* Character device part */ 14251da177e4SLinus Torvalds 14261da177e4SLinus Torvalds /* Poll */ 1427afc9a42bSAl Viro static __poll_t tun_chr_poll(struct file *file, poll_table *wait) 14281da177e4SLinus Torvalds { 1429b2430de3SEric W. Biederman struct tun_file *tfile = file->private_data; 14309484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 14313c8a9c63SMariusz Kozlowski struct sock *sk; 1432afc9a42bSAl Viro __poll_t mask = 0; 14331da177e4SLinus Torvalds 14341da177e4SLinus Torvalds if (!tun) 1435a9a08845SLinus Torvalds return EPOLLERR; 14361da177e4SLinus Torvalds 143754f968d6SJason Wang sk = tfile->socket.sk; 14383c8a9c63SMariusz Kozlowski 14399e641bdcSXi Wang poll_wait(file, sk_sleep(sk), wait); 14401da177e4SLinus Torvalds 14415990a305SJason Wang if (!ptr_ring_empty(&tfile->tx_ring)) 1442a9a08845SLinus Torvalds mask |= EPOLLIN | EPOLLRDNORM; 14431da177e4SLinus Torvalds 14442f3ab622SJason Wang /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to 14452f3ab622SJason Wang * guarantee EPOLLOUT to be raised by either here or 14462f3ab622SJason Wang * tun_sock_write_space(). Then process could get notification 14472f3ab622SJason Wang * after it writes to a down device and meets -EIO. 14482f3ab622SJason Wang */ 14492f3ab622SJason Wang if (tun_sock_writeable(tun, tfile) || 14509cd3e072SEric Dumazet (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 14512f3ab622SJason Wang tun_sock_writeable(tun, tfile))) 1452a9a08845SLinus Torvalds mask |= EPOLLOUT | EPOLLWRNORM; 145333dccbb0SHerbert Xu 1454c70f1829SEric W. Biederman if (tun->dev->reg_state != NETREG_REGISTERED) 1455a9a08845SLinus Torvalds mask = EPOLLERR; 1456c70f1829SEric W. Biederman 1457631ab46bSEric W. Biederman tun_put(tun); 14581da177e4SLinus Torvalds return mask; 14591da177e4SLinus Torvalds } 14601da177e4SLinus Torvalds 146190e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, 146290e33d45SPetar Penkov size_t len, 146390e33d45SPetar Penkov const struct iov_iter *it) 146490e33d45SPetar Penkov { 146590e33d45SPetar Penkov struct sk_buff *skb; 146690e33d45SPetar Penkov size_t linear; 146790e33d45SPetar Penkov int err; 146890e33d45SPetar Penkov int i; 146990e33d45SPetar Penkov 1470363a5328SZiyang Xuan if (it->nr_segs > MAX_SKB_FRAGS + 1 || 1471363a5328SZiyang Xuan len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN)) 1472950271d7SYunjian Wang return ERR_PTR(-EMSGSIZE); 147390e33d45SPetar Penkov 147490e33d45SPetar Penkov local_bh_disable(); 147590e33d45SPetar Penkov skb = napi_get_frags(&tfile->napi); 147690e33d45SPetar Penkov local_bh_enable(); 147790e33d45SPetar Penkov if (!skb) 147890e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 147990e33d45SPetar Penkov 148090e33d45SPetar Penkov linear = iov_iter_single_seg_count(it); 148190e33d45SPetar Penkov err = __skb_grow(skb, linear); 148290e33d45SPetar Penkov if (err) 148390e33d45SPetar Penkov goto free; 148490e33d45SPetar Penkov 148590e33d45SPetar Penkov skb->len = len; 148690e33d45SPetar Penkov skb->data_len = len - linear; 148790e33d45SPetar Penkov skb->truesize += skb->data_len; 148890e33d45SPetar Penkov 148990e33d45SPetar Penkov for (i = 1; i < it->nr_segs; i++) { 1490de4f5fedSJens Axboe const struct iovec *iov = iter_iov(it); 1491de4f5fedSJens Axboe size_t fragsz = iov->iov_len; 1492aa6daacaSEric Dumazet struct page *page; 1493aa6daacaSEric Dumazet void *frag; 149490e33d45SPetar Penkov 149590e33d45SPetar Penkov if (fragsz == 0 || fragsz > PAGE_SIZE) { 149690e33d45SPetar Penkov err = -EINVAL; 149790e33d45SPetar Penkov goto free; 149890e33d45SPetar Penkov } 1499aa6daacaSEric Dumazet frag = netdev_alloc_frag(fragsz); 1500aa6daacaSEric Dumazet if (!frag) { 150190e33d45SPetar Penkov err = -ENOMEM; 150290e33d45SPetar Penkov goto free; 150390e33d45SPetar Penkov } 1504aa6daacaSEric Dumazet page = virt_to_head_page(frag); 1505aa6daacaSEric Dumazet skb_fill_page_desc(skb, i - 1, page, 1506aa6daacaSEric Dumazet frag - page_address(page), fragsz); 150790e33d45SPetar Penkov } 150890e33d45SPetar Penkov 150990e33d45SPetar Penkov return skb; 151090e33d45SPetar Penkov free: 151190e33d45SPetar Penkov /* frees skb and all frags allocated with napi_alloc_frag() */ 151290e33d45SPetar Penkov napi_free_frags(&tfile->napi); 151390e33d45SPetar Penkov return ERR_PTR(err); 151490e33d45SPetar Penkov } 151590e33d45SPetar Penkov 1516f42157cbSRusty Russell /* prepad is the amount to reserve at front. len is length after that. 1517f42157cbSRusty Russell * linear is a hint as to how much to copy (usually headers). */ 151854f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, 151933dccbb0SHerbert Xu size_t prepad, size_t len, 152033dccbb0SHerbert Xu size_t linear, int noblock) 1521f42157cbSRusty Russell { 152254f968d6SJason Wang struct sock *sk = tfile->socket.sk; 1523f42157cbSRusty Russell struct sk_buff *skb; 152433dccbb0SHerbert Xu int err; 1525f42157cbSRusty Russell 1526f42157cbSRusty Russell /* Under a page? Don't bother with paged skb. */ 15276231e47bSTahsin Erdogan if (prepad + len < PAGE_SIZE) 152833dccbb0SHerbert Xu linear = len; 1529f42157cbSRusty Russell 1530ce7c7fefSEric Dumazet if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) 1531ce7c7fefSEric Dumazet linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER); 153233dccbb0SHerbert Xu skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 1533ce7c7fefSEric Dumazet &err, PAGE_ALLOC_COSTLY_ORDER); 1534f42157cbSRusty Russell if (!skb) 153533dccbb0SHerbert Xu return ERR_PTR(err); 1536f42157cbSRusty Russell 1537f42157cbSRusty Russell skb_reserve(skb, prepad); 1538f42157cbSRusty Russell skb_put(skb, linear); 153933dccbb0SHerbert Xu skb->data_len = len - linear; 154033dccbb0SHerbert Xu skb->len += len - linear; 1541f42157cbSRusty Russell 1542f42157cbSRusty Russell return skb; 1543f42157cbSRusty Russell } 1544f42157cbSRusty Russell 15455503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, 15465503fcecSJason Wang struct sk_buff *skb, int more) 15475503fcecSJason Wang { 15485503fcecSJason Wang struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 15495503fcecSJason Wang struct sk_buff_head process_queue; 15505503fcecSJason Wang u32 rx_batched = tun->rx_batched; 15515503fcecSJason Wang bool rcv = false; 15525503fcecSJason Wang 15535503fcecSJason Wang if (!rx_batched || (!more && skb_queue_empty(queue))) { 15545503fcecSJason Wang local_bh_disable(); 15558ebebcbaSMatthew Cover skb_record_rx_queue(skb, tfile->queue_index); 15565503fcecSJason Wang netif_receive_skb(skb); 15575503fcecSJason Wang local_bh_enable(); 15585503fcecSJason Wang return; 15595503fcecSJason Wang } 15605503fcecSJason Wang 15615503fcecSJason Wang spin_lock(&queue->lock); 15625503fcecSJason Wang if (!more || skb_queue_len(queue) == rx_batched) { 15635503fcecSJason Wang __skb_queue_head_init(&process_queue); 15645503fcecSJason Wang skb_queue_splice_tail_init(queue, &process_queue); 15655503fcecSJason Wang rcv = true; 15665503fcecSJason Wang } else { 15675503fcecSJason Wang __skb_queue_tail(queue, skb); 15685503fcecSJason Wang } 15695503fcecSJason Wang spin_unlock(&queue->lock); 15705503fcecSJason Wang 15715503fcecSJason Wang if (rcv) { 15725503fcecSJason Wang struct sk_buff *nskb; 15735503fcecSJason Wang 15745503fcecSJason Wang local_bh_disable(); 15758ebebcbaSMatthew Cover while ((nskb = __skb_dequeue(&process_queue))) { 15768ebebcbaSMatthew Cover skb_record_rx_queue(nskb, tfile->queue_index); 15775503fcecSJason Wang netif_receive_skb(nskb); 15788ebebcbaSMatthew Cover } 15798ebebcbaSMatthew Cover skb_record_rx_queue(skb, tfile->queue_index); 15805503fcecSJason Wang netif_receive_skb(skb); 15815503fcecSJason Wang local_bh_enable(); 15825503fcecSJason Wang } 15835503fcecSJason Wang } 15845503fcecSJason Wang 158566ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, 158666ccbc9cSJason Wang int len, int noblock, bool zerocopy) 158766ccbc9cSJason Wang { 158866ccbc9cSJason Wang if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 158966ccbc9cSJason Wang return false; 159066ccbc9cSJason Wang 159166ccbc9cSJason Wang if (tfile->socket.sk->sk_sndbuf != INT_MAX) 159266ccbc9cSJason Wang return false; 159366ccbc9cSJason Wang 159466ccbc9cSJason Wang if (!noblock) 159566ccbc9cSJason Wang return false; 159666ccbc9cSJason Wang 159766ccbc9cSJason Wang if (zerocopy) 159866ccbc9cSJason Wang return false; 159966ccbc9cSJason Wang 160059eeb232SAndrew Kanner if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) + 160166ccbc9cSJason Wang SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) 160266ccbc9cSJason Wang return false; 160366ccbc9cSJason Wang 160466ccbc9cSJason Wang return true; 160566ccbc9cSJason Wang } 160666ccbc9cSJason Wang 16074b663366SAlexis Bauvin static struct sk_buff *__tun_build_skb(struct tun_file *tfile, 16084b663366SAlexis Bauvin struct page_frag *alloc_frag, char *buf, 16098ae1aff0SJason Wang int buflen, int len, int pad) 1610ac1f1f6cSJason Wang { 1611ac1f1f6cSJason Wang struct sk_buff *skb = build_skb(buf, buflen); 1612ac1f1f6cSJason Wang 1613ac1f1f6cSJason Wang if (!skb) 1614ac1f1f6cSJason Wang return ERR_PTR(-ENOMEM); 1615ac1f1f6cSJason Wang 16168ae1aff0SJason Wang skb_reserve(skb, pad); 1617ac1f1f6cSJason Wang skb_put(skb, len); 16184b663366SAlexis Bauvin skb_set_owner_w(skb, tfile->socket.sk); 1619ac1f1f6cSJason Wang 1620ac1f1f6cSJason Wang get_page(alloc_frag->page); 1621ac1f1f6cSJason Wang alloc_frag->offset += buflen; 1622ac1f1f6cSJason Wang 1623ac1f1f6cSJason Wang return skb; 1624ac1f1f6cSJason Wang } 1625ac1f1f6cSJason Wang 16268ae1aff0SJason Wang static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, 16278ae1aff0SJason Wang struct xdp_buff *xdp, u32 act) 16288ae1aff0SJason Wang { 16298ae1aff0SJason Wang int err; 16308ae1aff0SJason Wang 16318ae1aff0SJason Wang switch (act) { 16328ae1aff0SJason Wang case XDP_REDIRECT: 16338ae1aff0SJason Wang err = xdp_do_redirect(tun->dev, xdp, xdp_prog); 163433a89800SYunjian Wang if (err) { 163533a89800SYunjian Wang dev_core_stats_rx_dropped_inc(tun->dev); 16368ae1aff0SJason Wang return err; 163733a89800SYunjian Wang } 163834ffe229SYunjian Wang dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data); 16398ae1aff0SJason Wang break; 16408ae1aff0SJason Wang case XDP_TX: 16418ae1aff0SJason Wang err = tun_xdp_tx(tun->dev, xdp); 164233a89800SYunjian Wang if (err < 0) { 164333a89800SYunjian Wang dev_core_stats_rx_dropped_inc(tun->dev); 16448ae1aff0SJason Wang return err; 164533a89800SYunjian Wang } 164634ffe229SYunjian Wang dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data); 16478ae1aff0SJason Wang break; 16488ae1aff0SJason Wang case XDP_PASS: 16498ae1aff0SJason Wang break; 16508ae1aff0SJason Wang default: 1651c8064e5bSPaolo Abeni bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act); 1652df561f66SGustavo A. R. Silva fallthrough; 16538ae1aff0SJason Wang case XDP_ABORTED: 16548ae1aff0SJason Wang trace_xdp_exception(tun->dev, xdp_prog, act); 1655df561f66SGustavo A. R. Silva fallthrough; 16568ae1aff0SJason Wang case XDP_DROP: 1657625788b5SEric Dumazet dev_core_stats_rx_dropped_inc(tun->dev); 16588ae1aff0SJason Wang break; 16598ae1aff0SJason Wang } 16608ae1aff0SJason Wang 16618ae1aff0SJason Wang return act; 16628ae1aff0SJason Wang } 16638ae1aff0SJason Wang 1664761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun, 1665761876c8SJason Wang struct tun_file *tfile, 166666ccbc9cSJason Wang struct iov_iter *from, 1667761876c8SJason Wang struct virtio_net_hdr *hdr, 16681cfe6e93SJason Wang int len, int *skb_xdp) 166966ccbc9cSJason Wang { 16700bbd7dadSEric Dumazet struct page_frag *alloc_frag = ¤t->task_frag; 1671761876c8SJason Wang struct bpf_prog *xdp_prog; 16727df13219SJason Wang int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 167366ccbc9cSJason Wang char *buf; 167466ccbc9cSJason Wang size_t copied; 16758ae1aff0SJason Wang int pad = TUN_RX_PAD; 16768ae1aff0SJason Wang int err = 0; 16777df13219SJason Wang 16787df13219SJason Wang rcu_read_lock(); 16797df13219SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 16807df13219SJason Wang if (xdp_prog) 16814f23aff8SJason Wang pad += XDP_PACKET_HEADROOM; 16827df13219SJason Wang buflen += SKB_DATA_ALIGN(len + pad); 16837df13219SJason Wang rcu_read_unlock(); 168466ccbc9cSJason Wang 168563b9ab65SJason Wang alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES); 168666ccbc9cSJason Wang if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) 168766ccbc9cSJason Wang return ERR_PTR(-ENOMEM); 168866ccbc9cSJason Wang 168966ccbc9cSJason Wang buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 169066ccbc9cSJason Wang copied = copy_page_from_iter(alloc_frag->page, 16917df13219SJason Wang alloc_frag->offset + pad, 169266ccbc9cSJason Wang len, from); 169366ccbc9cSJason Wang if (copied != len) 169466ccbc9cSJason Wang return ERR_PTR(-EFAULT); 169566ccbc9cSJason Wang 16967df13219SJason Wang /* There's a small window that XDP may be set after the check 16977df13219SJason Wang * of xdp_prog above, this should be rare and for simplicity 16987df13219SJason Wang * we do XDP on skb in case the headroom is not enough. 16997df13219SJason Wang */ 1700ac1f1f6cSJason Wang if (hdr->gso_type || !xdp_prog) { 17011cfe6e93SJason Wang *skb_xdp = 1; 17024b663366SAlexis Bauvin return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, 17034b663366SAlexis Bauvin pad); 1704ac1f1f6cSJason Wang } 1705ac1f1f6cSJason Wang 17061cfe6e93SJason Wang *skb_xdp = 0; 170766ccbc9cSJason Wang 17086547e387SToshiaki Makita local_bh_disable(); 1709761876c8SJason Wang rcu_read_lock(); 1710761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 17118ae1aff0SJason Wang if (xdp_prog) { 1712761876c8SJason Wang struct xdp_buff xdp; 1713761876c8SJason Wang u32 act; 1714761876c8SJason Wang 171543b5169dSLorenzo Bianconi xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq); 1716be9df4afSLorenzo Bianconi xdp_prepare_buff(&xdp, buf, pad, len, false); 1717761876c8SJason Wang 17188ae1aff0SJason Wang act = bpf_prog_run_xdp(xdp_prog, &xdp); 17198ae1aff0SJason Wang if (act == XDP_REDIRECT || act == XDP_TX) { 1720761876c8SJason Wang get_page(alloc_frag->page); 1721761876c8SJason Wang alloc_frag->offset += buflen; 1722761876c8SJason Wang } 17238ae1aff0SJason Wang err = tun_xdp_act(tun, xdp_prog, &xdp, act); 1724bee34890SWill Deacon if (err < 0) { 1725bee34890SWill Deacon if (act == XDP_REDIRECT || act == XDP_TX) 1726bee34890SWill Deacon put_page(alloc_frag->page); 1727bee34890SWill Deacon goto out; 1728bee34890SWill Deacon } 1729bee34890SWill Deacon 17301a097910SJason Wang if (err == XDP_REDIRECT) 17311d233886SToke Høiland-Jørgensen xdp_do_flush(); 17328ae1aff0SJason Wang if (err != XDP_PASS) 17338ae1aff0SJason Wang goto out; 17348ae1aff0SJason Wang 17358ae1aff0SJason Wang pad = xdp.data - xdp.data_hard_start; 17368ae1aff0SJason Wang len = xdp.data_end - xdp.data; 1737761876c8SJason Wang } 1738761876c8SJason Wang rcu_read_unlock(); 17396547e387SToshiaki Makita local_bh_enable(); 1740291aeb2bSJason Wang 17414b663366SAlexis Bauvin return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad); 1742761876c8SJason Wang 1743f7053b6cSJason Wang out: 1744761876c8SJason Wang rcu_read_unlock(); 17456547e387SToshiaki Makita local_bh_enable(); 1746761876c8SJason Wang return NULL; 174766ccbc9cSJason Wang } 174866ccbc9cSJason Wang 17491da177e4SLinus Torvalds /* Get packet from user space buffer */ 175054f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, 1751f5ff53b4SAl Viro void *msg_control, struct iov_iter *from, 17525503fcecSJason Wang int noblock, bool more) 17531da177e4SLinus Torvalds { 175409640e63SHarvey Harrison struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; 17551da177e4SLinus Torvalds struct sk_buff *skb; 1756f5ff53b4SAl Viro size_t total_len = iov_iter_count(from); 1757eaea34b2SPaolo Abeni size_t len = total_len, align = tun->align, linear; 1758f43798c2SRusty Russell struct virtio_net_hdr gso = { 0 }; 175996f8d9ecSJason Wang int good_linear; 17600690899bSMichael S. Tsirkin int copylen; 17610690899bSMichael S. Tsirkin bool zerocopy = false; 17620690899bSMichael S. Tsirkin int err; 176396f84061SJason Wang u32 rxhash = 0; 17641cfe6e93SJason Wang int skb_xdp = 1; 1765af3fb24eSEric Dumazet bool frags = tun_napi_frags_enabled(tfile); 1766ab00af85SChuang Wang enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; 17671da177e4SLinus Torvalds 176840630b82SMichael S. Tsirkin if (!(tun->flags & IFF_NO_PI)) { 176915718ea0SDan Carpenter if (len < sizeof(pi)) 17701da177e4SLinus Torvalds return -EINVAL; 177115718ea0SDan Carpenter len -= sizeof(pi); 17721da177e4SLinus Torvalds 1773cbbd26b8SAl Viro if (!copy_from_iter_full(&pi, sizeof(pi), from)) 17741da177e4SLinus Torvalds return -EFAULT; 17751da177e4SLinus Torvalds } 17761da177e4SLinus Torvalds 177740630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR) { 1778e1edab87SWillem de Bruijn int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 1779e1edab87SWillem de Bruijn 1780e1edab87SWillem de Bruijn if (len < vnet_hdr_sz) 1781f43798c2SRusty Russell return -EINVAL; 1782e1edab87SWillem de Bruijn len -= vnet_hdr_sz; 1783f43798c2SRusty Russell 1784cbbd26b8SAl Viro if (!copy_from_iter_full(&gso, sizeof(gso), from)) 1785f43798c2SRusty Russell return -EFAULT; 1786f43798c2SRusty Russell 17874909122fSHerbert Xu if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 178856f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len)) 178956f0dcc5SMichael S. Tsirkin gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2); 17904909122fSHerbert Xu 179156f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > len) 1792f43798c2SRusty Russell return -EINVAL; 1793e1edab87SWillem de Bruijn iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); 1794f43798c2SRusty Russell } 1795f43798c2SRusty Russell 179640630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { 1797a504b86eSstephen hemminger align += NET_IP_ALIGN; 17980eca93bcSHerbert Xu if (unlikely(len < ETH_HLEN || 179956f0dcc5SMichael S. Tsirkin (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN))) 1800e01bf1c8SRusty Russell return -EINVAL; 1801e01bf1c8SRusty Russell } 18021da177e4SLinus Torvalds 180396f8d9ecSJason Wang good_linear = SKB_MAX_HEAD(align); 180496f8d9ecSJason Wang 180588529176SJason Wang if (msg_control) { 1806f5ff53b4SAl Viro struct iov_iter i = *from; 1807f5ff53b4SAl Viro 180888529176SJason Wang /* There are 256 bytes to be copied in skb, so there is 180988529176SJason Wang * enough room for skb expand head in case it is used. 18100690899bSMichael S. Tsirkin * The rest of the buffer is mapped from userspace. 18110690899bSMichael S. Tsirkin */ 181256f0dcc5SMichael S. Tsirkin copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN; 181396f8d9ecSJason Wang if (copylen > good_linear) 181496f8d9ecSJason Wang copylen = good_linear; 18153dd5c330SJason Wang linear = copylen; 1816f5ff53b4SAl Viro iov_iter_advance(&i, copylen); 1817f5ff53b4SAl Viro if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) 181888529176SJason Wang zerocopy = true; 181988529176SJason Wang } 182088529176SJason Wang 182190e33d45SPetar Penkov if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { 18221cfe6e93SJason Wang /* For the packet that is not easy to be processed 18231cfe6e93SJason Wang * (e.g gso or jumbo packet), we will do it at after 18241cfe6e93SJason Wang * skb was created with generic XDP routine. 18251cfe6e93SJason Wang */ 18261cfe6e93SJason Wang skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); 1827ab00af85SChuang Wang err = PTR_ERR_OR_ZERO(skb); 1828ab00af85SChuang Wang if (err) 1829ab00af85SChuang Wang goto drop; 1830761876c8SJason Wang if (!skb) 1831761876c8SJason Wang return total_len; 183266ccbc9cSJason Wang } else { 183388529176SJason Wang if (!zerocopy) { 18340690899bSMichael S. Tsirkin copylen = len; 183556f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) 183696f8d9ecSJason Wang linear = good_linear; 183796f8d9ecSJason Wang else 183856f0dcc5SMichael S. Tsirkin linear = tun16_to_cpu(tun, gso.hdr_len); 18393dd5c330SJason Wang } 18400690899bSMichael S. Tsirkin 184190e33d45SPetar Penkov if (frags) { 184290e33d45SPetar Penkov mutex_lock(&tfile->napi_mutex); 184390e33d45SPetar Penkov skb = tun_napi_alloc_frags(tfile, copylen, from); 184490e33d45SPetar Penkov /* tun_napi_alloc_frags() enforces a layout for the skb. 184590e33d45SPetar Penkov * If zerocopy is enabled, then this layout will be 184690e33d45SPetar Penkov * overwritten by zerocopy_sg_from_iter(). 184790e33d45SPetar Penkov */ 184890e33d45SPetar Penkov zerocopy = false; 184990e33d45SPetar Penkov } else { 18506231e47bSTahsin Erdogan if (!linear) 18516231e47bSTahsin Erdogan linear = min_t(size_t, good_linear, copylen); 18526231e47bSTahsin Erdogan 185390e33d45SPetar Penkov skb = tun_alloc_skb(tfile, align, copylen, linear, 185490e33d45SPetar Penkov noblock); 185590e33d45SPetar Penkov } 185690e33d45SPetar Penkov 1857ab00af85SChuang Wang err = PTR_ERR_OR_ZERO(skb); 1858ab00af85SChuang Wang if (err) 1859ab00af85SChuang Wang goto drop; 18601da177e4SLinus Torvalds 18610690899bSMichael S. Tsirkin if (zerocopy) 1862f5ff53b4SAl Viro err = zerocopy_sg_from_iter(skb, from); 1863af1cc7a2SJason Wang else 1864f5ff53b4SAl Viro err = skb_copy_datagram_from_iter(skb, 0, from, len); 18650690899bSMichael S. Tsirkin 18660690899bSMichael S. Tsirkin if (err) { 18674477138fSEric Dumazet err = -EFAULT; 18684b4f052eSDongli Zhang drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; 1869ab00af85SChuang Wang goto drop; 18708f22757eSDave Jones } 187166ccbc9cSJason Wang } 18721da177e4SLinus Torvalds 18733e9e40e7SJarno Rajahalme if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { 1874497a5757SHeiner Kallweit atomic_long_inc(&tun->rx_frame_errors); 1875ab00af85SChuang Wang err = -EINVAL; 1876ab00af85SChuang Wang goto free_skb; 1877df10db98SPaolo Abeni } 1878df10db98SPaolo Abeni 18791da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 188040630b82SMichael S. Tsirkin case IFF_TUN: 188140630b82SMichael S. Tsirkin if (tun->flags & IFF_NO_PI) { 18822580c4c1SAlexander Potapenko u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; 18832580c4c1SAlexander Potapenko 18842580c4c1SAlexander Potapenko switch (ip_version) { 18852580c4c1SAlexander Potapenko case 4: 1886f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IP); 1887f09f7ee2SAng Way Chuang break; 18882580c4c1SAlexander Potapenko case 6: 1889f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IPV6); 1890f09f7ee2SAng Way Chuang break; 1891f09f7ee2SAng Way Chuang default: 1892ab00af85SChuang Wang err = -EINVAL; 1893ab00af85SChuang Wang goto drop; 1894f09f7ee2SAng Way Chuang } 1895f09f7ee2SAng Way Chuang } 1896f09f7ee2SAng Way Chuang 1897459a98edSArnaldo Carvalho de Melo skb_reset_mac_header(skb); 18981da177e4SLinus Torvalds skb->protocol = pi.proto; 18994c13eb66SArnaldo Carvalho de Melo skb->dev = tun->dev; 19001da177e4SLinus Torvalds break; 190140630b82SMichael S. Tsirkin case IFF_TAP: 190296aa1b22SWillem de Bruijn if (frags && !pskb_may_pull(skb, ETH_HLEN)) { 190396aa1b22SWillem de Bruijn err = -ENOMEM; 19044b4f052eSDongli Zhang drop_reason = SKB_DROP_REASON_HDR_TRUNC; 190596aa1b22SWillem de Bruijn goto drop; 190696aa1b22SWillem de Bruijn } 19071da177e4SLinus Torvalds skb->protocol = eth_type_trans(skb, tun->dev); 19081da177e4SLinus Torvalds break; 19096403eab1SJoe Perches } 19101da177e4SLinus Torvalds 19110690899bSMichael S. Tsirkin /* copy skb_ubuf_info for callback when skb has no error */ 19120690899bSMichael S. Tsirkin if (zerocopy) { 19139ee5e5adSJonathan Lemon skb_zcopy_init(skb, msg_control); 1914af1cc7a2SJason Wang } else if (msg_control) { 1915af1cc7a2SJason Wang struct ubuf_info *uarg = msg_control; 191636177832SJonathan Lemon uarg->callback(NULL, uarg, false); 19170690899bSMichael S. Tsirkin } 19180690899bSMichael S. Tsirkin 191972f65107SVlad Yasevich skb_reset_network_header(skb); 1920d2aa125dSMaxim Mikityanskiy skb_probe_transport_header(skb); 19213fe260e0SGilberto Bertin skb_record_rx_queue(skb, tfile->queue_index); 192238502af7SJason Wang 19231cfe6e93SJason Wang if (skb_xdp) { 1924761876c8SJason Wang struct bpf_prog *xdp_prog; 1925761876c8SJason Wang int ret; 1926761876c8SJason Wang 19276547e387SToshiaki Makita local_bh_disable(); 1928761876c8SJason Wang rcu_read_lock(); 1929761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 1930761876c8SJason Wang if (xdp_prog) { 1931761876c8SJason Wang ret = do_xdp_generic(xdp_prog, skb); 1932761876c8SJason Wang if (ret != XDP_PASS) { 1933761876c8SJason Wang rcu_read_unlock(); 19346547e387SToshiaki Makita local_bh_enable(); 1935ab00af85SChuang Wang goto unlock_frags; 1936761876c8SJason Wang } 1937761876c8SJason Wang } 1938761876c8SJason Wang rcu_read_unlock(); 19396547e387SToshiaki Makita local_bh_enable(); 1940761876c8SJason Wang } 1941761876c8SJason Wang 1942cf1a1e07SPaolo Abeni /* Compute the costly rx hash only if needed for flow updates. 1943cf1a1e07SPaolo Abeni * We may get a very small possibility of OOO during switching, not 1944cf1a1e07SPaolo Abeni * worth to optimize. 1945cf1a1e07SPaolo Abeni */ 1946cf1a1e07SPaolo Abeni if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 && 1947cf1a1e07SPaolo Abeni !tfile->detached) 1948feec084aSJason Wang rxhash = __skb_get_hash_symmetric(skb); 194994317099SPetar Penkov 19504477138fSEric Dumazet rcu_read_lock(); 19514477138fSEric Dumazet if (unlikely(!(tun->dev->flags & IFF_UP))) { 19524477138fSEric Dumazet err = -EIO; 19539180bb4fSEric Dumazet rcu_read_unlock(); 19544b4f052eSDongli Zhang drop_reason = SKB_DROP_REASON_DEV_READY; 19554477138fSEric Dumazet goto drop; 19564477138fSEric Dumazet } 19574477138fSEric Dumazet 195890e33d45SPetar Penkov if (frags) { 195996aa1b22SWillem de Bruijn u32 headlen; 196096aa1b22SWillem de Bruijn 196190e33d45SPetar Penkov /* Exercise flow dissector code path. */ 196296aa1b22SWillem de Bruijn skb_push(skb, ETH_HLEN); 196396aa1b22SWillem de Bruijn headlen = eth_get_headlen(tun->dev, skb->data, 1964c43f1255SStanislav Fomichev skb_headlen(skb)); 196590e33d45SPetar Penkov 1966010f245bSEric Dumazet if (unlikely(headlen > skb_headlen(skb))) { 196707d120aaSEric Dumazet WARN_ON_ONCE(1); 196807d120aaSEric Dumazet err = -ENOMEM; 1969625788b5SEric Dumazet dev_core_stats_rx_dropped_inc(tun->dev); 197007d120aaSEric Dumazet napi_busy: 197190e33d45SPetar Penkov napi_free_frags(&tfile->napi); 19724477138fSEric Dumazet rcu_read_unlock(); 197390e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 197407d120aaSEric Dumazet return err; 197590e33d45SPetar Penkov } 197690e33d45SPetar Penkov 197707d120aaSEric Dumazet if (likely(napi_schedule_prep(&tfile->napi))) { 197890e33d45SPetar Penkov local_bh_disable(); 197990e33d45SPetar Penkov napi_gro_frags(&tfile->napi); 19801118b204SWang Yufen napi_complete(&tfile->napi); 198190e33d45SPetar Penkov local_bh_enable(); 198207d120aaSEric Dumazet } else { 198307d120aaSEric Dumazet err = -EBUSY; 198407d120aaSEric Dumazet goto napi_busy; 198507d120aaSEric Dumazet } 198690e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 1987aec72f33SEric Dumazet } else if (tfile->napi_enabled) { 198894317099SPetar Penkov struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 198994317099SPetar Penkov int queue_len; 199094317099SPetar Penkov 199194317099SPetar Penkov spin_lock_bh(&queue->lock); 199282b2bc27SKuniyuki Iwashima 199382b2bc27SKuniyuki Iwashima if (unlikely(tfile->detached)) { 199482b2bc27SKuniyuki Iwashima spin_unlock_bh(&queue->lock); 199582b2bc27SKuniyuki Iwashima rcu_read_unlock(); 199682b2bc27SKuniyuki Iwashima err = -EBUSY; 199782b2bc27SKuniyuki Iwashima goto free_skb; 199882b2bc27SKuniyuki Iwashima } 199982b2bc27SKuniyuki Iwashima 200094317099SPetar Penkov __skb_queue_tail(queue, skb); 200194317099SPetar Penkov queue_len = skb_queue_len(queue); 200294317099SPetar Penkov spin_unlock(&queue->lock); 200394317099SPetar Penkov 200494317099SPetar Penkov if (!more || queue_len > NAPI_POLL_WEIGHT) 200594317099SPetar Penkov napi_schedule(&tfile->napi); 200694317099SPetar Penkov 200794317099SPetar Penkov local_bh_enable(); 200894317099SPetar Penkov } else if (!IS_ENABLED(CONFIG_4KSTACKS)) { 20095503fcecSJason Wang tun_rx_batched(tun, tfile, skb, more); 201094317099SPetar Penkov } else { 20113d391f65SSebastian Andrzej Siewior netif_rx(skb); 201294317099SPetar Penkov } 20134477138fSEric Dumazet rcu_read_unlock(); 20141da177e4SLinus Torvalds 2015497a5757SHeiner Kallweit preempt_disable(); 2016497a5757SHeiner Kallweit dev_sw_netstats_rx_add(tun->dev, len); 2017497a5757SHeiner Kallweit preempt_enable(); 20181da177e4SLinus Torvalds 201996f84061SJason Wang if (rxhash) 20209e85722dSJason Wang tun_flow_update(tun, rxhash, tfile); 202196f84061SJason Wang 20220690899bSMichael S. Tsirkin return total_len; 2023ab00af85SChuang Wang 2024ab00af85SChuang Wang drop: 2025ab00af85SChuang Wang if (err != -EAGAIN) 2026ab00af85SChuang Wang dev_core_stats_rx_dropped_inc(tun->dev); 2027ab00af85SChuang Wang 2028ab00af85SChuang Wang free_skb: 2029ab00af85SChuang Wang if (!IS_ERR_OR_NULL(skb)) 2030ab00af85SChuang Wang kfree_skb_reason(skb, drop_reason); 2031ab00af85SChuang Wang 2032ab00af85SChuang Wang unlock_frags: 2033ab00af85SChuang Wang if (frags) { 2034ab00af85SChuang Wang tfile->napi.skb = NULL; 2035ab00af85SChuang Wang mutex_unlock(&tfile->napi_mutex); 2036ab00af85SChuang Wang } 2037ab00af85SChuang Wang 2038ab00af85SChuang Wang return err ?: total_len; 20391da177e4SLinus Torvalds } 20401da177e4SLinus Torvalds 2041f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) 20421da177e4SLinus Torvalds { 204333dccbb0SHerbert Xu struct file *file = iocb->ki_filp; 204454f968d6SJason Wang struct tun_file *tfile = file->private_data; 20459484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 2046631ab46bSEric W. Biederman ssize_t result; 20475aac0390SJens Axboe int noblock = 0; 20481da177e4SLinus Torvalds 20491da177e4SLinus Torvalds if (!tun) 20501da177e4SLinus Torvalds return -EBADFD; 20511da177e4SLinus Torvalds 20525aac0390SJens Axboe if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) 20535aac0390SJens Axboe noblock = 1; 20545aac0390SJens Axboe 20555aac0390SJens Axboe result = tun_get_user(tun, tfile, NULL, from, noblock, false); 2056631ab46bSEric W. Biederman 2057631ab46bSEric W. Biederman tun_put(tun); 2058631ab46bSEric W. Biederman return result; 20591da177e4SLinus Torvalds } 20601da177e4SLinus Torvalds 2061fc72d1d5SJason Wang static ssize_t tun_put_user_xdp(struct tun_struct *tun, 2062fc72d1d5SJason Wang struct tun_file *tfile, 20631ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdp_frame, 2064fc72d1d5SJason Wang struct iov_iter *iter) 2065fc72d1d5SJason Wang { 2066fc72d1d5SJason Wang int vnet_hdr_sz = 0; 20671ffcbc85SJesper Dangaard Brouer size_t size = xdp_frame->len; 2068fc72d1d5SJason Wang size_t ret; 2069fc72d1d5SJason Wang 2070fc72d1d5SJason Wang if (tun->flags & IFF_VNET_HDR) { 2071fc72d1d5SJason Wang struct virtio_net_hdr gso = { 0 }; 2072fc72d1d5SJason Wang 2073fc72d1d5SJason Wang vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 2074fc72d1d5SJason Wang if (unlikely(iov_iter_count(iter) < vnet_hdr_sz)) 2075fc72d1d5SJason Wang return -EINVAL; 2076fc72d1d5SJason Wang if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) != 2077fc72d1d5SJason Wang sizeof(gso))) 2078fc72d1d5SJason Wang return -EFAULT; 2079fc72d1d5SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 2080fc72d1d5SJason Wang } 2081fc72d1d5SJason Wang 20821ffcbc85SJesper Dangaard Brouer ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz; 2083fc72d1d5SJason Wang 2084497a5757SHeiner Kallweit preempt_disable(); 2085497a5757SHeiner Kallweit dev_sw_netstats_tx_add(tun->dev, 1, ret); 2086497a5757SHeiner Kallweit preempt_enable(); 2087fc72d1d5SJason Wang 2088fc72d1d5SJason Wang return ret; 2089fc72d1d5SJason Wang } 2090fc72d1d5SJason Wang 20911da177e4SLinus Torvalds /* Put packet to the user space buffer */ 20926f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun, 209354f968d6SJason Wang struct tun_file *tfile, 20941da177e4SLinus Torvalds struct sk_buff *skb, 2095e0b46d0eSHerbert Xu struct iov_iter *iter) 20961da177e4SLinus Torvalds { 20971da177e4SLinus Torvalds struct tun_pi pi = { 0, skb->protocol }; 2098e0b46d0eSHerbert Xu ssize_t total; 20998c847d25SJason Wang int vlan_offset = 0; 2100a8f9bfdfSHerbert Xu int vlan_hlen = 0; 21012eb783c4SHerbert Xu int vnet_hdr_sz = 0; 2102a8f9bfdfSHerbert Xu 2103df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) 2104a8f9bfdfSHerbert Xu vlan_hlen = VLAN_HLEN; 21051da177e4SLinus Torvalds 210640630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR) 2107e1edab87SWillem de Bruijn vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 21081da177e4SLinus Torvalds 2109e0b46d0eSHerbert Xu total = skb->len + vlan_hlen + vnet_hdr_sz; 2110e0b46d0eSHerbert Xu 211140630b82SMichael S. Tsirkin if (!(tun->flags & IFF_NO_PI)) { 2112e0b46d0eSHerbert Xu if (iov_iter_count(iter) < sizeof(pi)) 21131da177e4SLinus Torvalds return -EINVAL; 21141da177e4SLinus Torvalds 2115e0b46d0eSHerbert Xu total += sizeof(pi); 2116e0b46d0eSHerbert Xu if (iov_iter_count(iter) < total) { 21171da177e4SLinus Torvalds /* Packet will be striped */ 21181da177e4SLinus Torvalds pi.flags |= TUN_PKT_STRIP; 21191da177e4SLinus Torvalds } 21201da177e4SLinus Torvalds 2121e0b46d0eSHerbert Xu if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi)) 21221da177e4SLinus Torvalds return -EFAULT; 21231da177e4SLinus Torvalds } 21241da177e4SLinus Torvalds 21252eb783c4SHerbert Xu if (vnet_hdr_sz) { 21269403cd7cSJarno Rajahalme struct virtio_net_hdr gso; 212734166093SMike Rapoport 2128e0b46d0eSHerbert Xu if (iov_iter_count(iter) < vnet_hdr_sz) 2129f43798c2SRusty Russell return -EINVAL; 2130f43798c2SRusty Russell 21313e9e40e7SJarno Rajahalme if (virtio_net_hdr_from_skb(skb, &gso, 2132fd3a8862SWillem de Bruijn tun_is_little_endian(tun), true, 2133fd3a8862SWillem de Bruijn vlan_hlen)) { 2134f43798c2SRusty Russell struct skb_shared_info *sinfo = skb_shinfo(skb); 2135*40f4ced3SLei Chen 2136*40f4ced3SLei Chen if (net_ratelimit()) { 2137*40f4ced3SLei Chen netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n", 213856f0dcc5SMichael S. Tsirkin sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), 213956f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.hdr_len)); 2140ef3db4a5SMichael S. Tsirkin print_hex_dump(KERN_ERR, "tun: ", 2141ef3db4a5SMichael S. Tsirkin DUMP_PREFIX_NONE, 2142ef3db4a5SMichael S. Tsirkin 16, 1, skb->head, 214356f0dcc5SMichael S. Tsirkin min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); 2144*40f4ced3SLei Chen } 2145ef3db4a5SMichael S. Tsirkin WARN_ON_ONCE(1); 2146ef3db4a5SMichael S. Tsirkin return -EINVAL; 2147ef3db4a5SMichael S. Tsirkin } 2148f43798c2SRusty Russell 2149e0b46d0eSHerbert Xu if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) 2150f43798c2SRusty Russell return -EFAULT; 21518c847d25SJason Wang 21528c847d25SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 2153f43798c2SRusty Russell } 2154f43798c2SRusty Russell 2155a8f9bfdfSHerbert Xu if (vlan_hlen) { 2156e0b46d0eSHerbert Xu int ret; 2157aff3d70aSJason Wang struct veth veth; 21581da177e4SLinus Torvalds 21596680ec68SJason Wang veth.h_vlan_proto = skb->vlan_proto; 2160df8a39deSJiri Pirko veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); 21611da177e4SLinus Torvalds 21626680ec68SJason Wang vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 21636680ec68SJason Wang 2164e0b46d0eSHerbert Xu ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); 2165e0b46d0eSHerbert Xu if (ret || !iov_iter_count(iter)) 21666680ec68SJason Wang goto done; 21676680ec68SJason Wang 2168e0b46d0eSHerbert Xu ret = copy_to_iter(&veth, sizeof(veth), iter); 2169e0b46d0eSHerbert Xu if (ret != sizeof(veth) || !iov_iter_count(iter)) 21706680ec68SJason Wang goto done; 21716680ec68SJason Wang } 21726680ec68SJason Wang 2173e0b46d0eSHerbert Xu skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); 21746680ec68SJason Wang 21756680ec68SJason Wang done: 2176608b9977SPaolo Abeni /* caller is in process context, */ 2177497a5757SHeiner Kallweit preempt_disable(); 2178497a5757SHeiner Kallweit dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen); 2179497a5757SHeiner Kallweit preempt_enable(); 21801da177e4SLinus Torvalds 21811da177e4SLinus Torvalds return total; 21821da177e4SLinus Torvalds } 21831da177e4SLinus Torvalds 2184fc72d1d5SJason Wang static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) 21851576d986SJason Wang { 21861576d986SJason Wang DECLARE_WAITQUEUE(wait, current); 2187fc72d1d5SJason Wang void *ptr = NULL; 2188f48cc6b2SJason Wang int error = 0; 21891576d986SJason Wang 2190fc72d1d5SJason Wang ptr = ptr_ring_consume(&tfile->tx_ring); 2191fc72d1d5SJason Wang if (ptr) 21921576d986SJason Wang goto out; 21931576d986SJason Wang if (noblock) { 2194f48cc6b2SJason Wang error = -EAGAIN; 21951576d986SJason Wang goto out; 21961576d986SJason Wang } 21971576d986SJason Wang 2198333f7909SAl Viro add_wait_queue(&tfile->socket.wq.wait, &wait); 21991576d986SJason Wang 22001576d986SJason Wang while (1) { 220171828b22STimur Celik set_current_state(TASK_INTERRUPTIBLE); 2202fc72d1d5SJason Wang ptr = ptr_ring_consume(&tfile->tx_ring); 2203fc72d1d5SJason Wang if (ptr) 22041576d986SJason Wang break; 22051576d986SJason Wang if (signal_pending(current)) { 2206f48cc6b2SJason Wang error = -ERESTARTSYS; 22071576d986SJason Wang break; 22081576d986SJason Wang } 22091576d986SJason Wang if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { 2210f48cc6b2SJason Wang error = -EFAULT; 22111576d986SJason Wang break; 22121576d986SJason Wang } 22131576d986SJason Wang 22141576d986SJason Wang schedule(); 22151576d986SJason Wang } 22161576d986SJason Wang 2217ecef67cbSTimur Celik __set_current_state(TASK_RUNNING); 2218333f7909SAl Viro remove_wait_queue(&tfile->socket.wq.wait, &wait); 22191576d986SJason Wang 22201576d986SJason Wang out: 2221f48cc6b2SJason Wang *err = error; 2222fc72d1d5SJason Wang return ptr; 22231576d986SJason Wang } 22241576d986SJason Wang 222554f968d6SJason Wang static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, 22269b067034SAl Viro struct iov_iter *to, 2227fc72d1d5SJason Wang int noblock, void *ptr) 22281da177e4SLinus Torvalds { 22299b067034SAl Viro ssize_t ret; 22301576d986SJason Wang int err; 22311da177e4SLinus Torvalds 2232c33ee15bSWei Xu if (!iov_iter_count(to)) { 2233fc72d1d5SJason Wang tun_ptr_free(ptr); 22349b067034SAl Viro return 0; 2235c33ee15bSWei Xu } 22361da177e4SLinus Torvalds 2237fc72d1d5SJason Wang if (!ptr) { 22381576d986SJason Wang /* Read frames from ring */ 2239fc72d1d5SJason Wang ptr = tun_ring_recv(tfile, noblock, &err); 2240fc72d1d5SJason Wang if (!ptr) 2241957f094fSAlex Gartrell return err; 2242ac77cfd4SJason Wang } 2243e0b46d0eSHerbert Xu 22441ffcbc85SJesper Dangaard Brouer if (tun_is_xdp_frame(ptr)) { 22451ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); 2246fc72d1d5SJason Wang 22471ffcbc85SJesper Dangaard Brouer ret = tun_put_user_xdp(tun, tfile, xdpf, to); 224803993094SJesper Dangaard Brouer xdp_return_frame(xdpf); 2249fc72d1d5SJason Wang } else { 2250fc72d1d5SJason Wang struct sk_buff *skb = ptr; 2251fc72d1d5SJason Wang 22529b067034SAl Viro ret = tun_put_user(tun, tfile, skb, to); 2253f51a5e82SJason Wang if (unlikely(ret < 0)) 22541da177e4SLinus Torvalds kfree_skb(skb); 2255f51a5e82SJason Wang else 2256f51a5e82SJason Wang consume_skb(skb); 2257fc72d1d5SJason Wang } 22581da177e4SLinus Torvalds 225905c2828cSMichael S. Tsirkin return ret; 226005c2828cSMichael S. Tsirkin } 226105c2828cSMichael S. Tsirkin 22629b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) 226305c2828cSMichael S. Tsirkin { 226405c2828cSMichael S. Tsirkin struct file *file = iocb->ki_filp; 226505c2828cSMichael S. Tsirkin struct tun_file *tfile = file->private_data; 22669484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 22679b067034SAl Viro ssize_t len = iov_iter_count(to), ret; 22685aac0390SJens Axboe int noblock = 0; 226905c2828cSMichael S. Tsirkin 227005c2828cSMichael S. Tsirkin if (!tun) 227105c2828cSMichael S. Tsirkin return -EBADFD; 22725aac0390SJens Axboe 22735aac0390SJens Axboe if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) 22745aac0390SJens Axboe noblock = 1; 22755aac0390SJens Axboe 22765aac0390SJens Axboe ret = tun_do_read(tun, tfile, to, noblock, NULL); 227742404c09SDavid S. Miller ret = min_t(ssize_t, ret, len); 2278d0b7da8aSZhi Yong Wu if (ret > 0) 2279d0b7da8aSZhi Yong Wu iocb->ki_pos = ret; 2280631ab46bSEric W. Biederman tun_put(tun); 22811da177e4SLinus Torvalds return ret; 22821da177e4SLinus Torvalds } 22831da177e4SLinus Torvalds 2284cd5681d7SJason Wang static void tun_prog_free(struct rcu_head *rcu) 228596f84061SJason Wang { 2286cd5681d7SJason Wang struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu); 228796f84061SJason Wang 228896f84061SJason Wang bpf_prog_destroy(prog->prog); 228996f84061SJason Wang kfree(prog); 229096f84061SJason Wang } 229196f84061SJason Wang 22929d6474e4SJason Wang static int __tun_set_ebpf(struct tun_struct *tun, 22939d6474e4SJason Wang struct tun_prog __rcu **prog_p, 229496f84061SJason Wang struct bpf_prog *prog) 229596f84061SJason Wang { 2296cd5681d7SJason Wang struct tun_prog *old, *new = NULL; 229796f84061SJason Wang 229896f84061SJason Wang if (prog) { 229996f84061SJason Wang new = kmalloc(sizeof(*new), GFP_KERNEL); 230096f84061SJason Wang if (!new) 230196f84061SJason Wang return -ENOMEM; 230296f84061SJason Wang new->prog = prog; 230396f84061SJason Wang } 230496f84061SJason Wang 2305124da8f6SJason Wang spin_lock_bh(&tun->lock); 2306cd5681d7SJason Wang old = rcu_dereference_protected(*prog_p, 2307124da8f6SJason Wang lockdep_is_held(&tun->lock)); 2308cd5681d7SJason Wang rcu_assign_pointer(*prog_p, new); 2309124da8f6SJason Wang spin_unlock_bh(&tun->lock); 231096f84061SJason Wang 231196f84061SJason Wang if (old) 2312cd5681d7SJason Wang call_rcu(&old->rcu, tun_prog_free); 231396f84061SJason Wang 231496f84061SJason Wang return 0; 231596f84061SJason Wang } 231696f84061SJason Wang 231796442e42SJason Wang static void tun_free_netdev(struct net_device *dev) 231896442e42SJason Wang { 231996442e42SJason Wang struct tun_struct *tun = netdev_priv(dev); 232096442e42SJason Wang 23214008e97fSJason Wang BUG_ON(!(list_empty(&tun->disabled))); 232211fc7d5aSEric Dumazet 2323497a5757SHeiner Kallweit free_percpu(dev->tstats); 232496442e42SJason Wang tun_flow_uninit(tun); 23255dbbaf2dSPaul Moore security_tun_dev_free_security(tun->security); 2326cd5681d7SJason Wang __tun_set_ebpf(tun, &tun->steering_prog, NULL); 2327aff3d70aSJason Wang __tun_set_ebpf(tun, &tun->filter_prog, NULL); 232896442e42SJason Wang } 232996442e42SJason Wang 23301da177e4SLinus Torvalds static void tun_setup(struct net_device *dev) 23311da177e4SLinus Torvalds { 23321da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 23331da177e4SLinus Torvalds 23340625c883SEric W. Biederman tun->owner = INVALID_UID; 23350625c883SEric W. Biederman tun->group = INVALID_GID; 23364e24f2ddSChas Williams tun_default_link_ksettings(dev, &tun->link_ksettings); 23371da177e4SLinus Torvalds 23381da177e4SLinus Torvalds dev->ethtool_ops = &tun_ethtool_ops; 2339cf124db5SDavid S. Miller dev->needs_free_netdev = true; 2340cf124db5SDavid S. Miller dev->priv_destructor = tun_free_netdev; 2341016adb72SJason Wang /* We prefer our own queue length */ 2342016adb72SJason Wang dev->tx_queue_len = TUN_READQ_SIZE; 23431da177e4SLinus Torvalds } 23441da177e4SLinus Torvalds 2345f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting tun or tap 2346f019a7a5SEric W. Biederman * device with netlink. 2347f019a7a5SEric W. Biederman */ 2348a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[], 2349a8b8a889SMatthias Schiffer struct netlink_ext_ack *extack) 2350f019a7a5SEric W. Biederman { 235135b827b6SNicolas Dichtel NL_SET_ERR_MSG(extack, 235235b827b6SNicolas Dichtel "tun/tap creation via rtnetlink is not supported."); 235335b827b6SNicolas Dichtel return -EOPNOTSUPP; 2354f019a7a5SEric W. Biederman } 2355f019a7a5SEric W. Biederman 23561ec010e7SSabrina Dubroca static size_t tun_get_size(const struct net_device *dev) 23571ec010e7SSabrina Dubroca { 23581ec010e7SSabrina Dubroca BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t)); 23591ec010e7SSabrina Dubroca BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t)); 23601ec010e7SSabrina Dubroca 23611ec010e7SSabrina Dubroca return nla_total_size(sizeof(uid_t)) + /* OWNER */ 23621ec010e7SSabrina Dubroca nla_total_size(sizeof(gid_t)) + /* GROUP */ 23631ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* TYPE */ 23641ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* PI */ 23651ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* VNET_HDR */ 23661ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* PERSIST */ 23671ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */ 23681ec010e7SSabrina Dubroca nla_total_size(sizeof(u32)) + /* NUM_QUEUES */ 23691ec010e7SSabrina Dubroca nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */ 23701ec010e7SSabrina Dubroca 0; 23711ec010e7SSabrina Dubroca } 23721ec010e7SSabrina Dubroca 23731ec010e7SSabrina Dubroca static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev) 23741ec010e7SSabrina Dubroca { 23751ec010e7SSabrina Dubroca struct tun_struct *tun = netdev_priv(dev); 23761ec010e7SSabrina Dubroca 23771ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK)) 23781ec010e7SSabrina Dubroca goto nla_put_failure; 23791ec010e7SSabrina Dubroca if (uid_valid(tun->owner) && 23801ec010e7SSabrina Dubroca nla_put_u32(skb, IFLA_TUN_OWNER, 23811ec010e7SSabrina Dubroca from_kuid_munged(current_user_ns(), tun->owner))) 23821ec010e7SSabrina Dubroca goto nla_put_failure; 23831ec010e7SSabrina Dubroca if (gid_valid(tun->group) && 23841ec010e7SSabrina Dubroca nla_put_u32(skb, IFLA_TUN_GROUP, 23851ec010e7SSabrina Dubroca from_kgid_munged(current_user_ns(), tun->group))) 23861ec010e7SSabrina Dubroca goto nla_put_failure; 23871ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI))) 23881ec010e7SSabrina Dubroca goto nla_put_failure; 23891ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR))) 23901ec010e7SSabrina Dubroca goto nla_put_failure; 23911ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST))) 23921ec010e7SSabrina Dubroca goto nla_put_failure; 23931ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE, 23941ec010e7SSabrina Dubroca !!(tun->flags & IFF_MULTI_QUEUE))) 23951ec010e7SSabrina Dubroca goto nla_put_failure; 23961ec010e7SSabrina Dubroca if (tun->flags & IFF_MULTI_QUEUE) { 23971ec010e7SSabrina Dubroca if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues)) 23981ec010e7SSabrina Dubroca goto nla_put_failure; 23991ec010e7SSabrina Dubroca if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES, 24001ec010e7SSabrina Dubroca tun->numdisabled)) 24011ec010e7SSabrina Dubroca goto nla_put_failure; 24021ec010e7SSabrina Dubroca } 24031ec010e7SSabrina Dubroca 24041ec010e7SSabrina Dubroca return 0; 24051ec010e7SSabrina Dubroca 24061ec010e7SSabrina Dubroca nla_put_failure: 24071ec010e7SSabrina Dubroca return -EMSGSIZE; 24081ec010e7SSabrina Dubroca } 24091ec010e7SSabrina Dubroca 2410f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = { 2411f019a7a5SEric W. Biederman .kind = DRV_NAME, 2412f019a7a5SEric W. Biederman .priv_size = sizeof(struct tun_struct), 2413f019a7a5SEric W. Biederman .setup = tun_setup, 2414f019a7a5SEric W. Biederman .validate = tun_validate, 24151ec010e7SSabrina Dubroca .get_size = tun_get_size, 24161ec010e7SSabrina Dubroca .fill_info = tun_fill_info, 2417f019a7a5SEric W. Biederman }; 2418f019a7a5SEric W. Biederman 241933dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk) 242033dccbb0SHerbert Xu { 242154f968d6SJason Wang struct tun_file *tfile; 242243815482SEric Dumazet wait_queue_head_t *wqueue; 242333dccbb0SHerbert Xu 242433dccbb0SHerbert Xu if (!sock_writeable(sk)) 242533dccbb0SHerbert Xu return; 242633dccbb0SHerbert Xu 24279cd3e072SEric Dumazet if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) 242833dccbb0SHerbert Xu return; 242933dccbb0SHerbert Xu 243043815482SEric Dumazet wqueue = sk_sleep(sk); 243143815482SEric Dumazet if (wqueue && waitqueue_active(wqueue)) 2432a9a08845SLinus Torvalds wake_up_interruptible_sync_poll(wqueue, EPOLLOUT | 2433a9a08845SLinus Torvalds EPOLLWRNORM | EPOLLWRBAND); 2434c722c625SHerbert Xu 243554f968d6SJason Wang tfile = container_of(sk, struct tun_file, sk); 243654f968d6SJason Wang kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); 243733dccbb0SHerbert Xu } 243833dccbb0SHerbert Xu 2439f9e06c45SJason Wang static void tun_put_page(struct tun_page *tpage) 2440f9e06c45SJason Wang { 2441f9e06c45SJason Wang if (tpage->page) 2442f9e06c45SJason Wang __page_frag_cache_drain(tpage->page, tpage->count); 2443f9e06c45SJason Wang } 2444f9e06c45SJason Wang 2445043d222fSJason Wang static int tun_xdp_one(struct tun_struct *tun, 2446043d222fSJason Wang struct tun_file *tfile, 2447f9e06c45SJason Wang struct xdp_buff *xdp, int *flush, 2448f9e06c45SJason Wang struct tun_page *tpage) 2449043d222fSJason Wang { 24504e4b08e5SPrashant Bhole unsigned int datasize = xdp->data_end - xdp->data; 2451043d222fSJason Wang struct tun_xdp_hdr *hdr = xdp->data_hard_start; 2452043d222fSJason Wang struct virtio_net_hdr *gso = &hdr->gso; 2453043d222fSJason Wang struct bpf_prog *xdp_prog; 2454043d222fSJason Wang struct sk_buff *skb = NULL; 2455fb3f9037SHarold Huang struct sk_buff_head *queue; 2456043d222fSJason Wang u32 rxhash = 0, act; 2457043d222fSJason Wang int buflen = hdr->buflen; 2458fb3f9037SHarold Huang int ret = 0; 2459043d222fSJason Wang bool skb_xdp = false; 2460f9e06c45SJason Wang struct page *page; 2461043d222fSJason Wang 2462043d222fSJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 2463043d222fSJason Wang if (xdp_prog) { 2464043d222fSJason Wang if (gso->gso_type) { 2465043d222fSJason Wang skb_xdp = true; 2466043d222fSJason Wang goto build; 2467043d222fSJason Wang } 246843b5169dSLorenzo Bianconi 246943b5169dSLorenzo Bianconi xdp_init_buff(xdp, buflen, &tfile->xdp_rxq); 2470043d222fSJason Wang xdp_set_data_meta_invalid(xdp); 2471043d222fSJason Wang 2472043d222fSJason Wang act = bpf_prog_run_xdp(xdp_prog, xdp); 2473fb3f9037SHarold Huang ret = tun_xdp_act(tun, xdp_prog, xdp, act); 2474fb3f9037SHarold Huang if (ret < 0) { 2475043d222fSJason Wang put_page(virt_to_head_page(xdp->data)); 2476fb3f9037SHarold Huang return ret; 2477043d222fSJason Wang } 2478043d222fSJason Wang 2479fb3f9037SHarold Huang switch (ret) { 2480043d222fSJason Wang case XDP_REDIRECT: 2481043d222fSJason Wang *flush = true; 2482df561f66SGustavo A. R. Silva fallthrough; 2483043d222fSJason Wang case XDP_TX: 2484043d222fSJason Wang return 0; 2485043d222fSJason Wang case XDP_PASS: 2486043d222fSJason Wang break; 2487043d222fSJason Wang default: 2488f9e06c45SJason Wang page = virt_to_head_page(xdp->data); 2489f9e06c45SJason Wang if (tpage->page == page) { 2490f9e06c45SJason Wang ++tpage->count; 2491f9e06c45SJason Wang } else { 2492f9e06c45SJason Wang tun_put_page(tpage); 2493f9e06c45SJason Wang tpage->page = page; 2494f9e06c45SJason Wang tpage->count = 1; 2495f9e06c45SJason Wang } 2496043d222fSJason Wang return 0; 2497043d222fSJason Wang } 2498043d222fSJason Wang } 2499043d222fSJason Wang 2500043d222fSJason Wang build: 2501043d222fSJason Wang skb = build_skb(xdp->data_hard_start, buflen); 2502043d222fSJason Wang if (!skb) { 2503fb3f9037SHarold Huang ret = -ENOMEM; 2504043d222fSJason Wang goto out; 2505043d222fSJason Wang } 2506043d222fSJason Wang 2507043d222fSJason Wang skb_reserve(skb, xdp->data - xdp->data_hard_start); 2508043d222fSJason Wang skb_put(skb, xdp->data_end - xdp->data); 2509043d222fSJason Wang 2510043d222fSJason Wang if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) { 2511497a5757SHeiner Kallweit atomic_long_inc(&tun->rx_frame_errors); 2512043d222fSJason Wang kfree_skb(skb); 2513fb3f9037SHarold Huang ret = -EINVAL; 2514043d222fSJason Wang goto out; 2515043d222fSJason Wang } 2516043d222fSJason Wang 2517043d222fSJason Wang skb->protocol = eth_type_trans(skb, tun->dev); 2518043d222fSJason Wang skb_reset_network_header(skb); 2519d2aa125dSMaxim Mikityanskiy skb_probe_transport_header(skb); 25203fe260e0SGilberto Bertin skb_record_rx_queue(skb, tfile->queue_index); 2521043d222fSJason Wang 2522043d222fSJason Wang if (skb_xdp) { 2523fb3f9037SHarold Huang ret = do_xdp_generic(xdp_prog, skb); 2524fb3f9037SHarold Huang if (ret != XDP_PASS) { 2525fb3f9037SHarold Huang ret = 0; 2526043d222fSJason Wang goto out; 2527043d222fSJason Wang } 2528fb3f9037SHarold Huang } 2529043d222fSJason Wang 2530f29eb2a9SPaolo Abeni if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 && 2531f29eb2a9SPaolo Abeni !tfile->detached) 2532043d222fSJason Wang rxhash = __skb_get_hash_symmetric(skb); 2533043d222fSJason Wang 2534fb3f9037SHarold Huang if (tfile->napi_enabled) { 2535fb3f9037SHarold Huang queue = &tfile->sk.sk_write_queue; 2536fb3f9037SHarold Huang spin_lock(&queue->lock); 253782b2bc27SKuniyuki Iwashima 253882b2bc27SKuniyuki Iwashima if (unlikely(tfile->detached)) { 253982b2bc27SKuniyuki Iwashima spin_unlock(&queue->lock); 254082b2bc27SKuniyuki Iwashima kfree_skb(skb); 254182b2bc27SKuniyuki Iwashima return -EBUSY; 254282b2bc27SKuniyuki Iwashima } 254382b2bc27SKuniyuki Iwashima 2544fb3f9037SHarold Huang __skb_queue_tail(queue, skb); 2545fb3f9037SHarold Huang spin_unlock(&queue->lock); 2546fb3f9037SHarold Huang ret = 1; 2547fb3f9037SHarold Huang } else { 2548043d222fSJason Wang netif_receive_skb(skb); 2549fb3f9037SHarold Huang ret = 0; 2550fb3f9037SHarold Huang } 2551043d222fSJason Wang 2552497a5757SHeiner Kallweit /* No need to disable preemption here since this function is 25536342ca64SPrashant Bhole * always called with bh disabled 25546342ca64SPrashant Bhole */ 2555497a5757SHeiner Kallweit dev_sw_netstats_rx_add(tun->dev, datasize); 2556043d222fSJason Wang 2557043d222fSJason Wang if (rxhash) 2558043d222fSJason Wang tun_flow_update(tun, rxhash, tfile); 2559043d222fSJason Wang 2560043d222fSJason Wang out: 2561fb3f9037SHarold Huang return ret; 2562043d222fSJason Wang } 2563043d222fSJason Wang 25641b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) 256505c2828cSMichael S. Tsirkin { 2566043d222fSJason Wang int ret, i; 256754f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 25689484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 2569fe8dd45bSJason Wang struct tun_msg_ctl *ctl = m->msg_control; 2570043d222fSJason Wang struct xdp_buff *xdp; 257154f968d6SJason Wang 257254f968d6SJason Wang if (!tun) 257354f968d6SJason Wang return -EBADFD; 2574f5ff53b4SAl Viro 257574a335a0SHarold Huang if (m->msg_controllen == sizeof(struct tun_msg_ctl) && 257674a335a0SHarold Huang ctl && ctl->type == TUN_MSG_PTR) { 25776f0271d9SDavid S. Miller struct tun_page tpage; 2578043d222fSJason Wang int n = ctl->num; 2579fb3f9037SHarold Huang int flush = 0, queued = 0; 2580043d222fSJason Wang 25816f0271d9SDavid S. Miller memset(&tpage, 0, sizeof(tpage)); 25826f0271d9SDavid S. Miller 2583043d222fSJason Wang local_bh_disable(); 2584043d222fSJason Wang rcu_read_lock(); 2585043d222fSJason Wang 2586043d222fSJason Wang for (i = 0; i < n; i++) { 2587043d222fSJason Wang xdp = &((struct xdp_buff *)ctl->ptr)[i]; 2588fb3f9037SHarold Huang ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage); 2589fb3f9037SHarold Huang if (ret > 0) 2590fb3f9037SHarold Huang queued += ret; 2591043d222fSJason Wang } 2592043d222fSJason Wang 2593043d222fSJason Wang if (flush) 25941d233886SToke Høiland-Jørgensen xdp_do_flush(); 2595043d222fSJason Wang 2596fb3f9037SHarold Huang if (tfile->napi_enabled && queued > 0) 2597fb3f9037SHarold Huang napi_schedule(&tfile->napi); 2598fb3f9037SHarold Huang 2599043d222fSJason Wang rcu_read_unlock(); 2600043d222fSJason Wang local_bh_enable(); 2601043d222fSJason Wang 2602f9e06c45SJason Wang tun_put_page(&tpage); 2603f9e06c45SJason Wang 2604043d222fSJason Wang ret = total_len; 2605043d222fSJason Wang goto out; 2606043d222fSJason Wang } 2607fe8dd45bSJason Wang 2608fe8dd45bSJason Wang ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter, 26095503fcecSJason Wang m->msg_flags & MSG_DONTWAIT, 26105503fcecSJason Wang m->msg_flags & MSG_MORE); 2611043d222fSJason Wang out: 261254f968d6SJason Wang tun_put(tun); 261354f968d6SJason Wang return ret; 261405c2828cSMichael S. Tsirkin } 261505c2828cSMichael S. Tsirkin 26161b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, 261705c2828cSMichael S. Tsirkin int flags) 261805c2828cSMichael S. Tsirkin { 261954f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 26209484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 2621fc72d1d5SJason Wang void *ptr = m->msg_control; 262205c2828cSMichael S. Tsirkin int ret; 262354f968d6SJason Wang 2624c33ee15bSWei Xu if (!tun) { 2625c33ee15bSWei Xu ret = -EBADFD; 2626fc72d1d5SJason Wang goto out_free; 2627c33ee15bSWei Xu } 262854f968d6SJason Wang 2629eda29772SRichard Cochran if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { 26303811ae76SGao feng ret = -EINVAL; 2631c33ee15bSWei Xu goto out_put_tun; 26323811ae76SGao feng } 2633eda29772SRichard Cochran if (flags & MSG_ERRQUEUE) { 2634eda29772SRichard Cochran ret = sock_recv_errqueue(sock->sk, m, total_len, 2635eda29772SRichard Cochran SOL_PACKET, TUN_TX_TIMESTAMP); 2636eda29772SRichard Cochran goto out; 2637eda29772SRichard Cochran } 2638fc72d1d5SJason Wang ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr); 263987897931SAlex Gartrell if (ret > (ssize_t)total_len) { 264042404c09SDavid S. Miller m->msg_flags |= MSG_TRUNC; 264142404c09SDavid S. Miller ret = flags & MSG_TRUNC ? ret : total_len; 264242404c09SDavid S. Miller } 26433811ae76SGao feng out: 264454f968d6SJason Wang tun_put(tun); 264505c2828cSMichael S. Tsirkin return ret; 2646c33ee15bSWei Xu 2647c33ee15bSWei Xu out_put_tun: 2648c33ee15bSWei Xu tun_put(tun); 2649fc72d1d5SJason Wang out_free: 2650fc72d1d5SJason Wang tun_ptr_free(ptr); 2651c33ee15bSWei Xu return ret; 265205c2828cSMichael S. Tsirkin } 265305c2828cSMichael S. Tsirkin 2654fc72d1d5SJason Wang static int tun_ptr_peek_len(void *ptr) 2655fc72d1d5SJason Wang { 2656fc72d1d5SJason Wang if (likely(ptr)) { 26571ffcbc85SJesper Dangaard Brouer if (tun_is_xdp_frame(ptr)) { 26581ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); 2659fc72d1d5SJason Wang 26601ffcbc85SJesper Dangaard Brouer return xdpf->len; 2661fc72d1d5SJason Wang } 2662fc72d1d5SJason Wang return __skb_array_len_with_tag(ptr); 2663fc72d1d5SJason Wang } else { 2664fc72d1d5SJason Wang return 0; 2665fc72d1d5SJason Wang } 2666fc72d1d5SJason Wang } 2667fc72d1d5SJason Wang 26681576d986SJason Wang static int tun_peek_len(struct socket *sock) 26691576d986SJason Wang { 26701576d986SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 26711576d986SJason Wang struct tun_struct *tun; 26721576d986SJason Wang int ret = 0; 26731576d986SJason Wang 26749484dc74Syuan linyu tun = tun_get(tfile); 26751576d986SJason Wang if (!tun) 26761576d986SJason Wang return 0; 26771576d986SJason Wang 2678fc72d1d5SJason Wang ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len); 26791576d986SJason Wang tun_put(tun); 26801576d986SJason Wang 26811576d986SJason Wang return ret; 26821576d986SJason Wang } 26831576d986SJason Wang 268405c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */ 268505c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = { 26861576d986SJason Wang .peek_len = tun_peek_len, 268705c2828cSMichael S. Tsirkin .sendmsg = tun_sendmsg, 268805c2828cSMichael S. Tsirkin .recvmsg = tun_recvmsg, 268905c2828cSMichael S. Tsirkin }; 269005c2828cSMichael S. Tsirkin 269133dccbb0SHerbert Xu static struct proto tun_proto = { 269233dccbb0SHerbert Xu .name = "tun", 269333dccbb0SHerbert Xu .owner = THIS_MODULE, 269454f968d6SJason Wang .obj_size = sizeof(struct tun_file), 269533dccbb0SHerbert Xu }; 2696f019a7a5SEric W. Biederman 2697980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun) 2698980c9e8cSDavid Woodhouse { 2699031f5e03SMichael S. Tsirkin return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); 2700980c9e8cSDavid Woodhouse } 2701980c9e8cSDavid Woodhouse 2702bc6d076dSYueHaibing static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr, 2703980c9e8cSDavid Woodhouse char *buf) 2704980c9e8cSDavid Woodhouse { 2705980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2706aff30699SWang Yufen return sysfs_emit(buf, "0x%x\n", tun_flags(tun)); 2707980c9e8cSDavid Woodhouse } 2708980c9e8cSDavid Woodhouse 2709bc6d076dSYueHaibing static ssize_t owner_show(struct device *dev, struct device_attribute *attr, 2710980c9e8cSDavid Woodhouse char *buf) 2711980c9e8cSDavid Woodhouse { 2712980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 27130625c883SEric W. Biederman return uid_valid(tun->owner)? 2714aff30699SWang Yufen sysfs_emit(buf, "%u\n", 27150625c883SEric W. Biederman from_kuid_munged(current_user_ns(), tun->owner)) : 2716aff30699SWang Yufen sysfs_emit(buf, "-1\n"); 2717980c9e8cSDavid Woodhouse } 2718980c9e8cSDavid Woodhouse 2719bc6d076dSYueHaibing static ssize_t group_show(struct device *dev, struct device_attribute *attr, 2720980c9e8cSDavid Woodhouse char *buf) 2721980c9e8cSDavid Woodhouse { 2722980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 27230625c883SEric W. Biederman return gid_valid(tun->group) ? 2724aff30699SWang Yufen sysfs_emit(buf, "%u\n", 27250625c883SEric W. Biederman from_kgid_munged(current_user_ns(), tun->group)) : 2726aff30699SWang Yufen sysfs_emit(buf, "-1\n"); 2727980c9e8cSDavid Woodhouse } 2728980c9e8cSDavid Woodhouse 2729bc6d076dSYueHaibing static DEVICE_ATTR_RO(tun_flags); 2730bc6d076dSYueHaibing static DEVICE_ATTR_RO(owner); 2731bc6d076dSYueHaibing static DEVICE_ATTR_RO(group); 2732980c9e8cSDavid Woodhouse 2733c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = { 2734c4d33e24STakashi Iwai &dev_attr_tun_flags.attr, 2735c4d33e24STakashi Iwai &dev_attr_owner.attr, 2736c4d33e24STakashi Iwai &dev_attr_group.attr, 2737c4d33e24STakashi Iwai NULL 2738c4d33e24STakashi Iwai }; 2739c4d33e24STakashi Iwai 2740c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = { 2741c4d33e24STakashi Iwai .attrs = tun_dev_attrs 2742c4d33e24STakashi Iwai }; 2743c4d33e24STakashi Iwai 2744d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 27451da177e4SLinus Torvalds { 27461da177e4SLinus Torvalds struct tun_struct *tun; 274754f968d6SJason Wang struct tun_file *tfile = file->private_data; 27481da177e4SLinus Torvalds struct net_device *dev; 27491da177e4SLinus Torvalds int err; 27501da177e4SLinus Torvalds 27517c0c3b1aSJason Wang if (tfile->detached) 27527c0c3b1aSJason Wang return -EINVAL; 27537c0c3b1aSJason Wang 275490e33d45SPetar Penkov if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { 275590e33d45SPetar Penkov if (!capable(CAP_NET_ADMIN)) 275690e33d45SPetar Penkov return -EPERM; 275790e33d45SPetar Penkov 275890e33d45SPetar Penkov if (!(ifr->ifr_flags & IFF_NAPI) || 275990e33d45SPetar Penkov (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) 276090e33d45SPetar Penkov return -EINVAL; 276190e33d45SPetar Penkov } 276290e33d45SPetar Penkov 276374a3e5a7SEric W. Biederman dev = __dev_get_by_name(net, ifr->ifr_name); 276474a3e5a7SEric W. Biederman if (dev) { 2765f85ba780SDavid Woodhouse if (ifr->ifr_flags & IFF_TUN_EXCL) 2766f85ba780SDavid Woodhouse return -EBUSY; 276774a3e5a7SEric W. Biederman if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) 276874a3e5a7SEric W. Biederman tun = netdev_priv(dev); 276974a3e5a7SEric W. Biederman else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) 277074a3e5a7SEric W. Biederman tun = netdev_priv(dev); 277174a3e5a7SEric W. Biederman else 277274a3e5a7SEric W. Biederman return -EINVAL; 277374a3e5a7SEric W. Biederman 27748e6d91aeSJason Wang if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != 277540630b82SMichael S. Tsirkin !!(tun->flags & IFF_MULTI_QUEUE)) 27768e6d91aeSJason Wang return -EINVAL; 27778e6d91aeSJason Wang 2778cde8b15fSJason Wang if (tun_not_capable(tun)) 27792b980dbdSPaul Moore return -EPERM; 27805dbbaf2dSPaul Moore err = security_tun_dev_open(tun->security); 27812b980dbdSPaul Moore if (err < 0) 27822b980dbdSPaul Moore return err; 27832b980dbdSPaul Moore 278494317099SPetar Penkov err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, 2785af3fb24eSEric Dumazet ifr->ifr_flags & IFF_NAPI, 278677f22f92SYang Yingliang ifr->ifr_flags & IFF_NAPI_FRAGS, true); 2787a7385ba2SEric W. Biederman if (err < 0) 2788a7385ba2SEric W. Biederman return err; 27894008e97fSJason Wang 279040630b82SMichael S. Tsirkin if (tun->flags & IFF_MULTI_QUEUE && 2791e8dbad66SJason Wang (tun->numqueues + tun->numdisabled > 1)) { 2792e8dbad66SJason Wang /* One or more queue has already been attached, no need 2793e8dbad66SJason Wang * to initialize the device again. 2794e8dbad66SJason Wang */ 279583c1f36fSSabrina Dubroca netdev_state_change(dev); 2796e8dbad66SJason Wang return 0; 2797e8dbad66SJason Wang } 27989fffc5c6SSabrina Dubroca 27999fffc5c6SSabrina Dubroca tun->flags = (tun->flags & ~TUN_FEATURES) | 28009fffc5c6SSabrina Dubroca (ifr->ifr_flags & TUN_FEATURES); 280183c1f36fSSabrina Dubroca 280283c1f36fSSabrina Dubroca netdev_state_change(dev); 280383c1f36fSSabrina Dubroca } else { 28041da177e4SLinus Torvalds char *name; 28051da177e4SLinus Torvalds unsigned long flags = 0; 2806edfb6a14SJason Wang int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? 2807edfb6a14SJason Wang MAX_TAP_QUEUES : 1; 28081da177e4SLinus Torvalds 2809c260b772SEric W. Biederman if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2810ca6bb5d7SDavid Woodhouse return -EPERM; 28112b980dbdSPaul Moore err = security_tun_dev_create(); 28122b980dbdSPaul Moore if (err < 0) 28132b980dbdSPaul Moore return err; 2814ca6bb5d7SDavid Woodhouse 28151da177e4SLinus Torvalds /* Set dev type */ 28161da177e4SLinus Torvalds if (ifr->ifr_flags & IFF_TUN) { 28171da177e4SLinus Torvalds /* TUN device */ 281840630b82SMichael S. Tsirkin flags |= IFF_TUN; 28191da177e4SLinus Torvalds name = "tun%d"; 28201da177e4SLinus Torvalds } else if (ifr->ifr_flags & IFF_TAP) { 28211da177e4SLinus Torvalds /* TAP device */ 282240630b82SMichael S. Tsirkin flags |= IFF_TAP; 28231da177e4SLinus Torvalds name = "tap%d"; 28241da177e4SLinus Torvalds } else 282536989b90SKusanagi Kouichi return -EINVAL; 28261da177e4SLinus Torvalds 28271da177e4SLinus Torvalds if (*ifr->ifr_name) 28281da177e4SLinus Torvalds name = ifr->ifr_name; 28291da177e4SLinus Torvalds 2830c8d68e6bSJason Wang dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, 2831c835a677STom Gundersen NET_NAME_UNKNOWN, tun_setup, queues, 2832c835a677STom Gundersen queues); 2833edfb6a14SJason Wang 28341da177e4SLinus Torvalds if (!dev) 28351da177e4SLinus Torvalds return -ENOMEM; 28361da177e4SLinus Torvalds 2837fc54c658SPavel Emelyanov dev_net_set(dev, net); 2838f019a7a5SEric W. Biederman dev->rtnl_link_ops = &tun_link_ops; 2839fb7589a1SPavel Emelyanov dev->ifindex = tfile->ifindex; 2840c4d33e24STakashi Iwai dev->sysfs_groups[0] = &tun_attr_group; 2841758e43b7SStephen Hemminger 28421da177e4SLinus Torvalds tun = netdev_priv(dev); 28431da177e4SLinus Torvalds tun->dev = dev; 28441da177e4SLinus Torvalds tun->flags = flags; 2845f271b2ccSMax Krasnyansky tun->txflt.count = 0; 2846d9d52b51SMichael S. Tsirkin tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); 28471da177e4SLinus Torvalds 2848eaea34b2SPaolo Abeni tun->align = NET_SKB_PAD; 284954f968d6SJason Wang tun->filter_attached = false; 285054f968d6SJason Wang tun->sndbuf = tfile->socket.sk->sk_sndbuf; 28515503fcecSJason Wang tun->rx_batched = 0; 285296f84061SJason Wang RCU_INIT_POINTER(tun->steering_prog, NULL); 285333dccbb0SHerbert Xu 2854158b515fSGeorge Kennedy tun->ifr = ifr; 2855158b515fSGeorge Kennedy tun->file = file; 2856608b9977SPaolo Abeni 2857158b515fSGeorge Kennedy tun_net_initialize(dev); 2858eb0fb363SJason Wang 28591da177e4SLinus Torvalds err = register_netdevice(tun->dev); 2860158b515fSGeorge Kennedy if (err < 0) { 2861158b515fSGeorge Kennedy free_netdev(dev); 2862158b515fSGeorge Kennedy return err; 2863158b515fSGeorge Kennedy } 2864c2e315b8SMenglong Dong /* free_netdev() won't check refcnt, to avoid race 286577f22f92SYang Yingliang * with dev_put() we need publish tun after registration. 286677f22f92SYang Yingliang */ 286777f22f92SYang Yingliang rcu_assign_pointer(tfile->tun, tun); 2868af668b3cSMichael S. Tsirkin } 2869980c9e8cSDavid Woodhouse 2870195624d9SPatrick Rohr if (ifr->ifr_flags & IFF_NO_CARRIER) 2871195624d9SPatrick Rohr netif_carrier_off(tun->dev); 2872195624d9SPatrick Rohr else 2873eb0fb363SJason Wang netif_carrier_on(tun->dev); 28741da177e4SLinus Torvalds 2875e35259a9SMax Krasnyansky /* Make sure persistent devices do not get stuck in 2876e35259a9SMax Krasnyansky * xoff state. 2877e35259a9SMax Krasnyansky */ 2878e35259a9SMax Krasnyansky if (netif_running(tun->dev)) 2879c8d68e6bSJason Wang netif_tx_wake_all_queues(tun->dev); 2880e35259a9SMax Krasnyansky 28811da177e4SLinus Torvalds strcpy(ifr->ifr_name, tun->dev->name); 28821da177e4SLinus Torvalds return 0; 28831da177e4SLinus Torvalds } 28841da177e4SLinus Torvalds 288512132768SKirill Tkhai static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr) 2886e3b99556SMark McLoughlin { 2887e3b99556SMark McLoughlin strcpy(ifr->ifr_name, tun->dev->name); 2888e3b99556SMark McLoughlin 2889980c9e8cSDavid Woodhouse ifr->ifr_flags = tun_flags(tun); 2890e3b99556SMark McLoughlin 2891e3b99556SMark McLoughlin } 2892e3b99556SMark McLoughlin 28935228ddc9SRusty Russell /* This is like a cut-down ethtool ops, except done via tun fd so no 28945228ddc9SRusty Russell * privs required. */ 289588255375SMichał Mirosław static int set_offload(struct tun_struct *tun, unsigned long arg) 28965228ddc9SRusty Russell { 2897c8f44affSMichał Mirosław netdev_features_t features = 0; 28985228ddc9SRusty Russell 28995228ddc9SRusty Russell if (arg & TUN_F_CSUM) { 290088255375SMichał Mirosław features |= NETIF_F_HW_CSUM; 29015228ddc9SRusty Russell arg &= ~TUN_F_CSUM; 29025228ddc9SRusty Russell 29035228ddc9SRusty Russell if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { 29045228ddc9SRusty Russell if (arg & TUN_F_TSO_ECN) { 29055228ddc9SRusty Russell features |= NETIF_F_TSO_ECN; 29065228ddc9SRusty Russell arg &= ~TUN_F_TSO_ECN; 29075228ddc9SRusty Russell } 29085228ddc9SRusty Russell if (arg & TUN_F_TSO4) 29095228ddc9SRusty Russell features |= NETIF_F_TSO; 29105228ddc9SRusty Russell if (arg & TUN_F_TSO6) 29115228ddc9SRusty Russell features |= NETIF_F_TSO6; 29125228ddc9SRusty Russell arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 29135228ddc9SRusty Russell } 29140c19f846SWillem de Bruijn 29150c19f846SWillem de Bruijn arg &= ~TUN_F_UFO; 2916399e0827SAndrew Melnychenko 2917399e0827SAndrew Melnychenko /* TODO: for now USO4 and USO6 should work simultaneously */ 2918399e0827SAndrew Melnychenko if (arg & TUN_F_USO4 && arg & TUN_F_USO6) { 2919399e0827SAndrew Melnychenko features |= NETIF_F_GSO_UDP_L4; 2920399e0827SAndrew Melnychenko arg &= ~(TUN_F_USO4 | TUN_F_USO6); 2921399e0827SAndrew Melnychenko } 29225228ddc9SRusty Russell } 29235228ddc9SRusty Russell 29245228ddc9SRusty Russell /* This gives the user a way to test for new features in future by 29255228ddc9SRusty Russell * trying to set them. */ 29265228ddc9SRusty Russell if (arg) 29275228ddc9SRusty Russell return -EINVAL; 29285228ddc9SRusty Russell 292988255375SMichał Mirosław tun->set_features = features; 293009050957SYaroslav Isakov tun->dev->wanted_features &= ~TUN_USER_FEATURES; 293109050957SYaroslav Isakov tun->dev->wanted_features |= features; 293288255375SMichał Mirosław netdev_update_features(tun->dev); 29335228ddc9SRusty Russell 29345228ddc9SRusty Russell return 0; 29355228ddc9SRusty Russell } 29365228ddc9SRusty Russell 2937c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n) 2938c8d68e6bSJason Wang { 2939c8d68e6bSJason Wang int i; 2940c8d68e6bSJason Wang struct tun_file *tfile; 2941c8d68e6bSJason Wang 2942c8d68e6bSJason Wang for (i = 0; i < n; i++) { 2943b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 29448ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 29458ced425eSHannes Frederic Sowa sk_detach_filter(tfile->socket.sk); 29468ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2947c8d68e6bSJason Wang } 2948c8d68e6bSJason Wang 2949c8d68e6bSJason Wang tun->filter_attached = false; 2950c8d68e6bSJason Wang } 2951c8d68e6bSJason Wang 2952c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun) 2953c8d68e6bSJason Wang { 2954c8d68e6bSJason Wang int i, ret = 0; 2955c8d68e6bSJason Wang struct tun_file *tfile; 2956c8d68e6bSJason Wang 2957c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2958b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 29598ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 29608ced425eSHannes Frederic Sowa ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); 29618ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2962c8d68e6bSJason Wang if (ret) { 2963c8d68e6bSJason Wang tun_detach_filter(tun, i); 2964c8d68e6bSJason Wang return ret; 2965c8d68e6bSJason Wang } 2966c8d68e6bSJason Wang } 2967c8d68e6bSJason Wang 2968c8d68e6bSJason Wang tun->filter_attached = true; 2969c8d68e6bSJason Wang return ret; 2970c8d68e6bSJason Wang } 2971c8d68e6bSJason Wang 2972c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun) 2973c8d68e6bSJason Wang { 2974c8d68e6bSJason Wang struct tun_file *tfile; 2975c8d68e6bSJason Wang int i; 2976c8d68e6bSJason Wang 2977c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2978b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 2979c8d68e6bSJason Wang tfile->socket.sk->sk_sndbuf = tun->sndbuf; 2980c8d68e6bSJason Wang } 2981c8d68e6bSJason Wang } 2982c8d68e6bSJason Wang 2983cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr) 2984cde8b15fSJason Wang { 2985cde8b15fSJason Wang struct tun_file *tfile = file->private_data; 2986cde8b15fSJason Wang struct tun_struct *tun; 2987cde8b15fSJason Wang int ret = 0; 2988cde8b15fSJason Wang 2989cde8b15fSJason Wang rtnl_lock(); 2990cde8b15fSJason Wang 2991cde8b15fSJason Wang if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { 29924008e97fSJason Wang tun = tfile->detached; 29935dbbaf2dSPaul Moore if (!tun) { 2994cde8b15fSJason Wang ret = -EINVAL; 29955dbbaf2dSPaul Moore goto unlock; 29965dbbaf2dSPaul Moore } 29975dbbaf2dSPaul Moore ret = security_tun_dev_attach_queue(tun->security); 29985dbbaf2dSPaul Moore if (ret < 0) 29995dbbaf2dSPaul Moore goto unlock; 3000af3fb24eSEric Dumazet ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, 300177f22f92SYang Yingliang tun->flags & IFF_NAPI_FRAGS, true); 30024008e97fSJason Wang } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 3003b8deabd3SJason Wang tun = rtnl_dereference(tfile->tun); 300440630b82SMichael S. Tsirkin if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) 30054008e97fSJason Wang ret = -EINVAL; 3006cde8b15fSJason Wang else 30074008e97fSJason Wang __tun_detach(tfile, false); 30084008e97fSJason Wang } else 3009cde8b15fSJason Wang ret = -EINVAL; 3010cde8b15fSJason Wang 301183c1f36fSSabrina Dubroca if (ret >= 0) 301283c1f36fSSabrina Dubroca netdev_state_change(tun->dev); 301383c1f36fSSabrina Dubroca 30145dbbaf2dSPaul Moore unlock: 3015cde8b15fSJason Wang rtnl_unlock(); 3016cde8b15fSJason Wang return ret; 3017cde8b15fSJason Wang } 3018cde8b15fSJason Wang 30198f3f330dSJason Wang static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p, 3020cd5681d7SJason Wang void __user *data) 302196f84061SJason Wang { 302296f84061SJason Wang struct bpf_prog *prog; 302396f84061SJason Wang int fd; 302496f84061SJason Wang 302596f84061SJason Wang if (copy_from_user(&fd, data, sizeof(fd))) 302696f84061SJason Wang return -EFAULT; 302796f84061SJason Wang 302896f84061SJason Wang if (fd == -1) { 302996f84061SJason Wang prog = NULL; 303096f84061SJason Wang } else { 303196f84061SJason Wang prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); 303296f84061SJason Wang if (IS_ERR(prog)) 303396f84061SJason Wang return PTR_ERR(prog); 303496f84061SJason Wang } 303596f84061SJason Wang 3036cd5681d7SJason Wang return __tun_set_ebpf(tun, prog_p, prog); 303796f84061SJason Wang } 303896f84061SJason Wang 3039cca8ea3bSPhillip Potter /* Return correct value for tun->dev->addr_len based on tun->dev->type. */ 3040cca8ea3bSPhillip Potter static unsigned char tun_get_addr_len(unsigned short type) 3041cca8ea3bSPhillip Potter { 3042cca8ea3bSPhillip Potter switch (type) { 3043cca8ea3bSPhillip Potter case ARPHRD_IP6GRE: 3044cca8ea3bSPhillip Potter case ARPHRD_TUNNEL6: 3045cca8ea3bSPhillip Potter return sizeof(struct in6_addr); 3046cca8ea3bSPhillip Potter case ARPHRD_IPGRE: 3047cca8ea3bSPhillip Potter case ARPHRD_TUNNEL: 3048cca8ea3bSPhillip Potter case ARPHRD_SIT: 3049cca8ea3bSPhillip Potter return 4; 3050cca8ea3bSPhillip Potter case ARPHRD_ETHER: 3051cca8ea3bSPhillip Potter return ETH_ALEN; 3052cca8ea3bSPhillip Potter case ARPHRD_IEEE802154: 3053cca8ea3bSPhillip Potter case ARPHRD_IEEE802154_MONITOR: 3054cca8ea3bSPhillip Potter return IEEE802154_EXTENDED_ADDR_LEN; 3055cca8ea3bSPhillip Potter case ARPHRD_PHONET_PIPE: 3056cca8ea3bSPhillip Potter case ARPHRD_PPP: 3057cca8ea3bSPhillip Potter case ARPHRD_NONE: 3058cca8ea3bSPhillip Potter return 0; 3059cca8ea3bSPhillip Potter case ARPHRD_6LOWPAN: 3060cca8ea3bSPhillip Potter return EUI64_ADDR_LEN; 3061cca8ea3bSPhillip Potter case ARPHRD_FDDI: 3062cca8ea3bSPhillip Potter return FDDI_K_ALEN; 3063cca8ea3bSPhillip Potter case ARPHRD_HIPPI: 3064cca8ea3bSPhillip Potter return HIPPI_ALEN; 3065cca8ea3bSPhillip Potter case ARPHRD_IEEE802: 3066cca8ea3bSPhillip Potter return FC_ALEN; 3067cca8ea3bSPhillip Potter case ARPHRD_ROSE: 3068cca8ea3bSPhillip Potter return ROSE_ADDR_LEN; 3069cca8ea3bSPhillip Potter case ARPHRD_NETROM: 3070cca8ea3bSPhillip Potter return AX25_ADDR_LEN; 3071cca8ea3bSPhillip Potter case ARPHRD_LOCALTLK: 3072cca8ea3bSPhillip Potter return LTALK_ALEN; 3073cca8ea3bSPhillip Potter default: 3074cca8ea3bSPhillip Potter return 0; 3075cca8ea3bSPhillip Potter } 3076cca8ea3bSPhillip Potter } 3077cca8ea3bSPhillip Potter 307850857e2aSArnd Bergmann static long __tun_chr_ioctl(struct file *file, unsigned int cmd, 307950857e2aSArnd Bergmann unsigned long arg, int ifreq_len) 30801da177e4SLinus Torvalds { 308136b50babSEric W. Biederman struct tun_file *tfile = file->private_data; 3082f663706aSKirill Tkhai struct net *net = sock_net(&tfile->sk); 3083631ab46bSEric W. Biederman struct tun_struct *tun; 30841da177e4SLinus Torvalds void __user* argp = (void __user*)arg; 3085cbfbfe3aSEric Dumazet unsigned int carrier; 30861da177e4SLinus Torvalds struct ifreq ifr; 30870625c883SEric W. Biederman kuid_t owner; 30880625c883SEric W. Biederman kgid_t group; 3089cbfbfe3aSEric Dumazet int ifindex; 309033dccbb0SHerbert Xu int sndbuf; 3091d9d52b51SMichael S. Tsirkin int vnet_hdr_sz; 30921cf8e410SMichael S. Tsirkin int le; 3093f271b2ccSMax Krasnyansky int ret; 309483c1f36fSSabrina Dubroca bool do_notify = false; 30951da177e4SLinus Torvalds 3096f2780d6dSKirill Tkhai if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || 3097f2780d6dSKirill Tkhai (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) { 309850857e2aSArnd Bergmann if (copy_from_user(&ifr, argp, ifreq_len)) 30991da177e4SLinus Torvalds return -EFAULT; 31008bbb1813SDavid S. Miller } else { 3101a117dacdSMathias Krause memset(&ifr, 0, sizeof(ifr)); 31028bbb1813SDavid S. Miller } 3103631ab46bSEric W. Biederman if (cmd == TUNGETFEATURES) { 3104631ab46bSEric W. Biederman /* Currently this just means: "what IFF flags are valid?". 3105631ab46bSEric W. Biederman * This is needed because we never checked for invalid flags on 3106031f5e03SMichael S. Tsirkin * TUNSETIFF. 3107031f5e03SMichael S. Tsirkin */ 3108195624d9SPatrick Rohr return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER | 3109195624d9SPatrick Rohr TUN_FEATURES, (unsigned int __user*)argp); 3110f663706aSKirill Tkhai } else if (cmd == TUNSETQUEUE) { 3111cde8b15fSJason Wang return tun_set_queue(file, &ifr); 3112f663706aSKirill Tkhai } else if (cmd == SIOCGSKNS) { 3113f663706aSKirill Tkhai if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 3114f663706aSKirill Tkhai return -EPERM; 3115f663706aSKirill Tkhai return open_related_ns(&net->ns, get_net_ns); 3116f663706aSKirill Tkhai } 3117631ab46bSEric W. Biederman 3118876bfd4dSHerbert Xu rtnl_lock(); 3119876bfd4dSHerbert Xu 31209484dc74Syuan linyu tun = tun_get(tfile); 31210f16bc13SGao Feng if (cmd == TUNSETIFF) { 31220f16bc13SGao Feng ret = -EEXIST; 31230f16bc13SGao Feng if (tun) 31240f16bc13SGao Feng goto unlock; 31250f16bc13SGao Feng 31261da177e4SLinus Torvalds ifr.ifr_name[IFNAMSIZ-1] = '\0'; 31271da177e4SLinus Torvalds 3128f2780d6dSKirill Tkhai ret = tun_set_iff(net, file, &ifr); 31291da177e4SLinus Torvalds 3130876bfd4dSHerbert Xu if (ret) 3131876bfd4dSHerbert Xu goto unlock; 31321da177e4SLinus Torvalds 313350857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 3134876bfd4dSHerbert Xu ret = -EFAULT; 3135876bfd4dSHerbert Xu goto unlock; 31361da177e4SLinus Torvalds } 3137fb7589a1SPavel Emelyanov if (cmd == TUNSETIFINDEX) { 3138fb7589a1SPavel Emelyanov ret = -EPERM; 3139fb7589a1SPavel Emelyanov if (tun) 3140fb7589a1SPavel Emelyanov goto unlock; 3141fb7589a1SPavel Emelyanov 3142fb7589a1SPavel Emelyanov ret = -EFAULT; 3143fb7589a1SPavel Emelyanov if (copy_from_user(&ifindex, argp, sizeof(ifindex))) 3144fb7589a1SPavel Emelyanov goto unlock; 3145cbfbfe3aSEric Dumazet ret = -EINVAL; 3146cbfbfe3aSEric Dumazet if (ifindex < 0) 3147cbfbfe3aSEric Dumazet goto unlock; 3148fb7589a1SPavel Emelyanov ret = 0; 3149fb7589a1SPavel Emelyanov tfile->ifindex = ifindex; 3150fb7589a1SPavel Emelyanov goto unlock; 3151fb7589a1SPavel Emelyanov } 31521da177e4SLinus Torvalds 3153876bfd4dSHerbert Xu ret = -EBADFD; 31541da177e4SLinus Torvalds if (!tun) 3155876bfd4dSHerbert Xu goto unlock; 31561da177e4SLinus Torvalds 31573424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd); 31581da177e4SLinus Torvalds 31590c3e0e3bSKirill Tkhai net = dev_net(tun->dev); 3160631ab46bSEric W. Biederman ret = 0; 31611da177e4SLinus Torvalds switch (cmd) { 3162e3b99556SMark McLoughlin case TUNGETIFF: 316312132768SKirill Tkhai tun_get_iff(tun, &ifr); 3164e3b99556SMark McLoughlin 31653d407a80SPavel Emelyanov if (tfile->detached) 31663d407a80SPavel Emelyanov ifr.ifr_flags |= IFF_DETACH_QUEUE; 3167849c9b6fSPavel Emelyanov if (!tfile->socket.sk->sk_filter) 3168849c9b6fSPavel Emelyanov ifr.ifr_flags |= IFF_NOFILTER; 31693d407a80SPavel Emelyanov 317050857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 3171631ab46bSEric W. Biederman ret = -EFAULT; 3172e3b99556SMark McLoughlin break; 3173e3b99556SMark McLoughlin 31741da177e4SLinus Torvalds case TUNSETNOCSUM: 31751da177e4SLinus Torvalds /* Disable/Enable checksum */ 31761da177e4SLinus Torvalds 317788255375SMichał Mirosław /* [unimplemented] */ 31783424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n", 31796b8a66eeSJoe Perches arg ? "disabled" : "enabled"); 31801da177e4SLinus Torvalds break; 31811da177e4SLinus Torvalds 31821da177e4SLinus Torvalds case TUNSETPERSIST: 318354f968d6SJason Wang /* Disable/Enable persist mode. Keep an extra reference to the 318454f968d6SJason Wang * module to prevent the module being unprobed. 318554f968d6SJason Wang */ 318640630b82SMichael S. Tsirkin if (arg && !(tun->flags & IFF_PERSIST)) { 318740630b82SMichael S. Tsirkin tun->flags |= IFF_PERSIST; 318854f968d6SJason Wang __module_get(THIS_MODULE); 318983c1f36fSSabrina Dubroca do_notify = true; 3190dd38bd85SJason Wang } 319140630b82SMichael S. Tsirkin if (!arg && (tun->flags & IFF_PERSIST)) { 319240630b82SMichael S. Tsirkin tun->flags &= ~IFF_PERSIST; 319354f968d6SJason Wang module_put(THIS_MODULE); 319483c1f36fSSabrina Dubroca do_notify = true; 319554f968d6SJason Wang } 31961da177e4SLinus Torvalds 31973424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "persist %s\n", 31986b8a66eeSJoe Perches arg ? "enabled" : "disabled"); 31991da177e4SLinus Torvalds break; 32001da177e4SLinus Torvalds 32011da177e4SLinus Torvalds case TUNSETOWNER: 32021da177e4SLinus Torvalds /* Set owner of the device */ 32030625c883SEric W. Biederman owner = make_kuid(current_user_ns(), arg); 32040625c883SEric W. Biederman if (!uid_valid(owner)) { 32050625c883SEric W. Biederman ret = -EINVAL; 32060625c883SEric W. Biederman break; 32070625c883SEric W. Biederman } 32080625c883SEric W. Biederman tun->owner = owner; 320983c1f36fSSabrina Dubroca do_notify = true; 32103424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "owner set to %u\n", 32110625c883SEric W. Biederman from_kuid(&init_user_ns, tun->owner)); 32121da177e4SLinus Torvalds break; 32131da177e4SLinus Torvalds 32148c644623SGuido Guenther case TUNSETGROUP: 32158c644623SGuido Guenther /* Set group of the device */ 32160625c883SEric W. Biederman group = make_kgid(current_user_ns(), arg); 32170625c883SEric W. Biederman if (!gid_valid(group)) { 32180625c883SEric W. Biederman ret = -EINVAL; 32190625c883SEric W. Biederman break; 32200625c883SEric W. Biederman } 32210625c883SEric W. Biederman tun->group = group; 322283c1f36fSSabrina Dubroca do_notify = true; 32233424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "group set to %u\n", 32240625c883SEric W. Biederman from_kgid(&init_user_ns, tun->group)); 32258c644623SGuido Guenther break; 32268c644623SGuido Guenther 3227ff4cc3acSMike Kershaw case TUNSETLINK: 3228ff4cc3acSMike Kershaw /* Only allow setting the type when the interface is down */ 3229ff4cc3acSMike Kershaw if (tun->dev->flags & IFF_UP) { 32303424170fSMichal Kubecek netif_info(tun, drv, tun->dev, 32316b8a66eeSJoe Perches "Linktype set failed because interface is up\n"); 323248abfe05SDavid S. Miller ret = -EBUSY; 3233ff4cc3acSMike Kershaw } else { 32348e1e33ffSMartin Schiller ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, 32358e1e33ffSMartin Schiller tun->dev); 32368e1e33ffSMartin Schiller ret = notifier_to_errno(ret); 32378e1e33ffSMartin Schiller if (ret) { 32388e1e33ffSMartin Schiller netif_info(tun, drv, tun->dev, 32398e1e33ffSMartin Schiller "Refused to change device type\n"); 32408e1e33ffSMartin Schiller break; 32418e1e33ffSMartin Schiller } 3242ff4cc3acSMike Kershaw tun->dev->type = (int) arg; 3243cca8ea3bSPhillip Potter tun->dev->addr_len = tun_get_addr_len(tun->dev->type); 32443424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "linktype set to %d\n", 32456b8a66eeSJoe Perches tun->dev->type); 32468e1e33ffSMartin Schiller call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, 32478e1e33ffSMartin Schiller tun->dev); 3248ff4cc3acSMike Kershaw } 3249631ab46bSEric W. Biederman break; 3250ff4cc3acSMike Kershaw 32511da177e4SLinus Torvalds case TUNSETDEBUG: 32523424170fSMichal Kubecek tun->msg_enable = (u32)arg; 32531da177e4SLinus Torvalds break; 32543424170fSMichal Kubecek 32555228ddc9SRusty Russell case TUNSETOFFLOAD: 325688255375SMichał Mirosław ret = set_offload(tun, arg); 3257631ab46bSEric W. Biederman break; 32585228ddc9SRusty Russell 3259f271b2ccSMax Krasnyansky case TUNSETTXFILTER: 3260f271b2ccSMax Krasnyansky /* Can be set only for TAPs */ 3261631ab46bSEric W. Biederman ret = -EINVAL; 326240630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 3263631ab46bSEric W. Biederman break; 3264c0e5a8c2SHarvey Harrison ret = update_filter(&tun->txflt, (void __user *)arg); 3265631ab46bSEric W. Biederman break; 32661da177e4SLinus Torvalds 32671da177e4SLinus Torvalds case SIOCGIFHWADDR: 3268b595076aSUwe Kleine-König /* Get hw address */ 32693b23a32aSCong Wang dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name); 327050857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 3271631ab46bSEric W. Biederman ret = -EFAULT; 3272631ab46bSEric W. Biederman break; 32731da177e4SLinus Torvalds 32741da177e4SLinus Torvalds case SIOCSIFHWADDR: 3275f271b2ccSMax Krasnyansky /* Set hw address */ 32763b23a32aSCong Wang ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL); 3277631ab46bSEric W. Biederman break; 327833dccbb0SHerbert Xu 327933dccbb0SHerbert Xu case TUNGETSNDBUF: 328054f968d6SJason Wang sndbuf = tfile->socket.sk->sk_sndbuf; 328133dccbb0SHerbert Xu if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) 328233dccbb0SHerbert Xu ret = -EFAULT; 328333dccbb0SHerbert Xu break; 328433dccbb0SHerbert Xu 328533dccbb0SHerbert Xu case TUNSETSNDBUF: 328633dccbb0SHerbert Xu if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { 328733dccbb0SHerbert Xu ret = -EFAULT; 328833dccbb0SHerbert Xu break; 328933dccbb0SHerbert Xu } 329093161922SCraig Gallek if (sndbuf <= 0) { 329193161922SCraig Gallek ret = -EINVAL; 329293161922SCraig Gallek break; 329393161922SCraig Gallek } 329433dccbb0SHerbert Xu 3295c8d68e6bSJason Wang tun->sndbuf = sndbuf; 3296c8d68e6bSJason Wang tun_set_sndbuf(tun); 329733dccbb0SHerbert Xu break; 329833dccbb0SHerbert Xu 3299d9d52b51SMichael S. Tsirkin case TUNGETVNETHDRSZ: 3300d9d52b51SMichael S. Tsirkin vnet_hdr_sz = tun->vnet_hdr_sz; 3301d9d52b51SMichael S. Tsirkin if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) 3302d9d52b51SMichael S. Tsirkin ret = -EFAULT; 3303d9d52b51SMichael S. Tsirkin break; 3304d9d52b51SMichael S. Tsirkin 3305d9d52b51SMichael S. Tsirkin case TUNSETVNETHDRSZ: 3306d9d52b51SMichael S. Tsirkin if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { 3307d9d52b51SMichael S. Tsirkin ret = -EFAULT; 3308d9d52b51SMichael S. Tsirkin break; 3309d9d52b51SMichael S. Tsirkin } 3310d9d52b51SMichael S. Tsirkin if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { 3311d9d52b51SMichael S. Tsirkin ret = -EINVAL; 3312d9d52b51SMichael S. Tsirkin break; 3313d9d52b51SMichael S. Tsirkin } 3314d9d52b51SMichael S. Tsirkin 3315d9d52b51SMichael S. Tsirkin tun->vnet_hdr_sz = vnet_hdr_sz; 3316d9d52b51SMichael S. Tsirkin break; 3317d9d52b51SMichael S. Tsirkin 33181cf8e410SMichael S. Tsirkin case TUNGETVNETLE: 33191cf8e410SMichael S. Tsirkin le = !!(tun->flags & TUN_VNET_LE); 33201cf8e410SMichael S. Tsirkin if (put_user(le, (int __user *)argp)) 33211cf8e410SMichael S. Tsirkin ret = -EFAULT; 33221cf8e410SMichael S. Tsirkin break; 33231cf8e410SMichael S. Tsirkin 33241cf8e410SMichael S. Tsirkin case TUNSETVNETLE: 33251cf8e410SMichael S. Tsirkin if (get_user(le, (int __user *)argp)) { 33261cf8e410SMichael S. Tsirkin ret = -EFAULT; 33271cf8e410SMichael S. Tsirkin break; 33281cf8e410SMichael S. Tsirkin } 33291cf8e410SMichael S. Tsirkin if (le) 33301cf8e410SMichael S. Tsirkin tun->flags |= TUN_VNET_LE; 33311cf8e410SMichael S. Tsirkin else 33321cf8e410SMichael S. Tsirkin tun->flags &= ~TUN_VNET_LE; 33331cf8e410SMichael S. Tsirkin break; 33341cf8e410SMichael S. Tsirkin 33358b8e658bSGreg Kurz case TUNGETVNETBE: 33368b8e658bSGreg Kurz ret = tun_get_vnet_be(tun, argp); 33378b8e658bSGreg Kurz break; 33388b8e658bSGreg Kurz 33398b8e658bSGreg Kurz case TUNSETVNETBE: 33408b8e658bSGreg Kurz ret = tun_set_vnet_be(tun, argp); 33418b8e658bSGreg Kurz break; 33428b8e658bSGreg Kurz 334399405162SMichael S. Tsirkin case TUNATTACHFILTER: 334499405162SMichael S. Tsirkin /* Can be set only for TAPs */ 334599405162SMichael S. Tsirkin ret = -EINVAL; 334640630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 334799405162SMichael S. Tsirkin break; 334899405162SMichael S. Tsirkin ret = -EFAULT; 334954f968d6SJason Wang if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) 335099405162SMichael S. Tsirkin break; 335199405162SMichael S. Tsirkin 3352c8d68e6bSJason Wang ret = tun_attach_filter(tun); 335399405162SMichael S. Tsirkin break; 335499405162SMichael S. Tsirkin 335599405162SMichael S. Tsirkin case TUNDETACHFILTER: 335699405162SMichael S. Tsirkin /* Can be set only for TAPs */ 335799405162SMichael S. Tsirkin ret = -EINVAL; 335840630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 335999405162SMichael S. Tsirkin break; 3360c8d68e6bSJason Wang ret = 0; 3361c8d68e6bSJason Wang tun_detach_filter(tun, tun->numqueues); 336299405162SMichael S. Tsirkin break; 336399405162SMichael S. Tsirkin 336476975e9cSPavel Emelyanov case TUNGETFILTER: 336576975e9cSPavel Emelyanov ret = -EINVAL; 336640630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 336776975e9cSPavel Emelyanov break; 336876975e9cSPavel Emelyanov ret = -EFAULT; 336976975e9cSPavel Emelyanov if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) 337076975e9cSPavel Emelyanov break; 337176975e9cSPavel Emelyanov ret = 0; 337276975e9cSPavel Emelyanov break; 337376975e9cSPavel Emelyanov 337496f84061SJason Wang case TUNSETSTEERINGEBPF: 3375cd5681d7SJason Wang ret = tun_set_ebpf(tun, &tun->steering_prog, argp); 337696f84061SJason Wang break; 337796f84061SJason Wang 3378aff3d70aSJason Wang case TUNSETFILTEREBPF: 3379aff3d70aSJason Wang ret = tun_set_ebpf(tun, &tun->filter_prog, argp); 3380aff3d70aSJason Wang break; 3381aff3d70aSJason Wang 338226d31925SNicolas Dichtel case TUNSETCARRIER: 338326d31925SNicolas Dichtel ret = -EFAULT; 338426d31925SNicolas Dichtel if (copy_from_user(&carrier, argp, sizeof(carrier))) 338526d31925SNicolas Dichtel goto unlock; 338626d31925SNicolas Dichtel 338726d31925SNicolas Dichtel ret = tun_net_change_carrier(tun->dev, (bool)carrier); 338826d31925SNicolas Dichtel break; 338926d31925SNicolas Dichtel 33900c3e0e3bSKirill Tkhai case TUNGETDEVNETNS: 33910c3e0e3bSKirill Tkhai ret = -EPERM; 33920c3e0e3bSKirill Tkhai if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 33930c3e0e3bSKirill Tkhai goto unlock; 33940c3e0e3bSKirill Tkhai ret = open_related_ns(&net->ns, get_net_ns); 33950c3e0e3bSKirill Tkhai break; 33960c3e0e3bSKirill Tkhai 33971da177e4SLinus Torvalds default: 3398631ab46bSEric W. Biederman ret = -EINVAL; 3399631ab46bSEric W. Biederman break; 3400ee289b64SJoe Perches } 34011da177e4SLinus Torvalds 340283c1f36fSSabrina Dubroca if (do_notify) 340383c1f36fSSabrina Dubroca netdev_state_change(tun->dev); 340483c1f36fSSabrina Dubroca 3405876bfd4dSHerbert Xu unlock: 3406876bfd4dSHerbert Xu rtnl_unlock(); 3407876bfd4dSHerbert Xu if (tun) 3408631ab46bSEric W. Biederman tun_put(tun); 3409631ab46bSEric W. Biederman return ret; 34101da177e4SLinus Torvalds } 34111da177e4SLinus Torvalds 341250857e2aSArnd Bergmann static long tun_chr_ioctl(struct file *file, 341350857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 341450857e2aSArnd Bergmann { 341550857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); 341650857e2aSArnd Bergmann } 341750857e2aSArnd Bergmann 341850857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 341950857e2aSArnd Bergmann static long tun_chr_compat_ioctl(struct file *file, 342050857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 342150857e2aSArnd Bergmann { 342250857e2aSArnd Bergmann switch (cmd) { 342350857e2aSArnd Bergmann case TUNSETIFF: 342450857e2aSArnd Bergmann case TUNGETIFF: 342550857e2aSArnd Bergmann case TUNSETTXFILTER: 342650857e2aSArnd Bergmann case TUNGETSNDBUF: 342750857e2aSArnd Bergmann case TUNSETSNDBUF: 342850857e2aSArnd Bergmann case SIOCGIFHWADDR: 342950857e2aSArnd Bergmann case SIOCSIFHWADDR: 343050857e2aSArnd Bergmann arg = (unsigned long)compat_ptr(arg); 343150857e2aSArnd Bergmann break; 343250857e2aSArnd Bergmann default: 343350857e2aSArnd Bergmann arg = (compat_ulong_t)arg; 343450857e2aSArnd Bergmann break; 343550857e2aSArnd Bergmann } 343650857e2aSArnd Bergmann 343750857e2aSArnd Bergmann /* 343850857e2aSArnd Bergmann * compat_ifreq is shorter than ifreq, so we must not access beyond 343950857e2aSArnd Bergmann * the end of that structure. All fields that are used in this 344050857e2aSArnd Bergmann * driver are compatible though, we don't need to convert the 344150857e2aSArnd Bergmann * contents. 344250857e2aSArnd Bergmann */ 344350857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); 344450857e2aSArnd Bergmann } 344550857e2aSArnd Bergmann #endif /* CONFIG_COMPAT */ 344650857e2aSArnd Bergmann 34471da177e4SLinus Torvalds static int tun_chr_fasync(int fd, struct file *file, int on) 34481da177e4SLinus Torvalds { 344954f968d6SJason Wang struct tun_file *tfile = file->private_data; 34501da177e4SLinus Torvalds int ret; 34511da177e4SLinus Torvalds 345254f968d6SJason Wang if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) 34539d319522SJonathan Corbet goto out; 34541da177e4SLinus Torvalds 34551da177e4SLinus Torvalds if (on) { 345601919134SEric W. Biederman __f_setown(file, task_pid(current), PIDTYPE_TGID, 0); 345754f968d6SJason Wang tfile->flags |= TUN_FASYNC; 34581da177e4SLinus Torvalds } else 345954f968d6SJason Wang tfile->flags &= ~TUN_FASYNC; 34609d319522SJonathan Corbet ret = 0; 34619d319522SJonathan Corbet out: 34629d319522SJonathan Corbet return ret; 34631da177e4SLinus Torvalds } 34641da177e4SLinus Torvalds 34651da177e4SLinus Torvalds static int tun_chr_open(struct inode *inode, struct file * file) 34661da177e4SLinus Torvalds { 3467140e807dSEric W. Biederman struct net *net = current->nsproxy->net_ns; 3468631ab46bSEric W. Biederman struct tun_file *tfile; 3469deed49fbSThomas Gleixner 3470140e807dSEric W. Biederman tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, 347111aa9c28SEric W. Biederman &tun_proto, 0); 3472631ab46bSEric W. Biederman if (!tfile) 3473631ab46bSEric W. Biederman return -ENOMEM; 3474b196d88aSJason Wang if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) { 3475b196d88aSJason Wang sk_free(&tfile->sk); 3476b196d88aSJason Wang return -ENOMEM; 3477b196d88aSJason Wang } 3478b196d88aSJason Wang 3479c7256f57SEric Dumazet mutex_init(&tfile->napi_mutex); 3480c956674bSMonam Agarwal RCU_INIT_POINTER(tfile->tun, NULL); 348154f968d6SJason Wang tfile->flags = 0; 3482fb7589a1SPavel Emelyanov tfile->ifindex = 0; 348354f968d6SJason Wang 3484333f7909SAl Viro init_waitqueue_head(&tfile->socket.wq.wait); 348554f968d6SJason Wang 348654f968d6SJason Wang tfile->socket.file = file; 348754f968d6SJason Wang tfile->socket.ops = &tun_socket_ops; 348854f968d6SJason Wang 34899bc30473SLaszlo Ersek sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid()); 349054f968d6SJason Wang 349154f968d6SJason Wang tfile->sk.sk_write_space = tun_sock_write_space; 349254f968d6SJason Wang tfile->sk.sk_sndbuf = INT_MAX; 349354f968d6SJason Wang 3494631ab46bSEric W. Biederman file->private_data = tfile; 34954008e97fSJason Wang INIT_LIST_HEAD(&tfile->next); 349654f968d6SJason Wang 349719a6afb2SJason Wang sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); 349819a6afb2SJason Wang 3499438b4060SJens Axboe /* tun groks IOCB_NOWAIT just fine, mark it as such */ 3500438b4060SJens Axboe file->f_mode |= FMODE_NOWAIT; 35011da177e4SLinus Torvalds return 0; 35021da177e4SLinus Torvalds } 35031da177e4SLinus Torvalds 35041da177e4SLinus Torvalds static int tun_chr_close(struct inode *inode, struct file *file) 35051da177e4SLinus Torvalds { 3506631ab46bSEric W. Biederman struct tun_file *tfile = file->private_data; 35071da177e4SLinus Torvalds 3508c8d68e6bSJason Wang tun_detach(tfile, true); 35091da177e4SLinus Torvalds 35101da177e4SLinus Torvalds return 0; 35111da177e4SLinus Torvalds } 35121da177e4SLinus Torvalds 351393e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 35149484dc74Syuan linyu static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) 351593e14b6dSMasatake YAMATO { 35169484dc74Syuan linyu struct tun_file *tfile = file->private_data; 351793e14b6dSMasatake YAMATO struct tun_struct *tun; 351893e14b6dSMasatake YAMATO struct ifreq ifr; 351993e14b6dSMasatake YAMATO 352093e14b6dSMasatake YAMATO memset(&ifr, 0, sizeof(ifr)); 352193e14b6dSMasatake YAMATO 352293e14b6dSMasatake YAMATO rtnl_lock(); 35239484dc74Syuan linyu tun = tun_get(tfile); 352493e14b6dSMasatake YAMATO if (tun) 352512132768SKirill Tkhai tun_get_iff(tun, &ifr); 352693e14b6dSMasatake YAMATO rtnl_unlock(); 352793e14b6dSMasatake YAMATO 352893e14b6dSMasatake YAMATO if (tun) 352993e14b6dSMasatake YAMATO tun_put(tun); 353093e14b6dSMasatake YAMATO 3531a3816ab0SJoe Perches seq_printf(m, "iff:\t%s\n", ifr.ifr_name); 353293e14b6dSMasatake YAMATO } 353393e14b6dSMasatake YAMATO #endif 353493e14b6dSMasatake YAMATO 3535d54b1fdbSArjan van de Ven static const struct file_operations tun_fops = { 35361da177e4SLinus Torvalds .owner = THIS_MODULE, 35371da177e4SLinus Torvalds .llseek = no_llseek, 35389b067034SAl Viro .read_iter = tun_chr_read_iter, 3539f5ff53b4SAl Viro .write_iter = tun_chr_write_iter, 35401da177e4SLinus Torvalds .poll = tun_chr_poll, 3541876bfd4dSHerbert Xu .unlocked_ioctl = tun_chr_ioctl, 354250857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 354350857e2aSArnd Bergmann .compat_ioctl = tun_chr_compat_ioctl, 354450857e2aSArnd Bergmann #endif 35451da177e4SLinus Torvalds .open = tun_chr_open, 35461da177e4SLinus Torvalds .release = tun_chr_close, 354793e14b6dSMasatake YAMATO .fasync = tun_chr_fasync, 354893e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 354993e14b6dSMasatake YAMATO .show_fdinfo = tun_chr_show_fdinfo, 355093e14b6dSMasatake YAMATO #endif 35511da177e4SLinus Torvalds }; 35521da177e4SLinus Torvalds 35531da177e4SLinus Torvalds static struct miscdevice tun_miscdev = { 35541da177e4SLinus Torvalds .minor = TUN_MINOR, 35551da177e4SLinus Torvalds .name = "tun", 3556e454cea2SKay Sievers .nodename = "net/tun", 35571da177e4SLinus Torvalds .fops = &tun_fops, 35581da177e4SLinus Torvalds }; 35591da177e4SLinus Torvalds 35601da177e4SLinus Torvalds /* ethtool interface */ 35611da177e4SLinus Torvalds 35624e24f2ddSChas Williams static void tun_default_link_ksettings(struct net_device *dev, 356329ccc49dSPhilippe Reynes struct ethtool_link_ksettings *cmd) 35641da177e4SLinus Torvalds { 356529ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, supported); 356629ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, advertising); 3567598d2982SIlya Maximets cmd->base.speed = SPEED_10000; 356829ccc49dSPhilippe Reynes cmd->base.duplex = DUPLEX_FULL; 356929ccc49dSPhilippe Reynes cmd->base.port = PORT_TP; 357029ccc49dSPhilippe Reynes cmd->base.phy_address = 0; 357129ccc49dSPhilippe Reynes cmd->base.autoneg = AUTONEG_DISABLE; 35724e24f2ddSChas Williams } 35734e24f2ddSChas Williams 35744e24f2ddSChas Williams static int tun_get_link_ksettings(struct net_device *dev, 35754e24f2ddSChas Williams struct ethtool_link_ksettings *cmd) 35764e24f2ddSChas Williams { 35774e24f2ddSChas Williams struct tun_struct *tun = netdev_priv(dev); 35784e24f2ddSChas Williams 35794e24f2ddSChas Williams memcpy(cmd, &tun->link_ksettings, sizeof(*cmd)); 35804e24f2ddSChas Williams return 0; 35814e24f2ddSChas Williams } 35824e24f2ddSChas Williams 35834e24f2ddSChas Williams static int tun_set_link_ksettings(struct net_device *dev, 35844e24f2ddSChas Williams const struct ethtool_link_ksettings *cmd) 35854e24f2ddSChas Williams { 35864e24f2ddSChas Williams struct tun_struct *tun = netdev_priv(dev); 35874e24f2ddSChas Williams 35884e24f2ddSChas Williams memcpy(&tun->link_ksettings, cmd, sizeof(*cmd)); 35891da177e4SLinus Torvalds return 0; 35901da177e4SLinus Torvalds } 35911da177e4SLinus Torvalds 35921da177e4SLinus Torvalds static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 35931da177e4SLinus Torvalds { 35941da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 35951da177e4SLinus Torvalds 3596fb3ceec1SWolfram Sang strscpy(info->driver, DRV_NAME, sizeof(info->driver)); 3597fb3ceec1SWolfram Sang strscpy(info->version, DRV_VERSION, sizeof(info->version)); 35981da177e4SLinus Torvalds 35991da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 360040630b82SMichael S. Tsirkin case IFF_TUN: 3601fb3ceec1SWolfram Sang strscpy(info->bus_info, "tun", sizeof(info->bus_info)); 36021da177e4SLinus Torvalds break; 360340630b82SMichael S. Tsirkin case IFF_TAP: 3604fb3ceec1SWolfram Sang strscpy(info->bus_info, "tap", sizeof(info->bus_info)); 36051da177e4SLinus Torvalds break; 36061da177e4SLinus Torvalds } 36071da177e4SLinus Torvalds } 36081da177e4SLinus Torvalds 36091da177e4SLinus Torvalds static u32 tun_get_msglevel(struct net_device *dev) 36101da177e4SLinus Torvalds { 36111da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 36123424170fSMichal Kubecek 36133424170fSMichal Kubecek return tun->msg_enable; 36141da177e4SLinus Torvalds } 36151da177e4SLinus Torvalds 36161da177e4SLinus Torvalds static void tun_set_msglevel(struct net_device *dev, u32 value) 36171da177e4SLinus Torvalds { 36181da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 36193424170fSMichal Kubecek 36203424170fSMichal Kubecek tun->msg_enable = value; 36211da177e4SLinus Torvalds } 36221da177e4SLinus Torvalds 36235503fcecSJason Wang static int tun_get_coalesce(struct net_device *dev, 3624f3ccfda1SYufeng Mo struct ethtool_coalesce *ec, 3625f3ccfda1SYufeng Mo struct kernel_ethtool_coalesce *kernel_coal, 3626f3ccfda1SYufeng Mo struct netlink_ext_ack *extack) 36275503fcecSJason Wang { 36285503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 36295503fcecSJason Wang 36305503fcecSJason Wang ec->rx_max_coalesced_frames = tun->rx_batched; 36315503fcecSJason Wang 36325503fcecSJason Wang return 0; 36335503fcecSJason Wang } 36345503fcecSJason Wang 36355503fcecSJason Wang static int tun_set_coalesce(struct net_device *dev, 3636f3ccfda1SYufeng Mo struct ethtool_coalesce *ec, 3637f3ccfda1SYufeng Mo struct kernel_ethtool_coalesce *kernel_coal, 3638f3ccfda1SYufeng Mo struct netlink_ext_ack *extack) 36395503fcecSJason Wang { 36405503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 36415503fcecSJason Wang 36425503fcecSJason Wang if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) 36435503fcecSJason Wang tun->rx_batched = NAPI_POLL_WEIGHT; 36445503fcecSJason Wang else 36455503fcecSJason Wang tun->rx_batched = ec->rx_max_coalesced_frames; 36465503fcecSJason Wang 36475503fcecSJason Wang return 0; 36485503fcecSJason Wang } 36495503fcecSJason Wang 36507282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops = { 3651e5ad00b3SJakub Kicinski .supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES, 36521da177e4SLinus Torvalds .get_drvinfo = tun_get_drvinfo, 36531da177e4SLinus Torvalds .get_msglevel = tun_get_msglevel, 36541da177e4SLinus Torvalds .set_msglevel = tun_set_msglevel, 3655bee31369SNolan Leake .get_link = ethtool_op_get_link, 3656eda29772SRichard Cochran .get_ts_info = ethtool_op_get_ts_info, 36575503fcecSJason Wang .get_coalesce = tun_get_coalesce, 36585503fcecSJason Wang .set_coalesce = tun_set_coalesce, 365929ccc49dSPhilippe Reynes .get_link_ksettings = tun_get_link_ksettings, 36604e24f2ddSChas Williams .set_link_ksettings = tun_set_link_ksettings, 36611da177e4SLinus Torvalds }; 36621da177e4SLinus Torvalds 36631576d986SJason Wang static int tun_queue_resize(struct tun_struct *tun) 36641576d986SJason Wang { 36651576d986SJason Wang struct net_device *dev = tun->dev; 36661576d986SJason Wang struct tun_file *tfile; 36675990a305SJason Wang struct ptr_ring **rings; 36681576d986SJason Wang int n = tun->numqueues + tun->numdisabled; 36691576d986SJason Wang int ret, i; 36701576d986SJason Wang 36715990a305SJason Wang rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); 36725990a305SJason Wang if (!rings) 36731576d986SJason Wang return -ENOMEM; 36741576d986SJason Wang 36751576d986SJason Wang for (i = 0; i < tun->numqueues; i++) { 36761576d986SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 36775990a305SJason Wang rings[i] = &tfile->tx_ring; 36781576d986SJason Wang } 36791576d986SJason Wang list_for_each_entry(tfile, &tun->disabled, next) 36805990a305SJason Wang rings[i++] = &tfile->tx_ring; 36811576d986SJason Wang 36825990a305SJason Wang ret = ptr_ring_resize_multiple(rings, n, 36835990a305SJason Wang dev->tx_queue_len, GFP_KERNEL, 3684fc72d1d5SJason Wang tun_ptr_free); 36851576d986SJason Wang 36865990a305SJason Wang kfree(rings); 36871576d986SJason Wang return ret; 36881576d986SJason Wang } 36891576d986SJason Wang 36901576d986SJason Wang static int tun_device_event(struct notifier_block *unused, 36911576d986SJason Wang unsigned long event, void *ptr) 36921576d986SJason Wang { 36931576d986SJason Wang struct net_device *dev = netdev_notifier_info_to_dev(ptr); 36941576d986SJason Wang struct tun_struct *tun = netdev_priv(dev); 369572b319dcSFei Li int i; 36961576d986SJason Wang 369786dfb4acSCraig Gallek if (dev->rtnl_link_ops != &tun_link_ops) 369886dfb4acSCraig Gallek return NOTIFY_DONE; 369986dfb4acSCraig Gallek 37001576d986SJason Wang switch (event) { 37011576d986SJason Wang case NETDEV_CHANGE_TX_QUEUE_LEN: 37021576d986SJason Wang if (tun_queue_resize(tun)) 37031576d986SJason Wang return NOTIFY_BAD; 37041576d986SJason Wang break; 370572b319dcSFei Li case NETDEV_UP: 370672b319dcSFei Li for (i = 0; i < tun->numqueues; i++) { 370772b319dcSFei Li struct tun_file *tfile; 370872b319dcSFei Li 370972b319dcSFei Li tfile = rtnl_dereference(tun->tfiles[i]); 371072b319dcSFei Li tfile->socket.sk->sk_write_space(tfile->socket.sk); 371172b319dcSFei Li } 371272b319dcSFei Li break; 37131576d986SJason Wang default: 37141576d986SJason Wang break; 37151576d986SJason Wang } 37161576d986SJason Wang 37171576d986SJason Wang return NOTIFY_DONE; 37181576d986SJason Wang } 37191576d986SJason Wang 37201576d986SJason Wang static struct notifier_block tun_notifier_block __read_mostly = { 37211576d986SJason Wang .notifier_call = tun_device_event, 37221576d986SJason Wang }; 372379d17604SPavel Emelyanov 37241da177e4SLinus Torvalds static int __init tun_init(void) 37251da177e4SLinus Torvalds { 37261da177e4SLinus Torvalds int ret = 0; 37271da177e4SLinus Torvalds 37286b8a66eeSJoe Perches pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 37291da177e4SLinus Torvalds 3730f019a7a5SEric W. Biederman ret = rtnl_link_register(&tun_link_ops); 373179d17604SPavel Emelyanov if (ret) { 37326b8a66eeSJoe Perches pr_err("Can't register link_ops\n"); 3733f019a7a5SEric W. Biederman goto err_linkops; 373479d17604SPavel Emelyanov } 373579d17604SPavel Emelyanov 37361da177e4SLinus Torvalds ret = misc_register(&tun_miscdev); 373779d17604SPavel Emelyanov if (ret) { 37386b8a66eeSJoe Perches pr_err("Can't register misc device %d\n", TUN_MINOR); 373979d17604SPavel Emelyanov goto err_misc; 374079d17604SPavel Emelyanov } 37411576d986SJason Wang 37425edfbd3cSTonghao Zhang ret = register_netdevice_notifier(&tun_notifier_block); 37435edfbd3cSTonghao Zhang if (ret) { 37445edfbd3cSTonghao Zhang pr_err("Can't register netdevice notifier\n"); 37455edfbd3cSTonghao Zhang goto err_notifier; 37465edfbd3cSTonghao Zhang } 37475edfbd3cSTonghao Zhang 374879d17604SPavel Emelyanov return 0; 37495edfbd3cSTonghao Zhang 37505edfbd3cSTonghao Zhang err_notifier: 37515edfbd3cSTonghao Zhang misc_deregister(&tun_miscdev); 375279d17604SPavel Emelyanov err_misc: 3753f019a7a5SEric W. Biederman rtnl_link_unregister(&tun_link_ops); 3754f019a7a5SEric W. Biederman err_linkops: 37551da177e4SLinus Torvalds return ret; 37561da177e4SLinus Torvalds } 37571da177e4SLinus Torvalds 3758b2f83233SZiyang Xuan static void __exit tun_cleanup(void) 37591da177e4SLinus Torvalds { 37601da177e4SLinus Torvalds misc_deregister(&tun_miscdev); 3761f019a7a5SEric W. Biederman rtnl_link_unregister(&tun_link_ops); 37621576d986SJason Wang unregister_netdevice_notifier(&tun_notifier_block); 37631da177e4SLinus Torvalds } 37641da177e4SLinus Torvalds 376505c2828cSMichael S. Tsirkin /* Get an underlying socket object from tun file. Returns error unless file is 376605c2828cSMichael S. Tsirkin * attached to a device. The returned object works like a packet socket, it 376705c2828cSMichael S. Tsirkin * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for 376805c2828cSMichael S. Tsirkin * holding a reference to the file for as long as the socket is in use. */ 376905c2828cSMichael S. Tsirkin struct socket *tun_get_socket(struct file *file) 377005c2828cSMichael S. Tsirkin { 37716e914fc7SJason Wang struct tun_file *tfile; 377205c2828cSMichael S. Tsirkin if (file->f_op != &tun_fops) 377305c2828cSMichael S. Tsirkin return ERR_PTR(-EINVAL); 37746e914fc7SJason Wang tfile = file->private_data; 37756e914fc7SJason Wang if (!tfile) 377605c2828cSMichael S. Tsirkin return ERR_PTR(-EBADFD); 377754f968d6SJason Wang return &tfile->socket; 377805c2828cSMichael S. Tsirkin } 377905c2828cSMichael S. Tsirkin EXPORT_SYMBOL_GPL(tun_get_socket); 378005c2828cSMichael S. Tsirkin 37815990a305SJason Wang struct ptr_ring *tun_get_tx_ring(struct file *file) 378283339c6bSJason Wang { 378383339c6bSJason Wang struct tun_file *tfile; 378483339c6bSJason Wang 378583339c6bSJason Wang if (file->f_op != &tun_fops) 378683339c6bSJason Wang return ERR_PTR(-EINVAL); 378783339c6bSJason Wang tfile = file->private_data; 378883339c6bSJason Wang if (!tfile) 378983339c6bSJason Wang return ERR_PTR(-EBADFD); 37905990a305SJason Wang return &tfile->tx_ring; 379183339c6bSJason Wang } 37925990a305SJason Wang EXPORT_SYMBOL_GPL(tun_get_tx_ring); 379383339c6bSJason Wang 37941da177e4SLinus Torvalds module_init(tun_init); 37951da177e4SLinus Torvalds module_exit(tun_cleanup); 37961da177e4SLinus Torvalds MODULE_DESCRIPTION(DRV_DESCRIPTION); 37971da177e4SLinus Torvalds MODULE_AUTHOR(DRV_COPYRIGHT); 37981da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 37991da177e4SLinus Torvalds MODULE_ALIAS_MISCDEV(TUN_MINOR); 3800578454ffSKay Sievers MODULE_ALIAS("devname:net/tun"); 3801