// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/ip_tunnels.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE     0x80000000
#define TUN_VNET_BE     0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket-related structures (except sock_fprog and
 * tap_filter) so that it can serve as one transmit queue for the tuntap
 * device. The sock_fprog and tap_filter are kept in tun_struct since they
 * are used to filter for the netdevice, not for a specific queue (at
 * least I didn't see the requirement for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one
 * to the other can only be read while rcu_read_lock or rtnl_lock is held.
 */
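/* A minimal sketch of the reader side implied by the rule above (this
 * mirrors what tun_get() does later in this file; it is shown here only
 * to make the locking contract concrete):
 *
 *	rcu_read_lock();
 *	tun = rcu_dereference(tfile->tun);
 *	if (tun)
 *		dev_hold(tun->dev);
 *	rcu_read_unlock();
 */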
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_page {
	struct page *page;
	int count;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated ____cacheline_aligned_in_smp;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the sockets were moved to tun_file, the socket filter, sndbuf and
 * vnet header size are restored when a file is attached to a persistent
 * device, to preserve that device's behavior.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int            numqueues;
	unsigned int 		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
	u32			msg_enable;
	spinlock_t		lock;
	struct hlist_head	flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list	flow_gc_timer;
	unsigned long		ageing_time;
	unsigned int		numdisabled;
	struct list_head	disabled;
	void			*security;
	u32			flow_count;
	u32			rx_batched;
	atomic_long_t		rx_frame_errors;
	struct bpf_prog __rcu	*xdp_prog;
	struct tun_prog __rcu	*steering_prog;
	struct tun_prog __rcu	*filter_prog;
	struct ethtool_link_ksettings link_ksettings;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
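		/* This NAPI context is driven by tun_napi_poll() above:
		 * packets that the receive path queued on sk_write_queue
		 * are drained from there into napi_gro_receive().
		 */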
		netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
				  NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */
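/* The vnet header exchanged with userspace carries __virtio16 fields;
 * the helpers below resolve their byte order for this device: forced
 * little-endian with TUN_VNET_LE, a big-endian legacy guest with
 * TUN_VNET_BE (CONFIG_TUN_VNET_CROSS_LE only), otherwise the legacy
 * native order. Illustrative use, not tied to one call site:
 *
 *	hdr_len = tun16_to_cpu(tun, gso.hdr_len);
 */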
static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}
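/* The flow table below has TUN_NUM_FLOW_ENTRIES buckets; that count
 * must stay a power of two so that TUN_MASK_FLOW_ENTRIES works as a
 * hash mask in tun_hashfn().
 */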
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		netif_info(tun, tx_queued, tun->dev,
			   "create flow: hash %u index %u\n",
			   rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
		   e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}
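/* The GC timer is only re-armed while entries remain, so an idle table
 * keeps no pending timer; tun_flow_update() below arms it again when a
 * new flow is created.
 */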
static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		if (READ_ONCE(e->queue_index) != queue_index)
			WRITE_ONCE(e->queue_index, queue_index);
		if (e->updated != jiffies)
			e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

/* Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash. The reason we do not
 * check the rxq number is that some NICs (e.g. the 82599) choose the rxq
 * based on the txq where the last packet of the flow went out. As the
 * userspace application moves between processors, we may get a different
 * rxq number here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
	if (e) {
		tun_flow_save_rps_rxhash(e, txq);
		txq = e->queue_index;
	} else {
		/* use multiply and shift instead of expensive divide */
		txq = ((u64)txq * numqueues) >> 32;
	}

	return txq;
}

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u32 numqueues;
	u16 ret = 0;

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues)
		return 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}
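/* Worked example of the multiply-and-shift above: with numqueues = 4
 * and hash txq = 0xc0000000, ((u64)0xc0000000 * 4) >> 32 = 3, i.e. the
 * top quarter of the 32-bit hash range maps to the last queue, without
 * a divide on the transmit path.
 */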
static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;
		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
				   NULL);
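		/* The vacated tail slot is published as NULL before
		 * numqueues drops, so a concurrent reader that still sees
		 * the old count finds a NULL tfile (handled in
		 * tun_net_xmit() and tun_xdp_xmit()) rather than a stale
		 * pointer.
		 */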

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
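	/* Sample dev before __tun_detach() clears tfile->tun, so the
	 * state change below can still be reported if the device
	 * survives the detach.
	 */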
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags,
		      bool publish_tun)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to the persistent device; bail out on
	 * failure so the error is reported to the caller. */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (err < 0)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index, 0);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	if (rtnl_dereference(tun->xdp_prog))
		sock_set_flag(&tfile->sk, SOCK_XDP);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

	/* Publish tfile->tun and tun->tfiles only after we've fully
	 * initialized tfile; otherwise we risk using half-initialized
	 * object.
	 */
	if (publish_tun)
		rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;
	tun_set_real_num_queues(tun);
out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks, which is
	 * perfectly safe: we disable it first, and in the worst case
	 * we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}
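/* check_filter() treating count == 0 as "accept everything" is what
 * makes the lock-free update in update_filter() safe: while the table
 * is being rewritten, the filter is simply off.
 */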

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		struct tun_flow_entry *e;
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
		if (e)
			tun_flow_save_rps_rxhash(e, rxhash);
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (!tfile)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;
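	/* An optional eBPF filter (installed via TUNSETFILTEREBPF) may
	 * shorten the packet; a zero return drops it entirely.
	 */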
	len = run_ebpf_filter(tun, skb, len);
	if (len == 0 || pskb_trim(skb, len))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for an indefinite time.
	 */
	skb_orphan(skb);

	nf_reset_ct(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	atomic_long_inc(&dev->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}
1071f271b2ccSMax Krasnyansky */ 10721da177e4SLinus Torvalds } 10731da177e4SLinus Torvalds 1074c8f44affSMichał Mirosław static netdev_features_t tun_net_fix_features(struct net_device *dev, 1075c8f44affSMichał Mirosław netdev_features_t features) 107688255375SMichał Mirosław { 107788255375SMichał Mirosław struct tun_struct *tun = netdev_priv(dev); 107888255375SMichał Mirosław 107988255375SMichał Mirosław return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); 108088255375SMichał Mirosław } 1081eaea34b2SPaolo Abeni 1082eaea34b2SPaolo Abeni static void tun_set_headroom(struct net_device *dev, int new_hr) 1083eaea34b2SPaolo Abeni { 1084eaea34b2SPaolo Abeni struct tun_struct *tun = netdev_priv(dev); 1085eaea34b2SPaolo Abeni 1086eaea34b2SPaolo Abeni if (new_hr < NET_SKB_PAD) 1087eaea34b2SPaolo Abeni new_hr = NET_SKB_PAD; 1088eaea34b2SPaolo Abeni 1089eaea34b2SPaolo Abeni tun->align = new_hr; 1090eaea34b2SPaolo Abeni } 1091eaea34b2SPaolo Abeni 1092bc1f4470Sstephen hemminger static void 1093608b9977SPaolo Abeni tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 1094608b9977SPaolo Abeni { 1095608b9977SPaolo Abeni struct tun_struct *tun = netdev_priv(dev); 1096608b9977SPaolo Abeni 1097497a5757SHeiner Kallweit dev_get_tstats64(dev, stats); 1098608b9977SPaolo Abeni 1099497a5757SHeiner Kallweit stats->rx_frame_errors += 1100497a5757SHeiner Kallweit (unsigned long)atomic_long_read(&tun->rx_frame_errors); 1101608b9977SPaolo Abeni } 1102608b9977SPaolo Abeni 1103761876c8SJason Wang static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, 1104761876c8SJason Wang struct netlink_ext_ack *extack) 1105761876c8SJason Wang { 1106761876c8SJason Wang struct tun_struct *tun = netdev_priv(dev); 1107e4a2a304SJason Wang struct tun_file *tfile; 1108761876c8SJason Wang struct bpf_prog *old_prog; 1109e4a2a304SJason Wang int i; 1110761876c8SJason Wang 1111761876c8SJason Wang old_prog = rtnl_dereference(tun->xdp_prog); 1112761876c8SJason Wang rcu_assign_pointer(tun->xdp_prog, prog); 1113761876c8SJason Wang if (old_prog) 1114761876c8SJason Wang bpf_prog_put(old_prog); 1115761876c8SJason Wang 1116e4a2a304SJason Wang for (i = 0; i < tun->numqueues; i++) { 1117e4a2a304SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 1118e4a2a304SJason Wang if (prog) 1119e4a2a304SJason Wang sock_set_flag(&tfile->sk, SOCK_XDP); 1120e4a2a304SJason Wang else 1121e4a2a304SJason Wang sock_reset_flag(&tfile->sk, SOCK_XDP); 1122e4a2a304SJason Wang } 1123e4a2a304SJason Wang list_for_each_entry(tfile, &tun->disabled, next) { 1124e4a2a304SJason Wang if (prog) 1125e4a2a304SJason Wang sock_set_flag(&tfile->sk, SOCK_XDP); 1126e4a2a304SJason Wang else 1127e4a2a304SJason Wang sock_reset_flag(&tfile->sk, SOCK_XDP); 1128e4a2a304SJason Wang } 1129e4a2a304SJason Wang 1130761876c8SJason Wang return 0; 1131761876c8SJason Wang } 1132761876c8SJason Wang 1133f4e63525SJakub Kicinski static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) 1134761876c8SJason Wang { 1135761876c8SJason Wang switch (xdp->command) { 1136761876c8SJason Wang case XDP_SETUP_PROG: 1137761876c8SJason Wang return tun_xdp_set(dev, xdp->prog, xdp->extack); 1138761876c8SJason Wang default: 1139761876c8SJason Wang return -EINVAL; 1140761876c8SJason Wang } 1141761876c8SJason Wang } 1142761876c8SJason Wang 114326d31925SNicolas Dichtel static int tun_net_change_carrier(struct net_device *dev, bool new_carrier) 114426d31925SNicolas Dichtel { 114526d31925SNicolas Dichtel if (new_carrier) { 114626d31925SNicolas Dichtel struct tun_struct 
static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier) {
		struct tun_struct *tun = netdev_priv(dev);

		if (!tun->numqueues)
			return -EPERM;

		netif_carrier_on(dev);
	} else {
		netif_carrier_off(dev);
	}
	return 0;
}

static const struct net_device_ops tun_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_change_carrier	= tun_net_change_carrier,
};

static void __tun_xdp_flush_tfile(struct tun_file *tfile)
{
	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
}

static int tun_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	u32 numqueues;
	int drops = 0;
	int cnt = n;
	int i;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();

resample:
	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues) {
		rcu_read_unlock();
		return -ENXIO; /* Caller will free/return all frames */
	}

	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
					    numqueues]);
	if (unlikely(!tfile))
		goto resample;

	spin_lock(&tfile->tx_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdp = frames[i];
		/* Encode the XDP flag into the lowest bit so the consumer
		 * can tell an XDP frame apart from an sk_buff.
		 */
		void *frame = tun_xdp_to_ptr(xdp);

		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
			atomic_long_inc(&dev->tx_dropped);
			xdp_return_frame_rx_napi(xdp);
			drops++;
		}
	}
	spin_unlock(&tfile->tx_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__tun_xdp_flush_tfile(tfile);

	rcu_read_unlock();
	return cnt - drops;
}
1210fc72d1d5SJason Wang */ 1211735fc405SJesper Dangaard Brouer void *frame = tun_xdp_to_ptr(xdp); 1212fc72d1d5SJason Wang 1213735fc405SJesper Dangaard Brouer if (__ptr_ring_produce(&tfile->tx_ring, frame)) { 1214497a5757SHeiner Kallweit atomic_long_inc(&dev->tx_dropped); 1215735fc405SJesper Dangaard Brouer xdp_return_frame_rx_napi(xdp); 1216735fc405SJesper Dangaard Brouer drops++; 1217735fc405SJesper Dangaard Brouer } 1218735fc405SJesper Dangaard Brouer } 1219735fc405SJesper Dangaard Brouer spin_unlock(&tfile->tx_ring.producer_lock); 1220735fc405SJesper Dangaard Brouer 12210c9d917bSJesper Dangaard Brouer if (flags & XDP_XMIT_FLUSH) 12220c9d917bSJesper Dangaard Brouer __tun_xdp_flush_tfile(tfile); 12230c9d917bSJesper Dangaard Brouer 1224fc72d1d5SJason Wang rcu_read_unlock(); 1225735fc405SJesper Dangaard Brouer return cnt - drops; 1226fc72d1d5SJason Wang } 1227fc72d1d5SJason Wang 122844fa2dbdSJesper Dangaard Brouer static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) 122944fa2dbdSJesper Dangaard Brouer { 12301b698fa5SLorenzo Bianconi struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp); 123144fa2dbdSJesper Dangaard Brouer 123244fa2dbdSJesper Dangaard Brouer if (unlikely(!frame)) 123344fa2dbdSJesper Dangaard Brouer return -EOVERFLOW; 123444fa2dbdSJesper Dangaard Brouer 123542421a56SJesper Dangaard Brouer return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH); 1236fc72d1d5SJason Wang } 1237fc72d1d5SJason Wang 1238758e43b7SStephen Hemminger static const struct net_device_ops tap_netdev_ops = { 1239c70f1829SEric W. Biederman .ndo_uninit = tun_net_uninit, 1240758e43b7SStephen Hemminger .ndo_open = tun_net_open, 1241758e43b7SStephen Hemminger .ndo_stop = tun_net_close, 124200829823SStephen Hemminger .ndo_start_xmit = tun_net_xmit, 124388255375SMichał Mirosław .ndo_fix_features = tun_net_fix_features, 1244afc4b13dSJiri Pirko .ndo_set_rx_mode = tun_net_mclist, 1245758e43b7SStephen Hemminger .ndo_set_mac_address = eth_mac_addr, 1246758e43b7SStephen Hemminger .ndo_validate_addr = eth_validate_addr, 1247c8d68e6bSJason Wang .ndo_select_queue = tun_select_queue, 12485e52796aSToshiaki Makita .ndo_features_check = passthru_features_check, 1249eaea34b2SPaolo Abeni .ndo_set_rx_headroom = tun_set_headroom, 1250497a5757SHeiner Kallweit .ndo_get_stats64 = dev_get_tstats64, 1251f4e63525SJakub Kicinski .ndo_bpf = tun_xdp, 1252fc72d1d5SJason Wang .ndo_xdp_xmit = tun_xdp_xmit, 125326d31925SNicolas Dichtel .ndo_change_carrier = tun_net_change_carrier, 1254758e43b7SStephen Hemminger }; 1255758e43b7SStephen Hemminger 1256944a1376SPavel Emelyanov static void tun_flow_init(struct tun_struct *tun) 125796442e42SJason Wang { 125896442e42SJason Wang int i; 125996442e42SJason Wang 126096442e42SJason Wang for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) 126196442e42SJason Wang INIT_HLIST_HEAD(&tun->flows[i]); 126296442e42SJason Wang 126396442e42SJason Wang tun->ageing_time = TUN_FLOW_EXPIRE; 1264e99e88a9SKees Cook timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0); 1265e99e88a9SKees Cook mod_timer(&tun->flow_gc_timer, 1266e99e88a9SKees Cook round_jiffies_up(jiffies + tun->ageing_time)); 126796442e42SJason Wang } 126896442e42SJason Wang 126996442e42SJason Wang static void tun_flow_uninit(struct tun_struct *tun) 127096442e42SJason Wang { 127196442e42SJason Wang del_timer_sync(&tun->flow_gc_timer); 127296442e42SJason Wang tun_flow_flush(tun); 127396442e42SJason Wang } 127496442e42SJason Wang 127591572088SJarod Wilson #define MIN_MTU 68 127691572088SJarod Wilson #define MAX_MTU 65535 127791572088SJarod Wilson 
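/* A hedged userspace sketch (illustrative only, not part of this file)
 * of creating a tun device and then setting its MTU; tun_net_init()
 * below advertises the [MIN_MTU, MAX_MTU - hard_header_len] range that
 * the core enforces for SIOCSIFMTU:
 *
 *	struct ifreq ifr = { 0 };
 *	int tun_fd = open("/dev/net/tun", O_RDWR);
 *	int ctl_sk = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
 *	strncpy(ifr.ifr_name, "tun-demo", IFNAMSIZ);	// name is arbitrary
 *	ioctl(tun_fd, TUNSETIFF, &ifr);		// create the device
 *	ifr.ifr_mtu = 1400;			// must fit within min/max_mtu
 *	ioctl(ctl_sk, SIOCSIFMTU, &ifr);	// rejected outside the range
 */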
12781da177e4SLinus Torvalds /* Initialize net device. */ 12791da177e4SLinus Torvalds static void tun_net_init(struct net_device *dev) 12801da177e4SLinus Torvalds { 12811da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 12821da177e4SLinus Torvalds 12831da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 128440630b82SMichael S. Tsirkin case IFF_TUN: 1285758e43b7SStephen Hemminger dev->netdev_ops = &tun_netdev_ops; 1286b9815eb1SJason A. Donenfeld dev->header_ops = &ip_tunnel_header_ops; 1287758e43b7SStephen Hemminger 12881da177e4SLinus Torvalds /* Point-to-Point TUN Device */ 12891da177e4SLinus Torvalds dev->hard_header_len = 0; 12901da177e4SLinus Torvalds dev->addr_len = 0; 12911da177e4SLinus Torvalds dev->mtu = 1500; 12921da177e4SLinus Torvalds 12931da177e4SLinus Torvalds /* Zero header length */ 12941da177e4SLinus Torvalds dev->type = ARPHRD_NONE; 12951da177e4SLinus Torvalds dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 12961da177e4SLinus Torvalds break; 12971da177e4SLinus Torvalds 129840630b82SMichael S. Tsirkin case IFF_TAP: 12997a0a9608SKusanagi Kouichi dev->netdev_ops = &tap_netdev_ops; 13001da177e4SLinus Torvalds /* Ethernet TAP Device */ 13011da177e4SLinus Torvalds ether_setup(dev); 1302550fd08cSNeil Horman dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1303a676847bSstephen hemminger dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 130436226a8dSBrian Braunstein 1305f2cedb63SDanny Kukawka eth_hw_addr_random(dev); 130636226a8dSBrian Braunstein 13071da177e4SLinus Torvalds break; 13081da177e4SLinus Torvalds } 130991572088SJarod Wilson 131091572088SJarod Wilson dev->min_mtu = MIN_MTU; 131191572088SJarod Wilson dev->max_mtu = MAX_MTU - dev->hard_header_len; 13121da177e4SLinus Torvalds } 13131da177e4SLinus Torvalds 13142f3ab622SJason Wang static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile) 13152f3ab622SJason Wang { 13162f3ab622SJason Wang struct sock *sk = tfile->socket.sk; 13172f3ab622SJason Wang 13182f3ab622SJason Wang return (tun->dev->flags & IFF_UP) && sock_writeable(sk); 13192f3ab622SJason Wang } 13202f3ab622SJason Wang 13211da177e4SLinus Torvalds /* Character device part */ 13221da177e4SLinus Torvalds 13231da177e4SLinus Torvalds /* Poll */ 1324afc9a42bSAl Viro static __poll_t tun_chr_poll(struct file *file, poll_table *wait) 13251da177e4SLinus Torvalds { 1326b2430de3SEric W. Biederman struct tun_file *tfile = file->private_data; 13279484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 13283c8a9c63SMariusz Kozlowski struct sock *sk; 1329afc9a42bSAl Viro __poll_t mask = 0; 13301da177e4SLinus Torvalds 13311da177e4SLinus Torvalds if (!tun) 1332a9a08845SLinus Torvalds return EPOLLERR; 13331da177e4SLinus Torvalds 133454f968d6SJason Wang sk = tfile->socket.sk; 13353c8a9c63SMariusz Kozlowski 13369e641bdcSXi Wang poll_wait(file, sk_sleep(sk), wait); 13371da177e4SLinus Torvalds 13385990a305SJason Wang if (!ptr_ring_empty(&tfile->tx_ring)) 1339a9a08845SLinus Torvalds mask |= EPOLLIN | EPOLLRDNORM; 13401da177e4SLinus Torvalds 13412f3ab622SJason Wang /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to 13422f3ab622SJason Wang * guarantee EPOLLOUT to be raised by either here or 13432f3ab622SJason Wang * tun_sock_write_space(). Then process could get notification 13442f3ab622SJason Wang * after it writes to a down device and meets -EIO. 
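/* Userspace typically multiplexes on the tun fd; a minimal sketch of
 * the read side with poll(2) (illustrative only, not driver code):
 *
 *	struct pollfd pfd = { .fd = tun_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(tun_fd, buf, sizeof(buf));
 *
 * EPOLLIN below reflects a non-empty tx_ring, EPOLLOUT a writeable
 * socket send buffer.
 */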
13452f3ab622SJason Wang */ 13462f3ab622SJason Wang if (tun_sock_writeable(tun, tfile) || 13479cd3e072SEric Dumazet (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 13482f3ab622SJason Wang tun_sock_writeable(tun, tfile))) 1349a9a08845SLinus Torvalds mask |= EPOLLOUT | EPOLLWRNORM; 135033dccbb0SHerbert Xu 1351c70f1829SEric W. Biederman if (tun->dev->reg_state != NETREG_REGISTERED) 1352a9a08845SLinus Torvalds mask = EPOLLERR; 1353c70f1829SEric W. Biederman 1354631ab46bSEric W. Biederman tun_put(tun); 13551da177e4SLinus Torvalds return mask; 13561da177e4SLinus Torvalds } 13571da177e4SLinus Torvalds 135890e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, 135990e33d45SPetar Penkov size_t len, 136090e33d45SPetar Penkov const struct iov_iter *it) 136190e33d45SPetar Penkov { 136290e33d45SPetar Penkov struct sk_buff *skb; 136390e33d45SPetar Penkov size_t linear; 136490e33d45SPetar Penkov int err; 136590e33d45SPetar Penkov int i; 136690e33d45SPetar Penkov 136790e33d45SPetar Penkov if (it->nr_segs > MAX_SKB_FRAGS + 1) 1368950271d7SYunjian Wang return ERR_PTR(-EMSGSIZE); 136990e33d45SPetar Penkov 137090e33d45SPetar Penkov local_bh_disable(); 137190e33d45SPetar Penkov skb = napi_get_frags(&tfile->napi); 137290e33d45SPetar Penkov local_bh_enable(); 137390e33d45SPetar Penkov if (!skb) 137490e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 137590e33d45SPetar Penkov 137690e33d45SPetar Penkov linear = iov_iter_single_seg_count(it); 137790e33d45SPetar Penkov err = __skb_grow(skb, linear); 137890e33d45SPetar Penkov if (err) 137990e33d45SPetar Penkov goto free; 138090e33d45SPetar Penkov 138190e33d45SPetar Penkov skb->len = len; 138290e33d45SPetar Penkov skb->data_len = len - linear; 138390e33d45SPetar Penkov skb->truesize += skb->data_len; 138490e33d45SPetar Penkov 138590e33d45SPetar Penkov for (i = 1; i < it->nr_segs; i++) { 138690e33d45SPetar Penkov size_t fragsz = it->iov[i].iov_len; 1387aa6daacaSEric Dumazet struct page *page; 1388aa6daacaSEric Dumazet void *frag; 138990e33d45SPetar Penkov 139090e33d45SPetar Penkov if (fragsz == 0 || fragsz > PAGE_SIZE) { 139190e33d45SPetar Penkov err = -EINVAL; 139290e33d45SPetar Penkov goto free; 139390e33d45SPetar Penkov } 1394aa6daacaSEric Dumazet frag = netdev_alloc_frag(fragsz); 1395aa6daacaSEric Dumazet if (!frag) { 139690e33d45SPetar Penkov err = -ENOMEM; 139790e33d45SPetar Penkov goto free; 139890e33d45SPetar Penkov } 1399aa6daacaSEric Dumazet page = virt_to_head_page(frag); 1400aa6daacaSEric Dumazet skb_fill_page_desc(skb, i - 1, page, 1401aa6daacaSEric Dumazet frag - page_address(page), fragsz); 140290e33d45SPetar Penkov } 140390e33d45SPetar Penkov 140490e33d45SPetar Penkov return skb; 140590e33d45SPetar Penkov free: 140690e33d45SPetar Penkov /* frees skb and all frags allocated with napi_alloc_frag() */ 140790e33d45SPetar Penkov napi_free_frags(&tfile->napi); 140890e33d45SPetar Penkov return ERR_PTR(err); 140990e33d45SPetar Penkov } 141090e33d45SPetar Penkov 1411f42157cbSRusty Russell /* prepad is the amount to reserve at front. len is length after that. 1412f42157cbSRusty Russell * linear is a hint as to how much to copy (usually headers). 
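 *
 * For example (numbers are illustrative): prepad = NET_SKB_PAD,
 * len = 1500 and linear = 128 gives NET_SKB_PAD bytes of headroom, a
 * 128 byte linear area for the headers and the remaining 1372 bytes
 * in page frags, avoiding a large contiguous allocation.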
*/ 141354f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, 141433dccbb0SHerbert Xu size_t prepad, size_t len, 141533dccbb0SHerbert Xu size_t linear, int noblock) 1416f42157cbSRusty Russell { 141754f968d6SJason Wang struct sock *sk = tfile->socket.sk; 1418f42157cbSRusty Russell struct sk_buff *skb; 141933dccbb0SHerbert Xu int err; 1420f42157cbSRusty Russell 1421f42157cbSRusty Russell /* Under a page? Don't bother with paged skb. */ 14220eca93bcSHerbert Xu if (prepad + len < PAGE_SIZE || !linear) 142333dccbb0SHerbert Xu linear = len; 1424f42157cbSRusty Russell 142533dccbb0SHerbert Xu skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 142628d64271SEric Dumazet &err, 0); 1427f42157cbSRusty Russell if (!skb) 142833dccbb0SHerbert Xu return ERR_PTR(err); 1429f42157cbSRusty Russell 1430f42157cbSRusty Russell skb_reserve(skb, prepad); 1431f42157cbSRusty Russell skb_put(skb, linear); 143233dccbb0SHerbert Xu skb->data_len = len - linear; 143333dccbb0SHerbert Xu skb->len += len - linear; 1434f42157cbSRusty Russell 1435f42157cbSRusty Russell return skb; 1436f42157cbSRusty Russell } 1437f42157cbSRusty Russell 14385503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, 14395503fcecSJason Wang struct sk_buff *skb, int more) 14405503fcecSJason Wang { 14415503fcecSJason Wang struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 14425503fcecSJason Wang struct sk_buff_head process_queue; 14435503fcecSJason Wang u32 rx_batched = tun->rx_batched; 14445503fcecSJason Wang bool rcv = false; 14455503fcecSJason Wang 14465503fcecSJason Wang if (!rx_batched || (!more && skb_queue_empty(queue))) { 14475503fcecSJason Wang local_bh_disable(); 14488ebebcbaSMatthew Cover skb_record_rx_queue(skb, tfile->queue_index); 14495503fcecSJason Wang netif_receive_skb(skb); 14505503fcecSJason Wang local_bh_enable(); 14515503fcecSJason Wang return; 14525503fcecSJason Wang } 14535503fcecSJason Wang 14545503fcecSJason Wang spin_lock(&queue->lock); 14555503fcecSJason Wang if (!more || skb_queue_len(queue) == rx_batched) { 14565503fcecSJason Wang __skb_queue_head_init(&process_queue); 14575503fcecSJason Wang skb_queue_splice_tail_init(queue, &process_queue); 14585503fcecSJason Wang rcv = true; 14595503fcecSJason Wang } else { 14605503fcecSJason Wang __skb_queue_tail(queue, skb); 14615503fcecSJason Wang } 14625503fcecSJason Wang spin_unlock(&queue->lock); 14635503fcecSJason Wang 14645503fcecSJason Wang if (rcv) { 14655503fcecSJason Wang struct sk_buff *nskb; 14665503fcecSJason Wang 14675503fcecSJason Wang local_bh_disable(); 14688ebebcbaSMatthew Cover while ((nskb = __skb_dequeue(&process_queue))) { 14698ebebcbaSMatthew Cover skb_record_rx_queue(nskb, tfile->queue_index); 14705503fcecSJason Wang netif_receive_skb(nskb); 14718ebebcbaSMatthew Cover } 14728ebebcbaSMatthew Cover skb_record_rx_queue(skb, tfile->queue_index); 14735503fcecSJason Wang netif_receive_skb(skb); 14745503fcecSJason Wang local_bh_enable(); 14755503fcecSJason Wang } 14765503fcecSJason Wang } 14775503fcecSJason Wang 147866ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, 147966ccbc9cSJason Wang int len, int noblock, bool zerocopy) 148066ccbc9cSJason Wang { 148166ccbc9cSJason Wang if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 148266ccbc9cSJason Wang return false; 148366ccbc9cSJason Wang 148466ccbc9cSJason Wang if (tfile->socket.sk->sk_sndbuf != INT_MAX) 148566ccbc9cSJason Wang return false; 148666ccbc9cSJason Wang 148766ccbc9cSJason Wang if 
(!noblock) 148866ccbc9cSJason Wang return false; 148966ccbc9cSJason Wang 149066ccbc9cSJason Wang if (zerocopy) 149166ccbc9cSJason Wang return false; 149266ccbc9cSJason Wang 149366ccbc9cSJason Wang if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + 149466ccbc9cSJason Wang SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) 149566ccbc9cSJason Wang return false; 149666ccbc9cSJason Wang 149766ccbc9cSJason Wang return true; 149866ccbc9cSJason Wang } 149966ccbc9cSJason Wang 15004b663366SAlexis Bauvin static struct sk_buff *__tun_build_skb(struct tun_file *tfile, 15014b663366SAlexis Bauvin struct page_frag *alloc_frag, char *buf, 15028ae1aff0SJason Wang int buflen, int len, int pad) 1503ac1f1f6cSJason Wang { 1504ac1f1f6cSJason Wang struct sk_buff *skb = build_skb(buf, buflen); 1505ac1f1f6cSJason Wang 1506ac1f1f6cSJason Wang if (!skb) 1507ac1f1f6cSJason Wang return ERR_PTR(-ENOMEM); 1508ac1f1f6cSJason Wang 15098ae1aff0SJason Wang skb_reserve(skb, pad); 1510ac1f1f6cSJason Wang skb_put(skb, len); 15114b663366SAlexis Bauvin skb_set_owner_w(skb, tfile->socket.sk); 1512ac1f1f6cSJason Wang 1513ac1f1f6cSJason Wang get_page(alloc_frag->page); 1514ac1f1f6cSJason Wang alloc_frag->offset += buflen; 1515ac1f1f6cSJason Wang 1516ac1f1f6cSJason Wang return skb; 1517ac1f1f6cSJason Wang } 1518ac1f1f6cSJason Wang 15198ae1aff0SJason Wang static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, 15208ae1aff0SJason Wang struct xdp_buff *xdp, u32 act) 15218ae1aff0SJason Wang { 15228ae1aff0SJason Wang int err; 15238ae1aff0SJason Wang 15248ae1aff0SJason Wang switch (act) { 15258ae1aff0SJason Wang case XDP_REDIRECT: 15268ae1aff0SJason Wang err = xdp_do_redirect(tun->dev, xdp, xdp_prog); 15278ae1aff0SJason Wang if (err) 15288ae1aff0SJason Wang return err; 15298ae1aff0SJason Wang break; 15308ae1aff0SJason Wang case XDP_TX: 15318ae1aff0SJason Wang err = tun_xdp_tx(tun->dev, xdp); 15328ae1aff0SJason Wang if (err < 0) 15338ae1aff0SJason Wang return err; 15348ae1aff0SJason Wang break; 15358ae1aff0SJason Wang case XDP_PASS: 15368ae1aff0SJason Wang break; 15378ae1aff0SJason Wang default: 15388ae1aff0SJason Wang bpf_warn_invalid_xdp_action(act); 1539df561f66SGustavo A. R. Silva fallthrough; 15408ae1aff0SJason Wang case XDP_ABORTED: 15418ae1aff0SJason Wang trace_xdp_exception(tun->dev, xdp_prog, act); 1542df561f66SGustavo A. R. 
Silva fallthrough;
15438ae1aff0SJason Wang case XDP_DROP:
1544497a5757SHeiner Kallweit atomic_long_inc(&tun->dev->rx_dropped);
15458ae1aff0SJason Wang break;
15468ae1aff0SJason Wang }
15478ae1aff0SJason Wang
15488ae1aff0SJason Wang return act;
15498ae1aff0SJason Wang }
15508ae1aff0SJason Wang
1551761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1552761876c8SJason Wang struct tun_file *tfile,
155366ccbc9cSJason Wang struct iov_iter *from,
1554761876c8SJason Wang struct virtio_net_hdr *hdr,
15551cfe6e93SJason Wang int len, int *skb_xdp)
155666ccbc9cSJason Wang {
15570bbd7dadSEric Dumazet struct page_frag *alloc_frag = &current->task_frag;
1558761876c8SJason Wang struct bpf_prog *xdp_prog;
15597df13219SJason Wang int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
156066ccbc9cSJason Wang char *buf;
156166ccbc9cSJason Wang size_t copied;
15628ae1aff0SJason Wang int pad = TUN_RX_PAD;
15638ae1aff0SJason Wang int err = 0;
15647df13219SJason Wang
15657df13219SJason Wang rcu_read_lock();
15667df13219SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog);
15677df13219SJason Wang if (xdp_prog)
15684f23aff8SJason Wang pad += XDP_PACKET_HEADROOM;
15697df13219SJason Wang buflen += SKB_DATA_ALIGN(len + pad);
15707df13219SJason Wang rcu_read_unlock();
157166ccbc9cSJason Wang
157263b9ab65SJason Wang alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
157366ccbc9cSJason Wang if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
157466ccbc9cSJason Wang return ERR_PTR(-ENOMEM);
157566ccbc9cSJason Wang
157666ccbc9cSJason Wang buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
157766ccbc9cSJason Wang copied = copy_page_from_iter(alloc_frag->page,
15787df13219SJason Wang alloc_frag->offset + pad,
157966ccbc9cSJason Wang len, from);
158066ccbc9cSJason Wang if (copied != len)
158166ccbc9cSJason Wang return ERR_PTR(-EFAULT);
158266ccbc9cSJason Wang
15837df13219SJason Wang /* There's a small window in which XDP may be set after the check
15847df13219SJason Wang * of xdp_prog above; this should be rare, and for simplicity
15857df13219SJason Wang * we do XDP on the skb in case the headroom is not enough.
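 * (*skb_xdp is set below so that tun_get_user() runs generic XDP on
 * the finished skb instead.)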
15867df13219SJason Wang */ 1587ac1f1f6cSJason Wang if (hdr->gso_type || !xdp_prog) { 15881cfe6e93SJason Wang *skb_xdp = 1; 15894b663366SAlexis Bauvin return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, 15904b663366SAlexis Bauvin pad); 1591ac1f1f6cSJason Wang } 1592ac1f1f6cSJason Wang 15931cfe6e93SJason Wang *skb_xdp = 0; 159466ccbc9cSJason Wang 15956547e387SToshiaki Makita local_bh_disable(); 1596761876c8SJason Wang rcu_read_lock(); 1597761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 15988ae1aff0SJason Wang if (xdp_prog) { 1599761876c8SJason Wang struct xdp_buff xdp; 1600761876c8SJason Wang u32 act; 1601761876c8SJason Wang 1602*43b5169dSLorenzo Bianconi xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq); 1603761876c8SJason Wang xdp.data_hard_start = buf; 16047df13219SJason Wang xdp.data = buf + pad; 1605de8f3a83SDaniel Borkmann xdp_set_data_meta_invalid(&xdp); 1606761876c8SJason Wang xdp.data_end = xdp.data + len; 1607761876c8SJason Wang 16088ae1aff0SJason Wang act = bpf_prog_run_xdp(xdp_prog, &xdp); 16098ae1aff0SJason Wang if (act == XDP_REDIRECT || act == XDP_TX) { 1610761876c8SJason Wang get_page(alloc_frag->page); 1611761876c8SJason Wang alloc_frag->offset += buflen; 1612761876c8SJason Wang } 16138ae1aff0SJason Wang err = tun_xdp_act(tun, xdp_prog, &xdp, act); 1614bee34890SWill Deacon if (err < 0) { 1615bee34890SWill Deacon if (act == XDP_REDIRECT || act == XDP_TX) 1616bee34890SWill Deacon put_page(alloc_frag->page); 1617bee34890SWill Deacon goto out; 1618bee34890SWill Deacon } 1619bee34890SWill Deacon 16201a097910SJason Wang if (err == XDP_REDIRECT) 16211d233886SToke Høiland-Jørgensen xdp_do_flush(); 16228ae1aff0SJason Wang if (err != XDP_PASS) 16238ae1aff0SJason Wang goto out; 16248ae1aff0SJason Wang 16258ae1aff0SJason Wang pad = xdp.data - xdp.data_hard_start; 16268ae1aff0SJason Wang len = xdp.data_end - xdp.data; 1627761876c8SJason Wang } 1628761876c8SJason Wang rcu_read_unlock(); 16296547e387SToshiaki Makita local_bh_enable(); 1630291aeb2bSJason Wang 16314b663366SAlexis Bauvin return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad); 1632761876c8SJason Wang 1633f7053b6cSJason Wang out: 1634761876c8SJason Wang rcu_read_unlock(); 16356547e387SToshiaki Makita local_bh_enable(); 1636761876c8SJason Wang return NULL; 163766ccbc9cSJason Wang } 163866ccbc9cSJason Wang 16391da177e4SLinus Torvalds /* Get packet from user space buffer */ 164054f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, 1641f5ff53b4SAl Viro void *msg_control, struct iov_iter *from, 16425503fcecSJason Wang int noblock, bool more) 16431da177e4SLinus Torvalds { 164409640e63SHarvey Harrison struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; 16451da177e4SLinus Torvalds struct sk_buff *skb; 1646f5ff53b4SAl Viro size_t total_len = iov_iter_count(from); 1647eaea34b2SPaolo Abeni size_t len = total_len, align = tun->align, linear; 1648f43798c2SRusty Russell struct virtio_net_hdr gso = { 0 }; 164996f8d9ecSJason Wang int good_linear; 16500690899bSMichael S. Tsirkin int copylen; 16510690899bSMichael S. Tsirkin bool zerocopy = false; 16520690899bSMichael S. Tsirkin int err; 165396f84061SJason Wang u32 rxhash = 0; 16541cfe6e93SJason Wang int skb_xdp = 1; 1655af3fb24eSEric Dumazet bool frags = tun_napi_frags_enabled(tfile); 16561da177e4SLinus Torvalds 165740630b82SMichael S. 
Tsirkin if (!(tun->flags & IFF_NO_PI)) {
165815718ea0SDan Carpenter if (len < sizeof(pi))
16591da177e4SLinus Torvalds return -EINVAL;
166015718ea0SDan Carpenter len -= sizeof(pi);
16611da177e4SLinus Torvalds
1662cbbd26b8SAl Viro if (!copy_from_iter_full(&pi, sizeof(pi), from))
16631da177e4SLinus Torvalds return -EFAULT;
16641da177e4SLinus Torvalds }
16651da177e4SLinus Torvalds
166640630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR) {
1667e1edab87SWillem de Bruijn int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1668e1edab87SWillem de Bruijn
1669e1edab87SWillem de Bruijn if (len < vnet_hdr_sz)
1670f43798c2SRusty Russell return -EINVAL;
1671e1edab87SWillem de Bruijn len -= vnet_hdr_sz;
1672f43798c2SRusty Russell
1673cbbd26b8SAl Viro if (!copy_from_iter_full(&gso, sizeof(gso), from))
1674f43798c2SRusty Russell return -EFAULT;
1675f43798c2SRusty Russell
16764909122fSHerbert Xu if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
167756f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
167856f0dcc5SMichael S. Tsirkin gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
16794909122fSHerbert Xu
168056f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > len)
1681f43798c2SRusty Russell return -EINVAL;
1682e1edab87SWillem de Bruijn iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1683f43798c2SRusty Russell }
1684f43798c2SRusty Russell
168540630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1686a504b86eSstephen hemminger align += NET_IP_ALIGN;
16870eca93bcSHerbert Xu if (unlikely(len < ETH_HLEN ||
168856f0dcc5SMichael S. Tsirkin (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1689e01bf1c8SRusty Russell return -EINVAL;
1690e01bf1c8SRusty Russell }
16911da177e4SLinus Torvalds
169296f8d9ecSJason Wang good_linear = SKB_MAX_HEAD(align);
169396f8d9ecSJason Wang
169488529176SJason Wang if (msg_control) {
1695f5ff53b4SAl Viro struct iov_iter i = *from;
1696f5ff53b4SAl Viro
169788529176SJason Wang /* There are 256 bytes to be copied in skb, so there is
169888529176SJason Wang * enough room to expand the skb head in case it is used.
16990690899bSMichael S. Tsirkin * The rest of the buffer is mapped from userspace.
17000690899bSMichael S. Tsirkin */
170156f0dcc5SMichael S. Tsirkin copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
170296f8d9ecSJason Wang if (copylen > good_linear)
170396f8d9ecSJason Wang copylen = good_linear;
17043dd5c330SJason Wang linear = copylen;
1705f5ff53b4SAl Viro iov_iter_advance(&i, copylen);
1706f5ff53b4SAl Viro if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
170788529176SJason Wang zerocopy = true;
170888529176SJason Wang }
170988529176SJason Wang
171090e33d45SPetar Penkov if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
17111cfe6e93SJason Wang /* For packets that are not easy to process
17121cfe6e93SJason Wang * (e.g. GSO or jumbo packets), we do XDP after the
17131cfe6e93SJason Wang * skb has been created, using the generic XDP routine.
17141cfe6e93SJason Wang */ 17151cfe6e93SJason Wang skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); 171666ccbc9cSJason Wang if (IS_ERR(skb)) { 1717497a5757SHeiner Kallweit atomic_long_inc(&tun->dev->rx_dropped); 171866ccbc9cSJason Wang return PTR_ERR(skb); 171966ccbc9cSJason Wang } 1720761876c8SJason Wang if (!skb) 1721761876c8SJason Wang return total_len; 172266ccbc9cSJason Wang } else { 172388529176SJason Wang if (!zerocopy) { 17240690899bSMichael S. Tsirkin copylen = len; 172556f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) 172696f8d9ecSJason Wang linear = good_linear; 172796f8d9ecSJason Wang else 172856f0dcc5SMichael S. Tsirkin linear = tun16_to_cpu(tun, gso.hdr_len); 17293dd5c330SJason Wang } 17300690899bSMichael S. Tsirkin 173190e33d45SPetar Penkov if (frags) { 173290e33d45SPetar Penkov mutex_lock(&tfile->napi_mutex); 173390e33d45SPetar Penkov skb = tun_napi_alloc_frags(tfile, copylen, from); 173490e33d45SPetar Penkov /* tun_napi_alloc_frags() enforces a layout for the skb. 173590e33d45SPetar Penkov * If zerocopy is enabled, then this layout will be 173690e33d45SPetar Penkov * overwritten by zerocopy_sg_from_iter(). 173790e33d45SPetar Penkov */ 173890e33d45SPetar Penkov zerocopy = false; 173990e33d45SPetar Penkov } else { 174090e33d45SPetar Penkov skb = tun_alloc_skb(tfile, align, copylen, linear, 174190e33d45SPetar Penkov noblock); 174290e33d45SPetar Penkov } 174390e33d45SPetar Penkov 174433dccbb0SHerbert Xu if (IS_ERR(skb)) { 174533dccbb0SHerbert Xu if (PTR_ERR(skb) != -EAGAIN) 1746497a5757SHeiner Kallweit atomic_long_inc(&tun->dev->rx_dropped); 174790e33d45SPetar Penkov if (frags) 174890e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 174933dccbb0SHerbert Xu return PTR_ERR(skb); 17501da177e4SLinus Torvalds } 17511da177e4SLinus Torvalds 17520690899bSMichael S. Tsirkin if (zerocopy) 1753f5ff53b4SAl Viro err = zerocopy_sg_from_iter(skb, from); 1754af1cc7a2SJason Wang else 1755f5ff53b4SAl Viro err = skb_copy_datagram_from_iter(skb, 0, from, len); 17560690899bSMichael S. Tsirkin 17570690899bSMichael S. Tsirkin if (err) { 17584477138fSEric Dumazet err = -EFAULT; 17594477138fSEric Dumazet drop: 1760497a5757SHeiner Kallweit atomic_long_inc(&tun->dev->rx_dropped); 17618f22757eSDave Jones kfree_skb(skb); 176290e33d45SPetar Penkov if (frags) { 176390e33d45SPetar Penkov tfile->napi.skb = NULL; 176490e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 176590e33d45SPetar Penkov } 176690e33d45SPetar Penkov 17674477138fSEric Dumazet return err; 17688f22757eSDave Jones } 176966ccbc9cSJason Wang } 17701da177e4SLinus Torvalds 17713e9e40e7SJarno Rajahalme if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { 1772497a5757SHeiner Kallweit atomic_long_inc(&tun->rx_frame_errors); 1773df10db98SPaolo Abeni kfree_skb(skb); 177490e33d45SPetar Penkov if (frags) { 177590e33d45SPetar Penkov tfile->napi.skb = NULL; 177690e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 177790e33d45SPetar Penkov } 177890e33d45SPetar Penkov 1779df10db98SPaolo Abeni return -EINVAL; 1780df10db98SPaolo Abeni } 1781df10db98SPaolo Abeni 17821da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 178340630b82SMichael S. Tsirkin case IFF_TUN: 178440630b82SMichael S. Tsirkin if (tun->flags & IFF_NO_PI) { 17852580c4c1SAlexander Potapenko u8 ip_version = skb->len ? 
(skb->data[0] >> 4) : 0; 17862580c4c1SAlexander Potapenko 17872580c4c1SAlexander Potapenko switch (ip_version) { 17882580c4c1SAlexander Potapenko case 4: 1789f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IP); 1790f09f7ee2SAng Way Chuang break; 17912580c4c1SAlexander Potapenko case 6: 1792f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IPV6); 1793f09f7ee2SAng Way Chuang break; 1794f09f7ee2SAng Way Chuang default: 1795497a5757SHeiner Kallweit atomic_long_inc(&tun->dev->rx_dropped); 1796f09f7ee2SAng Way Chuang kfree_skb(skb); 1797f09f7ee2SAng Way Chuang return -EINVAL; 1798f09f7ee2SAng Way Chuang } 1799f09f7ee2SAng Way Chuang } 1800f09f7ee2SAng Way Chuang 1801459a98edSArnaldo Carvalho de Melo skb_reset_mac_header(skb); 18021da177e4SLinus Torvalds skb->protocol = pi.proto; 18034c13eb66SArnaldo Carvalho de Melo skb->dev = tun->dev; 18041da177e4SLinus Torvalds break; 180540630b82SMichael S. Tsirkin case IFF_TAP: 180696aa1b22SWillem de Bruijn if (frags && !pskb_may_pull(skb, ETH_HLEN)) { 180796aa1b22SWillem de Bruijn err = -ENOMEM; 180896aa1b22SWillem de Bruijn goto drop; 180996aa1b22SWillem de Bruijn } 18101da177e4SLinus Torvalds skb->protocol = eth_type_trans(skb, tun->dev); 18111da177e4SLinus Torvalds break; 18126403eab1SJoe Perches } 18131da177e4SLinus Torvalds 18140690899bSMichael S. Tsirkin /* copy skb_ubuf_info for callback when skb has no error */ 18150690899bSMichael S. Tsirkin if (zerocopy) { 18169ee5e5adSJonathan Lemon skb_zcopy_init(skb, msg_control); 1817af1cc7a2SJason Wang } else if (msg_control) { 1818af1cc7a2SJason Wang struct ubuf_info *uarg = msg_control; 181936177832SJonathan Lemon uarg->callback(NULL, uarg, false); 18200690899bSMichael S. Tsirkin } 18210690899bSMichael S. Tsirkin 182272f65107SVlad Yasevich skb_reset_network_header(skb); 1823d2aa125dSMaxim Mikityanskiy skb_probe_transport_header(skb); 18243fe260e0SGilberto Bertin skb_record_rx_queue(skb, tfile->queue_index); 182538502af7SJason Wang 18261cfe6e93SJason Wang if (skb_xdp) { 1827761876c8SJason Wang struct bpf_prog *xdp_prog; 1828761876c8SJason Wang int ret; 1829761876c8SJason Wang 18306547e387SToshiaki Makita local_bh_disable(); 1831761876c8SJason Wang rcu_read_lock(); 1832761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 1833761876c8SJason Wang if (xdp_prog) { 1834761876c8SJason Wang ret = do_xdp_generic(xdp_prog, skb); 1835761876c8SJason Wang if (ret != XDP_PASS) { 1836761876c8SJason Wang rcu_read_unlock(); 18376547e387SToshiaki Makita local_bh_enable(); 18381efba987SEric Dumazet if (frags) { 18391efba987SEric Dumazet tfile->napi.skb = NULL; 18401efba987SEric Dumazet mutex_unlock(&tfile->napi_mutex); 18411efba987SEric Dumazet } 1842761876c8SJason Wang return total_len; 1843761876c8SJason Wang } 1844761876c8SJason Wang } 1845761876c8SJason Wang rcu_read_unlock(); 18466547e387SToshiaki Makita local_bh_enable(); 1847761876c8SJason Wang } 1848761876c8SJason Wang 1849cf1a1e07SPaolo Abeni /* Compute the costly rx hash only if needed for flow updates. 1850cf1a1e07SPaolo Abeni * We may get a very small possibility of OOO during switching, not 1851cf1a1e07SPaolo Abeni * worth to optimize. 
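 * (OOO here means out-of-order delivery while a flow migrates between
 * queues.)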
1852cf1a1e07SPaolo Abeni */ 1853cf1a1e07SPaolo Abeni if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 && 1854cf1a1e07SPaolo Abeni !tfile->detached) 1855feec084aSJason Wang rxhash = __skb_get_hash_symmetric(skb); 185694317099SPetar Penkov 18574477138fSEric Dumazet rcu_read_lock(); 18584477138fSEric Dumazet if (unlikely(!(tun->dev->flags & IFF_UP))) { 18594477138fSEric Dumazet err = -EIO; 18609180bb4fSEric Dumazet rcu_read_unlock(); 18614477138fSEric Dumazet goto drop; 18624477138fSEric Dumazet } 18634477138fSEric Dumazet 186490e33d45SPetar Penkov if (frags) { 186596aa1b22SWillem de Bruijn u32 headlen; 186696aa1b22SWillem de Bruijn 186790e33d45SPetar Penkov /* Exercise flow dissector code path. */ 186896aa1b22SWillem de Bruijn skb_push(skb, ETH_HLEN); 186996aa1b22SWillem de Bruijn headlen = eth_get_headlen(tun->dev, skb->data, 1870c43f1255SStanislav Fomichev skb_headlen(skb)); 187190e33d45SPetar Penkov 1872010f245bSEric Dumazet if (unlikely(headlen > skb_headlen(skb))) { 1873497a5757SHeiner Kallweit atomic_long_inc(&tun->dev->rx_dropped); 187490e33d45SPetar Penkov napi_free_frags(&tfile->napi); 18754477138fSEric Dumazet rcu_read_unlock(); 187690e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 187790e33d45SPetar Penkov WARN_ON(1); 187890e33d45SPetar Penkov return -ENOMEM; 187990e33d45SPetar Penkov } 188090e33d45SPetar Penkov 188190e33d45SPetar Penkov local_bh_disable(); 188290e33d45SPetar Penkov napi_gro_frags(&tfile->napi); 188390e33d45SPetar Penkov local_bh_enable(); 188490e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 1885aec72f33SEric Dumazet } else if (tfile->napi_enabled) { 188694317099SPetar Penkov struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 188794317099SPetar Penkov int queue_len; 188894317099SPetar Penkov 188994317099SPetar Penkov spin_lock_bh(&queue->lock); 189094317099SPetar Penkov __skb_queue_tail(queue, skb); 189194317099SPetar Penkov queue_len = skb_queue_len(queue); 189294317099SPetar Penkov spin_unlock(&queue->lock); 189394317099SPetar Penkov 189494317099SPetar Penkov if (!more || queue_len > NAPI_POLL_WEIGHT) 189594317099SPetar Penkov napi_schedule(&tfile->napi); 189694317099SPetar Penkov 189794317099SPetar Penkov local_bh_enable(); 189894317099SPetar Penkov } else if (!IS_ENABLED(CONFIG_4KSTACKS)) { 18995503fcecSJason Wang tun_rx_batched(tun, tfile, skb, more); 190094317099SPetar Penkov } else { 19011da177e4SLinus Torvalds netif_rx_ni(skb); 190294317099SPetar Penkov } 19034477138fSEric Dumazet rcu_read_unlock(); 19041da177e4SLinus Torvalds 1905497a5757SHeiner Kallweit preempt_disable(); 1906497a5757SHeiner Kallweit dev_sw_netstats_rx_add(tun->dev, len); 1907497a5757SHeiner Kallweit preempt_enable(); 19081da177e4SLinus Torvalds 190996f84061SJason Wang if (rxhash) 19109e85722dSJason Wang tun_flow_update(tun, rxhash, tfile); 191196f84061SJason Wang 19120690899bSMichael S. Tsirkin return total_len; 19131da177e4SLinus Torvalds } 19141da177e4SLinus Torvalds 1915f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) 19161da177e4SLinus Torvalds { 191733dccbb0SHerbert Xu struct file *file = iocb->ki_filp; 191854f968d6SJason Wang struct tun_file *tfile = file->private_data; 19199484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 1920631ab46bSEric W. 
Biederman ssize_t result; 19215aac0390SJens Axboe int noblock = 0; 19221da177e4SLinus Torvalds 19231da177e4SLinus Torvalds if (!tun) 19241da177e4SLinus Torvalds return -EBADFD; 19251da177e4SLinus Torvalds 19265aac0390SJens Axboe if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) 19275aac0390SJens Axboe noblock = 1; 19285aac0390SJens Axboe 19295aac0390SJens Axboe result = tun_get_user(tun, tfile, NULL, from, noblock, false); 1930631ab46bSEric W. Biederman 1931631ab46bSEric W. Biederman tun_put(tun); 1932631ab46bSEric W. Biederman return result; 19331da177e4SLinus Torvalds } 19341da177e4SLinus Torvalds 1935fc72d1d5SJason Wang static ssize_t tun_put_user_xdp(struct tun_struct *tun, 1936fc72d1d5SJason Wang struct tun_file *tfile, 19371ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdp_frame, 1938fc72d1d5SJason Wang struct iov_iter *iter) 1939fc72d1d5SJason Wang { 1940fc72d1d5SJason Wang int vnet_hdr_sz = 0; 19411ffcbc85SJesper Dangaard Brouer size_t size = xdp_frame->len; 1942fc72d1d5SJason Wang size_t ret; 1943fc72d1d5SJason Wang 1944fc72d1d5SJason Wang if (tun->flags & IFF_VNET_HDR) { 1945fc72d1d5SJason Wang struct virtio_net_hdr gso = { 0 }; 1946fc72d1d5SJason Wang 1947fc72d1d5SJason Wang vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 1948fc72d1d5SJason Wang if (unlikely(iov_iter_count(iter) < vnet_hdr_sz)) 1949fc72d1d5SJason Wang return -EINVAL; 1950fc72d1d5SJason Wang if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) != 1951fc72d1d5SJason Wang sizeof(gso))) 1952fc72d1d5SJason Wang return -EFAULT; 1953fc72d1d5SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 1954fc72d1d5SJason Wang } 1955fc72d1d5SJason Wang 19561ffcbc85SJesper Dangaard Brouer ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz; 1957fc72d1d5SJason Wang 1958497a5757SHeiner Kallweit preempt_disable(); 1959497a5757SHeiner Kallweit dev_sw_netstats_tx_add(tun->dev, 1, ret); 1960497a5757SHeiner Kallweit preempt_enable(); 1961fc72d1d5SJason Wang 1962fc72d1d5SJason Wang return ret; 1963fc72d1d5SJason Wang } 1964fc72d1d5SJason Wang 19651da177e4SLinus Torvalds /* Put packet to the user space buffer */ 19666f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun, 196754f968d6SJason Wang struct tun_file *tfile, 19681da177e4SLinus Torvalds struct sk_buff *skb, 1969e0b46d0eSHerbert Xu struct iov_iter *iter) 19701da177e4SLinus Torvalds { 19711da177e4SLinus Torvalds struct tun_pi pi = { 0, skb->protocol }; 1972e0b46d0eSHerbert Xu ssize_t total; 19738c847d25SJason Wang int vlan_offset = 0; 1974a8f9bfdfSHerbert Xu int vlan_hlen = 0; 19752eb783c4SHerbert Xu int vnet_hdr_sz = 0; 1976a8f9bfdfSHerbert Xu 1977df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) 1978a8f9bfdfSHerbert Xu vlan_hlen = VLAN_HLEN; 19791da177e4SLinus Torvalds 198040630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR) 1981e1edab87SWillem de Bruijn vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 19821da177e4SLinus Torvalds 1983e0b46d0eSHerbert Xu total = skb->len + vlan_hlen + vnet_hdr_sz; 1984e0b46d0eSHerbert Xu 198540630b82SMichael S. 
Tsirkin if (!(tun->flags & IFF_NO_PI)) {
1986e0b46d0eSHerbert Xu if (iov_iter_count(iter) < sizeof(pi))
19871da177e4SLinus Torvalds return -EINVAL;
19881da177e4SLinus Torvalds
1989e0b46d0eSHerbert Xu total += sizeof(pi);
1990e0b46d0eSHerbert Xu if (iov_iter_count(iter) < total) {
19911da177e4SLinus Torvalds /* Packet will be stripped */
19921da177e4SLinus Torvalds pi.flags |= TUN_PKT_STRIP;
19931da177e4SLinus Torvalds }
19941da177e4SLinus Torvalds
1995e0b46d0eSHerbert Xu if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
19961da177e4SLinus Torvalds return -EFAULT;
19971da177e4SLinus Torvalds }
19981da177e4SLinus Torvalds
19992eb783c4SHerbert Xu if (vnet_hdr_sz) {
20009403cd7cSJarno Rajahalme struct virtio_net_hdr gso;
200134166093SMike Rapoport
2002e0b46d0eSHerbert Xu if (iov_iter_count(iter) < vnet_hdr_sz)
2003f43798c2SRusty Russell return -EINVAL;
2004f43798c2SRusty Russell
20053e9e40e7SJarno Rajahalme if (virtio_net_hdr_from_skb(skb, &gso,
2006fd3a8862SWillem de Bruijn tun_is_little_endian(tun), true,
2007fd3a8862SWillem de Bruijn vlan_hlen)) {
2008f43798c2SRusty Russell struct skb_shared_info *sinfo = skb_shinfo(skb);
20096b8a66eeSJoe Perches pr_err("unexpected GSO type: "
2010ef3db4a5SMichael S. Tsirkin "0x%x, gso_size %d, hdr_len %d\n",
201156f0dcc5SMichael S. Tsirkin sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
201256f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.hdr_len));
2013ef3db4a5SMichael S. Tsirkin print_hex_dump(KERN_ERR, "tun: ",
2014ef3db4a5SMichael S. Tsirkin DUMP_PREFIX_NONE,
2015ef3db4a5SMichael S. Tsirkin 16, 1, skb->head,
201656f0dcc5SMichael S. Tsirkin min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2017ef3db4a5SMichael S. Tsirkin WARN_ON_ONCE(1);
2018ef3db4a5SMichael S. Tsirkin return -EINVAL;
2019ef3db4a5SMichael S.
Tsirkin } 2020f43798c2SRusty Russell 2021e0b46d0eSHerbert Xu if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) 2022f43798c2SRusty Russell return -EFAULT; 20238c847d25SJason Wang 20248c847d25SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 2025f43798c2SRusty Russell } 2026f43798c2SRusty Russell 2027a8f9bfdfSHerbert Xu if (vlan_hlen) { 2028e0b46d0eSHerbert Xu int ret; 2029aff3d70aSJason Wang struct veth veth; 20301da177e4SLinus Torvalds 20316680ec68SJason Wang veth.h_vlan_proto = skb->vlan_proto; 2032df8a39deSJiri Pirko veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); 20331da177e4SLinus Torvalds 20346680ec68SJason Wang vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 20356680ec68SJason Wang 2036e0b46d0eSHerbert Xu ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); 2037e0b46d0eSHerbert Xu if (ret || !iov_iter_count(iter)) 20386680ec68SJason Wang goto done; 20396680ec68SJason Wang 2040e0b46d0eSHerbert Xu ret = copy_to_iter(&veth, sizeof(veth), iter); 2041e0b46d0eSHerbert Xu if (ret != sizeof(veth) || !iov_iter_count(iter)) 20426680ec68SJason Wang goto done; 20436680ec68SJason Wang } 20446680ec68SJason Wang 2045e0b46d0eSHerbert Xu skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); 20466680ec68SJason Wang 20476680ec68SJason Wang done: 2048608b9977SPaolo Abeni /* caller is in process context, */ 2049497a5757SHeiner Kallweit preempt_disable(); 2050497a5757SHeiner Kallweit dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen); 2051497a5757SHeiner Kallweit preempt_enable(); 20521da177e4SLinus Torvalds 20531da177e4SLinus Torvalds return total; 20541da177e4SLinus Torvalds } 20551da177e4SLinus Torvalds 2056fc72d1d5SJason Wang static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) 20571576d986SJason Wang { 20581576d986SJason Wang DECLARE_WAITQUEUE(wait, current); 2059fc72d1d5SJason Wang void *ptr = NULL; 2060f48cc6b2SJason Wang int error = 0; 20611576d986SJason Wang 2062fc72d1d5SJason Wang ptr = ptr_ring_consume(&tfile->tx_ring); 2063fc72d1d5SJason Wang if (ptr) 20641576d986SJason Wang goto out; 20651576d986SJason Wang if (noblock) { 2066f48cc6b2SJason Wang error = -EAGAIN; 20671576d986SJason Wang goto out; 20681576d986SJason Wang } 20691576d986SJason Wang 2070333f7909SAl Viro add_wait_queue(&tfile->socket.wq.wait, &wait); 20711576d986SJason Wang 20721576d986SJason Wang while (1) { 207371828b22STimur Celik set_current_state(TASK_INTERRUPTIBLE); 2074fc72d1d5SJason Wang ptr = ptr_ring_consume(&tfile->tx_ring); 2075fc72d1d5SJason Wang if (ptr) 20761576d986SJason Wang break; 20771576d986SJason Wang if (signal_pending(current)) { 2078f48cc6b2SJason Wang error = -ERESTARTSYS; 20791576d986SJason Wang break; 20801576d986SJason Wang } 20811576d986SJason Wang if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { 2082f48cc6b2SJason Wang error = -EFAULT; 20831576d986SJason Wang break; 20841576d986SJason Wang } 20851576d986SJason Wang 20861576d986SJason Wang schedule(); 20871576d986SJason Wang } 20881576d986SJason Wang 2089ecef67cbSTimur Celik __set_current_state(TASK_RUNNING); 2090333f7909SAl Viro remove_wait_queue(&tfile->socket.wq.wait, &wait); 20911576d986SJason Wang 20921576d986SJason Wang out: 2093f48cc6b2SJason Wang *err = error; 2094fc72d1d5SJason Wang return ptr; 20951576d986SJason Wang } 20961576d986SJason Wang 209754f968d6SJason Wang static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, 20989b067034SAl Viro struct iov_iter *to, 2099fc72d1d5SJason Wang int noblock, void *ptr) 21001da177e4SLinus Torvalds { 
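/* A NULL ptr means "consume the next entry from the per-queue
 * ptr_ring" via tun_ring_recv() above; a non-NULL ptr is an entry the
 * caller already holds (tun_recvmsg() passes it in via msg_control).
 */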
21019b067034SAl Viro ssize_t ret; 21021576d986SJason Wang int err; 21031da177e4SLinus Torvalds 2104c33ee15bSWei Xu if (!iov_iter_count(to)) { 2105fc72d1d5SJason Wang tun_ptr_free(ptr); 21069b067034SAl Viro return 0; 2107c33ee15bSWei Xu } 21081da177e4SLinus Torvalds 2109fc72d1d5SJason Wang if (!ptr) { 21101576d986SJason Wang /* Read frames from ring */ 2111fc72d1d5SJason Wang ptr = tun_ring_recv(tfile, noblock, &err); 2112fc72d1d5SJason Wang if (!ptr) 2113957f094fSAlex Gartrell return err; 2114ac77cfd4SJason Wang } 2115e0b46d0eSHerbert Xu 21161ffcbc85SJesper Dangaard Brouer if (tun_is_xdp_frame(ptr)) { 21171ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); 2118fc72d1d5SJason Wang 21191ffcbc85SJesper Dangaard Brouer ret = tun_put_user_xdp(tun, tfile, xdpf, to); 212003993094SJesper Dangaard Brouer xdp_return_frame(xdpf); 2121fc72d1d5SJason Wang } else { 2122fc72d1d5SJason Wang struct sk_buff *skb = ptr; 2123fc72d1d5SJason Wang 21249b067034SAl Viro ret = tun_put_user(tun, tfile, skb, to); 2125f51a5e82SJason Wang if (unlikely(ret < 0)) 21261da177e4SLinus Torvalds kfree_skb(skb); 2127f51a5e82SJason Wang else 2128f51a5e82SJason Wang consume_skb(skb); 2129fc72d1d5SJason Wang } 21301da177e4SLinus Torvalds 213105c2828cSMichael S. Tsirkin return ret; 213205c2828cSMichael S. Tsirkin } 213305c2828cSMichael S. Tsirkin 21349b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) 213505c2828cSMichael S. Tsirkin { 213605c2828cSMichael S. Tsirkin struct file *file = iocb->ki_filp; 213705c2828cSMichael S. Tsirkin struct tun_file *tfile = file->private_data; 21389484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 21399b067034SAl Viro ssize_t len = iov_iter_count(to), ret; 21405aac0390SJens Axboe int noblock = 0; 214105c2828cSMichael S. Tsirkin 214205c2828cSMichael S. Tsirkin if (!tun) 214305c2828cSMichael S. Tsirkin return -EBADFD; 21445aac0390SJens Axboe 21455aac0390SJens Axboe if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) 21465aac0390SJens Axboe noblock = 1; 21475aac0390SJens Axboe 21485aac0390SJens Axboe ret = tun_do_read(tun, tfile, to, noblock, NULL); 214942404c09SDavid S. Miller ret = min_t(ssize_t, ret, len); 2150d0b7da8aSZhi Yong Wu if (ret > 0) 2151d0b7da8aSZhi Yong Wu iocb->ki_pos = ret; 2152631ab46bSEric W. 
Biederman tun_put(tun); 21531da177e4SLinus Torvalds return ret; 21541da177e4SLinus Torvalds } 21551da177e4SLinus Torvalds 2156cd5681d7SJason Wang static void tun_prog_free(struct rcu_head *rcu) 215796f84061SJason Wang { 2158cd5681d7SJason Wang struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu); 215996f84061SJason Wang 216096f84061SJason Wang bpf_prog_destroy(prog->prog); 216196f84061SJason Wang kfree(prog); 216296f84061SJason Wang } 216396f84061SJason Wang 21649d6474e4SJason Wang static int __tun_set_ebpf(struct tun_struct *tun, 21659d6474e4SJason Wang struct tun_prog __rcu **prog_p, 216696f84061SJason Wang struct bpf_prog *prog) 216796f84061SJason Wang { 2168cd5681d7SJason Wang struct tun_prog *old, *new = NULL; 216996f84061SJason Wang 217096f84061SJason Wang if (prog) { 217196f84061SJason Wang new = kmalloc(sizeof(*new), GFP_KERNEL); 217296f84061SJason Wang if (!new) 217396f84061SJason Wang return -ENOMEM; 217496f84061SJason Wang new->prog = prog; 217596f84061SJason Wang } 217696f84061SJason Wang 2177124da8f6SJason Wang spin_lock_bh(&tun->lock); 2178cd5681d7SJason Wang old = rcu_dereference_protected(*prog_p, 2179124da8f6SJason Wang lockdep_is_held(&tun->lock)); 2180cd5681d7SJason Wang rcu_assign_pointer(*prog_p, new); 2181124da8f6SJason Wang spin_unlock_bh(&tun->lock); 218296f84061SJason Wang 218396f84061SJason Wang if (old) 2184cd5681d7SJason Wang call_rcu(&old->rcu, tun_prog_free); 218596f84061SJason Wang 218696f84061SJason Wang return 0; 218796f84061SJason Wang } 218896f84061SJason Wang 218996442e42SJason Wang static void tun_free_netdev(struct net_device *dev) 219096442e42SJason Wang { 219196442e42SJason Wang struct tun_struct *tun = netdev_priv(dev); 219296442e42SJason Wang 21934008e97fSJason Wang BUG_ON(!(list_empty(&tun->disabled))); 219411fc7d5aSEric Dumazet 2195497a5757SHeiner Kallweit free_percpu(dev->tstats); 2196497a5757SHeiner Kallweit /* We clear tstats so that tun_set_iff() can tell if 219711fc7d5aSEric Dumazet * tun_free_netdev() has been called from register_netdevice(). 219811fc7d5aSEric Dumazet */ 2199497a5757SHeiner Kallweit dev->tstats = NULL; 220011fc7d5aSEric Dumazet 220196442e42SJason Wang tun_flow_uninit(tun); 22025dbbaf2dSPaul Moore security_tun_dev_free_security(tun->security); 2203cd5681d7SJason Wang __tun_set_ebpf(tun, &tun->steering_prog, NULL); 2204aff3d70aSJason Wang __tun_set_ebpf(tun, &tun->filter_prog, NULL); 220596442e42SJason Wang } 220696442e42SJason Wang 22071da177e4SLinus Torvalds static void tun_setup(struct net_device *dev) 22081da177e4SLinus Torvalds { 22091da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 22101da177e4SLinus Torvalds 22110625c883SEric W. Biederman tun->owner = INVALID_UID; 22120625c883SEric W. Biederman tun->group = INVALID_GID; 22134e24f2ddSChas Williams tun_default_link_ksettings(dev, &tun->link_ksettings); 22141da177e4SLinus Torvalds 22151da177e4SLinus Torvalds dev->ethtool_ops = &tun_ethtool_ops; 2216cf124db5SDavid S. Miller dev->needs_free_netdev = true; 2217cf124db5SDavid S. Miller dev->priv_destructor = tun_free_netdev; 2218016adb72SJason Wang /* We prefer our own queue length */ 2219016adb72SJason Wang dev->tx_queue_len = TUN_READQ_SIZE; 22201da177e4SLinus Torvalds } 22211da177e4SLinus Torvalds 2222f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting tun or tap 2223f019a7a5SEric W. Biederman * device with netlink. 2224f019a7a5SEric W. 
Biederman */ 2225a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[], 2226a8b8a889SMatthias Schiffer struct netlink_ext_ack *extack) 2227f019a7a5SEric W. Biederman { 222835b827b6SNicolas Dichtel NL_SET_ERR_MSG(extack, 222935b827b6SNicolas Dichtel "tun/tap creation via rtnetlink is not supported."); 223035b827b6SNicolas Dichtel return -EOPNOTSUPP; 2231f019a7a5SEric W. Biederman } 2232f019a7a5SEric W. Biederman 22331ec010e7SSabrina Dubroca static size_t tun_get_size(const struct net_device *dev) 22341ec010e7SSabrina Dubroca { 22351ec010e7SSabrina Dubroca BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t)); 22361ec010e7SSabrina Dubroca BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t)); 22371ec010e7SSabrina Dubroca 22381ec010e7SSabrina Dubroca return nla_total_size(sizeof(uid_t)) + /* OWNER */ 22391ec010e7SSabrina Dubroca nla_total_size(sizeof(gid_t)) + /* GROUP */ 22401ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* TYPE */ 22411ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* PI */ 22421ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* VNET_HDR */ 22431ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* PERSIST */ 22441ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */ 22451ec010e7SSabrina Dubroca nla_total_size(sizeof(u32)) + /* NUM_QUEUES */ 22461ec010e7SSabrina Dubroca nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */ 22471ec010e7SSabrina Dubroca 0; 22481ec010e7SSabrina Dubroca } 22491ec010e7SSabrina Dubroca 22501ec010e7SSabrina Dubroca static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev) 22511ec010e7SSabrina Dubroca { 22521ec010e7SSabrina Dubroca struct tun_struct *tun = netdev_priv(dev); 22531ec010e7SSabrina Dubroca 22541ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK)) 22551ec010e7SSabrina Dubroca goto nla_put_failure; 22561ec010e7SSabrina Dubroca if (uid_valid(tun->owner) && 22571ec010e7SSabrina Dubroca nla_put_u32(skb, IFLA_TUN_OWNER, 22581ec010e7SSabrina Dubroca from_kuid_munged(current_user_ns(), tun->owner))) 22591ec010e7SSabrina Dubroca goto nla_put_failure; 22601ec010e7SSabrina Dubroca if (gid_valid(tun->group) && 22611ec010e7SSabrina Dubroca nla_put_u32(skb, IFLA_TUN_GROUP, 22621ec010e7SSabrina Dubroca from_kgid_munged(current_user_ns(), tun->group))) 22631ec010e7SSabrina Dubroca goto nla_put_failure; 22641ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI))) 22651ec010e7SSabrina Dubroca goto nla_put_failure; 22661ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR))) 22671ec010e7SSabrina Dubroca goto nla_put_failure; 22681ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST))) 22691ec010e7SSabrina Dubroca goto nla_put_failure; 22701ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE, 22711ec010e7SSabrina Dubroca !!(tun->flags & IFF_MULTI_QUEUE))) 22721ec010e7SSabrina Dubroca goto nla_put_failure; 22731ec010e7SSabrina Dubroca if (tun->flags & IFF_MULTI_QUEUE) { 22741ec010e7SSabrina Dubroca if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues)) 22751ec010e7SSabrina Dubroca goto nla_put_failure; 22761ec010e7SSabrina Dubroca if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES, 22771ec010e7SSabrina Dubroca tun->numdisabled)) 22781ec010e7SSabrina Dubroca goto nla_put_failure; 22791ec010e7SSabrina Dubroca } 22801ec010e7SSabrina Dubroca 22811ec010e7SSabrina Dubroca return 0; 22821ec010e7SSabrina Dubroca 22831ec010e7SSabrina 
Dubroca nla_put_failure: 22841ec010e7SSabrina Dubroca return -EMSGSIZE; 22851ec010e7SSabrina Dubroca } 22861ec010e7SSabrina Dubroca 2287f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = { 2288f019a7a5SEric W. Biederman .kind = DRV_NAME, 2289f019a7a5SEric W. Biederman .priv_size = sizeof(struct tun_struct), 2290f019a7a5SEric W. Biederman .setup = tun_setup, 2291f019a7a5SEric W. Biederman .validate = tun_validate, 22921ec010e7SSabrina Dubroca .get_size = tun_get_size, 22931ec010e7SSabrina Dubroca .fill_info = tun_fill_info, 2294f019a7a5SEric W. Biederman }; 2295f019a7a5SEric W. Biederman 229633dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk) 229733dccbb0SHerbert Xu { 229854f968d6SJason Wang struct tun_file *tfile; 229943815482SEric Dumazet wait_queue_head_t *wqueue; 230033dccbb0SHerbert Xu 230133dccbb0SHerbert Xu if (!sock_writeable(sk)) 230233dccbb0SHerbert Xu return; 230333dccbb0SHerbert Xu 23049cd3e072SEric Dumazet if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) 230533dccbb0SHerbert Xu return; 230633dccbb0SHerbert Xu 230743815482SEric Dumazet wqueue = sk_sleep(sk); 230843815482SEric Dumazet if (wqueue && waitqueue_active(wqueue)) 2309a9a08845SLinus Torvalds wake_up_interruptible_sync_poll(wqueue, EPOLLOUT | 2310a9a08845SLinus Torvalds EPOLLWRNORM | EPOLLWRBAND); 2311c722c625SHerbert Xu 231254f968d6SJason Wang tfile = container_of(sk, struct tun_file, sk); 231354f968d6SJason Wang kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); 231433dccbb0SHerbert Xu } 231533dccbb0SHerbert Xu 2316f9e06c45SJason Wang static void tun_put_page(struct tun_page *tpage) 2317f9e06c45SJason Wang { 2318f9e06c45SJason Wang if (tpage->page) 2319f9e06c45SJason Wang __page_frag_cache_drain(tpage->page, tpage->count); 2320f9e06c45SJason Wang } 2321f9e06c45SJason Wang 2322043d222fSJason Wang static int tun_xdp_one(struct tun_struct *tun, 2323043d222fSJason Wang struct tun_file *tfile, 2324f9e06c45SJason Wang struct xdp_buff *xdp, int *flush, 2325f9e06c45SJason Wang struct tun_page *tpage) 2326043d222fSJason Wang { 23274e4b08e5SPrashant Bhole unsigned int datasize = xdp->data_end - xdp->data; 2328043d222fSJason Wang struct tun_xdp_hdr *hdr = xdp->data_hard_start; 2329043d222fSJason Wang struct virtio_net_hdr *gso = &hdr->gso; 2330043d222fSJason Wang struct bpf_prog *xdp_prog; 2331043d222fSJason Wang struct sk_buff *skb = NULL; 2332043d222fSJason Wang u32 rxhash = 0, act; 2333043d222fSJason Wang int buflen = hdr->buflen; 2334043d222fSJason Wang int err = 0; 2335043d222fSJason Wang bool skb_xdp = false; 2336f9e06c45SJason Wang struct page *page; 2337043d222fSJason Wang 2338043d222fSJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 2339043d222fSJason Wang if (xdp_prog) { 2340043d222fSJason Wang if (gso->gso_type) { 2341043d222fSJason Wang skb_xdp = true; 2342043d222fSJason Wang goto build; 2343043d222fSJason Wang } 2344*43b5169dSLorenzo Bianconi 2345*43b5169dSLorenzo Bianconi xdp_init_buff(xdp, buflen, &tfile->xdp_rxq); 2346043d222fSJason Wang xdp_set_data_meta_invalid(xdp); 2347043d222fSJason Wang 2348043d222fSJason Wang act = bpf_prog_run_xdp(xdp_prog, xdp); 2349043d222fSJason Wang err = tun_xdp_act(tun, xdp_prog, xdp, act); 2350043d222fSJason Wang if (err < 0) { 2351043d222fSJason Wang put_page(virt_to_head_page(xdp->data)); 2352043d222fSJason Wang return err; 2353043d222fSJason Wang } 2354043d222fSJason Wang 2355043d222fSJason Wang switch (err) { 2356043d222fSJason Wang case XDP_REDIRECT: 2357043d222fSJason Wang *flush = true; 
2358df561f66SGustavo A. R. Silva fallthrough; 2359043d222fSJason Wang case XDP_TX: 2360043d222fSJason Wang return 0; 2361043d222fSJason Wang case XDP_PASS: 2362043d222fSJason Wang break; 2363043d222fSJason Wang default: 2364f9e06c45SJason Wang page = virt_to_head_page(xdp->data); 2365f9e06c45SJason Wang if (tpage->page == page) { 2366f9e06c45SJason Wang ++tpage->count; 2367f9e06c45SJason Wang } else { 2368f9e06c45SJason Wang tun_put_page(tpage); 2369f9e06c45SJason Wang tpage->page = page; 2370f9e06c45SJason Wang tpage->count = 1; 2371f9e06c45SJason Wang } 2372043d222fSJason Wang return 0; 2373043d222fSJason Wang } 2374043d222fSJason Wang } 2375043d222fSJason Wang 2376043d222fSJason Wang build: 2377043d222fSJason Wang skb = build_skb(xdp->data_hard_start, buflen); 2378043d222fSJason Wang if (!skb) { 2379043d222fSJason Wang err = -ENOMEM; 2380043d222fSJason Wang goto out; 2381043d222fSJason Wang } 2382043d222fSJason Wang 2383043d222fSJason Wang skb_reserve(skb, xdp->data - xdp->data_hard_start); 2384043d222fSJason Wang skb_put(skb, xdp->data_end - xdp->data); 2385043d222fSJason Wang 2386043d222fSJason Wang if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) { 2387497a5757SHeiner Kallweit atomic_long_inc(&tun->rx_frame_errors); 2388043d222fSJason Wang kfree_skb(skb); 2389043d222fSJason Wang err = -EINVAL; 2390043d222fSJason Wang goto out; 2391043d222fSJason Wang } 2392043d222fSJason Wang 2393043d222fSJason Wang skb->protocol = eth_type_trans(skb, tun->dev); 2394043d222fSJason Wang skb_reset_network_header(skb); 2395d2aa125dSMaxim Mikityanskiy skb_probe_transport_header(skb); 23963fe260e0SGilberto Bertin skb_record_rx_queue(skb, tfile->queue_index); 2397043d222fSJason Wang 2398043d222fSJason Wang if (skb_xdp) { 2399043d222fSJason Wang err = do_xdp_generic(xdp_prog, skb); 2400043d222fSJason Wang if (err != XDP_PASS) 2401043d222fSJason Wang goto out; 2402043d222fSJason Wang } 2403043d222fSJason Wang 2404f29eb2a9SPaolo Abeni if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 && 2405f29eb2a9SPaolo Abeni !tfile->detached) 2406043d222fSJason Wang rxhash = __skb_get_hash_symmetric(skb); 2407043d222fSJason Wang 2408043d222fSJason Wang netif_receive_skb(skb); 2409043d222fSJason Wang 2410497a5757SHeiner Kallweit /* No need to disable preemption here since this function is 24116342ca64SPrashant Bhole * always called with bh disabled 24126342ca64SPrashant Bhole */ 2413497a5757SHeiner Kallweit dev_sw_netstats_rx_add(tun->dev, datasize); 2414043d222fSJason Wang 2415043d222fSJason Wang if (rxhash) 2416043d222fSJason Wang tun_flow_update(tun, rxhash, tfile); 2417043d222fSJason Wang 2418043d222fSJason Wang out: 2419043d222fSJason Wang return err; 2420043d222fSJason Wang } 2421043d222fSJason Wang 24221b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) 242305c2828cSMichael S. Tsirkin { 2424043d222fSJason Wang int ret, i; 242554f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 24269484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 2427fe8dd45bSJason Wang struct tun_msg_ctl *ctl = m->msg_control; 2428043d222fSJason Wang struct xdp_buff *xdp; 242954f968d6SJason Wang 243054f968d6SJason Wang if (!tun) 243154f968d6SJason Wang return -EBADFD; 2432f5ff53b4SAl Viro 2433043d222fSJason Wang if (ctl && (ctl->type == TUN_MSG_PTR)) { 24346f0271d9SDavid S. Miller struct tun_page tpage; 2435043d222fSJason Wang int n = ctl->num; 2436043d222fSJason Wang int flush = 0; 2437043d222fSJason Wang 24386f0271d9SDavid S. 
Miller memset(&tpage, 0, sizeof(tpage)); 24396f0271d9SDavid S. Miller 2440043d222fSJason Wang local_bh_disable(); 2441043d222fSJason Wang rcu_read_lock(); 2442043d222fSJason Wang 2443043d222fSJason Wang for (i = 0; i < n; i++) { 2444043d222fSJason Wang xdp = &((struct xdp_buff *)ctl->ptr)[i]; 2445f9e06c45SJason Wang tun_xdp_one(tun, tfile, xdp, &flush, &tpage); 2446043d222fSJason Wang } 2447043d222fSJason Wang 2448043d222fSJason Wang if (flush) 24491d233886SToke Høiland-Jørgensen xdp_do_flush(); 2450043d222fSJason Wang 2451043d222fSJason Wang rcu_read_unlock(); 2452043d222fSJason Wang local_bh_enable(); 2453043d222fSJason Wang 2454f9e06c45SJason Wang tun_put_page(&tpage); 2455f9e06c45SJason Wang 2456043d222fSJason Wang ret = total_len; 2457043d222fSJason Wang goto out; 2458043d222fSJason Wang } 2459fe8dd45bSJason Wang 2460fe8dd45bSJason Wang ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter, 24615503fcecSJason Wang m->msg_flags & MSG_DONTWAIT, 24625503fcecSJason Wang m->msg_flags & MSG_MORE); 2463043d222fSJason Wang out: 246454f968d6SJason Wang tun_put(tun); 246554f968d6SJason Wang return ret; 246605c2828cSMichael S. Tsirkin } 246705c2828cSMichael S. Tsirkin 24681b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, 246905c2828cSMichael S. Tsirkin int flags) 247005c2828cSMichael S. Tsirkin { 247154f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 24729484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 2473fc72d1d5SJason Wang void *ptr = m->msg_control; 247405c2828cSMichael S. Tsirkin int ret; 247554f968d6SJason Wang 2476c33ee15bSWei Xu if (!tun) { 2477c33ee15bSWei Xu ret = -EBADFD; 2478fc72d1d5SJason Wang goto out_free; 2479c33ee15bSWei Xu } 248054f968d6SJason Wang 2481eda29772SRichard Cochran if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { 24823811ae76SGao feng ret = -EINVAL; 2483c33ee15bSWei Xu goto out_put_tun; 24843811ae76SGao feng } 2485eda29772SRichard Cochran if (flags & MSG_ERRQUEUE) { 2486eda29772SRichard Cochran ret = sock_recv_errqueue(sock->sk, m, total_len, 2487eda29772SRichard Cochran SOL_PACKET, TUN_TX_TIMESTAMP); 2488eda29772SRichard Cochran goto out; 2489eda29772SRichard Cochran } 2490fc72d1d5SJason Wang ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr); 249187897931SAlex Gartrell if (ret > (ssize_t)total_len) { 249242404c09SDavid S. Miller m->msg_flags |= MSG_TRUNC; 249342404c09SDavid S. Miller ret = flags & MSG_TRUNC ? ret : total_len; 249442404c09SDavid S. Miller } 24953811ae76SGao feng out: 249654f968d6SJason Wang tun_put(tun); 249705c2828cSMichael S. Tsirkin return ret; 2498c33ee15bSWei Xu 2499c33ee15bSWei Xu out_put_tun: 2500c33ee15bSWei Xu tun_put(tun); 2501fc72d1d5SJason Wang out_free: 2502fc72d1d5SJason Wang tun_ptr_free(ptr); 2503c33ee15bSWei Xu return ret; 250405c2828cSMichael S. Tsirkin } 250505c2828cSMichael S. 
Tsirkin 2506fc72d1d5SJason Wang static int tun_ptr_peek_len(void *ptr) 2507fc72d1d5SJason Wang { 2508fc72d1d5SJason Wang if (likely(ptr)) { 25091ffcbc85SJesper Dangaard Brouer if (tun_is_xdp_frame(ptr)) { 25101ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); 2511fc72d1d5SJason Wang 25121ffcbc85SJesper Dangaard Brouer return xdpf->len; 2513fc72d1d5SJason Wang } 2514fc72d1d5SJason Wang return __skb_array_len_with_tag(ptr); 2515fc72d1d5SJason Wang } else { 2516fc72d1d5SJason Wang return 0; 2517fc72d1d5SJason Wang } 2518fc72d1d5SJason Wang } 2519fc72d1d5SJason Wang 25201576d986SJason Wang static int tun_peek_len(struct socket *sock) 25211576d986SJason Wang { 25221576d986SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 25231576d986SJason Wang struct tun_struct *tun; 25241576d986SJason Wang int ret = 0; 25251576d986SJason Wang 25269484dc74Syuan linyu tun = tun_get(tfile); 25271576d986SJason Wang if (!tun) 25281576d986SJason Wang return 0; 25291576d986SJason Wang 2530fc72d1d5SJason Wang ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len); 25311576d986SJason Wang tun_put(tun); 25321576d986SJason Wang 25331576d986SJason Wang return ret; 25341576d986SJason Wang } 25351576d986SJason Wang 253605c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */ 253705c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = { 25381576d986SJason Wang .peek_len = tun_peek_len, 253905c2828cSMichael S. Tsirkin .sendmsg = tun_sendmsg, 254005c2828cSMichael S. Tsirkin .recvmsg = tun_recvmsg, 254105c2828cSMichael S. Tsirkin }; 254205c2828cSMichael S. Tsirkin 254333dccbb0SHerbert Xu static struct proto tun_proto = { 254433dccbb0SHerbert Xu .name = "tun", 254533dccbb0SHerbert Xu .owner = THIS_MODULE, 254654f968d6SJason Wang .obj_size = sizeof(struct tun_file), 254733dccbb0SHerbert Xu }; 2548f019a7a5SEric W. Biederman 2549980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun) 2550980c9e8cSDavid Woodhouse { 2551031f5e03SMichael S. Tsirkin return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); 2552980c9e8cSDavid Woodhouse } 2553980c9e8cSDavid Woodhouse 2554980c9e8cSDavid Woodhouse static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr, 2555980c9e8cSDavid Woodhouse char *buf) 2556980c9e8cSDavid Woodhouse { 2557980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2558980c9e8cSDavid Woodhouse return sprintf(buf, "0x%x\n", tun_flags(tun)); 2559980c9e8cSDavid Woodhouse } 2560980c9e8cSDavid Woodhouse 2561980c9e8cSDavid Woodhouse static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, 2562980c9e8cSDavid Woodhouse char *buf) 2563980c9e8cSDavid Woodhouse { 2564980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 25650625c883SEric W. Biederman return uid_valid(tun->owner)? 25660625c883SEric W. Biederman sprintf(buf, "%u\n", 25670625c883SEric W. Biederman from_kuid_munged(current_user_ns(), tun->owner)): 25680625c883SEric W. Biederman sprintf(buf, "-1\n"); 2569980c9e8cSDavid Woodhouse } 2570980c9e8cSDavid Woodhouse 2571980c9e8cSDavid Woodhouse static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, 2572980c9e8cSDavid Woodhouse char *buf) 2573980c9e8cSDavid Woodhouse { 2574980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 25750625c883SEric W. Biederman return gid_valid(tun->group) ? 25760625c883SEric W. 
Biederman sprintf(buf, "%u\n", 25770625c883SEric W. Biederman from_kgid_munged(current_user_ns(), tun->group)): 25780625c883SEric W. Biederman sprintf(buf, "-1\n"); 2579980c9e8cSDavid Woodhouse } 2580980c9e8cSDavid Woodhouse 2581980c9e8cSDavid Woodhouse static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); 2582980c9e8cSDavid Woodhouse static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL); 2583980c9e8cSDavid Woodhouse static DEVICE_ATTR(group, 0444, tun_show_group, NULL); 2584980c9e8cSDavid Woodhouse 2585c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = { 2586c4d33e24STakashi Iwai &dev_attr_tun_flags.attr, 2587c4d33e24STakashi Iwai &dev_attr_owner.attr, 2588c4d33e24STakashi Iwai &dev_attr_group.attr, 2589c4d33e24STakashi Iwai NULL 2590c4d33e24STakashi Iwai }; 2591c4d33e24STakashi Iwai 2592c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = { 2593c4d33e24STakashi Iwai .attrs = tun_dev_attrs 2594c4d33e24STakashi Iwai }; 2595c4d33e24STakashi Iwai 2596d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 25971da177e4SLinus Torvalds { 25981da177e4SLinus Torvalds struct tun_struct *tun; 259954f968d6SJason Wang struct tun_file *tfile = file->private_data; 26001da177e4SLinus Torvalds struct net_device *dev; 26011da177e4SLinus Torvalds int err; 26021da177e4SLinus Torvalds 26037c0c3b1aSJason Wang if (tfile->detached) 26047c0c3b1aSJason Wang return -EINVAL; 26057c0c3b1aSJason Wang 260690e33d45SPetar Penkov if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { 260790e33d45SPetar Penkov if (!capable(CAP_NET_ADMIN)) 260890e33d45SPetar Penkov return -EPERM; 260990e33d45SPetar Penkov 261090e33d45SPetar Penkov if (!(ifr->ifr_flags & IFF_NAPI) || 261190e33d45SPetar Penkov (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) 261290e33d45SPetar Penkov return -EINVAL; 261390e33d45SPetar Penkov } 261490e33d45SPetar Penkov 261574a3e5a7SEric W. Biederman dev = __dev_get_by_name(net, ifr->ifr_name); 261674a3e5a7SEric W. Biederman if (dev) { 2617f85ba780SDavid Woodhouse if (ifr->ifr_flags & IFF_TUN_EXCL) 2618f85ba780SDavid Woodhouse return -EBUSY; 261974a3e5a7SEric W. Biederman if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) 262074a3e5a7SEric W. Biederman tun = netdev_priv(dev); 262174a3e5a7SEric W. Biederman else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) 262274a3e5a7SEric W. Biederman tun = netdev_priv(dev); 262374a3e5a7SEric W. Biederman else 262474a3e5a7SEric W. Biederman return -EINVAL; 262574a3e5a7SEric W. Biederman 26268e6d91aeSJason Wang if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != 262740630b82SMichael S. Tsirkin !!(tun->flags & IFF_MULTI_QUEUE)) 26288e6d91aeSJason Wang return -EINVAL; 26298e6d91aeSJason Wang 2630cde8b15fSJason Wang if (tun_not_capable(tun)) 26312b980dbdSPaul Moore return -EPERM; 26325dbbaf2dSPaul Moore err = security_tun_dev_open(tun->security); 26332b980dbdSPaul Moore if (err < 0) 26342b980dbdSPaul Moore return err; 26352b980dbdSPaul Moore 263694317099SPetar Penkov err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, 2637af3fb24eSEric Dumazet ifr->ifr_flags & IFF_NAPI, 263877f22f92SYang Yingliang ifr->ifr_flags & IFF_NAPI_FRAGS, true); 2639a7385ba2SEric W. Biederman if (err < 0) 2640a7385ba2SEric W. Biederman return err; 26414008e97fSJason Wang 264240630b82SMichael S. 
Tsirkin if (tun->flags & IFF_MULTI_QUEUE && 2643e8dbad66SJason Wang (tun->numqueues + tun->numdisabled > 1)) { 2644e8dbad66SJason Wang /* One or more queues have already been attached, no need 2645e8dbad66SJason Wang * to initialize the device again. 2646e8dbad66SJason Wang */ 264783c1f36fSSabrina Dubroca netdev_state_change(dev); 2648e8dbad66SJason Wang return 0; 2649e8dbad66SJason Wang } 26509fffc5c6SSabrina Dubroca 26519fffc5c6SSabrina Dubroca tun->flags = (tun->flags & ~TUN_FEATURES) | 26529fffc5c6SSabrina Dubroca (ifr->ifr_flags & TUN_FEATURES); 265383c1f36fSSabrina Dubroca 265483c1f36fSSabrina Dubroca netdev_state_change(dev); 265583c1f36fSSabrina Dubroca } else { 26561da177e4SLinus Torvalds char *name; 26571da177e4SLinus Torvalds unsigned long flags = 0; 2658edfb6a14SJason Wang int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? 2659edfb6a14SJason Wang MAX_TAP_QUEUES : 1; 26601da177e4SLinus Torvalds 2661c260b772SEric W. Biederman if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2662ca6bb5d7SDavid Woodhouse return -EPERM; 26632b980dbdSPaul Moore err = security_tun_dev_create(); 26642b980dbdSPaul Moore if (err < 0) 26652b980dbdSPaul Moore return err; 2666ca6bb5d7SDavid Woodhouse 26671da177e4SLinus Torvalds /* Set dev type */ 26681da177e4SLinus Torvalds if (ifr->ifr_flags & IFF_TUN) { 26691da177e4SLinus Torvalds /* TUN device */ 267040630b82SMichael S. Tsirkin flags |= IFF_TUN; 26711da177e4SLinus Torvalds name = "tun%d"; 26721da177e4SLinus Torvalds } else if (ifr->ifr_flags & IFF_TAP) { 26731da177e4SLinus Torvalds /* TAP device */ 267440630b82SMichael S. Tsirkin flags |= IFF_TAP; 26751da177e4SLinus Torvalds name = "tap%d"; 26761da177e4SLinus Torvalds } else 267736989b90SKusanagi Kouichi return -EINVAL; 26781da177e4SLinus Torvalds 26791da177e4SLinus Torvalds if (*ifr->ifr_name) 26801da177e4SLinus Torvalds name = ifr->ifr_name; 26811da177e4SLinus Torvalds 2682c8d68e6bSJason Wang dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, 2683c835a677STom Gundersen NET_NAME_UNKNOWN, tun_setup, queues, 2684c835a677STom Gundersen queues); 2685edfb6a14SJason Wang 26861da177e4SLinus Torvalds if (!dev) 26871da177e4SLinus Torvalds return -ENOMEM; 26881da177e4SLinus Torvalds 2689fc54c658SPavel Emelyanov dev_net_set(dev, net); 2690f019a7a5SEric W. Biederman dev->rtnl_link_ops = &tun_link_ops; 2691fb7589a1SPavel Emelyanov dev->ifindex = tfile->ifindex; 2692c4d33e24STakashi Iwai dev->sysfs_groups[0] = &tun_attr_group; 2693758e43b7SStephen Hemminger 26941da177e4SLinus Torvalds tun = netdev_priv(dev); 26951da177e4SLinus Torvalds tun->dev = dev; 26961da177e4SLinus Torvalds tun->flags = flags; 2697f271b2ccSMax Krasnyansky tun->txflt.count = 0; 2698d9d52b51SMichael S.
Tsirkin tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); 26991da177e4SLinus Torvalds 2700eaea34b2SPaolo Abeni tun->align = NET_SKB_PAD; 270154f968d6SJason Wang tun->filter_attached = false; 270254f968d6SJason Wang tun->sndbuf = tfile->socket.sk->sk_sndbuf; 27035503fcecSJason Wang tun->rx_batched = 0; 270496f84061SJason Wang RCU_INIT_POINTER(tun->steering_prog, NULL); 270533dccbb0SHerbert Xu 2706497a5757SHeiner Kallweit dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 2707497a5757SHeiner Kallweit if (!dev->tstats) { 2708608b9977SPaolo Abeni err = -ENOMEM; 2709608b9977SPaolo Abeni goto err_free_dev; 2710608b9977SPaolo Abeni } 2711608b9977SPaolo Abeni 271296442e42SJason Wang spin_lock_init(&tun->lock); 271396442e42SJason Wang 27145dbbaf2dSPaul Moore err = security_tun_dev_alloc_security(&tun->security); 27155dbbaf2dSPaul Moore if (err < 0) 2716608b9977SPaolo Abeni goto err_free_stat; 27172b980dbdSPaul Moore 27181da177e4SLinus Torvalds tun_net_init(dev); 2719944a1376SPavel Emelyanov tun_flow_init(tun); 272096442e42SJason Wang 272188255375SMichał Mirosław dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | 27226680ec68SJason Wang TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | 27236680ec68SJason Wang NETIF_F_HW_VLAN_STAG_TX; 27242a2bbf17SPaolo Abeni dev->features = dev->hw_features | NETIF_F_LLTX; 27256671b224SFernando Luis Vazquez Cao dev->vlan_features = dev->features & 27266671b224SFernando Luis Vazquez Cao ~(NETIF_F_HW_VLAN_CTAG_TX | 27276671b224SFernando Luis Vazquez Cao NETIF_F_HW_VLAN_STAG_TX); 272888255375SMichał Mirosław 27299fffc5c6SSabrina Dubroca tun->flags = (tun->flags & ~TUN_FEATURES) | 27309fffc5c6SSabrina Dubroca (ifr->ifr_flags & TUN_FEATURES); 27319fffc5c6SSabrina Dubroca 27324008e97fSJason Wang INIT_LIST_HEAD(&tun->disabled); 2733af3fb24eSEric Dumazet err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI, 273477f22f92SYang Yingliang ifr->ifr_flags & IFF_NAPI_FRAGS, false); 2735eb0fb363SJason Wang if (err < 0) 2736662ca437SJason Wang goto err_free_flow; 2737eb0fb363SJason Wang 27381da177e4SLinus Torvalds err = register_netdevice(tun->dev); 27391da177e4SLinus Torvalds if (err < 0) 2740662ca437SJason Wang goto err_detach; 274177f22f92SYang Yingliang /* free_netdev() won't check refcnt, to avoid a race 274277f22f92SYang Yingliang * with dev_put() we need to publish tun after registration. 274377f22f92SYang Yingliang */ 274477f22f92SYang Yingliang rcu_assign_pointer(tfile->tun, tun); 2745af668b3cSMichael S. Tsirkin } 2746980c9e8cSDavid Woodhouse 2747eb0fb363SJason Wang netif_carrier_on(tun->dev); 27481da177e4SLinus Torvalds 2749e35259a9SMax Krasnyansky /* Make sure persistent devices do not get stuck in 2750e35259a9SMax Krasnyansky * xoff state. 2751e35259a9SMax Krasnyansky */ 2752e35259a9SMax Krasnyansky if (netif_running(tun->dev)) 2753c8d68e6bSJason Wang netif_tx_wake_all_queues(tun->dev); 2754e35259a9SMax Krasnyansky 27551da177e4SLinus Torvalds strcpy(ifr->ifr_name, tun->dev->name); 27561da177e4SLinus Torvalds return 0; 27571da177e4SLinus Torvalds 2758662ca437SJason Wang err_detach: 2759662ca437SJason Wang tun_detach_all(dev); 276011fc7d5aSEric Dumazet /* We are here because register_netdevice() has failed. 276111fc7d5aSEric Dumazet * If register_netdevice() already called tun_free_netdev() 2762497a5757SHeiner Kallweit * while dealing with the error, dev->tstats has been cleared.
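 *
 * Editorial note (an assumption based on the check that follows):
 * tun_free_netdev() is expected to free dev->tstats and NULL the
 * pointer, so a NULL dev->tstats below means ndo_uninit already ran
 * and only free_netdev() is still required, while a non-NULL pointer
 * means the full err_free_flow/err_free_stat teardown must still run.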
276311fc7d5aSEric Dumazet */ 2764497a5757SHeiner Kallweit if (!dev->tstats) 2765ff244c6bSEric Dumazet goto err_free_dev; 2766ff244c6bSEric Dumazet 2767662ca437SJason Wang err_free_flow: 2768662ca437SJason Wang tun_flow_uninit(tun); 2769662ca437SJason Wang security_tun_dev_free_security(tun->security); 2770608b9977SPaolo Abeni err_free_stat: 2771497a5757SHeiner Kallweit free_percpu(dev->tstats); 27721da177e4SLinus Torvalds err_free_dev: 27731da177e4SLinus Torvalds free_netdev(dev); 27741da177e4SLinus Torvalds return err; 27751da177e4SLinus Torvalds } 27761da177e4SLinus Torvalds 277712132768SKirill Tkhai static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr) 2778e3b99556SMark McLoughlin { 2779e3b99556SMark McLoughlin strcpy(ifr->ifr_name, tun->dev->name); 2780e3b99556SMark McLoughlin 2781980c9e8cSDavid Woodhouse ifr->ifr_flags = tun_flags(tun); 2782e3b99556SMark McLoughlin 2783e3b99556SMark McLoughlin } 2784e3b99556SMark McLoughlin 27855228ddc9SRusty Russell /* This is like a cut-down ethtool ops, except done via tun fd so no 27865228ddc9SRusty Russell * privs required. */ 278788255375SMichał Mirosław static int set_offload(struct tun_struct *tun, unsigned long arg) 27885228ddc9SRusty Russell { 2789c8f44affSMichał Mirosław netdev_features_t features = 0; 27905228ddc9SRusty Russell 27915228ddc9SRusty Russell if (arg & TUN_F_CSUM) { 279288255375SMichał Mirosław features |= NETIF_F_HW_CSUM; 27935228ddc9SRusty Russell arg &= ~TUN_F_CSUM; 27945228ddc9SRusty Russell 27955228ddc9SRusty Russell if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { 27965228ddc9SRusty Russell if (arg & TUN_F_TSO_ECN) { 27975228ddc9SRusty Russell features |= NETIF_F_TSO_ECN; 27985228ddc9SRusty Russell arg &= ~TUN_F_TSO_ECN; 27995228ddc9SRusty Russell } 28005228ddc9SRusty Russell if (arg & TUN_F_TSO4) 28015228ddc9SRusty Russell features |= NETIF_F_TSO; 28025228ddc9SRusty Russell if (arg & TUN_F_TSO6) 28035228ddc9SRusty Russell features |= NETIF_F_TSO6; 28045228ddc9SRusty Russell arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 28055228ddc9SRusty Russell } 28060c19f846SWillem de Bruijn 28070c19f846SWillem de Bruijn arg &= ~TUN_F_UFO; 28085228ddc9SRusty Russell } 28095228ddc9SRusty Russell 28105228ddc9SRusty Russell /* This gives the user a way to test for new features in future by 28115228ddc9SRusty Russell * trying to set them. 
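 *
 * Illustrative userspace sketch (added for exposition; "tun_fd" is a
 * placeholder for an open /dev/net/tun descriptor): probe for TSO by
 * attempting to enable it, falling back when the kernel rejects
 * unknown or unsupported bits with -EINVAL:
 *
 *	if (ioctl(tun_fd, TUNSETOFFLOAD,
 *		  TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6) < 0)
 *		ioctl(tun_fd, TUNSETOFFLOAD, TUN_F_CSUM);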
*/ 28125228ddc9SRusty Russell if (arg) 28135228ddc9SRusty Russell return -EINVAL; 28145228ddc9SRusty Russell 281588255375SMichał Mirosław tun->set_features = features; 281609050957SYaroslav Isakov tun->dev->wanted_features &= ~TUN_USER_FEATURES; 281709050957SYaroslav Isakov tun->dev->wanted_features |= features; 281888255375SMichał Mirosław netdev_update_features(tun->dev); 28195228ddc9SRusty Russell 28205228ddc9SRusty Russell return 0; 28215228ddc9SRusty Russell } 28225228ddc9SRusty Russell 2823c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n) 2824c8d68e6bSJason Wang { 2825c8d68e6bSJason Wang int i; 2826c8d68e6bSJason Wang struct tun_file *tfile; 2827c8d68e6bSJason Wang 2828c8d68e6bSJason Wang for (i = 0; i < n; i++) { 2829b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 28308ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 28318ced425eSHannes Frederic Sowa sk_detach_filter(tfile->socket.sk); 28328ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2833c8d68e6bSJason Wang } 2834c8d68e6bSJason Wang 2835c8d68e6bSJason Wang tun->filter_attached = false; 2836c8d68e6bSJason Wang } 2837c8d68e6bSJason Wang 2838c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun) 2839c8d68e6bSJason Wang { 2840c8d68e6bSJason Wang int i, ret = 0; 2841c8d68e6bSJason Wang struct tun_file *tfile; 2842c8d68e6bSJason Wang 2843c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2844b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 28458ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 28468ced425eSHannes Frederic Sowa ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); 28478ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2848c8d68e6bSJason Wang if (ret) { 2849c8d68e6bSJason Wang tun_detach_filter(tun, i); 2850c8d68e6bSJason Wang return ret; 2851c8d68e6bSJason Wang } 2852c8d68e6bSJason Wang } 2853c8d68e6bSJason Wang 2854c8d68e6bSJason Wang tun->filter_attached = true; 2855c8d68e6bSJason Wang return ret; 2856c8d68e6bSJason Wang } 2857c8d68e6bSJason Wang 2858c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun) 2859c8d68e6bSJason Wang { 2860c8d68e6bSJason Wang struct tun_file *tfile; 2861c8d68e6bSJason Wang int i; 2862c8d68e6bSJason Wang 2863c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2864b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 2865c8d68e6bSJason Wang tfile->socket.sk->sk_sndbuf = tun->sndbuf; 2866c8d68e6bSJason Wang } 2867c8d68e6bSJason Wang } 2868c8d68e6bSJason Wang 2869cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr) 2870cde8b15fSJason Wang { 2871cde8b15fSJason Wang struct tun_file *tfile = file->private_data; 2872cde8b15fSJason Wang struct tun_struct *tun; 2873cde8b15fSJason Wang int ret = 0; 2874cde8b15fSJason Wang 2875cde8b15fSJason Wang rtnl_lock(); 2876cde8b15fSJason Wang 2877cde8b15fSJason Wang if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { 28784008e97fSJason Wang tun = tfile->detached; 28795dbbaf2dSPaul Moore if (!tun) { 2880cde8b15fSJason Wang ret = -EINVAL; 28815dbbaf2dSPaul Moore goto unlock; 28825dbbaf2dSPaul Moore } 28835dbbaf2dSPaul Moore ret = security_tun_dev_attach_queue(tun->security); 28845dbbaf2dSPaul Moore if (ret < 0) 28855dbbaf2dSPaul Moore goto unlock; 2886af3fb24eSEric Dumazet ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, 288777f22f92SYang Yingliang tun->flags & IFF_NAPI_FRAGS, true); 28884008e97fSJason Wang } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 2889b8deabd3SJason Wang tun 
= rtnl_dereference(tfile->tun); 289040630b82SMichael S. Tsirkin if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) 28914008e97fSJason Wang ret = -EINVAL; 2892cde8b15fSJason Wang else 28934008e97fSJason Wang __tun_detach(tfile, false); 28944008e97fSJason Wang } else 2895cde8b15fSJason Wang ret = -EINVAL; 2896cde8b15fSJason Wang 289783c1f36fSSabrina Dubroca if (ret >= 0) 289883c1f36fSSabrina Dubroca netdev_state_change(tun->dev); 289983c1f36fSSabrina Dubroca 29005dbbaf2dSPaul Moore unlock: 2901cde8b15fSJason Wang rtnl_unlock(); 2902cde8b15fSJason Wang return ret; 2903cde8b15fSJason Wang } 2904cde8b15fSJason Wang 29058f3f330dSJason Wang static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p, 2906cd5681d7SJason Wang void __user *data) 290796f84061SJason Wang { 290896f84061SJason Wang struct bpf_prog *prog; 290996f84061SJason Wang int fd; 291096f84061SJason Wang 291196f84061SJason Wang if (copy_from_user(&fd, data, sizeof(fd))) 291296f84061SJason Wang return -EFAULT; 291396f84061SJason Wang 291496f84061SJason Wang if (fd == -1) { 291596f84061SJason Wang prog = NULL; 291696f84061SJason Wang } else { 291796f84061SJason Wang prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); 291896f84061SJason Wang if (IS_ERR(prog)) 291996f84061SJason Wang return PTR_ERR(prog); 292096f84061SJason Wang } 292196f84061SJason Wang 2922cd5681d7SJason Wang return __tun_set_ebpf(tun, prog_p, prog); 292396f84061SJason Wang } 292496f84061SJason Wang 292550857e2aSArnd Bergmann static long __tun_chr_ioctl(struct file *file, unsigned int cmd, 292650857e2aSArnd Bergmann unsigned long arg, int ifreq_len) 29271da177e4SLinus Torvalds { 292836b50babSEric W. Biederman struct tun_file *tfile = file->private_data; 2929f663706aSKirill Tkhai struct net *net = sock_net(&tfile->sk); 2930631ab46bSEric W. Biederman struct tun_struct *tun; 29311da177e4SLinus Torvalds void __user* argp = (void __user*)arg; 293226d31925SNicolas Dichtel unsigned int ifindex, carrier; 29331da177e4SLinus Torvalds struct ifreq ifr; 29340625c883SEric W. Biederman kuid_t owner; 29350625c883SEric W. Biederman kgid_t group; 293633dccbb0SHerbert Xu int sndbuf; 2937d9d52b51SMichael S. Tsirkin int vnet_hdr_sz; 29381cf8e410SMichael S. Tsirkin int le; 2939f271b2ccSMax Krasnyansky int ret; 294083c1f36fSSabrina Dubroca bool do_notify = false; 29411da177e4SLinus Torvalds 2942f2780d6dSKirill Tkhai if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || 2943f2780d6dSKirill Tkhai (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) { 294450857e2aSArnd Bergmann if (copy_from_user(&ifr, argp, ifreq_len)) 29451da177e4SLinus Torvalds return -EFAULT; 29468bbb1813SDavid S. Miller } else { 2947a117dacdSMathias Krause memset(&ifr, 0, sizeof(ifr)); 29488bbb1813SDavid S. Miller } 2949631ab46bSEric W. Biederman if (cmd == TUNGETFEATURES) { 2950631ab46bSEric W. Biederman /* Currently this just means: "what IFF flags are valid?". 2951631ab46bSEric W. Biederman * This is needed because we never checked for invalid flags on 2952031f5e03SMichael S. Tsirkin * TUNSETIFF. 2953031f5e03SMichael S. Tsirkin */ 2954031f5e03SMichael S. Tsirkin return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES, 2955631ab46bSEric W. 
Biederman (unsigned int __user*)argp); 2956f663706aSKirill Tkhai } else if (cmd == TUNSETQUEUE) { 2957cde8b15fSJason Wang return tun_set_queue(file, &ifr); 2958f663706aSKirill Tkhai } else if (cmd == SIOCGSKNS) { 2959f663706aSKirill Tkhai if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2960f663706aSKirill Tkhai return -EPERM; 2961f663706aSKirill Tkhai return open_related_ns(&net->ns, get_net_ns); 2962f663706aSKirill Tkhai } 2963631ab46bSEric W. Biederman 2964c8d68e6bSJason Wang ret = 0; 2965876bfd4dSHerbert Xu rtnl_lock(); 2966876bfd4dSHerbert Xu 29679484dc74Syuan linyu tun = tun_get(tfile); 29680f16bc13SGao Feng if (cmd == TUNSETIFF) { 29690f16bc13SGao Feng ret = -EEXIST; 29700f16bc13SGao Feng if (tun) 29710f16bc13SGao Feng goto unlock; 29720f16bc13SGao Feng 29731da177e4SLinus Torvalds ifr.ifr_name[IFNAMSIZ-1] = '\0'; 29741da177e4SLinus Torvalds 2975f2780d6dSKirill Tkhai ret = tun_set_iff(net, file, &ifr); 29761da177e4SLinus Torvalds 2977876bfd4dSHerbert Xu if (ret) 2978876bfd4dSHerbert Xu goto unlock; 29791da177e4SLinus Torvalds 298050857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 2981876bfd4dSHerbert Xu ret = -EFAULT; 2982876bfd4dSHerbert Xu goto unlock; 29831da177e4SLinus Torvalds } 2984fb7589a1SPavel Emelyanov if (cmd == TUNSETIFINDEX) { 2985fb7589a1SPavel Emelyanov ret = -EPERM; 2986fb7589a1SPavel Emelyanov if (tun) 2987fb7589a1SPavel Emelyanov goto unlock; 2988fb7589a1SPavel Emelyanov 2989fb7589a1SPavel Emelyanov ret = -EFAULT; 2990fb7589a1SPavel Emelyanov if (copy_from_user(&ifindex, argp, sizeof(ifindex))) 2991fb7589a1SPavel Emelyanov goto unlock; 2992fb7589a1SPavel Emelyanov 2993fb7589a1SPavel Emelyanov ret = 0; 2994fb7589a1SPavel Emelyanov tfile->ifindex = ifindex; 2995fb7589a1SPavel Emelyanov goto unlock; 2996fb7589a1SPavel Emelyanov } 29971da177e4SLinus Torvalds 2998876bfd4dSHerbert Xu ret = -EBADFD; 29991da177e4SLinus Torvalds if (!tun) 3000876bfd4dSHerbert Xu goto unlock; 30011da177e4SLinus Torvalds 30023424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd); 30031da177e4SLinus Torvalds 30040c3e0e3bSKirill Tkhai net = dev_net(tun->dev); 3005631ab46bSEric W. Biederman ret = 0; 30061da177e4SLinus Torvalds switch (cmd) { 3007e3b99556SMark McLoughlin case TUNGETIFF: 300812132768SKirill Tkhai tun_get_iff(tun, &ifr); 3009e3b99556SMark McLoughlin 30103d407a80SPavel Emelyanov if (tfile->detached) 30113d407a80SPavel Emelyanov ifr.ifr_flags |= IFF_DETACH_QUEUE; 3012849c9b6fSPavel Emelyanov if (!tfile->socket.sk->sk_filter) 3013849c9b6fSPavel Emelyanov ifr.ifr_flags |= IFF_NOFILTER; 30143d407a80SPavel Emelyanov 301550857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 3016631ab46bSEric W. Biederman ret = -EFAULT; 3017e3b99556SMark McLoughlin break; 3018e3b99556SMark McLoughlin 30191da177e4SLinus Torvalds case TUNSETNOCSUM: 30201da177e4SLinus Torvalds /* Disable/Enable checksum */ 30211da177e4SLinus Torvalds 302288255375SMichał Mirosław /* [unimplemented] */ 30233424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n", 30246b8a66eeSJoe Perches arg ? "disabled" : "enabled"); 30251da177e4SLinus Torvalds break; 30261da177e4SLinus Torvalds 30271da177e4SLinus Torvalds case TUNSETPERSIST: 302854f968d6SJason Wang /* Disable/Enable persist mode. Keep an extra reference to the 302954f968d6SJason Wang * module to prevent the module from being unloaded. 303054f968d6SJason Wang */ 303140630b82SMichael S. Tsirkin if (arg && !(tun->flags & IFF_PERSIST)) { 303240630b82SMichael S.
Tsirkin tun->flags |= IFF_PERSIST; 303354f968d6SJason Wang __module_get(THIS_MODULE); 303483c1f36fSSabrina Dubroca do_notify = true; 3035dd38bd85SJason Wang } 303640630b82SMichael S. Tsirkin if (!arg && (tun->flags & IFF_PERSIST)) { 303740630b82SMichael S. Tsirkin tun->flags &= ~IFF_PERSIST; 303854f968d6SJason Wang module_put(THIS_MODULE); 303983c1f36fSSabrina Dubroca do_notify = true; 304054f968d6SJason Wang } 30411da177e4SLinus Torvalds 30423424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "persist %s\n", 30436b8a66eeSJoe Perches arg ? "enabled" : "disabled"); 30441da177e4SLinus Torvalds break; 30451da177e4SLinus Torvalds 30461da177e4SLinus Torvalds case TUNSETOWNER: 30471da177e4SLinus Torvalds /* Set owner of the device */ 30480625c883SEric W. Biederman owner = make_kuid(current_user_ns(), arg); 30490625c883SEric W. Biederman if (!uid_valid(owner)) { 30500625c883SEric W. Biederman ret = -EINVAL; 30510625c883SEric W. Biederman break; 30520625c883SEric W. Biederman } 30530625c883SEric W. Biederman tun->owner = owner; 305483c1f36fSSabrina Dubroca do_notify = true; 30553424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "owner set to %u\n", 30560625c883SEric W. Biederman from_kuid(&init_user_ns, tun->owner)); 30571da177e4SLinus Torvalds break; 30581da177e4SLinus Torvalds 30598c644623SGuido Guenther case TUNSETGROUP: 30608c644623SGuido Guenther /* Set group of the device */ 30610625c883SEric W. Biederman group = make_kgid(current_user_ns(), arg); 30620625c883SEric W. Biederman if (!gid_valid(group)) { 30630625c883SEric W. Biederman ret = -EINVAL; 30640625c883SEric W. Biederman break; 30650625c883SEric W. Biederman } 30660625c883SEric W. Biederman tun->group = group; 306783c1f36fSSabrina Dubroca do_notify = true; 30683424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "group set to %u\n", 30690625c883SEric W. Biederman from_kgid(&init_user_ns, tun->group)); 30708c644623SGuido Guenther break; 30718c644623SGuido Guenther 3072ff4cc3acSMike Kershaw case TUNSETLINK: 3073ff4cc3acSMike Kershaw /* Only allow setting the type when the interface is down */ 3074ff4cc3acSMike Kershaw if (tun->dev->flags & IFF_UP) { 30753424170fSMichal Kubecek netif_info(tun, drv, tun->dev, 30766b8a66eeSJoe Perches "Linktype set failed because interface is up\n"); 307748abfe05SDavid S. Miller ret = -EBUSY; 3078ff4cc3acSMike Kershaw } else { 30798e1e33ffSMartin Schiller ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, 30808e1e33ffSMartin Schiller tun->dev); 30818e1e33ffSMartin Schiller ret = notifier_to_errno(ret); 30828e1e33ffSMartin Schiller if (ret) { 30838e1e33ffSMartin Schiller netif_info(tun, drv, tun->dev, 30848e1e33ffSMartin Schiller "Refused to change device type\n"); 30858e1e33ffSMartin Schiller break; 30868e1e33ffSMartin Schiller } 3087ff4cc3acSMike Kershaw tun->dev->type = (int) arg; 30883424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "linktype set to %d\n", 30896b8a66eeSJoe Perches tun->dev->type); 30908e1e33ffSMartin Schiller call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, 30918e1e33ffSMartin Schiller tun->dev); 3092ff4cc3acSMike Kershaw } 3093631ab46bSEric W. Biederman break; 3094ff4cc3acSMike Kershaw 30951da177e4SLinus Torvalds case TUNSETDEBUG: 30963424170fSMichal Kubecek tun->msg_enable = (u32)arg; 30971da177e4SLinus Torvalds break; 30983424170fSMichal Kubecek 30995228ddc9SRusty Russell case TUNSETOFFLOAD: 310088255375SMichał Mirosław ret = set_offload(tun, arg); 3101631ab46bSEric W. 
Biederman break; 31025228ddc9SRusty Russell 3103f271b2ccSMax Krasnyansky case TUNSETTXFILTER: 3104f271b2ccSMax Krasnyansky /* Can be set only for TAPs */ 3105631ab46bSEric W. Biederman ret = -EINVAL; 310640630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 3107631ab46bSEric W. Biederman break; 3108c0e5a8c2SHarvey Harrison ret = update_filter(&tun->txflt, (void __user *)arg); 3109631ab46bSEric W. Biederman break; 31101da177e4SLinus Torvalds 31111da177e4SLinus Torvalds case SIOCGIFHWADDR: 3112b595076aSUwe Kleine-König /* Get hw address */ 3113f271b2ccSMax Krasnyansky memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); 3114f271b2ccSMax Krasnyansky ifr.ifr_hwaddr.sa_family = tun->dev->type; 311550857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 3116631ab46bSEric W. Biederman ret = -EFAULT; 3117631ab46bSEric W. Biederman break; 31181da177e4SLinus Torvalds 31191da177e4SLinus Torvalds case SIOCSIFHWADDR: 3120f271b2ccSMax Krasnyansky /* Set hw address */ 31213a37a963SPetr Machata ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL); 3122631ab46bSEric W. Biederman break; 312333dccbb0SHerbert Xu 312433dccbb0SHerbert Xu case TUNGETSNDBUF: 312554f968d6SJason Wang sndbuf = tfile->socket.sk->sk_sndbuf; 312633dccbb0SHerbert Xu if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) 312733dccbb0SHerbert Xu ret = -EFAULT; 312833dccbb0SHerbert Xu break; 312933dccbb0SHerbert Xu 313033dccbb0SHerbert Xu case TUNSETSNDBUF: 313133dccbb0SHerbert Xu if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { 313233dccbb0SHerbert Xu ret = -EFAULT; 313333dccbb0SHerbert Xu break; 313433dccbb0SHerbert Xu } 313593161922SCraig Gallek if (sndbuf <= 0) { 313693161922SCraig Gallek ret = -EINVAL; 313793161922SCraig Gallek break; 313893161922SCraig Gallek } 313933dccbb0SHerbert Xu 3140c8d68e6bSJason Wang tun->sndbuf = sndbuf; 3141c8d68e6bSJason Wang tun_set_sndbuf(tun); 314233dccbb0SHerbert Xu break; 314333dccbb0SHerbert Xu 3144d9d52b51SMichael S. Tsirkin case TUNGETVNETHDRSZ: 3145d9d52b51SMichael S. Tsirkin vnet_hdr_sz = tun->vnet_hdr_sz; 3146d9d52b51SMichael S. Tsirkin if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) 3147d9d52b51SMichael S. Tsirkin ret = -EFAULT; 3148d9d52b51SMichael S. Tsirkin break; 3149d9d52b51SMichael S. Tsirkin 3150d9d52b51SMichael S. Tsirkin case TUNSETVNETHDRSZ: 3151d9d52b51SMichael S. Tsirkin if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { 3152d9d52b51SMichael S. Tsirkin ret = -EFAULT; 3153d9d52b51SMichael S. Tsirkin break; 3154d9d52b51SMichael S. Tsirkin } 3155d9d52b51SMichael S. Tsirkin if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { 3156d9d52b51SMichael S. Tsirkin ret = -EINVAL; 3157d9d52b51SMichael S. Tsirkin break; 3158d9d52b51SMichael S. Tsirkin } 3159d9d52b51SMichael S. Tsirkin 3160d9d52b51SMichael S. Tsirkin tun->vnet_hdr_sz = vnet_hdr_sz; 3161d9d52b51SMichael S. Tsirkin break; 3162d9d52b51SMichael S. Tsirkin 31631cf8e410SMichael S. Tsirkin case TUNGETVNETLE: 31641cf8e410SMichael S. Tsirkin le = !!(tun->flags & TUN_VNET_LE); 31651cf8e410SMichael S. Tsirkin if (put_user(le, (int __user *)argp)) 31661cf8e410SMichael S. Tsirkin ret = -EFAULT; 31671cf8e410SMichael S. Tsirkin break; 31681cf8e410SMichael S. Tsirkin 31691cf8e410SMichael S. Tsirkin case TUNSETVNETLE: 31701cf8e410SMichael S. Tsirkin if (get_user(le, (int __user *)argp)) { 31711cf8e410SMichael S. Tsirkin ret = -EFAULT; 31721cf8e410SMichael S. Tsirkin break; 31731cf8e410SMichael S. Tsirkin } 31741cf8e410SMichael S. Tsirkin if (le) 31751cf8e410SMichael S. 
Tsirkin tun->flags |= TUN_VNET_LE; 31761cf8e410SMichael S. Tsirkin else 31771cf8e410SMichael S. Tsirkin tun->flags &= ~TUN_VNET_LE; 31781cf8e410SMichael S. Tsirkin break; 31791cf8e410SMichael S. Tsirkin 31808b8e658bSGreg Kurz case TUNGETVNETBE: 31818b8e658bSGreg Kurz ret = tun_get_vnet_be(tun, argp); 31828b8e658bSGreg Kurz break; 31838b8e658bSGreg Kurz 31848b8e658bSGreg Kurz case TUNSETVNETBE: 31858b8e658bSGreg Kurz ret = tun_set_vnet_be(tun, argp); 31868b8e658bSGreg Kurz break; 31878b8e658bSGreg Kurz 318899405162SMichael S. Tsirkin case TUNATTACHFILTER: 318999405162SMichael S. Tsirkin /* Can be set only for TAPs */ 319099405162SMichael S. Tsirkin ret = -EINVAL; 319140630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 319299405162SMichael S. Tsirkin break; 319399405162SMichael S. Tsirkin ret = -EFAULT; 319454f968d6SJason Wang if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) 319599405162SMichael S. Tsirkin break; 319699405162SMichael S. Tsirkin 3197c8d68e6bSJason Wang ret = tun_attach_filter(tun); 319899405162SMichael S. Tsirkin break; 319999405162SMichael S. Tsirkin 320099405162SMichael S. Tsirkin case TUNDETACHFILTER: 320199405162SMichael S. Tsirkin /* Can be set only for TAPs */ 320299405162SMichael S. Tsirkin ret = -EINVAL; 320340630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 320499405162SMichael S. Tsirkin break; 3205c8d68e6bSJason Wang ret = 0; 3206c8d68e6bSJason Wang tun_detach_filter(tun, tun->numqueues); 320799405162SMichael S. Tsirkin break; 320899405162SMichael S. Tsirkin 320976975e9cSPavel Emelyanov case TUNGETFILTER: 321076975e9cSPavel Emelyanov ret = -EINVAL; 321140630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 321276975e9cSPavel Emelyanov break; 321376975e9cSPavel Emelyanov ret = -EFAULT; 321476975e9cSPavel Emelyanov if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) 321576975e9cSPavel Emelyanov break; 321676975e9cSPavel Emelyanov ret = 0; 321776975e9cSPavel Emelyanov break; 321876975e9cSPavel Emelyanov 321996f84061SJason Wang case TUNSETSTEERINGEBPF: 3220cd5681d7SJason Wang ret = tun_set_ebpf(tun, &tun->steering_prog, argp); 322196f84061SJason Wang break; 322296f84061SJason Wang 3223aff3d70aSJason Wang case TUNSETFILTEREBPF: 3224aff3d70aSJason Wang ret = tun_set_ebpf(tun, &tun->filter_prog, argp); 3225aff3d70aSJason Wang break; 3226aff3d70aSJason Wang 322726d31925SNicolas Dichtel case TUNSETCARRIER: 322826d31925SNicolas Dichtel ret = -EFAULT; 322926d31925SNicolas Dichtel if (copy_from_user(&carrier, argp, sizeof(carrier))) 323026d31925SNicolas Dichtel goto unlock; 323126d31925SNicolas Dichtel 323226d31925SNicolas Dichtel ret = tun_net_change_carrier(tun->dev, (bool)carrier); 323326d31925SNicolas Dichtel break; 323426d31925SNicolas Dichtel 32350c3e0e3bSKirill Tkhai case TUNGETDEVNETNS: 32360c3e0e3bSKirill Tkhai ret = -EPERM; 32370c3e0e3bSKirill Tkhai if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 32380c3e0e3bSKirill Tkhai goto unlock; 32390c3e0e3bSKirill Tkhai ret = open_related_ns(&net->ns, get_net_ns); 32400c3e0e3bSKirill Tkhai break; 32410c3e0e3bSKirill Tkhai 32421da177e4SLinus Torvalds default: 3243631ab46bSEric W. Biederman ret = -EINVAL; 3244631ab46bSEric W. Biederman break; 3245ee289b64SJoe Perches } 32461da177e4SLinus Torvalds 324783c1f36fSSabrina Dubroca if (do_notify) 324883c1f36fSSabrina Dubroca netdev_state_change(tun->dev); 324983c1f36fSSabrina Dubroca 3250876bfd4dSHerbert Xu unlock: 3251876bfd4dSHerbert Xu rtnl_unlock(); 3252876bfd4dSHerbert Xu if (tun) 3253631ab46bSEric W. 
Biederman tun_put(tun); 3254631ab46bSEric W. Biederman return ret; 32551da177e4SLinus Torvalds } 32561da177e4SLinus Torvalds 325750857e2aSArnd Bergmann static long tun_chr_ioctl(struct file *file, 325850857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 325950857e2aSArnd Bergmann { 326050857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); 326150857e2aSArnd Bergmann } 326250857e2aSArnd Bergmann 326350857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 326450857e2aSArnd Bergmann static long tun_chr_compat_ioctl(struct file *file, 326550857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 326650857e2aSArnd Bergmann { 326750857e2aSArnd Bergmann switch (cmd) { 326850857e2aSArnd Bergmann case TUNSETIFF: 326950857e2aSArnd Bergmann case TUNGETIFF: 327050857e2aSArnd Bergmann case TUNSETTXFILTER: 327150857e2aSArnd Bergmann case TUNGETSNDBUF: 327250857e2aSArnd Bergmann case TUNSETSNDBUF: 327350857e2aSArnd Bergmann case SIOCGIFHWADDR: 327450857e2aSArnd Bergmann case SIOCSIFHWADDR: 327550857e2aSArnd Bergmann arg = (unsigned long)compat_ptr(arg); 327650857e2aSArnd Bergmann break; 327750857e2aSArnd Bergmann default: 327850857e2aSArnd Bergmann arg = (compat_ulong_t)arg; 327950857e2aSArnd Bergmann break; 328050857e2aSArnd Bergmann } 328150857e2aSArnd Bergmann 328250857e2aSArnd Bergmann /* 328350857e2aSArnd Bergmann * compat_ifreq is shorter than ifreq, so we must not access beyond 328450857e2aSArnd Bergmann * the end of that structure. All fields that are used in this 328550857e2aSArnd Bergmann * driver are compatible though, so we don't need to convert the 328650857e2aSArnd Bergmann * contents. 328750857e2aSArnd Bergmann */ 328850857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); 328950857e2aSArnd Bergmann } 329050857e2aSArnd Bergmann #endif /* CONFIG_COMPAT */ 329150857e2aSArnd Bergmann 32921da177e4SLinus Torvalds static int tun_chr_fasync(int fd, struct file *file, int on) 32931da177e4SLinus Torvalds { 329454f968d6SJason Wang struct tun_file *tfile = file->private_data; 32951da177e4SLinus Torvalds int ret; 32961da177e4SLinus Torvalds 329754f968d6SJason Wang if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) 32989d319522SJonathan Corbet goto out; 32991da177e4SLinus Torvalds 33001da177e4SLinus Torvalds if (on) { 330101919134SEric W. Biederman __f_setown(file, task_pid(current), PIDTYPE_TGID, 0); 330254f968d6SJason Wang tfile->flags |= TUN_FASYNC; 33031da177e4SLinus Torvalds } else 330454f968d6SJason Wang tfile->flags &= ~TUN_FASYNC; 33059d319522SJonathan Corbet ret = 0; 33069d319522SJonathan Corbet out: 33079d319522SJonathan Corbet return ret; 33081da177e4SLinus Torvalds } 33091da177e4SLinus Torvalds 33101da177e4SLinus Torvalds static int tun_chr_open(struct inode *inode, struct file * file) 33111da177e4SLinus Torvalds { 3312140e807dSEric W. Biederman struct net *net = current->nsproxy->net_ns; 3313631ab46bSEric W. Biederman struct tun_file *tfile; 3314deed49fbSThomas Gleixner 3315140e807dSEric W. Biederman tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, 331611aa9c28SEric W. Biederman &tun_proto, 0); 3317631ab46bSEric W. Biederman if (!tfile) 3318631ab46bSEric W.
Biederman return -ENOMEM; 3319b196d88aSJason Wang if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) { 3320b196d88aSJason Wang sk_free(&tfile->sk); 3321b196d88aSJason Wang return -ENOMEM; 3322b196d88aSJason Wang } 3323b196d88aSJason Wang 3324c7256f57SEric Dumazet mutex_init(&tfile->napi_mutex); 3325c956674bSMonam Agarwal RCU_INIT_POINTER(tfile->tun, NULL); 332654f968d6SJason Wang tfile->flags = 0; 3327fb7589a1SPavel Emelyanov tfile->ifindex = 0; 332854f968d6SJason Wang 3329333f7909SAl Viro init_waitqueue_head(&tfile->socket.wq.wait); 333054f968d6SJason Wang 333154f968d6SJason Wang tfile->socket.file = file; 333254f968d6SJason Wang tfile->socket.ops = &tun_socket_ops; 333354f968d6SJason Wang 333454f968d6SJason Wang sock_init_data(&tfile->socket, &tfile->sk); 333554f968d6SJason Wang 333654f968d6SJason Wang tfile->sk.sk_write_space = tun_sock_write_space; 333754f968d6SJason Wang tfile->sk.sk_sndbuf = INT_MAX; 333854f968d6SJason Wang 3339631ab46bSEric W. Biederman file->private_data = tfile; 33404008e97fSJason Wang INIT_LIST_HEAD(&tfile->next); 334154f968d6SJason Wang 334219a6afb2SJason Wang sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); 334319a6afb2SJason Wang 33441da177e4SLinus Torvalds return 0; 33451da177e4SLinus Torvalds } 33461da177e4SLinus Torvalds 33471da177e4SLinus Torvalds static int tun_chr_close(struct inode *inode, struct file *file) 33481da177e4SLinus Torvalds { 3349631ab46bSEric W. Biederman struct tun_file *tfile = file->private_data; 33501da177e4SLinus Torvalds 3351c8d68e6bSJason Wang tun_detach(tfile, true); 33521da177e4SLinus Torvalds 33531da177e4SLinus Torvalds return 0; 33541da177e4SLinus Torvalds } 33551da177e4SLinus Torvalds 335693e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 33579484dc74Syuan linyu static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) 335893e14b6dSMasatake YAMATO { 33599484dc74Syuan linyu struct tun_file *tfile = file->private_data; 336093e14b6dSMasatake YAMATO struct tun_struct *tun; 336193e14b6dSMasatake YAMATO struct ifreq ifr; 336293e14b6dSMasatake YAMATO 336393e14b6dSMasatake YAMATO memset(&ifr, 0, sizeof(ifr)); 336493e14b6dSMasatake YAMATO 336593e14b6dSMasatake YAMATO rtnl_lock(); 33669484dc74Syuan linyu tun = tun_get(tfile); 336793e14b6dSMasatake YAMATO if (tun) 336812132768SKirill Tkhai tun_get_iff(tun, &ifr); 336993e14b6dSMasatake YAMATO rtnl_unlock(); 337093e14b6dSMasatake YAMATO 337193e14b6dSMasatake YAMATO if (tun) 337293e14b6dSMasatake YAMATO tun_put(tun); 337393e14b6dSMasatake YAMATO 3374a3816ab0SJoe Perches seq_printf(m, "iff:\t%s\n", ifr.ifr_name); 337593e14b6dSMasatake YAMATO } 337693e14b6dSMasatake YAMATO #endif 337793e14b6dSMasatake YAMATO 3378d54b1fdbSArjan van de Ven static const struct file_operations tun_fops = { 33791da177e4SLinus Torvalds .owner = THIS_MODULE, 33801da177e4SLinus Torvalds .llseek = no_llseek, 33819b067034SAl Viro .read_iter = tun_chr_read_iter, 3382f5ff53b4SAl Viro .write_iter = tun_chr_write_iter, 33831da177e4SLinus Torvalds .poll = tun_chr_poll, 3384876bfd4dSHerbert Xu .unlocked_ioctl = tun_chr_ioctl, 338550857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 338650857e2aSArnd Bergmann .compat_ioctl = tun_chr_compat_ioctl, 338750857e2aSArnd Bergmann #endif 33881da177e4SLinus Torvalds .open = tun_chr_open, 33891da177e4SLinus Torvalds .release = tun_chr_close, 339093e14b6dSMasatake YAMATO .fasync = tun_chr_fasync, 339193e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 339293e14b6dSMasatake YAMATO .show_fdinfo = tun_chr_show_fdinfo, 339393e14b6dSMasatake YAMATO #endif 33941da177e4SLinus Torvalds }; 33951da177e4SLinus 
Torvalds 33961da177e4SLinus Torvalds static struct miscdevice tun_miscdev = { 33971da177e4SLinus Torvalds .minor = TUN_MINOR, 33981da177e4SLinus Torvalds .name = "tun", 3399e454cea2SKay Sievers .nodename = "net/tun", 34001da177e4SLinus Torvalds .fops = &tun_fops, 34011da177e4SLinus Torvalds }; 34021da177e4SLinus Torvalds 34031da177e4SLinus Torvalds /* ethtool interface */ 34041da177e4SLinus Torvalds 34054e24f2ddSChas Williams static void tun_default_link_ksettings(struct net_device *dev, 340629ccc49dSPhilippe Reynes struct ethtool_link_ksettings *cmd) 34071da177e4SLinus Torvalds { 340829ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, supported); 340929ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, advertising); 341029ccc49dSPhilippe Reynes cmd->base.speed = SPEED_10; 341129ccc49dSPhilippe Reynes cmd->base.duplex = DUPLEX_FULL; 341229ccc49dSPhilippe Reynes cmd->base.port = PORT_TP; 341329ccc49dSPhilippe Reynes cmd->base.phy_address = 0; 341429ccc49dSPhilippe Reynes cmd->base.autoneg = AUTONEG_DISABLE; 34154e24f2ddSChas Williams } 34164e24f2ddSChas Williams 34174e24f2ddSChas Williams static int tun_get_link_ksettings(struct net_device *dev, 34184e24f2ddSChas Williams struct ethtool_link_ksettings *cmd) 34194e24f2ddSChas Williams { 34204e24f2ddSChas Williams struct tun_struct *tun = netdev_priv(dev); 34214e24f2ddSChas Williams 34224e24f2ddSChas Williams memcpy(cmd, &tun->link_ksettings, sizeof(*cmd)); 34234e24f2ddSChas Williams return 0; 34244e24f2ddSChas Williams } 34254e24f2ddSChas Williams 34264e24f2ddSChas Williams static int tun_set_link_ksettings(struct net_device *dev, 34274e24f2ddSChas Williams const struct ethtool_link_ksettings *cmd) 34284e24f2ddSChas Williams { 34294e24f2ddSChas Williams struct tun_struct *tun = netdev_priv(dev); 34304e24f2ddSChas Williams 34314e24f2ddSChas Williams memcpy(&tun->link_ksettings, cmd, sizeof(*cmd)); 34321da177e4SLinus Torvalds return 0; 34331da177e4SLinus Torvalds } 34341da177e4SLinus Torvalds 34351da177e4SLinus Torvalds static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 34361da177e4SLinus Torvalds { 34371da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 34381da177e4SLinus Torvalds 343933a5ba14SRick Jones strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 344033a5ba14SRick Jones strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 34411da177e4SLinus Torvalds 34421da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 344340630b82SMichael S. Tsirkin case IFF_TUN: 344433a5ba14SRick Jones strlcpy(info->bus_info, "tun", sizeof(info->bus_info)); 34451da177e4SLinus Torvalds break; 344640630b82SMichael S. 
Tsirkin case IFF_TAP: 344733a5ba14SRick Jones strlcpy(info->bus_info, "tap", sizeof(info->bus_info)); 34481da177e4SLinus Torvalds break; 34491da177e4SLinus Torvalds } 34501da177e4SLinus Torvalds } 34511da177e4SLinus Torvalds 34521da177e4SLinus Torvalds static u32 tun_get_msglevel(struct net_device *dev) 34531da177e4SLinus Torvalds { 34541da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 34553424170fSMichal Kubecek 34563424170fSMichal Kubecek return tun->msg_enable; 34571da177e4SLinus Torvalds } 34581da177e4SLinus Torvalds 34591da177e4SLinus Torvalds static void tun_set_msglevel(struct net_device *dev, u32 value) 34601da177e4SLinus Torvalds { 34611da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 34623424170fSMichal Kubecek 34633424170fSMichal Kubecek tun->msg_enable = value; 34641da177e4SLinus Torvalds } 34651da177e4SLinus Torvalds 34665503fcecSJason Wang static int tun_get_coalesce(struct net_device *dev, 34675503fcecSJason Wang struct ethtool_coalesce *ec) 34685503fcecSJason Wang { 34695503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 34705503fcecSJason Wang 34715503fcecSJason Wang ec->rx_max_coalesced_frames = tun->rx_batched; 34725503fcecSJason Wang 34735503fcecSJason Wang return 0; 34745503fcecSJason Wang } 34755503fcecSJason Wang 34765503fcecSJason Wang static int tun_set_coalesce(struct net_device *dev, 34775503fcecSJason Wang struct ethtool_coalesce *ec) 34785503fcecSJason Wang { 34795503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 34805503fcecSJason Wang 34815503fcecSJason Wang if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) 34825503fcecSJason Wang tun->rx_batched = NAPI_POLL_WEIGHT; 34835503fcecSJason Wang else 34845503fcecSJason Wang tun->rx_batched = ec->rx_max_coalesced_frames; 34855503fcecSJason Wang 34865503fcecSJason Wang return 0; 34875503fcecSJason Wang } 34885503fcecSJason Wang 34897282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops = { 3490e5ad00b3SJakub Kicinski .supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES, 34911da177e4SLinus Torvalds .get_drvinfo = tun_get_drvinfo, 34921da177e4SLinus Torvalds .get_msglevel = tun_get_msglevel, 34931da177e4SLinus Torvalds .set_msglevel = tun_set_msglevel, 3494bee31369SNolan Leake .get_link = ethtool_op_get_link, 3495eda29772SRichard Cochran .get_ts_info = ethtool_op_get_ts_info, 34965503fcecSJason Wang .get_coalesce = tun_get_coalesce, 34975503fcecSJason Wang .set_coalesce = tun_set_coalesce, 349829ccc49dSPhilippe Reynes .get_link_ksettings = tun_get_link_ksettings, 34994e24f2ddSChas Williams .set_link_ksettings = tun_set_link_ksettings, 35001da177e4SLinus Torvalds }; 35011da177e4SLinus Torvalds 35021576d986SJason Wang static int tun_queue_resize(struct tun_struct *tun) 35031576d986SJason Wang { 35041576d986SJason Wang struct net_device *dev = tun->dev; 35051576d986SJason Wang struct tun_file *tfile; 35065990a305SJason Wang struct ptr_ring **rings; 35071576d986SJason Wang int n = tun->numqueues + tun->numdisabled; 35081576d986SJason Wang int ret, i; 35091576d986SJason Wang 35105990a305SJason Wang rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); 35115990a305SJason Wang if (!rings) 35121576d986SJason Wang return -ENOMEM; 35131576d986SJason Wang 35141576d986SJason Wang for (i = 0; i < tun->numqueues; i++) { 35151576d986SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 35165990a305SJason Wang rings[i] = &tfile->tx_ring; 35171576d986SJason Wang } 35181576d986SJason Wang list_for_each_entry(tfile, &tun->disabled, next) 35195990a305SJason 
Wang rings[i++] = &tfile->tx_ring; 35201576d986SJason Wang 35215990a305SJason Wang ret = ptr_ring_resize_multiple(rings, n, 35225990a305SJason Wang dev->tx_queue_len, GFP_KERNEL, 3523fc72d1d5SJason Wang tun_ptr_free); 35241576d986SJason Wang 35255990a305SJason Wang kfree(rings); 35261576d986SJason Wang return ret; 35271576d986SJason Wang } 35281576d986SJason Wang 35291576d986SJason Wang static int tun_device_event(struct notifier_block *unused, 35301576d986SJason Wang unsigned long event, void *ptr) 35311576d986SJason Wang { 35321576d986SJason Wang struct net_device *dev = netdev_notifier_info_to_dev(ptr); 35331576d986SJason Wang struct tun_struct *tun = netdev_priv(dev); 353472b319dcSFei Li int i; 35351576d986SJason Wang 353686dfb4acSCraig Gallek if (dev->rtnl_link_ops != &tun_link_ops) 353786dfb4acSCraig Gallek return NOTIFY_DONE; 353886dfb4acSCraig Gallek 35391576d986SJason Wang switch (event) { 35401576d986SJason Wang case NETDEV_CHANGE_TX_QUEUE_LEN: 35411576d986SJason Wang if (tun_queue_resize(tun)) 35421576d986SJason Wang return NOTIFY_BAD; 35431576d986SJason Wang break; 354472b319dcSFei Li case NETDEV_UP: 354572b319dcSFei Li for (i = 0; i < tun->numqueues; i++) { 354672b319dcSFei Li struct tun_file *tfile; 354772b319dcSFei Li 354872b319dcSFei Li tfile = rtnl_dereference(tun->tfiles[i]); 354972b319dcSFei Li tfile->socket.sk->sk_write_space(tfile->socket.sk); 355072b319dcSFei Li } 355172b319dcSFei Li break; 35521576d986SJason Wang default: 35531576d986SJason Wang break; 35541576d986SJason Wang } 35551576d986SJason Wang 35561576d986SJason Wang return NOTIFY_DONE; 35571576d986SJason Wang } 35581576d986SJason Wang 35591576d986SJason Wang static struct notifier_block tun_notifier_block __read_mostly = { 35601576d986SJason Wang .notifier_call = tun_device_event, 35611576d986SJason Wang }; 356279d17604SPavel Emelyanov 35631da177e4SLinus Torvalds static int __init tun_init(void) 35641da177e4SLinus Torvalds { 35651da177e4SLinus Torvalds int ret = 0; 35661da177e4SLinus Torvalds 35676b8a66eeSJoe Perches pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 35681da177e4SLinus Torvalds 3569f019a7a5SEric W. Biederman ret = rtnl_link_register(&tun_link_ops); 357079d17604SPavel Emelyanov if (ret) { 35716b8a66eeSJoe Perches pr_err("Can't register link_ops\n"); 3572f019a7a5SEric W. Biederman goto err_linkops; 357379d17604SPavel Emelyanov } 357479d17604SPavel Emelyanov 35751da177e4SLinus Torvalds ret = misc_register(&tun_miscdev); 357679d17604SPavel Emelyanov if (ret) { 35776b8a66eeSJoe Perches pr_err("Can't register misc device %d\n", TUN_MINOR); 357879d17604SPavel Emelyanov goto err_misc; 357979d17604SPavel Emelyanov } 35801576d986SJason Wang 35815edfbd3cSTonghao Zhang ret = register_netdevice_notifier(&tun_notifier_block); 35825edfbd3cSTonghao Zhang if (ret) { 35835edfbd3cSTonghao Zhang pr_err("Can't register netdevice notifier\n"); 35845edfbd3cSTonghao Zhang goto err_notifier; 35855edfbd3cSTonghao Zhang } 35865edfbd3cSTonghao Zhang 358779d17604SPavel Emelyanov return 0; 35885edfbd3cSTonghao Zhang 35895edfbd3cSTonghao Zhang err_notifier: 35905edfbd3cSTonghao Zhang misc_deregister(&tun_miscdev); 359179d17604SPavel Emelyanov err_misc: 3592f019a7a5SEric W. Biederman rtnl_link_unregister(&tun_link_ops); 3593f019a7a5SEric W. 
Biederman err_linkops: 35941da177e4SLinus Torvalds return ret; 35951da177e4SLinus Torvalds } 35961da177e4SLinus Torvalds 35971da177e4SLinus Torvalds static void tun_cleanup(void) 35981da177e4SLinus Torvalds { 35991da177e4SLinus Torvalds misc_deregister(&tun_miscdev); 3600f019a7a5SEric W. Biederman rtnl_link_unregister(&tun_link_ops); 36011576d986SJason Wang unregister_netdevice_notifier(&tun_notifier_block); 36021da177e4SLinus Torvalds } 36031da177e4SLinus Torvalds 360405c2828cSMichael S. Tsirkin /* Get an underlying socket object from tun file. Returns error unless file is 360505c2828cSMichael S. Tsirkin * attached to a device. The returned object works like a packet socket, it 360605c2828cSMichael S. Tsirkin * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for 360705c2828cSMichael S. Tsirkin * holding a reference to the file for as long as the socket is in use. */ 360805c2828cSMichael S. Tsirkin struct socket *tun_get_socket(struct file *file) 360905c2828cSMichael S. Tsirkin { 36106e914fc7SJason Wang struct tun_file *tfile; 361105c2828cSMichael S. Tsirkin if (file->f_op != &tun_fops) 361205c2828cSMichael S. Tsirkin return ERR_PTR(-EINVAL); 36136e914fc7SJason Wang tfile = file->private_data; 36146e914fc7SJason Wang if (!tfile) 361505c2828cSMichael S. Tsirkin return ERR_PTR(-EBADFD); 361654f968d6SJason Wang return &tfile->socket; 361705c2828cSMichael S. Tsirkin } 361805c2828cSMichael S. Tsirkin EXPORT_SYMBOL_GPL(tun_get_socket); 361905c2828cSMichael S. Tsirkin 36205990a305SJason Wang struct ptr_ring *tun_get_tx_ring(struct file *file) 362183339c6bSJason Wang { 362283339c6bSJason Wang struct tun_file *tfile; 362383339c6bSJason Wang 362483339c6bSJason Wang if (file->f_op != &tun_fops) 362583339c6bSJason Wang return ERR_PTR(-EINVAL); 362683339c6bSJason Wang tfile = file->private_data; 362783339c6bSJason Wang if (!tfile) 362883339c6bSJason Wang return ERR_PTR(-EBADFD); 36295990a305SJason Wang return &tfile->tx_ring; 363083339c6bSJason Wang } 36315990a305SJason Wang EXPORT_SYMBOL_GPL(tun_get_tx_ring); 363283339c6bSJason Wang 36331da177e4SLinus Torvalds module_init(tun_init); 36341da177e4SLinus Torvalds module_exit(tun_cleanup); 36351da177e4SLinus Torvalds MODULE_DESCRIPTION(DRV_DESCRIPTION); 36361da177e4SLinus Torvalds MODULE_AUTHOR(DRV_COPYRIGHT); 36371da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 36381da177e4SLinus Torvalds MODULE_ALIAS_MISCDEV(TUN_MINOR); 3639578454ffSKay Sievers MODULE_ALIAS("devname:net/tun"); 3640
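/*
 * Editorial usage sketch (added for exposition, not part of the
 * original driver): how an in-kernel consumer is expected to use
 * tun_get_socket() per its comment above.  Error handling is minimal
 * and "tun_fd" is a placeholder for a descriptor received from
 * userspace.
 *
 *	struct file *file = fget(tun_fd);	// pin the tun file
 *	struct socket *sock;
 *
 *	if (!file)
 *		return -EBADF;
 *	sock = tun_get_socket(file);
 *	if (IS_ERR(sock)) {
 *		fput(file);			// not a tun/tap file
 *		return PTR_ERR(sock);
 *	}
 *	// sock can now be used with sock_sendmsg()/sock_recvmsg();
 *	// keep the file reference (defer fput(file)) for as long as
 *	// the socket is in use.
 */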