// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/ip_tunnels.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE	0x80000000
#define TUN_VNET_BE	0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int	count;    /* Number of addrs. Zero means disabled */
	u32		mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and
 * tap_filter) to serve as one transmit queue for the tuntap device. The
 * sock_fprog and tap_filter are kept in tun_struct since they are used for
 * filtering on the netdevice, not on a specific queue (at least I didn't
 * see the requirement for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_page {
	struct page *page;
	int count;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated ____cacheline_aligned_in_smp;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when the file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int		numqueues;
	unsigned int		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
	u32			msg_enable;
	spinlock_t		lock;
	struct hlist_head	flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list	flow_gc_timer;
	unsigned long		ageing_time;
	unsigned int		numdisabled;
	struct list_head	disabled;
	void			*security;
	u32			flow_count;
	u32			rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu	*xdp_prog;
	struct tun_prog __rcu	*steering_prog;
	struct tun_prog __rcu	*filter_prog;
	struct ethtool_link_ksettings link_ksettings;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

bool tun_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}
EXPORT_SYMBOL(tun_is_xdp_frame);

void *tun_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_xdp_to_ptr);

void *tun_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_ptr_to_xdp);

static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
				  NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}

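/* Added note: tun->flows is a hash table of TUN_NUM_FLOW_ENTRIES buckets,
 * indexed by tun_hashfn() below.  tun_flow_update() records the queue index
 * that last carried a given rxhash, tun_automq_select_queue() looks that
 * entry up to steer later packets of the same flow back to that queue, and
 * tun_flow_cleanup() ages entries out after TUN_FLOW_EXPIRE.
 */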
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		netif_info(tun, tx_queued, tun->dev,
			   "create flow: hash %u index %u\n",
			   rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
		   e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		if (READ_ONCE(e->queue_index) != queue_index)
			WRITE_ONCE(e->queue_index, queue_index);
		if (e->updated != jiffies)
			e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

/* Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash. The reason that
 * we do not check rxq no. is because some cards (e.g. 82599) choose
 * the rxq based on the txq where the last packet of the flow was sent. As
 * the userspace application moves between processors, we may get a
 * different rxq no. here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
	if (e) {
		tun_flow_save_rps_rxhash(e, txq);
		txq = e->queue_index;
	} else {
		/* use multiply and shift instead of expensive divide */
		txq = ((u64)txq * numqueues) >> 32;
	}

	return txq;
}

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u32 numqueues;
	u16 ret = 0;

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues)
		return 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}

static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;
		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
				   NULL);

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags,
		      bool publish_tun)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to a persistent device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (!err)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	if (rtnl_dereference(tun->xdp_prog))
		sock_set_flag(&tfile->sk, SOCK_XDP);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

	/* Publish tfile->tun and tun->tfiles only after we've fully
	 * initialized tfile; otherwise we risk using half-initialized
	 * object.
	 */
	if (publish_tun)
		rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;
	tun_set_real_num_queues(tun);
out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks. Which is
	 * perfectly safe. We disable it first and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		struct tun_flow_entry *e;
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
		if (e)
			tun_flow_save_rps_rxhash(e, rxhash);
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (!tfile)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0 || pskb_trim(skb, len))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time.
	 */
	skb_orphan(skb);

	nf_reset_ct(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}

static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_pcpu_stats *p;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(tun->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets	= u64_stats_read(&p->rx_packets);
			rxbytes		= u64_stats_read(&p->rx_bytes);
			txpackets	= u64_stats_read(&p->tx_packets);
			txbytes		= u64_stats_read(&p->tx_bytes);
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;

		/* u32 counters */
		rx_dropped	+= p->rx_dropped;
		rx_frame_errors	+= p->rx_frame_errors;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped	= rx_dropped;
	stats->rx_frame_errors	= rx_frame_errors;
	stats->tx_dropped	= tx_dropped;
}

static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	struct bpf_prog *old_prog;
	int i;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}

	return 0;
}

static u32 tun_xdp_query(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;

	xdp_prog = rtnl_dereference(tun->xdp_prog);
	if (xdp_prog)
		return xdp_prog->aux->id;

	return 0;
}

static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return tun_xdp_set(dev, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = tun_xdp_query(dev);
		return 0;
	default:
		return -EINVAL;
	}
}

static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier) {
		struct tun_struct *tun = netdev_priv(dev);

		if (!tun->numqueues)
			return -EPERM;

		netif_carrier_on(dev);
	} else {
		netif_carrier_off(dev);
	}
	return 0;
}

static const struct net_device_ops tun_netdev_ops = {
Biederman .ndo_uninit = tun_net_uninit, 1229758e43b7SStephen Hemminger .ndo_open = tun_net_open, 1230758e43b7SStephen Hemminger .ndo_stop = tun_net_close, 123100829823SStephen Hemminger .ndo_start_xmit = tun_net_xmit, 123288255375SMichał Mirosław .ndo_fix_features = tun_net_fix_features, 1233c8d68e6bSJason Wang .ndo_select_queue = tun_select_queue, 1234eaea34b2SPaolo Abeni .ndo_set_rx_headroom = tun_set_headroom, 1235608b9977SPaolo Abeni .ndo_get_stats64 = tun_net_get_stats64, 123626d31925SNicolas Dichtel .ndo_change_carrier = tun_net_change_carrier, 1237758e43b7SStephen Hemminger }; 1238758e43b7SStephen Hemminger 12390c9d917bSJesper Dangaard Brouer static void __tun_xdp_flush_tfile(struct tun_file *tfile) 12400c9d917bSJesper Dangaard Brouer { 12410c9d917bSJesper Dangaard Brouer /* Notify and wake up reader process */ 12420c9d917bSJesper Dangaard Brouer if (tfile->flags & TUN_FASYNC) 12430c9d917bSJesper Dangaard Brouer kill_fasync(&tfile->fasync, SIGIO, POLL_IN); 12440c9d917bSJesper Dangaard Brouer tfile->socket.sk->sk_data_ready(tfile->socket.sk); 12450c9d917bSJesper Dangaard Brouer } 12460c9d917bSJesper Dangaard Brouer 124742b33468SJesper Dangaard Brouer static int tun_xdp_xmit(struct net_device *dev, int n, 124842b33468SJesper Dangaard Brouer struct xdp_frame **frames, u32 flags) 1249fc72d1d5SJason Wang { 1250fc72d1d5SJason Wang struct tun_struct *tun = netdev_priv(dev); 1251fc72d1d5SJason Wang struct tun_file *tfile; 1252fc72d1d5SJason Wang u32 numqueues; 1253735fc405SJesper Dangaard Brouer int drops = 0; 1254735fc405SJesper Dangaard Brouer int cnt = n; 1255735fc405SJesper Dangaard Brouer int i; 1256fc72d1d5SJason Wang 12570c9d917bSJesper Dangaard Brouer if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 125842b33468SJesper Dangaard Brouer return -EINVAL; 125942b33468SJesper Dangaard Brouer 1260fc72d1d5SJason Wang rcu_read_lock(); 1261fc72d1d5SJason Wang 12629871a9e4SJason Wang resample: 1263fc72d1d5SJason Wang numqueues = READ_ONCE(tun->numqueues); 1264fc72d1d5SJason Wang if (!numqueues) { 1265735fc405SJesper Dangaard Brouer rcu_read_unlock(); 1266735fc405SJesper Dangaard Brouer return -ENXIO; /* Caller will free/return all frames */ 1267fc72d1d5SJason Wang } 1268fc72d1d5SJason Wang 1269fc72d1d5SJason Wang tfile = rcu_dereference(tun->tfiles[smp_processor_id() % 1270fc72d1d5SJason Wang numqueues]); 12719871a9e4SJason Wang if (unlikely(!tfile)) 12729871a9e4SJason Wang goto resample; 1273735fc405SJesper Dangaard Brouer 1274735fc405SJesper Dangaard Brouer spin_lock(&tfile->tx_ring.producer_lock); 1275735fc405SJesper Dangaard Brouer for (i = 0; i < n; i++) { 1276735fc405SJesper Dangaard Brouer struct xdp_frame *xdp = frames[i]; 1277fc72d1d5SJason Wang /* Encode the XDP flag into lowest bit for consumer to differ 1278fc72d1d5SJason Wang * XDP buffer from sk_buff. 
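 * tun_xdp_to_ptr() tags the pointer here; tun_is_xdp_frame() and
 * tun_ptr_to_xdp() strip the tag again on the read side.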
1279fc72d1d5SJason Wang */ 1280735fc405SJesper Dangaard Brouer void *frame = tun_xdp_to_ptr(xdp); 1281fc72d1d5SJason Wang 1282735fc405SJesper Dangaard Brouer if (__ptr_ring_produce(&tfile->tx_ring, frame)) { 1283735fc405SJesper Dangaard Brouer this_cpu_inc(tun->pcpu_stats->tx_dropped); 1284735fc405SJesper Dangaard Brouer xdp_return_frame_rx_napi(xdp); 1285735fc405SJesper Dangaard Brouer drops++; 1286735fc405SJesper Dangaard Brouer } 1287735fc405SJesper Dangaard Brouer } 1288735fc405SJesper Dangaard Brouer spin_unlock(&tfile->tx_ring.producer_lock); 1289735fc405SJesper Dangaard Brouer 12900c9d917bSJesper Dangaard Brouer if (flags & XDP_XMIT_FLUSH) 12910c9d917bSJesper Dangaard Brouer __tun_xdp_flush_tfile(tfile); 12920c9d917bSJesper Dangaard Brouer 1293fc72d1d5SJason Wang rcu_read_unlock(); 1294735fc405SJesper Dangaard Brouer return cnt - drops; 1295fc72d1d5SJason Wang } 1296fc72d1d5SJason Wang 129744fa2dbdSJesper Dangaard Brouer static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) 129844fa2dbdSJesper Dangaard Brouer { 12991b698fa5SLorenzo Bianconi struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp); 130044fa2dbdSJesper Dangaard Brouer 130144fa2dbdSJesper Dangaard Brouer if (unlikely(!frame)) 130244fa2dbdSJesper Dangaard Brouer return -EOVERFLOW; 130344fa2dbdSJesper Dangaard Brouer 130442421a56SJesper Dangaard Brouer return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH); 1305fc72d1d5SJason Wang } 1306fc72d1d5SJason Wang 1307758e43b7SStephen Hemminger static const struct net_device_ops tap_netdev_ops = { 1308c70f1829SEric W. Biederman .ndo_uninit = tun_net_uninit, 1309758e43b7SStephen Hemminger .ndo_open = tun_net_open, 1310758e43b7SStephen Hemminger .ndo_stop = tun_net_close, 131100829823SStephen Hemminger .ndo_start_xmit = tun_net_xmit, 131288255375SMichał Mirosław .ndo_fix_features = tun_net_fix_features, 1313afc4b13dSJiri Pirko .ndo_set_rx_mode = tun_net_mclist, 1314758e43b7SStephen Hemminger .ndo_set_mac_address = eth_mac_addr, 1315758e43b7SStephen Hemminger .ndo_validate_addr = eth_validate_addr, 1316c8d68e6bSJason Wang .ndo_select_queue = tun_select_queue, 13175e52796aSToshiaki Makita .ndo_features_check = passthru_features_check, 1318eaea34b2SPaolo Abeni .ndo_set_rx_headroom = tun_set_headroom, 1319608b9977SPaolo Abeni .ndo_get_stats64 = tun_net_get_stats64, 1320f4e63525SJakub Kicinski .ndo_bpf = tun_xdp, 1321fc72d1d5SJason Wang .ndo_xdp_xmit = tun_xdp_xmit, 132226d31925SNicolas Dichtel .ndo_change_carrier = tun_net_change_carrier, 1323758e43b7SStephen Hemminger }; 1324758e43b7SStephen Hemminger 1325944a1376SPavel Emelyanov static void tun_flow_init(struct tun_struct *tun) 132696442e42SJason Wang { 132796442e42SJason Wang int i; 132896442e42SJason Wang 132996442e42SJason Wang for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) 133096442e42SJason Wang INIT_HLIST_HEAD(&tun->flows[i]); 133196442e42SJason Wang 133296442e42SJason Wang tun->ageing_time = TUN_FLOW_EXPIRE; 1333e99e88a9SKees Cook timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0); 1334e99e88a9SKees Cook mod_timer(&tun->flow_gc_timer, 1335e99e88a9SKees Cook round_jiffies_up(jiffies + tun->ageing_time)); 133696442e42SJason Wang } 133796442e42SJason Wang 133896442e42SJason Wang static void tun_flow_uninit(struct tun_struct *tun) 133996442e42SJason Wang { 134096442e42SJason Wang del_timer_sync(&tun->flow_gc_timer); 134196442e42SJason Wang tun_flow_flush(tun); 134296442e42SJason Wang } 134396442e42SJason Wang 134491572088SJarod Wilson #define MIN_MTU 68 134591572088SJarod Wilson #define MAX_MTU 65535 134691572088SJarod 
Wilson 13471da177e4SLinus Torvalds /* Initialize net device. */ 13481da177e4SLinus Torvalds static void tun_net_init(struct net_device *dev) 13491da177e4SLinus Torvalds { 13501da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 13511da177e4SLinus Torvalds 13521da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 135340630b82SMichael S. Tsirkin case IFF_TUN: 1354758e43b7SStephen Hemminger dev->netdev_ops = &tun_netdev_ops; 1355*b9815eb1SJason A. Donenfeld dev->header_ops = &ip_tunnel_header_ops; 1356758e43b7SStephen Hemminger 13571da177e4SLinus Torvalds /* Point-to-Point TUN Device */ 13581da177e4SLinus Torvalds dev->hard_header_len = 0; 13591da177e4SLinus Torvalds dev->addr_len = 0; 13601da177e4SLinus Torvalds dev->mtu = 1500; 13611da177e4SLinus Torvalds 13621da177e4SLinus Torvalds /* Zero header length */ 13631da177e4SLinus Torvalds dev->type = ARPHRD_NONE; 13641da177e4SLinus Torvalds dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 13651da177e4SLinus Torvalds break; 13661da177e4SLinus Torvalds 136740630b82SMichael S. Tsirkin case IFF_TAP: 13687a0a9608SKusanagi Kouichi dev->netdev_ops = &tap_netdev_ops; 13691da177e4SLinus Torvalds /* Ethernet TAP Device */ 13701da177e4SLinus Torvalds ether_setup(dev); 1371550fd08cSNeil Horman dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1372a676847bSstephen hemminger dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 137336226a8dSBrian Braunstein 1374f2cedb63SDanny Kukawka eth_hw_addr_random(dev); 137536226a8dSBrian Braunstein 13761da177e4SLinus Torvalds break; 13771da177e4SLinus Torvalds } 137891572088SJarod Wilson 137991572088SJarod Wilson dev->min_mtu = MIN_MTU; 138091572088SJarod Wilson dev->max_mtu = MAX_MTU - dev->hard_header_len; 13811da177e4SLinus Torvalds } 13821da177e4SLinus Torvalds 13832f3ab622SJason Wang static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile) 13842f3ab622SJason Wang { 13852f3ab622SJason Wang struct sock *sk = tfile->socket.sk; 13862f3ab622SJason Wang 13872f3ab622SJason Wang return (tun->dev->flags & IFF_UP) && sock_writeable(sk); 13882f3ab622SJason Wang } 13892f3ab622SJason Wang 13901da177e4SLinus Torvalds /* Character device part */ 13911da177e4SLinus Torvalds 13921da177e4SLinus Torvalds /* Poll */ 1393afc9a42bSAl Viro static __poll_t tun_chr_poll(struct file *file, poll_table *wait) 13941da177e4SLinus Torvalds { 1395b2430de3SEric W. Biederman struct tun_file *tfile = file->private_data; 13969484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 13973c8a9c63SMariusz Kozlowski struct sock *sk; 1398afc9a42bSAl Viro __poll_t mask = 0; 13991da177e4SLinus Torvalds 14001da177e4SLinus Torvalds if (!tun) 1401a9a08845SLinus Torvalds return EPOLLERR; 14021da177e4SLinus Torvalds 140354f968d6SJason Wang sk = tfile->socket.sk; 14043c8a9c63SMariusz Kozlowski 14059e641bdcSXi Wang poll_wait(file, sk_sleep(sk), wait); 14061da177e4SLinus Torvalds 14075990a305SJason Wang if (!ptr_ring_empty(&tfile->tx_ring)) 1408a9a08845SLinus Torvalds mask |= EPOLLIN | EPOLLRDNORM; 14091da177e4SLinus Torvalds 14102f3ab622SJason Wang /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to 14112f3ab622SJason Wang * guarantee EPOLLOUT to be raised by either here or 14122f3ab622SJason Wang * tun_sock_write_space(). Then process could get notification 14132f3ab622SJason Wang * after it writes to a down device and meets -EIO. 
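 * The second tun_sock_writeable() check below closes the race where the
 * queue drains between the first check and setting SOCKWQ_ASYNC_NOSPACE,
 * which would otherwise lose the wakeup.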
14142f3ab622SJason Wang */ 14152f3ab622SJason Wang if (tun_sock_writeable(tun, tfile) || 14169cd3e072SEric Dumazet (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 14172f3ab622SJason Wang tun_sock_writeable(tun, tfile))) 1418a9a08845SLinus Torvalds mask |= EPOLLOUT | EPOLLWRNORM; 141933dccbb0SHerbert Xu 1420c70f1829SEric W. Biederman if (tun->dev->reg_state != NETREG_REGISTERED) 1421a9a08845SLinus Torvalds mask = EPOLLERR; 1422c70f1829SEric W. Biederman 1423631ab46bSEric W. Biederman tun_put(tun); 14241da177e4SLinus Torvalds return mask; 14251da177e4SLinus Torvalds } 14261da177e4SLinus Torvalds 142790e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, 142890e33d45SPetar Penkov size_t len, 142990e33d45SPetar Penkov const struct iov_iter *it) 143090e33d45SPetar Penkov { 143190e33d45SPetar Penkov struct sk_buff *skb; 143290e33d45SPetar Penkov size_t linear; 143390e33d45SPetar Penkov int err; 143490e33d45SPetar Penkov int i; 143590e33d45SPetar Penkov 143690e33d45SPetar Penkov if (it->nr_segs > MAX_SKB_FRAGS + 1) 143790e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 143890e33d45SPetar Penkov 143990e33d45SPetar Penkov local_bh_disable(); 144090e33d45SPetar Penkov skb = napi_get_frags(&tfile->napi); 144190e33d45SPetar Penkov local_bh_enable(); 144290e33d45SPetar Penkov if (!skb) 144390e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 144490e33d45SPetar Penkov 144590e33d45SPetar Penkov linear = iov_iter_single_seg_count(it); 144690e33d45SPetar Penkov err = __skb_grow(skb, linear); 144790e33d45SPetar Penkov if (err) 144890e33d45SPetar Penkov goto free; 144990e33d45SPetar Penkov 145090e33d45SPetar Penkov skb->len = len; 145190e33d45SPetar Penkov skb->data_len = len - linear; 145290e33d45SPetar Penkov skb->truesize += skb->data_len; 145390e33d45SPetar Penkov 145490e33d45SPetar Penkov for (i = 1; i < it->nr_segs; i++) { 145590e33d45SPetar Penkov size_t fragsz = it->iov[i].iov_len; 1456aa6daacaSEric Dumazet struct page *page; 1457aa6daacaSEric Dumazet void *frag; 145890e33d45SPetar Penkov 145990e33d45SPetar Penkov if (fragsz == 0 || fragsz > PAGE_SIZE) { 146090e33d45SPetar Penkov err = -EINVAL; 146190e33d45SPetar Penkov goto free; 146290e33d45SPetar Penkov } 1463aa6daacaSEric Dumazet frag = netdev_alloc_frag(fragsz); 1464aa6daacaSEric Dumazet if (!frag) { 146590e33d45SPetar Penkov err = -ENOMEM; 146690e33d45SPetar Penkov goto free; 146790e33d45SPetar Penkov } 1468aa6daacaSEric Dumazet page = virt_to_head_page(frag); 1469aa6daacaSEric Dumazet skb_fill_page_desc(skb, i - 1, page, 1470aa6daacaSEric Dumazet frag - page_address(page), fragsz); 147190e33d45SPetar Penkov } 147290e33d45SPetar Penkov 147390e33d45SPetar Penkov return skb; 147490e33d45SPetar Penkov free: 147590e33d45SPetar Penkov /* frees skb and all frags allocated with napi_alloc_frag() */ 147690e33d45SPetar Penkov napi_free_frags(&tfile->napi); 147790e33d45SPetar Penkov return ERR_PTR(err); 147890e33d45SPetar Penkov } 147990e33d45SPetar Penkov 1480f42157cbSRusty Russell /* prepad is the amount to reserve at front. len is length after that. 1481f42157cbSRusty Russell * linear is a hint as to how much to copy (usually headers). 
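 * Anything beyond 'linear' is left as paged data (skb->data_len),
 * allocated by sock_alloc_send_pskb() below.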
*/ 148254f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, 148333dccbb0SHerbert Xu size_t prepad, size_t len, 148433dccbb0SHerbert Xu size_t linear, int noblock) 1485f42157cbSRusty Russell { 148654f968d6SJason Wang struct sock *sk = tfile->socket.sk; 1487f42157cbSRusty Russell struct sk_buff *skb; 148833dccbb0SHerbert Xu int err; 1489f42157cbSRusty Russell 1490f42157cbSRusty Russell /* Under a page? Don't bother with paged skb. */ 14910eca93bcSHerbert Xu if (prepad + len < PAGE_SIZE || !linear) 149233dccbb0SHerbert Xu linear = len; 1493f42157cbSRusty Russell 149433dccbb0SHerbert Xu skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 149528d64271SEric Dumazet &err, 0); 1496f42157cbSRusty Russell if (!skb) 149733dccbb0SHerbert Xu return ERR_PTR(err); 1498f42157cbSRusty Russell 1499f42157cbSRusty Russell skb_reserve(skb, prepad); 1500f42157cbSRusty Russell skb_put(skb, linear); 150133dccbb0SHerbert Xu skb->data_len = len - linear; 150233dccbb0SHerbert Xu skb->len += len - linear; 1503f42157cbSRusty Russell 1504f42157cbSRusty Russell return skb; 1505f42157cbSRusty Russell } 1506f42157cbSRusty Russell 15075503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, 15085503fcecSJason Wang struct sk_buff *skb, int more) 15095503fcecSJason Wang { 15105503fcecSJason Wang struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 15115503fcecSJason Wang struct sk_buff_head process_queue; 15125503fcecSJason Wang u32 rx_batched = tun->rx_batched; 15135503fcecSJason Wang bool rcv = false; 15145503fcecSJason Wang 15155503fcecSJason Wang if (!rx_batched || (!more && skb_queue_empty(queue))) { 15165503fcecSJason Wang local_bh_disable(); 15178ebebcbaSMatthew Cover skb_record_rx_queue(skb, tfile->queue_index); 15185503fcecSJason Wang netif_receive_skb(skb); 15195503fcecSJason Wang local_bh_enable(); 15205503fcecSJason Wang return; 15215503fcecSJason Wang } 15225503fcecSJason Wang 15235503fcecSJason Wang spin_lock(&queue->lock); 15245503fcecSJason Wang if (!more || skb_queue_len(queue) == rx_batched) { 15255503fcecSJason Wang __skb_queue_head_init(&process_queue); 15265503fcecSJason Wang skb_queue_splice_tail_init(queue, &process_queue); 15275503fcecSJason Wang rcv = true; 15285503fcecSJason Wang } else { 15295503fcecSJason Wang __skb_queue_tail(queue, skb); 15305503fcecSJason Wang } 15315503fcecSJason Wang spin_unlock(&queue->lock); 15325503fcecSJason Wang 15335503fcecSJason Wang if (rcv) { 15345503fcecSJason Wang struct sk_buff *nskb; 15355503fcecSJason Wang 15365503fcecSJason Wang local_bh_disable(); 15378ebebcbaSMatthew Cover while ((nskb = __skb_dequeue(&process_queue))) { 15388ebebcbaSMatthew Cover skb_record_rx_queue(nskb, tfile->queue_index); 15395503fcecSJason Wang netif_receive_skb(nskb); 15408ebebcbaSMatthew Cover } 15418ebebcbaSMatthew Cover skb_record_rx_queue(skb, tfile->queue_index); 15425503fcecSJason Wang netif_receive_skb(skb); 15435503fcecSJason Wang local_bh_enable(); 15445503fcecSJason Wang } 15455503fcecSJason Wang } 15465503fcecSJason Wang 154766ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, 154866ccbc9cSJason Wang int len, int noblock, bool zerocopy) 154966ccbc9cSJason Wang { 155066ccbc9cSJason Wang if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 155166ccbc9cSJason Wang return false; 155266ccbc9cSJason Wang 155366ccbc9cSJason Wang if (tfile->socket.sk->sk_sndbuf != INT_MAX) 155466ccbc9cSJason Wang return false; 155566ccbc9cSJason Wang 155666ccbc9cSJason Wang if 
(!noblock) 155766ccbc9cSJason Wang return false; 155866ccbc9cSJason Wang 155966ccbc9cSJason Wang if (zerocopy) 156066ccbc9cSJason Wang return false; 156166ccbc9cSJason Wang 156266ccbc9cSJason Wang if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + 156366ccbc9cSJason Wang SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) 156466ccbc9cSJason Wang return false; 156566ccbc9cSJason Wang 156666ccbc9cSJason Wang return true; 156766ccbc9cSJason Wang } 156866ccbc9cSJason Wang 15694b663366SAlexis Bauvin static struct sk_buff *__tun_build_skb(struct tun_file *tfile, 15704b663366SAlexis Bauvin struct page_frag *alloc_frag, char *buf, 15718ae1aff0SJason Wang int buflen, int len, int pad) 1572ac1f1f6cSJason Wang { 1573ac1f1f6cSJason Wang struct sk_buff *skb = build_skb(buf, buflen); 1574ac1f1f6cSJason Wang 1575ac1f1f6cSJason Wang if (!skb) 1576ac1f1f6cSJason Wang return ERR_PTR(-ENOMEM); 1577ac1f1f6cSJason Wang 15788ae1aff0SJason Wang skb_reserve(skb, pad); 1579ac1f1f6cSJason Wang skb_put(skb, len); 15804b663366SAlexis Bauvin skb_set_owner_w(skb, tfile->socket.sk); 1581ac1f1f6cSJason Wang 1582ac1f1f6cSJason Wang get_page(alloc_frag->page); 1583ac1f1f6cSJason Wang alloc_frag->offset += buflen; 1584ac1f1f6cSJason Wang 1585ac1f1f6cSJason Wang return skb; 1586ac1f1f6cSJason Wang } 1587ac1f1f6cSJason Wang 15888ae1aff0SJason Wang static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, 15898ae1aff0SJason Wang struct xdp_buff *xdp, u32 act) 15908ae1aff0SJason Wang { 15918ae1aff0SJason Wang int err; 15928ae1aff0SJason Wang 15938ae1aff0SJason Wang switch (act) { 15948ae1aff0SJason Wang case XDP_REDIRECT: 15958ae1aff0SJason Wang err = xdp_do_redirect(tun->dev, xdp, xdp_prog); 15968ae1aff0SJason Wang if (err) 15978ae1aff0SJason Wang return err; 15988ae1aff0SJason Wang break; 15998ae1aff0SJason Wang case XDP_TX: 16008ae1aff0SJason Wang err = tun_xdp_tx(tun->dev, xdp); 16018ae1aff0SJason Wang if (err < 0) 16028ae1aff0SJason Wang return err; 16038ae1aff0SJason Wang break; 16048ae1aff0SJason Wang case XDP_PASS: 16058ae1aff0SJason Wang break; 16068ae1aff0SJason Wang default: 16078ae1aff0SJason Wang bpf_warn_invalid_xdp_action(act); 16088ae1aff0SJason Wang /* fall through */ 16098ae1aff0SJason Wang case XDP_ABORTED: 16108ae1aff0SJason Wang trace_xdp_exception(tun->dev, xdp_prog, act); 16118ae1aff0SJason Wang /* fall through */ 16128ae1aff0SJason Wang case XDP_DROP: 16138ae1aff0SJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped); 16148ae1aff0SJason Wang break; 16158ae1aff0SJason Wang } 16168ae1aff0SJason Wang 16178ae1aff0SJason Wang return act; 16188ae1aff0SJason Wang } 16198ae1aff0SJason Wang 1620761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun, 1621761876c8SJason Wang struct tun_file *tfile, 162266ccbc9cSJason Wang struct iov_iter *from, 1623761876c8SJason Wang struct virtio_net_hdr *hdr, 16241cfe6e93SJason Wang int len, int *skb_xdp) 162566ccbc9cSJason Wang { 16260bbd7dadSEric Dumazet struct page_frag *alloc_frag = ¤t->task_frag; 1627761876c8SJason Wang struct bpf_prog *xdp_prog; 16287df13219SJason Wang int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 162966ccbc9cSJason Wang char *buf; 163066ccbc9cSJason Wang size_t copied; 16318ae1aff0SJason Wang int pad = TUN_RX_PAD; 16328ae1aff0SJason Wang int err = 0; 16337df13219SJason Wang 16347df13219SJason Wang rcu_read_lock(); 16357df13219SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 16367df13219SJason Wang if (xdp_prog) 16374f23aff8SJason Wang pad += XDP_PACKET_HEADROOM; 16387df13219SJason Wang buflen += 
SKB_DATA_ALIGN(len + pad); 16397df13219SJason Wang rcu_read_unlock(); 164066ccbc9cSJason Wang 164163b9ab65SJason Wang alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES); 164266ccbc9cSJason Wang if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) 164366ccbc9cSJason Wang return ERR_PTR(-ENOMEM); 164466ccbc9cSJason Wang 164566ccbc9cSJason Wang buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 164666ccbc9cSJason Wang copied = copy_page_from_iter(alloc_frag->page, 16477df13219SJason Wang alloc_frag->offset + pad, 164866ccbc9cSJason Wang len, from); 164966ccbc9cSJason Wang if (copied != len) 165066ccbc9cSJason Wang return ERR_PTR(-EFAULT); 165166ccbc9cSJason Wang 16527df13219SJason Wang /* There's a small window that XDP may be set after the check 16537df13219SJason Wang * of xdp_prog above, this should be rare and for simplicity 16547df13219SJason Wang * we do XDP on skb in case the headroom is not enough. 16557df13219SJason Wang */ 1656ac1f1f6cSJason Wang if (hdr->gso_type || !xdp_prog) { 16571cfe6e93SJason Wang *skb_xdp = 1; 16584b663366SAlexis Bauvin return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, 16594b663366SAlexis Bauvin pad); 1660ac1f1f6cSJason Wang } 1661ac1f1f6cSJason Wang 16621cfe6e93SJason Wang *skb_xdp = 0; 166366ccbc9cSJason Wang 16646547e387SToshiaki Makita local_bh_disable(); 1665761876c8SJason Wang rcu_read_lock(); 1666761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 16678ae1aff0SJason Wang if (xdp_prog) { 1668761876c8SJason Wang struct xdp_buff xdp; 1669761876c8SJason Wang u32 act; 1670761876c8SJason Wang 1671761876c8SJason Wang xdp.data_hard_start = buf; 16727df13219SJason Wang xdp.data = buf + pad; 1673de8f3a83SDaniel Borkmann xdp_set_data_meta_invalid(&xdp); 1674761876c8SJason Wang xdp.data_end = xdp.data + len; 16758bf5c4eeSJesper Dangaard Brouer xdp.rxq = &tfile->xdp_rxq; 1676fb3e6e93SJesper Dangaard Brouer xdp.frame_sz = buflen; 1677761876c8SJason Wang 16788ae1aff0SJason Wang act = bpf_prog_run_xdp(xdp_prog, &xdp); 16798ae1aff0SJason Wang if (act == XDP_REDIRECT || act == XDP_TX) { 1680761876c8SJason Wang get_page(alloc_frag->page); 1681761876c8SJason Wang alloc_frag->offset += buflen; 1682761876c8SJason Wang } 16838ae1aff0SJason Wang err = tun_xdp_act(tun, xdp_prog, &xdp, act); 1684bee34890SWill Deacon if (err < 0) { 1685bee34890SWill Deacon if (act == XDP_REDIRECT || act == XDP_TX) 1686bee34890SWill Deacon put_page(alloc_frag->page); 1687bee34890SWill Deacon goto out; 1688bee34890SWill Deacon } 1689bee34890SWill Deacon 16901a097910SJason Wang if (err == XDP_REDIRECT) 16911d233886SToke Høiland-Jørgensen xdp_do_flush(); 16928ae1aff0SJason Wang if (err != XDP_PASS) 16938ae1aff0SJason Wang goto out; 16948ae1aff0SJason Wang 16958ae1aff0SJason Wang pad = xdp.data - xdp.data_hard_start; 16968ae1aff0SJason Wang len = xdp.data_end - xdp.data; 1697761876c8SJason Wang } 1698761876c8SJason Wang rcu_read_unlock(); 16996547e387SToshiaki Makita local_bh_enable(); 1700291aeb2bSJason Wang 17014b663366SAlexis Bauvin return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad); 1702761876c8SJason Wang 1703f7053b6cSJason Wang out: 1704761876c8SJason Wang rcu_read_unlock(); 17056547e387SToshiaki Makita local_bh_enable(); 1706761876c8SJason Wang return NULL; 170766ccbc9cSJason Wang } 170866ccbc9cSJason Wang 17091da177e4SLinus Torvalds /* Get packet from user space buffer */ 171054f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, 1711f5ff53b4SAl Viro void *msg_control, 
struct iov_iter *from, 17125503fcecSJason Wang int noblock, bool more) 17131da177e4SLinus Torvalds { 171409640e63SHarvey Harrison struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; 17151da177e4SLinus Torvalds struct sk_buff *skb; 1716f5ff53b4SAl Viro size_t total_len = iov_iter_count(from); 1717eaea34b2SPaolo Abeni size_t len = total_len, align = tun->align, linear; 1718f43798c2SRusty Russell struct virtio_net_hdr gso = { 0 }; 1719608b9977SPaolo Abeni struct tun_pcpu_stats *stats; 172096f8d9ecSJason Wang int good_linear; 17210690899bSMichael S. Tsirkin int copylen; 17220690899bSMichael S. Tsirkin bool zerocopy = false; 17230690899bSMichael S. Tsirkin int err; 172496f84061SJason Wang u32 rxhash = 0; 17251cfe6e93SJason Wang int skb_xdp = 1; 1726af3fb24eSEric Dumazet bool frags = tun_napi_frags_enabled(tfile); 17271da177e4SLinus Torvalds 172840630b82SMichael S. Tsirkin if (!(tun->flags & IFF_NO_PI)) { 172915718ea0SDan Carpenter if (len < sizeof(pi)) 17301da177e4SLinus Torvalds return -EINVAL; 173115718ea0SDan Carpenter len -= sizeof(pi); 17321da177e4SLinus Torvalds 1733cbbd26b8SAl Viro if (!copy_from_iter_full(&pi, sizeof(pi), from)) 17341da177e4SLinus Torvalds return -EFAULT; 17351da177e4SLinus Torvalds } 17361da177e4SLinus Torvalds 173740630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR) { 1738e1edab87SWillem de Bruijn int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 1739e1edab87SWillem de Bruijn 1740e1edab87SWillem de Bruijn if (len < vnet_hdr_sz) 1741f43798c2SRusty Russell return -EINVAL; 1742e1edab87SWillem de Bruijn len -= vnet_hdr_sz; 1743f43798c2SRusty Russell 1744cbbd26b8SAl Viro if (!copy_from_iter_full(&gso, sizeof(gso), from)) 1745f43798c2SRusty Russell return -EFAULT; 1746f43798c2SRusty Russell 17474909122fSHerbert Xu if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 174856f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len)) 174956f0dcc5SMichael S. Tsirkin gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2); 17504909122fSHerbert Xu 175156f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > len) 1752f43798c2SRusty Russell return -EINVAL; 1753e1edab87SWillem de Bruijn iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); 1754f43798c2SRusty Russell } 1755f43798c2SRusty Russell 175640630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { 1757a504b86eSstephen hemminger align += NET_IP_ALIGN; 17580eca93bcSHerbert Xu if (unlikely(len < ETH_HLEN || 175956f0dcc5SMichael S. Tsirkin (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN))) 1760e01bf1c8SRusty Russell return -EINVAL; 1761e01bf1c8SRusty Russell } 17621da177e4SLinus Torvalds 176396f8d9ecSJason Wang good_linear = SKB_MAX_HEAD(align); 176496f8d9ecSJason Wang 176588529176SJason Wang if (msg_control) { 1766f5ff53b4SAl Viro struct iov_iter i = *from; 1767f5ff53b4SAl Viro 176888529176SJason Wang /* There are 256 bytes to be copied in skb, so there is 176988529176SJason Wang * enough room for skb expand head in case it is used. 17700690899bSMichael S. Tsirkin * The rest of the buffer is mapped from userspace. 17710690899bSMichael S. Tsirkin */ 177256f0dcc5SMichael S. Tsirkin copylen = gso.hdr_len ? 
tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN; 177396f8d9ecSJason Wang if (copylen > good_linear) 177496f8d9ecSJason Wang copylen = good_linear; 17753dd5c330SJason Wang linear = copylen; 1776f5ff53b4SAl Viro iov_iter_advance(&i, copylen); 1777f5ff53b4SAl Viro if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) 177888529176SJason Wang zerocopy = true; 177988529176SJason Wang } 178088529176SJason Wang 178190e33d45SPetar Penkov if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { 17821cfe6e93SJason Wang /* For the packet that is not easy to be processed 17831cfe6e93SJason Wang * (e.g gso or jumbo packet), we will do it at after 17841cfe6e93SJason Wang * skb was created with generic XDP routine. 17851cfe6e93SJason Wang */ 17861cfe6e93SJason Wang skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); 178766ccbc9cSJason Wang if (IS_ERR(skb)) { 178866ccbc9cSJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped); 178966ccbc9cSJason Wang return PTR_ERR(skb); 179066ccbc9cSJason Wang } 1791761876c8SJason Wang if (!skb) 1792761876c8SJason Wang return total_len; 179366ccbc9cSJason Wang } else { 179488529176SJason Wang if (!zerocopy) { 17950690899bSMichael S. Tsirkin copylen = len; 179656f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) 179796f8d9ecSJason Wang linear = good_linear; 179896f8d9ecSJason Wang else 179956f0dcc5SMichael S. Tsirkin linear = tun16_to_cpu(tun, gso.hdr_len); 18003dd5c330SJason Wang } 18010690899bSMichael S. Tsirkin 180290e33d45SPetar Penkov if (frags) { 180390e33d45SPetar Penkov mutex_lock(&tfile->napi_mutex); 180490e33d45SPetar Penkov skb = tun_napi_alloc_frags(tfile, copylen, from); 180590e33d45SPetar Penkov /* tun_napi_alloc_frags() enforces a layout for the skb. 180690e33d45SPetar Penkov * If zerocopy is enabled, then this layout will be 180790e33d45SPetar Penkov * overwritten by zerocopy_sg_from_iter(). 180890e33d45SPetar Penkov */ 180990e33d45SPetar Penkov zerocopy = false; 181090e33d45SPetar Penkov } else { 181190e33d45SPetar Penkov skb = tun_alloc_skb(tfile, align, copylen, linear, 181290e33d45SPetar Penkov noblock); 181390e33d45SPetar Penkov } 181490e33d45SPetar Penkov 181533dccbb0SHerbert Xu if (IS_ERR(skb)) { 181633dccbb0SHerbert Xu if (PTR_ERR(skb) != -EAGAIN) 1817608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 181890e33d45SPetar Penkov if (frags) 181990e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 182033dccbb0SHerbert Xu return PTR_ERR(skb); 18211da177e4SLinus Torvalds } 18221da177e4SLinus Torvalds 18230690899bSMichael S. Tsirkin if (zerocopy) 1824f5ff53b4SAl Viro err = zerocopy_sg_from_iter(skb, from); 1825af1cc7a2SJason Wang else 1826f5ff53b4SAl Viro err = skb_copy_datagram_from_iter(skb, 0, from, len); 18270690899bSMichael S. Tsirkin 18280690899bSMichael S. 
Tsirkin if (err) { 18294477138fSEric Dumazet err = -EFAULT; 18304477138fSEric Dumazet drop: 1831608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 18328f22757eSDave Jones kfree_skb(skb); 183390e33d45SPetar Penkov if (frags) { 183490e33d45SPetar Penkov tfile->napi.skb = NULL; 183590e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 183690e33d45SPetar Penkov } 183790e33d45SPetar Penkov 18384477138fSEric Dumazet return err; 18398f22757eSDave Jones } 184066ccbc9cSJason Wang } 18411da177e4SLinus Torvalds 18423e9e40e7SJarno Rajahalme if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { 1843df10db98SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_frame_errors); 1844df10db98SPaolo Abeni kfree_skb(skb); 184590e33d45SPetar Penkov if (frags) { 184690e33d45SPetar Penkov tfile->napi.skb = NULL; 184790e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 184890e33d45SPetar Penkov } 184990e33d45SPetar Penkov 1850df10db98SPaolo Abeni return -EINVAL; 1851df10db98SPaolo Abeni } 1852df10db98SPaolo Abeni 18531da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 185440630b82SMichael S. Tsirkin case IFF_TUN: 185540630b82SMichael S. Tsirkin if (tun->flags & IFF_NO_PI) { 18562580c4c1SAlexander Potapenko u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; 18572580c4c1SAlexander Potapenko 18582580c4c1SAlexander Potapenko switch (ip_version) { 18592580c4c1SAlexander Potapenko case 4: 1860f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IP); 1861f09f7ee2SAng Way Chuang break; 18622580c4c1SAlexander Potapenko case 6: 1863f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IPV6); 1864f09f7ee2SAng Way Chuang break; 1865f09f7ee2SAng Way Chuang default: 1866608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 1867f09f7ee2SAng Way Chuang kfree_skb(skb); 1868f09f7ee2SAng Way Chuang return -EINVAL; 1869f09f7ee2SAng Way Chuang } 1870f09f7ee2SAng Way Chuang } 1871f09f7ee2SAng Way Chuang 1872459a98edSArnaldo Carvalho de Melo skb_reset_mac_header(skb); 18731da177e4SLinus Torvalds skb->protocol = pi.proto; 18744c13eb66SArnaldo Carvalho de Melo skb->dev = tun->dev; 18751da177e4SLinus Torvalds break; 187640630b82SMichael S. Tsirkin case IFF_TAP: 187796aa1b22SWillem de Bruijn if (frags && !pskb_may_pull(skb, ETH_HLEN)) { 187896aa1b22SWillem de Bruijn err = -ENOMEM; 187996aa1b22SWillem de Bruijn goto drop; 188096aa1b22SWillem de Bruijn } 18811da177e4SLinus Torvalds skb->protocol = eth_type_trans(skb, tun->dev); 18821da177e4SLinus Torvalds break; 18836403eab1SJoe Perches } 18841da177e4SLinus Torvalds 18850690899bSMichael S. Tsirkin /* copy skb_ubuf_info for callback when skb has no error */ 18860690899bSMichael S. Tsirkin if (zerocopy) { 18870690899bSMichael S. Tsirkin skb_shinfo(skb)->destructor_arg = msg_control; 18880690899bSMichael S. Tsirkin skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1889c9af6db4SPravin B Shelar skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1890af1cc7a2SJason Wang } else if (msg_control) { 1891af1cc7a2SJason Wang struct ubuf_info *uarg = msg_control; 1892af1cc7a2SJason Wang uarg->callback(uarg, false); 18930690899bSMichael S. Tsirkin } 18940690899bSMichael S. 
Tsirkin 189572f65107SVlad Yasevich skb_reset_network_header(skb); 1896d2aa125dSMaxim Mikityanskiy skb_probe_transport_header(skb); 18973fe260e0SGilberto Bertin skb_record_rx_queue(skb, tfile->queue_index); 189838502af7SJason Wang 18991cfe6e93SJason Wang if (skb_xdp) { 1900761876c8SJason Wang struct bpf_prog *xdp_prog; 1901761876c8SJason Wang int ret; 1902761876c8SJason Wang 19036547e387SToshiaki Makita local_bh_disable(); 1904761876c8SJason Wang rcu_read_lock(); 1905761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 1906761876c8SJason Wang if (xdp_prog) { 1907761876c8SJason Wang ret = do_xdp_generic(xdp_prog, skb); 1908761876c8SJason Wang if (ret != XDP_PASS) { 1909761876c8SJason Wang rcu_read_unlock(); 19106547e387SToshiaki Makita local_bh_enable(); 19111efba987SEric Dumazet if (frags) { 19121efba987SEric Dumazet tfile->napi.skb = NULL; 19131efba987SEric Dumazet mutex_unlock(&tfile->napi_mutex); 19141efba987SEric Dumazet } 1915761876c8SJason Wang return total_len; 1916761876c8SJason Wang } 1917761876c8SJason Wang } 1918761876c8SJason Wang rcu_read_unlock(); 19196547e387SToshiaki Makita local_bh_enable(); 1920761876c8SJason Wang } 1921761876c8SJason Wang 1922cf1a1e07SPaolo Abeni /* Compute the costly rx hash only if needed for flow updates. 1923cf1a1e07SPaolo Abeni * We may get a very small possibility of OOO during switching, not 1924cf1a1e07SPaolo Abeni * worth to optimize. 1925cf1a1e07SPaolo Abeni */ 1926cf1a1e07SPaolo Abeni if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 && 1927cf1a1e07SPaolo Abeni !tfile->detached) 1928feec084aSJason Wang rxhash = __skb_get_hash_symmetric(skb); 192994317099SPetar Penkov 19304477138fSEric Dumazet rcu_read_lock(); 19314477138fSEric Dumazet if (unlikely(!(tun->dev->flags & IFF_UP))) { 19324477138fSEric Dumazet err = -EIO; 19339180bb4fSEric Dumazet rcu_read_unlock(); 19344477138fSEric Dumazet goto drop; 19354477138fSEric Dumazet } 19364477138fSEric Dumazet 193790e33d45SPetar Penkov if (frags) { 193896aa1b22SWillem de Bruijn u32 headlen; 193996aa1b22SWillem de Bruijn 194090e33d45SPetar Penkov /* Exercise flow dissector code path. 
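 * eth_get_headlen() runs the flow dissector over the pushed Ethernet
 * header; a result larger than skb_headlen() cannot be handled here, so
 * the frags are freed instead of being fed to napi_gro_frags().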
*/ 194196aa1b22SWillem de Bruijn skb_push(skb, ETH_HLEN); 194296aa1b22SWillem de Bruijn headlen = eth_get_headlen(tun->dev, skb->data, 1943c43f1255SStanislav Fomichev skb_headlen(skb)); 194490e33d45SPetar Penkov 1945010f245bSEric Dumazet if (unlikely(headlen > skb_headlen(skb))) { 194690e33d45SPetar Penkov this_cpu_inc(tun->pcpu_stats->rx_dropped); 194790e33d45SPetar Penkov napi_free_frags(&tfile->napi); 19484477138fSEric Dumazet rcu_read_unlock(); 194990e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 195090e33d45SPetar Penkov WARN_ON(1); 195190e33d45SPetar Penkov return -ENOMEM; 195290e33d45SPetar Penkov } 195390e33d45SPetar Penkov 195490e33d45SPetar Penkov local_bh_disable(); 195590e33d45SPetar Penkov napi_gro_frags(&tfile->napi); 195690e33d45SPetar Penkov local_bh_enable(); 195790e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 1958aec72f33SEric Dumazet } else if (tfile->napi_enabled) { 195994317099SPetar Penkov struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 196094317099SPetar Penkov int queue_len; 196194317099SPetar Penkov 196294317099SPetar Penkov spin_lock_bh(&queue->lock); 196394317099SPetar Penkov __skb_queue_tail(queue, skb); 196494317099SPetar Penkov queue_len = skb_queue_len(queue); 196594317099SPetar Penkov spin_unlock(&queue->lock); 196694317099SPetar Penkov 196794317099SPetar Penkov if (!more || queue_len > NAPI_POLL_WEIGHT) 196894317099SPetar Penkov napi_schedule(&tfile->napi); 196994317099SPetar Penkov 197094317099SPetar Penkov local_bh_enable(); 197194317099SPetar Penkov } else if (!IS_ENABLED(CONFIG_4KSTACKS)) { 19725503fcecSJason Wang tun_rx_batched(tun, tfile, skb, more); 197394317099SPetar Penkov } else { 19741da177e4SLinus Torvalds netif_rx_ni(skb); 197594317099SPetar Penkov } 19764477138fSEric Dumazet rcu_read_unlock(); 19771da177e4SLinus Torvalds 1978608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats); 1979608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp); 19805260dd3eSEric Dumazet u64_stats_inc(&stats->rx_packets); 19815260dd3eSEric Dumazet u64_stats_add(&stats->rx_bytes, len); 1982608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp); 1983608b9977SPaolo Abeni put_cpu_ptr(stats); 19841da177e4SLinus Torvalds 198596f84061SJason Wang if (rxhash) 19869e85722dSJason Wang tun_flow_update(tun, rxhash, tfile); 198796f84061SJason Wang 19880690899bSMichael S. Tsirkin return total_len; 19891da177e4SLinus Torvalds } 19901da177e4SLinus Torvalds 1991f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) 19921da177e4SLinus Torvalds { 199333dccbb0SHerbert Xu struct file *file = iocb->ki_filp; 199454f968d6SJason Wang struct tun_file *tfile = file->private_data; 19959484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 1996631ab46bSEric W. Biederman ssize_t result; 19971da177e4SLinus Torvalds 19981da177e4SLinus Torvalds if (!tun) 19991da177e4SLinus Torvalds return -EBADFD; 20001da177e4SLinus Torvalds 20015503fcecSJason Wang result = tun_get_user(tun, tfile, NULL, from, 20025503fcecSJason Wang file->f_flags & O_NONBLOCK, false); 2003631ab46bSEric W. Biederman 2004631ab46bSEric W. Biederman tun_put(tun); 2005631ab46bSEric W. 
Biederman return result; 20061da177e4SLinus Torvalds } 20071da177e4SLinus Torvalds 2008fc72d1d5SJason Wang static ssize_t tun_put_user_xdp(struct tun_struct *tun, 2009fc72d1d5SJason Wang struct tun_file *tfile, 20101ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdp_frame, 2011fc72d1d5SJason Wang struct iov_iter *iter) 2012fc72d1d5SJason Wang { 2013fc72d1d5SJason Wang int vnet_hdr_sz = 0; 20141ffcbc85SJesper Dangaard Brouer size_t size = xdp_frame->len; 2015fc72d1d5SJason Wang struct tun_pcpu_stats *stats; 2016fc72d1d5SJason Wang size_t ret; 2017fc72d1d5SJason Wang 2018fc72d1d5SJason Wang if (tun->flags & IFF_VNET_HDR) { 2019fc72d1d5SJason Wang struct virtio_net_hdr gso = { 0 }; 2020fc72d1d5SJason Wang 2021fc72d1d5SJason Wang vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 2022fc72d1d5SJason Wang if (unlikely(iov_iter_count(iter) < vnet_hdr_sz)) 2023fc72d1d5SJason Wang return -EINVAL; 2024fc72d1d5SJason Wang if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) != 2025fc72d1d5SJason Wang sizeof(gso))) 2026fc72d1d5SJason Wang return -EFAULT; 2027fc72d1d5SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 2028fc72d1d5SJason Wang } 2029fc72d1d5SJason Wang 20301ffcbc85SJesper Dangaard Brouer ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz; 2031fc72d1d5SJason Wang 2032fc72d1d5SJason Wang stats = get_cpu_ptr(tun->pcpu_stats); 2033fc72d1d5SJason Wang u64_stats_update_begin(&stats->syncp); 20345260dd3eSEric Dumazet u64_stats_inc(&stats->tx_packets); 20355260dd3eSEric Dumazet u64_stats_add(&stats->tx_bytes, ret); 2036fc72d1d5SJason Wang u64_stats_update_end(&stats->syncp); 2037fc72d1d5SJason Wang put_cpu_ptr(tun->pcpu_stats); 2038fc72d1d5SJason Wang 2039fc72d1d5SJason Wang return ret; 2040fc72d1d5SJason Wang } 2041fc72d1d5SJason Wang 20421da177e4SLinus Torvalds /* Put packet to the user space buffer */ 20436f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun, 204454f968d6SJason Wang struct tun_file *tfile, 20451da177e4SLinus Torvalds struct sk_buff *skb, 2046e0b46d0eSHerbert Xu struct iov_iter *iter) 20471da177e4SLinus Torvalds { 20481da177e4SLinus Torvalds struct tun_pi pi = { 0, skb->protocol }; 2049608b9977SPaolo Abeni struct tun_pcpu_stats *stats; 2050e0b46d0eSHerbert Xu ssize_t total; 20518c847d25SJason Wang int vlan_offset = 0; 2052a8f9bfdfSHerbert Xu int vlan_hlen = 0; 20532eb783c4SHerbert Xu int vnet_hdr_sz = 0; 2054a8f9bfdfSHerbert Xu 2055df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) 2056a8f9bfdfSHerbert Xu vlan_hlen = VLAN_HLEN; 20571da177e4SLinus Torvalds 205840630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR) 2059e1edab87SWillem de Bruijn vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 20601da177e4SLinus Torvalds 2061e0b46d0eSHerbert Xu total = skb->len + vlan_hlen + vnet_hdr_sz; 2062e0b46d0eSHerbert Xu 206340630b82SMichael S. 
Tsirkin 	if (!(tun->flags & IFF_NO_PI)) {
2064e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < sizeof(pi))
20651da177e4SLinus Torvalds 			return -EINVAL;
20661da177e4SLinus Torvalds 
2067e0b46d0eSHerbert Xu 		total += sizeof(pi);
2068e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < total) {
20691da177e4SLinus Torvalds 			/* Packet will be stripped */
20701da177e4SLinus Torvalds 			pi.flags |= TUN_PKT_STRIP;
20711da177e4SLinus Torvalds 		}
20721da177e4SLinus Torvalds 
2073e0b46d0eSHerbert Xu 		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
20741da177e4SLinus Torvalds 			return -EFAULT;
20751da177e4SLinus Torvalds 	}
20761da177e4SLinus Torvalds 
20772eb783c4SHerbert Xu 	if (vnet_hdr_sz) {
20789403cd7cSJarno Rajahalme 		struct virtio_net_hdr gso;
207934166093SMike Rapoport 
2080e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < vnet_hdr_sz)
2081f43798c2SRusty Russell 			return -EINVAL;
2082f43798c2SRusty Russell 
20833e9e40e7SJarno Rajahalme 		if (virtio_net_hdr_from_skb(skb, &gso,
2084fd3a8862SWillem de Bruijn 					    tun_is_little_endian(tun), true,
2085fd3a8862SWillem de Bruijn 					    vlan_hlen)) {
2086f43798c2SRusty Russell 			struct skb_shared_info *sinfo = skb_shinfo(skb);
20876b8a66eeSJoe Perches 			pr_err("unexpected GSO type: "
2088ef3db4a5SMichael S. Tsirkin 			       "0x%x, gso_size %d, hdr_len %d\n",
208956f0dcc5SMichael S. Tsirkin 			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
209056f0dcc5SMichael S. Tsirkin 			       tun16_to_cpu(tun, gso.hdr_len));
2091ef3db4a5SMichael S. Tsirkin 			print_hex_dump(KERN_ERR, "tun: ",
2092ef3db4a5SMichael S. Tsirkin 				       DUMP_PREFIX_NONE,
2093ef3db4a5SMichael S. Tsirkin 				       16, 1, skb->head,
209456f0dcc5SMichael S. Tsirkin 				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2095ef3db4a5SMichael S. Tsirkin 			WARN_ON_ONCE(1);
2096ef3db4a5SMichael S. Tsirkin 			return -EINVAL;
2097ef3db4a5SMichael S.
Tsirkin } 2098f43798c2SRusty Russell 2099e0b46d0eSHerbert Xu if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) 2100f43798c2SRusty Russell return -EFAULT; 21018c847d25SJason Wang 21028c847d25SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 2103f43798c2SRusty Russell } 2104f43798c2SRusty Russell 2105a8f9bfdfSHerbert Xu if (vlan_hlen) { 2106e0b46d0eSHerbert Xu int ret; 2107aff3d70aSJason Wang struct veth veth; 21081da177e4SLinus Torvalds 21096680ec68SJason Wang veth.h_vlan_proto = skb->vlan_proto; 2110df8a39deSJiri Pirko veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); 21111da177e4SLinus Torvalds 21126680ec68SJason Wang vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 21136680ec68SJason Wang 2114e0b46d0eSHerbert Xu ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); 2115e0b46d0eSHerbert Xu if (ret || !iov_iter_count(iter)) 21166680ec68SJason Wang goto done; 21176680ec68SJason Wang 2118e0b46d0eSHerbert Xu ret = copy_to_iter(&veth, sizeof(veth), iter); 2119e0b46d0eSHerbert Xu if (ret != sizeof(veth) || !iov_iter_count(iter)) 21206680ec68SJason Wang goto done; 21216680ec68SJason Wang } 21226680ec68SJason Wang 2123e0b46d0eSHerbert Xu skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); 21246680ec68SJason Wang 21256680ec68SJason Wang done: 2126608b9977SPaolo Abeni /* caller is in process context, */ 2127608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats); 2128608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp); 21295260dd3eSEric Dumazet u64_stats_inc(&stats->tx_packets); 21305260dd3eSEric Dumazet u64_stats_add(&stats->tx_bytes, skb->len + vlan_hlen); 2131608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp); 2132608b9977SPaolo Abeni put_cpu_ptr(tun->pcpu_stats); 21331da177e4SLinus Torvalds 21341da177e4SLinus Torvalds return total; 21351da177e4SLinus Torvalds } 21361da177e4SLinus Torvalds 2137fc72d1d5SJason Wang static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) 21381576d986SJason Wang { 21391576d986SJason Wang DECLARE_WAITQUEUE(wait, current); 2140fc72d1d5SJason Wang void *ptr = NULL; 2141f48cc6b2SJason Wang int error = 0; 21421576d986SJason Wang 2143fc72d1d5SJason Wang ptr = ptr_ring_consume(&tfile->tx_ring); 2144fc72d1d5SJason Wang if (ptr) 21451576d986SJason Wang goto out; 21461576d986SJason Wang if (noblock) { 2147f48cc6b2SJason Wang error = -EAGAIN; 21481576d986SJason Wang goto out; 21491576d986SJason Wang } 21501576d986SJason Wang 2151333f7909SAl Viro add_wait_queue(&tfile->socket.wq.wait, &wait); 21521576d986SJason Wang 21531576d986SJason Wang while (1) { 215471828b22STimur Celik set_current_state(TASK_INTERRUPTIBLE); 2155fc72d1d5SJason Wang ptr = ptr_ring_consume(&tfile->tx_ring); 2156fc72d1d5SJason Wang if (ptr) 21571576d986SJason Wang break; 21581576d986SJason Wang if (signal_pending(current)) { 2159f48cc6b2SJason Wang error = -ERESTARTSYS; 21601576d986SJason Wang break; 21611576d986SJason Wang } 21621576d986SJason Wang if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { 2163f48cc6b2SJason Wang error = -EFAULT; 21641576d986SJason Wang break; 21651576d986SJason Wang } 21661576d986SJason Wang 21671576d986SJason Wang schedule(); 21681576d986SJason Wang } 21691576d986SJason Wang 2170ecef67cbSTimur Celik __set_current_state(TASK_RUNNING); 2171333f7909SAl Viro remove_wait_queue(&tfile->socket.wq.wait, &wait); 21721576d986SJason Wang 21731576d986SJason Wang out: 2174f48cc6b2SJason Wang *err = error; 2175fc72d1d5SJason Wang return ptr; 21761576d986SJason Wang } 21771576d986SJason Wang 217854f968d6SJason 
Wang static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, 21799b067034SAl Viro struct iov_iter *to, 2180fc72d1d5SJason Wang int noblock, void *ptr) 21811da177e4SLinus Torvalds { 21829b067034SAl Viro ssize_t ret; 21831576d986SJason Wang int err; 21841da177e4SLinus Torvalds 2185c33ee15bSWei Xu if (!iov_iter_count(to)) { 2186fc72d1d5SJason Wang tun_ptr_free(ptr); 21879b067034SAl Viro return 0; 2188c33ee15bSWei Xu } 21891da177e4SLinus Torvalds 2190fc72d1d5SJason Wang if (!ptr) { 21911576d986SJason Wang /* Read frames from ring */ 2192fc72d1d5SJason Wang ptr = tun_ring_recv(tfile, noblock, &err); 2193fc72d1d5SJason Wang if (!ptr) 2194957f094fSAlex Gartrell return err; 2195ac77cfd4SJason Wang } 2196e0b46d0eSHerbert Xu 21971ffcbc85SJesper Dangaard Brouer if (tun_is_xdp_frame(ptr)) { 21981ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); 2199fc72d1d5SJason Wang 22001ffcbc85SJesper Dangaard Brouer ret = tun_put_user_xdp(tun, tfile, xdpf, to); 220103993094SJesper Dangaard Brouer xdp_return_frame(xdpf); 2202fc72d1d5SJason Wang } else { 2203fc72d1d5SJason Wang struct sk_buff *skb = ptr; 2204fc72d1d5SJason Wang 22059b067034SAl Viro ret = tun_put_user(tun, tfile, skb, to); 2206f51a5e82SJason Wang if (unlikely(ret < 0)) 22071da177e4SLinus Torvalds kfree_skb(skb); 2208f51a5e82SJason Wang else 2209f51a5e82SJason Wang consume_skb(skb); 2210fc72d1d5SJason Wang } 22111da177e4SLinus Torvalds 221205c2828cSMichael S. Tsirkin return ret; 221305c2828cSMichael S. Tsirkin } 221405c2828cSMichael S. Tsirkin 22159b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) 221605c2828cSMichael S. Tsirkin { 221705c2828cSMichael S. Tsirkin struct file *file = iocb->ki_filp; 221805c2828cSMichael S. Tsirkin struct tun_file *tfile = file->private_data; 22199484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 22209b067034SAl Viro ssize_t len = iov_iter_count(to), ret; 222105c2828cSMichael S. Tsirkin 222205c2828cSMichael S. Tsirkin if (!tun) 222305c2828cSMichael S. Tsirkin return -EBADFD; 2224ac77cfd4SJason Wang ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL); 222542404c09SDavid S. Miller ret = min_t(ssize_t, ret, len); 2226d0b7da8aSZhi Yong Wu if (ret > 0) 2227d0b7da8aSZhi Yong Wu iocb->ki_pos = ret; 2228631ab46bSEric W. 
Biederman tun_put(tun); 22291da177e4SLinus Torvalds return ret; 22301da177e4SLinus Torvalds } 22311da177e4SLinus Torvalds 2232cd5681d7SJason Wang static void tun_prog_free(struct rcu_head *rcu) 223396f84061SJason Wang { 2234cd5681d7SJason Wang struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu); 223596f84061SJason Wang 223696f84061SJason Wang bpf_prog_destroy(prog->prog); 223796f84061SJason Wang kfree(prog); 223896f84061SJason Wang } 223996f84061SJason Wang 22409d6474e4SJason Wang static int __tun_set_ebpf(struct tun_struct *tun, 22419d6474e4SJason Wang struct tun_prog __rcu **prog_p, 224296f84061SJason Wang struct bpf_prog *prog) 224396f84061SJason Wang { 2244cd5681d7SJason Wang struct tun_prog *old, *new = NULL; 224596f84061SJason Wang 224696f84061SJason Wang if (prog) { 224796f84061SJason Wang new = kmalloc(sizeof(*new), GFP_KERNEL); 224896f84061SJason Wang if (!new) 224996f84061SJason Wang return -ENOMEM; 225096f84061SJason Wang new->prog = prog; 225196f84061SJason Wang } 225296f84061SJason Wang 2253124da8f6SJason Wang spin_lock_bh(&tun->lock); 2254cd5681d7SJason Wang old = rcu_dereference_protected(*prog_p, 2255124da8f6SJason Wang lockdep_is_held(&tun->lock)); 2256cd5681d7SJason Wang rcu_assign_pointer(*prog_p, new); 2257124da8f6SJason Wang spin_unlock_bh(&tun->lock); 225896f84061SJason Wang 225996f84061SJason Wang if (old) 2260cd5681d7SJason Wang call_rcu(&old->rcu, tun_prog_free); 226196f84061SJason Wang 226296f84061SJason Wang return 0; 226396f84061SJason Wang } 226496f84061SJason Wang 226596442e42SJason Wang static void tun_free_netdev(struct net_device *dev) 226696442e42SJason Wang { 226796442e42SJason Wang struct tun_struct *tun = netdev_priv(dev); 226896442e42SJason Wang 22694008e97fSJason Wang BUG_ON(!(list_empty(&tun->disabled))); 227011fc7d5aSEric Dumazet 2271608b9977SPaolo Abeni free_percpu(tun->pcpu_stats); 227211fc7d5aSEric Dumazet /* We clear pcpu_stats so that tun_set_iff() can tell if 227311fc7d5aSEric Dumazet * tun_free_netdev() has been called from register_netdevice(). 227411fc7d5aSEric Dumazet */ 227511fc7d5aSEric Dumazet tun->pcpu_stats = NULL; 227611fc7d5aSEric Dumazet 227796442e42SJason Wang tun_flow_uninit(tun); 22785dbbaf2dSPaul Moore security_tun_dev_free_security(tun->security); 2279cd5681d7SJason Wang __tun_set_ebpf(tun, &tun->steering_prog, NULL); 2280aff3d70aSJason Wang __tun_set_ebpf(tun, &tun->filter_prog, NULL); 228196442e42SJason Wang } 228296442e42SJason Wang 22831da177e4SLinus Torvalds static void tun_setup(struct net_device *dev) 22841da177e4SLinus Torvalds { 22851da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 22861da177e4SLinus Torvalds 22870625c883SEric W. Biederman tun->owner = INVALID_UID; 22880625c883SEric W. Biederman tun->group = INVALID_GID; 22894e24f2ddSChas Williams tun_default_link_ksettings(dev, &tun->link_ksettings); 22901da177e4SLinus Torvalds 22911da177e4SLinus Torvalds dev->ethtool_ops = &tun_ethtool_ops; 2292cf124db5SDavid S. Miller dev->needs_free_netdev = true; 2293cf124db5SDavid S. Miller dev->priv_destructor = tun_free_netdev; 2294016adb72SJason Wang /* We prefer our own queue length */ 2295016adb72SJason Wang dev->tx_queue_len = TUN_READQ_SIZE; 22961da177e4SLinus Torvalds } 22971da177e4SLinus Torvalds 2298f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting tun or tap 2299f019a7a5SEric W. Biederman * device with netlink. 2300f019a7a5SEric W. 
Biederman */ 2301a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[], 2302a8b8a889SMatthias Schiffer struct netlink_ext_ack *extack) 2303f019a7a5SEric W. Biederman { 230435b827b6SNicolas Dichtel NL_SET_ERR_MSG(extack, 230535b827b6SNicolas Dichtel "tun/tap creation via rtnetlink is not supported."); 230635b827b6SNicolas Dichtel return -EOPNOTSUPP; 2307f019a7a5SEric W. Biederman } 2308f019a7a5SEric W. Biederman 23091ec010e7SSabrina Dubroca static size_t tun_get_size(const struct net_device *dev) 23101ec010e7SSabrina Dubroca { 23111ec010e7SSabrina Dubroca BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t)); 23121ec010e7SSabrina Dubroca BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t)); 23131ec010e7SSabrina Dubroca 23141ec010e7SSabrina Dubroca return nla_total_size(sizeof(uid_t)) + /* OWNER */ 23151ec010e7SSabrina Dubroca nla_total_size(sizeof(gid_t)) + /* GROUP */ 23161ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* TYPE */ 23171ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* PI */ 23181ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* VNET_HDR */ 23191ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* PERSIST */ 23201ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */ 23211ec010e7SSabrina Dubroca nla_total_size(sizeof(u32)) + /* NUM_QUEUES */ 23221ec010e7SSabrina Dubroca nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */ 23231ec010e7SSabrina Dubroca 0; 23241ec010e7SSabrina Dubroca } 23251ec010e7SSabrina Dubroca 23261ec010e7SSabrina Dubroca static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev) 23271ec010e7SSabrina Dubroca { 23281ec010e7SSabrina Dubroca struct tun_struct *tun = netdev_priv(dev); 23291ec010e7SSabrina Dubroca 23301ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK)) 23311ec010e7SSabrina Dubroca goto nla_put_failure; 23321ec010e7SSabrina Dubroca if (uid_valid(tun->owner) && 23331ec010e7SSabrina Dubroca nla_put_u32(skb, IFLA_TUN_OWNER, 23341ec010e7SSabrina Dubroca from_kuid_munged(current_user_ns(), tun->owner))) 23351ec010e7SSabrina Dubroca goto nla_put_failure; 23361ec010e7SSabrina Dubroca if (gid_valid(tun->group) && 23371ec010e7SSabrina Dubroca nla_put_u32(skb, IFLA_TUN_GROUP, 23381ec010e7SSabrina Dubroca from_kgid_munged(current_user_ns(), tun->group))) 23391ec010e7SSabrina Dubroca goto nla_put_failure; 23401ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI))) 23411ec010e7SSabrina Dubroca goto nla_put_failure; 23421ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR))) 23431ec010e7SSabrina Dubroca goto nla_put_failure; 23441ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST))) 23451ec010e7SSabrina Dubroca goto nla_put_failure; 23461ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE, 23471ec010e7SSabrina Dubroca !!(tun->flags & IFF_MULTI_QUEUE))) 23481ec010e7SSabrina Dubroca goto nla_put_failure; 23491ec010e7SSabrina Dubroca if (tun->flags & IFF_MULTI_QUEUE) { 23501ec010e7SSabrina Dubroca if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues)) 23511ec010e7SSabrina Dubroca goto nla_put_failure; 23521ec010e7SSabrina Dubroca if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES, 23531ec010e7SSabrina Dubroca tun->numdisabled)) 23541ec010e7SSabrina Dubroca goto nla_put_failure; 23551ec010e7SSabrina Dubroca } 23561ec010e7SSabrina Dubroca 23571ec010e7SSabrina Dubroca return 0; 23581ec010e7SSabrina Dubroca 23591ec010e7SSabrina 
Dubroca nla_put_failure: 23601ec010e7SSabrina Dubroca return -EMSGSIZE; 23611ec010e7SSabrina Dubroca } 23621ec010e7SSabrina Dubroca 2363f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = { 2364f019a7a5SEric W. Biederman .kind = DRV_NAME, 2365f019a7a5SEric W. Biederman .priv_size = sizeof(struct tun_struct), 2366f019a7a5SEric W. Biederman .setup = tun_setup, 2367f019a7a5SEric W. Biederman .validate = tun_validate, 23681ec010e7SSabrina Dubroca .get_size = tun_get_size, 23691ec010e7SSabrina Dubroca .fill_info = tun_fill_info, 2370f019a7a5SEric W. Biederman }; 2371f019a7a5SEric W. Biederman 237233dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk) 237333dccbb0SHerbert Xu { 237454f968d6SJason Wang struct tun_file *tfile; 237543815482SEric Dumazet wait_queue_head_t *wqueue; 237633dccbb0SHerbert Xu 237733dccbb0SHerbert Xu if (!sock_writeable(sk)) 237833dccbb0SHerbert Xu return; 237933dccbb0SHerbert Xu 23809cd3e072SEric Dumazet if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) 238133dccbb0SHerbert Xu return; 238233dccbb0SHerbert Xu 238343815482SEric Dumazet wqueue = sk_sleep(sk); 238443815482SEric Dumazet if (wqueue && waitqueue_active(wqueue)) 2385a9a08845SLinus Torvalds wake_up_interruptible_sync_poll(wqueue, EPOLLOUT | 2386a9a08845SLinus Torvalds EPOLLWRNORM | EPOLLWRBAND); 2387c722c625SHerbert Xu 238854f968d6SJason Wang tfile = container_of(sk, struct tun_file, sk); 238954f968d6SJason Wang kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); 239033dccbb0SHerbert Xu } 239133dccbb0SHerbert Xu 2392f9e06c45SJason Wang static void tun_put_page(struct tun_page *tpage) 2393f9e06c45SJason Wang { 2394f9e06c45SJason Wang if (tpage->page) 2395f9e06c45SJason Wang __page_frag_cache_drain(tpage->page, tpage->count); 2396f9e06c45SJason Wang } 2397f9e06c45SJason Wang 2398043d222fSJason Wang static int tun_xdp_one(struct tun_struct *tun, 2399043d222fSJason Wang struct tun_file *tfile, 2400f9e06c45SJason Wang struct xdp_buff *xdp, int *flush, 2401f9e06c45SJason Wang struct tun_page *tpage) 2402043d222fSJason Wang { 24034e4b08e5SPrashant Bhole unsigned int datasize = xdp->data_end - xdp->data; 2404043d222fSJason Wang struct tun_xdp_hdr *hdr = xdp->data_hard_start; 2405043d222fSJason Wang struct virtio_net_hdr *gso = &hdr->gso; 2406043d222fSJason Wang struct tun_pcpu_stats *stats; 2407043d222fSJason Wang struct bpf_prog *xdp_prog; 2408043d222fSJason Wang struct sk_buff *skb = NULL; 2409043d222fSJason Wang u32 rxhash = 0, act; 2410043d222fSJason Wang int buflen = hdr->buflen; 2411043d222fSJason Wang int err = 0; 2412043d222fSJason Wang bool skb_xdp = false; 2413f9e06c45SJason Wang struct page *page; 2414043d222fSJason Wang 2415043d222fSJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 2416043d222fSJason Wang if (xdp_prog) { 2417043d222fSJason Wang if (gso->gso_type) { 2418043d222fSJason Wang skb_xdp = true; 2419043d222fSJason Wang goto build; 2420043d222fSJason Wang } 2421043d222fSJason Wang xdp_set_data_meta_invalid(xdp); 2422043d222fSJason Wang xdp->rxq = &tfile->xdp_rxq; 2423fb3e6e93SJesper Dangaard Brouer xdp->frame_sz = buflen; 2424043d222fSJason Wang 2425043d222fSJason Wang act = bpf_prog_run_xdp(xdp_prog, xdp); 2426043d222fSJason Wang err = tun_xdp_act(tun, xdp_prog, xdp, act); 2427043d222fSJason Wang if (err < 0) { 2428043d222fSJason Wang put_page(virt_to_head_page(xdp->data)); 2429043d222fSJason Wang return err; 2430043d222fSJason Wang } 2431043d222fSJason Wang 2432043d222fSJason Wang switch (err) { 2433043d222fSJason Wang case 
XDP_REDIRECT: 2434043d222fSJason Wang *flush = true; 2435043d222fSJason Wang /* fall through */ 2436043d222fSJason Wang case XDP_TX: 2437043d222fSJason Wang return 0; 2438043d222fSJason Wang case XDP_PASS: 2439043d222fSJason Wang break; 2440043d222fSJason Wang default: 2441f9e06c45SJason Wang page = virt_to_head_page(xdp->data); 2442f9e06c45SJason Wang if (tpage->page == page) { 2443f9e06c45SJason Wang ++tpage->count; 2444f9e06c45SJason Wang } else { 2445f9e06c45SJason Wang tun_put_page(tpage); 2446f9e06c45SJason Wang tpage->page = page; 2447f9e06c45SJason Wang tpage->count = 1; 2448f9e06c45SJason Wang } 2449043d222fSJason Wang return 0; 2450043d222fSJason Wang } 2451043d222fSJason Wang } 2452043d222fSJason Wang 2453043d222fSJason Wang build: 2454043d222fSJason Wang skb = build_skb(xdp->data_hard_start, buflen); 2455043d222fSJason Wang if (!skb) { 2456043d222fSJason Wang err = -ENOMEM; 2457043d222fSJason Wang goto out; 2458043d222fSJason Wang } 2459043d222fSJason Wang 2460043d222fSJason Wang skb_reserve(skb, xdp->data - xdp->data_hard_start); 2461043d222fSJason Wang skb_put(skb, xdp->data_end - xdp->data); 2462043d222fSJason Wang 2463043d222fSJason Wang if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) { 2464043d222fSJason Wang this_cpu_inc(tun->pcpu_stats->rx_frame_errors); 2465043d222fSJason Wang kfree_skb(skb); 2466043d222fSJason Wang err = -EINVAL; 2467043d222fSJason Wang goto out; 2468043d222fSJason Wang } 2469043d222fSJason Wang 2470043d222fSJason Wang skb->protocol = eth_type_trans(skb, tun->dev); 2471043d222fSJason Wang skb_reset_network_header(skb); 2472d2aa125dSMaxim Mikityanskiy skb_probe_transport_header(skb); 24733fe260e0SGilberto Bertin skb_record_rx_queue(skb, tfile->queue_index); 2474043d222fSJason Wang 2475043d222fSJason Wang if (skb_xdp) { 2476043d222fSJason Wang err = do_xdp_generic(xdp_prog, skb); 2477043d222fSJason Wang if (err != XDP_PASS) 2478043d222fSJason Wang goto out; 2479043d222fSJason Wang } 2480043d222fSJason Wang 2481f29eb2a9SPaolo Abeni if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 && 2482f29eb2a9SPaolo Abeni !tfile->detached) 2483043d222fSJason Wang rxhash = __skb_get_hash_symmetric(skb); 2484043d222fSJason Wang 2485043d222fSJason Wang netif_receive_skb(skb); 2486043d222fSJason Wang 24876342ca64SPrashant Bhole /* No need for get_cpu_ptr() here since this function is 24886342ca64SPrashant Bhole * always called with bh disabled 24896342ca64SPrashant Bhole */ 24906342ca64SPrashant Bhole stats = this_cpu_ptr(tun->pcpu_stats); 2491043d222fSJason Wang u64_stats_update_begin(&stats->syncp); 24925260dd3eSEric Dumazet u64_stats_inc(&stats->rx_packets); 24935260dd3eSEric Dumazet u64_stats_add(&stats->rx_bytes, datasize); 2494043d222fSJason Wang u64_stats_update_end(&stats->syncp); 2495043d222fSJason Wang 2496043d222fSJason Wang if (rxhash) 2497043d222fSJason Wang tun_flow_update(tun, rxhash, tfile); 2498043d222fSJason Wang 2499043d222fSJason Wang out: 2500043d222fSJason Wang return err; 2501043d222fSJason Wang } 2502043d222fSJason Wang 25031b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) 250405c2828cSMichael S. 
Tsirkin { 2505043d222fSJason Wang int ret, i; 250654f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 25079484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 2508fe8dd45bSJason Wang struct tun_msg_ctl *ctl = m->msg_control; 2509043d222fSJason Wang struct xdp_buff *xdp; 251054f968d6SJason Wang 251154f968d6SJason Wang if (!tun) 251254f968d6SJason Wang return -EBADFD; 2513f5ff53b4SAl Viro 2514043d222fSJason Wang if (ctl && (ctl->type == TUN_MSG_PTR)) { 25156f0271d9SDavid S. Miller struct tun_page tpage; 2516043d222fSJason Wang int n = ctl->num; 2517043d222fSJason Wang int flush = 0; 2518043d222fSJason Wang 25196f0271d9SDavid S. Miller memset(&tpage, 0, sizeof(tpage)); 25206f0271d9SDavid S. Miller 2521043d222fSJason Wang local_bh_disable(); 2522043d222fSJason Wang rcu_read_lock(); 2523043d222fSJason Wang 2524043d222fSJason Wang for (i = 0; i < n; i++) { 2525043d222fSJason Wang xdp = &((struct xdp_buff *)ctl->ptr)[i]; 2526f9e06c45SJason Wang tun_xdp_one(tun, tfile, xdp, &flush, &tpage); 2527043d222fSJason Wang } 2528043d222fSJason Wang 2529043d222fSJason Wang if (flush) 25301d233886SToke Høiland-Jørgensen xdp_do_flush(); 2531043d222fSJason Wang 2532043d222fSJason Wang rcu_read_unlock(); 2533043d222fSJason Wang local_bh_enable(); 2534043d222fSJason Wang 2535f9e06c45SJason Wang tun_put_page(&tpage); 2536f9e06c45SJason Wang 2537043d222fSJason Wang ret = total_len; 2538043d222fSJason Wang goto out; 2539043d222fSJason Wang } 2540fe8dd45bSJason Wang 2541fe8dd45bSJason Wang ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter, 25425503fcecSJason Wang m->msg_flags & MSG_DONTWAIT, 25435503fcecSJason Wang m->msg_flags & MSG_MORE); 2544043d222fSJason Wang out: 254554f968d6SJason Wang tun_put(tun); 254654f968d6SJason Wang return ret; 254705c2828cSMichael S. Tsirkin } 254805c2828cSMichael S. Tsirkin 25491b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, 255005c2828cSMichael S. Tsirkin int flags) 255105c2828cSMichael S. Tsirkin { 255254f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 25539484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 2554fc72d1d5SJason Wang void *ptr = m->msg_control; 255505c2828cSMichael S. Tsirkin int ret; 255654f968d6SJason Wang 2557c33ee15bSWei Xu if (!tun) { 2558c33ee15bSWei Xu ret = -EBADFD; 2559fc72d1d5SJason Wang goto out_free; 2560c33ee15bSWei Xu } 256154f968d6SJason Wang 2562eda29772SRichard Cochran if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { 25633811ae76SGao feng ret = -EINVAL; 2564c33ee15bSWei Xu goto out_put_tun; 25653811ae76SGao feng } 2566eda29772SRichard Cochran if (flags & MSG_ERRQUEUE) { 2567eda29772SRichard Cochran ret = sock_recv_errqueue(sock->sk, m, total_len, 2568eda29772SRichard Cochran SOL_PACKET, TUN_TX_TIMESTAMP); 2569eda29772SRichard Cochran goto out; 2570eda29772SRichard Cochran } 2571fc72d1d5SJason Wang ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr); 257287897931SAlex Gartrell if (ret > (ssize_t)total_len) { 257342404c09SDavid S. Miller m->msg_flags |= MSG_TRUNC; 257442404c09SDavid S. Miller ret = flags & MSG_TRUNC ? ret : total_len; 257542404c09SDavid S. Miller } 25763811ae76SGao feng out: 257754f968d6SJason Wang tun_put(tun); 257805c2828cSMichael S. 
Tsirkin return ret; 2579c33ee15bSWei Xu 2580c33ee15bSWei Xu out_put_tun: 2581c33ee15bSWei Xu tun_put(tun); 2582fc72d1d5SJason Wang out_free: 2583fc72d1d5SJason Wang tun_ptr_free(ptr); 2584c33ee15bSWei Xu return ret; 258505c2828cSMichael S. Tsirkin } 258605c2828cSMichael S. Tsirkin 2587fc72d1d5SJason Wang static int tun_ptr_peek_len(void *ptr) 2588fc72d1d5SJason Wang { 2589fc72d1d5SJason Wang if (likely(ptr)) { 25901ffcbc85SJesper Dangaard Brouer if (tun_is_xdp_frame(ptr)) { 25911ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); 2592fc72d1d5SJason Wang 25931ffcbc85SJesper Dangaard Brouer return xdpf->len; 2594fc72d1d5SJason Wang } 2595fc72d1d5SJason Wang return __skb_array_len_with_tag(ptr); 2596fc72d1d5SJason Wang } else { 2597fc72d1d5SJason Wang return 0; 2598fc72d1d5SJason Wang } 2599fc72d1d5SJason Wang } 2600fc72d1d5SJason Wang 26011576d986SJason Wang static int tun_peek_len(struct socket *sock) 26021576d986SJason Wang { 26031576d986SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 26041576d986SJason Wang struct tun_struct *tun; 26051576d986SJason Wang int ret = 0; 26061576d986SJason Wang 26079484dc74Syuan linyu tun = tun_get(tfile); 26081576d986SJason Wang if (!tun) 26091576d986SJason Wang return 0; 26101576d986SJason Wang 2611fc72d1d5SJason Wang ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len); 26121576d986SJason Wang tun_put(tun); 26131576d986SJason Wang 26141576d986SJason Wang return ret; 26151576d986SJason Wang } 26161576d986SJason Wang 261705c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */ 261805c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = { 26191576d986SJason Wang .peek_len = tun_peek_len, 262005c2828cSMichael S. Tsirkin .sendmsg = tun_sendmsg, 262105c2828cSMichael S. Tsirkin .recvmsg = tun_recvmsg, 262205c2828cSMichael S. Tsirkin }; 262305c2828cSMichael S. Tsirkin 262433dccbb0SHerbert Xu static struct proto tun_proto = { 262533dccbb0SHerbert Xu .name = "tun", 262633dccbb0SHerbert Xu .owner = THIS_MODULE, 262754f968d6SJason Wang .obj_size = sizeof(struct tun_file), 262833dccbb0SHerbert Xu }; 2629f019a7a5SEric W. Biederman 2630980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun) 2631980c9e8cSDavid Woodhouse { 2632031f5e03SMichael S. Tsirkin return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); 2633980c9e8cSDavid Woodhouse } 2634980c9e8cSDavid Woodhouse 2635980c9e8cSDavid Woodhouse static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr, 2636980c9e8cSDavid Woodhouse char *buf) 2637980c9e8cSDavid Woodhouse { 2638980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2639980c9e8cSDavid Woodhouse return sprintf(buf, "0x%x\n", tun_flags(tun)); 2640980c9e8cSDavid Woodhouse } 2641980c9e8cSDavid Woodhouse 2642980c9e8cSDavid Woodhouse static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, 2643980c9e8cSDavid Woodhouse char *buf) 2644980c9e8cSDavid Woodhouse { 2645980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 26460625c883SEric W. Biederman return uid_valid(tun->owner)? 26470625c883SEric W. Biederman sprintf(buf, "%u\n", 26480625c883SEric W. Biederman from_kuid_munged(current_user_ns(), tun->owner)): 26490625c883SEric W. 
Biederman sprintf(buf, "-1\n"); 2650980c9e8cSDavid Woodhouse } 2651980c9e8cSDavid Woodhouse 2652980c9e8cSDavid Woodhouse static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, 2653980c9e8cSDavid Woodhouse char *buf) 2654980c9e8cSDavid Woodhouse { 2655980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 26560625c883SEric W. Biederman return gid_valid(tun->group) ? 26570625c883SEric W. Biederman sprintf(buf, "%u\n", 26580625c883SEric W. Biederman from_kgid_munged(current_user_ns(), tun->group)): 26590625c883SEric W. Biederman sprintf(buf, "-1\n"); 2660980c9e8cSDavid Woodhouse } 2661980c9e8cSDavid Woodhouse 2662980c9e8cSDavid Woodhouse static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); 2663980c9e8cSDavid Woodhouse static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL); 2664980c9e8cSDavid Woodhouse static DEVICE_ATTR(group, 0444, tun_show_group, NULL); 2665980c9e8cSDavid Woodhouse 2666c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = { 2667c4d33e24STakashi Iwai &dev_attr_tun_flags.attr, 2668c4d33e24STakashi Iwai &dev_attr_owner.attr, 2669c4d33e24STakashi Iwai &dev_attr_group.attr, 2670c4d33e24STakashi Iwai NULL 2671c4d33e24STakashi Iwai }; 2672c4d33e24STakashi Iwai 2673c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = { 2674c4d33e24STakashi Iwai .attrs = tun_dev_attrs 2675c4d33e24STakashi Iwai }; 2676c4d33e24STakashi Iwai 2677d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 26781da177e4SLinus Torvalds { 26791da177e4SLinus Torvalds struct tun_struct *tun; 268054f968d6SJason Wang struct tun_file *tfile = file->private_data; 26811da177e4SLinus Torvalds struct net_device *dev; 26821da177e4SLinus Torvalds int err; 26831da177e4SLinus Torvalds 26847c0c3b1aSJason Wang if (tfile->detached) 26857c0c3b1aSJason Wang return -EINVAL; 26867c0c3b1aSJason Wang 268790e33d45SPetar Penkov if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { 268890e33d45SPetar Penkov if (!capable(CAP_NET_ADMIN)) 268990e33d45SPetar Penkov return -EPERM; 269090e33d45SPetar Penkov 269190e33d45SPetar Penkov if (!(ifr->ifr_flags & IFF_NAPI) || 269290e33d45SPetar Penkov (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) 269390e33d45SPetar Penkov return -EINVAL; 269490e33d45SPetar Penkov } 269590e33d45SPetar Penkov 269674a3e5a7SEric W. Biederman dev = __dev_get_by_name(net, ifr->ifr_name); 269774a3e5a7SEric W. Biederman if (dev) { 2698f85ba780SDavid Woodhouse if (ifr->ifr_flags & IFF_TUN_EXCL) 2699f85ba780SDavid Woodhouse return -EBUSY; 270074a3e5a7SEric W. Biederman if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) 270174a3e5a7SEric W. Biederman tun = netdev_priv(dev); 270274a3e5a7SEric W. Biederman else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) 270374a3e5a7SEric W. Biederman tun = netdev_priv(dev); 270474a3e5a7SEric W. Biederman else 270574a3e5a7SEric W. Biederman return -EINVAL; 270674a3e5a7SEric W. Biederman 27078e6d91aeSJason Wang if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != 270840630b82SMichael S. 
Tsirkin !!(tun->flags & IFF_MULTI_QUEUE)) 27098e6d91aeSJason Wang return -EINVAL; 27108e6d91aeSJason Wang 2711cde8b15fSJason Wang if (tun_not_capable(tun)) 27122b980dbdSPaul Moore return -EPERM; 27135dbbaf2dSPaul Moore err = security_tun_dev_open(tun->security); 27142b980dbdSPaul Moore if (err < 0) 27152b980dbdSPaul Moore return err; 27162b980dbdSPaul Moore 271794317099SPetar Penkov err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, 2718af3fb24eSEric Dumazet ifr->ifr_flags & IFF_NAPI, 271977f22f92SYang Yingliang ifr->ifr_flags & IFF_NAPI_FRAGS, true); 2720a7385ba2SEric W. Biederman if (err < 0) 2721a7385ba2SEric W. Biederman return err; 27224008e97fSJason Wang 272340630b82SMichael S. Tsirkin if (tun->flags & IFF_MULTI_QUEUE && 2724e8dbad66SJason Wang (tun->numqueues + tun->numdisabled > 1)) { 2725e8dbad66SJason Wang /* One or more queue has already been attached, no need 2726e8dbad66SJason Wang * to initialize the device again. 2727e8dbad66SJason Wang */ 272883c1f36fSSabrina Dubroca netdev_state_change(dev); 2729e8dbad66SJason Wang return 0; 2730e8dbad66SJason Wang } 27319fffc5c6SSabrina Dubroca 27329fffc5c6SSabrina Dubroca tun->flags = (tun->flags & ~TUN_FEATURES) | 27339fffc5c6SSabrina Dubroca (ifr->ifr_flags & TUN_FEATURES); 273483c1f36fSSabrina Dubroca 273583c1f36fSSabrina Dubroca netdev_state_change(dev); 273683c1f36fSSabrina Dubroca } else { 27371da177e4SLinus Torvalds char *name; 27381da177e4SLinus Torvalds unsigned long flags = 0; 2739edfb6a14SJason Wang int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? 2740edfb6a14SJason Wang MAX_TAP_QUEUES : 1; 27411da177e4SLinus Torvalds 2742c260b772SEric W. Biederman if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2743ca6bb5d7SDavid Woodhouse return -EPERM; 27442b980dbdSPaul Moore err = security_tun_dev_create(); 27452b980dbdSPaul Moore if (err < 0) 27462b980dbdSPaul Moore return err; 2747ca6bb5d7SDavid Woodhouse 27481da177e4SLinus Torvalds /* Set dev type */ 27491da177e4SLinus Torvalds if (ifr->ifr_flags & IFF_TUN) { 27501da177e4SLinus Torvalds /* TUN device */ 275140630b82SMichael S. Tsirkin flags |= IFF_TUN; 27521da177e4SLinus Torvalds name = "tun%d"; 27531da177e4SLinus Torvalds } else if (ifr->ifr_flags & IFF_TAP) { 27541da177e4SLinus Torvalds /* TAP device */ 275540630b82SMichael S. Tsirkin flags |= IFF_TAP; 27561da177e4SLinus Torvalds name = "tap%d"; 27571da177e4SLinus Torvalds } else 275836989b90SKusanagi Kouichi return -EINVAL; 27591da177e4SLinus Torvalds 27601da177e4SLinus Torvalds if (*ifr->ifr_name) 27611da177e4SLinus Torvalds name = ifr->ifr_name; 27621da177e4SLinus Torvalds 2763c8d68e6bSJason Wang dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, 2764c835a677STom Gundersen NET_NAME_UNKNOWN, tun_setup, queues, 2765c835a677STom Gundersen queues); 2766edfb6a14SJason Wang 27671da177e4SLinus Torvalds if (!dev) 27681da177e4SLinus Torvalds return -ENOMEM; 27691da177e4SLinus Torvalds 2770fc54c658SPavel Emelyanov dev_net_set(dev, net); 2771f019a7a5SEric W. Biederman dev->rtnl_link_ops = &tun_link_ops; 2772fb7589a1SPavel Emelyanov dev->ifindex = tfile->ifindex; 2773c4d33e24STakashi Iwai dev->sysfs_groups[0] = &tun_attr_group; 2774758e43b7SStephen Hemminger 27751da177e4SLinus Torvalds tun = netdev_priv(dev); 27761da177e4SLinus Torvalds tun->dev = dev; 27771da177e4SLinus Torvalds tun->flags = flags; 2778f271b2ccSMax Krasnyansky tun->txflt.count = 0; 2779d9d52b51SMichael S. 
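		/* Context for the defaults below (informal summary, not from the
		 * original source): when IFF_VNET_HDR is negotiated, every
		 * packet passed over the fd is prefixed by a virtio_net_hdr of
		 * vnet_hdr_sz bytes, so the default is the bare 10-byte
		 * struct virtio_net_hdr; userspace can grow it later with
		 * TUNSETVNETHDRSZ (e.g. to 12 bytes for virtio_net_hdr_v1 or
		 * the mergeable-buffers layout).
		 */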
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

		tun->align = NET_SKB_PAD;
		tun->filter_attached = false;
		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
		tun->rx_batched = 0;
		RCU_INIT_POINTER(tun->steering_prog, NULL);

		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
		if (!tun->pcpu_stats) {
			err = -ENOMEM;
			goto err_free_dev;
		}

		spin_lock_init(&tun->lock);

		err = security_tun_dev_alloc_security(&tun->security);
		if (err < 0)
			goto err_free_stat;

		tun_net_init(dev);
		tun_flow_init(tun);

		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_STAG_TX;
		dev->features = dev->hw_features | NETIF_F_LLTX;
		dev->vlan_features = dev->features &
				     ~(NETIF_F_HW_VLAN_CTAG_TX |
				       NETIF_F_HW_VLAN_STAG_TX);

		tun->flags = (tun->flags & ~TUN_FEATURES) |
			      (ifr->ifr_flags & TUN_FEATURES);

		INIT_LIST_HEAD(&tun->disabled);
		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
				 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
		if (err < 0)
			goto err_free_flow;

		err = register_netdevice(tun->dev);
		if (err < 0)
			goto err_detach;
		/* free_netdev() won't check refcnt, to avoid a race
		 * with dev_put() we need to publish tun after registration.
		 */
		rcu_assign_pointer(tfile->tun, tun);
	}

	netif_carrier_on(tun->dev);

	/* Make sure persistent devices do not get stuck in
	 * xoff state.
	 */
	if (netif_running(tun->dev))
		netif_tx_wake_all_queues(tun->dev);

	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;

err_detach:
	tun_detach_all(dev);
	/* We are here because register_netdevice() has failed.
	 * If register_netdevice() already called tun_free_netdev()
	 * while dealing with the error, tun->pcpu_stats has been cleared.
284411fc7d5aSEric Dumazet */ 284511fc7d5aSEric Dumazet if (!tun->pcpu_stats) 2846ff244c6bSEric Dumazet goto err_free_dev; 2847ff244c6bSEric Dumazet 2848662ca437SJason Wang err_free_flow: 2849662ca437SJason Wang tun_flow_uninit(tun); 2850662ca437SJason Wang security_tun_dev_free_security(tun->security); 2851608b9977SPaolo Abeni err_free_stat: 2852608b9977SPaolo Abeni free_percpu(tun->pcpu_stats); 28531da177e4SLinus Torvalds err_free_dev: 28541da177e4SLinus Torvalds free_netdev(dev); 28551da177e4SLinus Torvalds return err; 28561da177e4SLinus Torvalds } 28571da177e4SLinus Torvalds 285812132768SKirill Tkhai static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr) 2859e3b99556SMark McLoughlin { 2860e3b99556SMark McLoughlin strcpy(ifr->ifr_name, tun->dev->name); 2861e3b99556SMark McLoughlin 2862980c9e8cSDavid Woodhouse ifr->ifr_flags = tun_flags(tun); 2863e3b99556SMark McLoughlin 2864e3b99556SMark McLoughlin } 2865e3b99556SMark McLoughlin 28665228ddc9SRusty Russell /* This is like a cut-down ethtool ops, except done via tun fd so no 28675228ddc9SRusty Russell * privs required. */ 286888255375SMichał Mirosław static int set_offload(struct tun_struct *tun, unsigned long arg) 28695228ddc9SRusty Russell { 2870c8f44affSMichał Mirosław netdev_features_t features = 0; 28715228ddc9SRusty Russell 28725228ddc9SRusty Russell if (arg & TUN_F_CSUM) { 287388255375SMichał Mirosław features |= NETIF_F_HW_CSUM; 28745228ddc9SRusty Russell arg &= ~TUN_F_CSUM; 28755228ddc9SRusty Russell 28765228ddc9SRusty Russell if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { 28775228ddc9SRusty Russell if (arg & TUN_F_TSO_ECN) { 28785228ddc9SRusty Russell features |= NETIF_F_TSO_ECN; 28795228ddc9SRusty Russell arg &= ~TUN_F_TSO_ECN; 28805228ddc9SRusty Russell } 28815228ddc9SRusty Russell if (arg & TUN_F_TSO4) 28825228ddc9SRusty Russell features |= NETIF_F_TSO; 28835228ddc9SRusty Russell if (arg & TUN_F_TSO6) 28845228ddc9SRusty Russell features |= NETIF_F_TSO6; 28855228ddc9SRusty Russell arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 28865228ddc9SRusty Russell } 28870c19f846SWillem de Bruijn 28880c19f846SWillem de Bruijn arg &= ~TUN_F_UFO; 28895228ddc9SRusty Russell } 28905228ddc9SRusty Russell 28915228ddc9SRusty Russell /* This gives the user a way to test for new features in future by 28925228ddc9SRusty Russell * trying to set them. 
*/ 28935228ddc9SRusty Russell if (arg) 28945228ddc9SRusty Russell return -EINVAL; 28955228ddc9SRusty Russell 289688255375SMichał Mirosław tun->set_features = features; 289709050957SYaroslav Isakov tun->dev->wanted_features &= ~TUN_USER_FEATURES; 289809050957SYaroslav Isakov tun->dev->wanted_features |= features; 289988255375SMichał Mirosław netdev_update_features(tun->dev); 29005228ddc9SRusty Russell 29015228ddc9SRusty Russell return 0; 29025228ddc9SRusty Russell } 29035228ddc9SRusty Russell 2904c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n) 2905c8d68e6bSJason Wang { 2906c8d68e6bSJason Wang int i; 2907c8d68e6bSJason Wang struct tun_file *tfile; 2908c8d68e6bSJason Wang 2909c8d68e6bSJason Wang for (i = 0; i < n; i++) { 2910b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 29118ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 29128ced425eSHannes Frederic Sowa sk_detach_filter(tfile->socket.sk); 29138ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2914c8d68e6bSJason Wang } 2915c8d68e6bSJason Wang 2916c8d68e6bSJason Wang tun->filter_attached = false; 2917c8d68e6bSJason Wang } 2918c8d68e6bSJason Wang 2919c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun) 2920c8d68e6bSJason Wang { 2921c8d68e6bSJason Wang int i, ret = 0; 2922c8d68e6bSJason Wang struct tun_file *tfile; 2923c8d68e6bSJason Wang 2924c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2925b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 29268ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 29278ced425eSHannes Frederic Sowa ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); 29288ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2929c8d68e6bSJason Wang if (ret) { 2930c8d68e6bSJason Wang tun_detach_filter(tun, i); 2931c8d68e6bSJason Wang return ret; 2932c8d68e6bSJason Wang } 2933c8d68e6bSJason Wang } 2934c8d68e6bSJason Wang 2935c8d68e6bSJason Wang tun->filter_attached = true; 2936c8d68e6bSJason Wang return ret; 2937c8d68e6bSJason Wang } 2938c8d68e6bSJason Wang 2939c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun) 2940c8d68e6bSJason Wang { 2941c8d68e6bSJason Wang struct tun_file *tfile; 2942c8d68e6bSJason Wang int i; 2943c8d68e6bSJason Wang 2944c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2945b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 2946c8d68e6bSJason Wang tfile->socket.sk->sk_sndbuf = tun->sndbuf; 2947c8d68e6bSJason Wang } 2948c8d68e6bSJason Wang } 2949c8d68e6bSJason Wang 2950cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr) 2951cde8b15fSJason Wang { 2952cde8b15fSJason Wang struct tun_file *tfile = file->private_data; 2953cde8b15fSJason Wang struct tun_struct *tun; 2954cde8b15fSJason Wang int ret = 0; 2955cde8b15fSJason Wang 2956cde8b15fSJason Wang rtnl_lock(); 2957cde8b15fSJason Wang 2958cde8b15fSJason Wang if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { 29594008e97fSJason Wang tun = tfile->detached; 29605dbbaf2dSPaul Moore if (!tun) { 2961cde8b15fSJason Wang ret = -EINVAL; 29625dbbaf2dSPaul Moore goto unlock; 29635dbbaf2dSPaul Moore } 29645dbbaf2dSPaul Moore ret = security_tun_dev_attach_queue(tun->security); 29655dbbaf2dSPaul Moore if (ret < 0) 29665dbbaf2dSPaul Moore goto unlock; 2967af3fb24eSEric Dumazet ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, 296877f22f92SYang Yingliang tun->flags & IFF_NAPI_FRAGS, true); 29694008e97fSJason Wang } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 2970b8deabd3SJason Wang tun 
= rtnl_dereference(tfile->tun); 297140630b82SMichael S. Tsirkin if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) 29724008e97fSJason Wang ret = -EINVAL; 2973cde8b15fSJason Wang else 29744008e97fSJason Wang __tun_detach(tfile, false); 29754008e97fSJason Wang } else 2976cde8b15fSJason Wang ret = -EINVAL; 2977cde8b15fSJason Wang 297883c1f36fSSabrina Dubroca if (ret >= 0) 297983c1f36fSSabrina Dubroca netdev_state_change(tun->dev); 298083c1f36fSSabrina Dubroca 29815dbbaf2dSPaul Moore unlock: 2982cde8b15fSJason Wang rtnl_unlock(); 2983cde8b15fSJason Wang return ret; 2984cde8b15fSJason Wang } 2985cde8b15fSJason Wang 2986cd5681d7SJason Wang static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p, 2987cd5681d7SJason Wang void __user *data) 298896f84061SJason Wang { 298996f84061SJason Wang struct bpf_prog *prog; 299096f84061SJason Wang int fd; 299196f84061SJason Wang 299296f84061SJason Wang if (copy_from_user(&fd, data, sizeof(fd))) 299396f84061SJason Wang return -EFAULT; 299496f84061SJason Wang 299596f84061SJason Wang if (fd == -1) { 299696f84061SJason Wang prog = NULL; 299796f84061SJason Wang } else { 299896f84061SJason Wang prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); 299996f84061SJason Wang if (IS_ERR(prog)) 300096f84061SJason Wang return PTR_ERR(prog); 300196f84061SJason Wang } 300296f84061SJason Wang 3003cd5681d7SJason Wang return __tun_set_ebpf(tun, prog_p, prog); 300496f84061SJason Wang } 300596f84061SJason Wang 300650857e2aSArnd Bergmann static long __tun_chr_ioctl(struct file *file, unsigned int cmd, 300750857e2aSArnd Bergmann unsigned long arg, int ifreq_len) 30081da177e4SLinus Torvalds { 300936b50babSEric W. Biederman struct tun_file *tfile = file->private_data; 3010f663706aSKirill Tkhai struct net *net = sock_net(&tfile->sk); 3011631ab46bSEric W. Biederman struct tun_struct *tun; 30121da177e4SLinus Torvalds void __user* argp = (void __user*)arg; 301326d31925SNicolas Dichtel unsigned int ifindex, carrier; 30141da177e4SLinus Torvalds struct ifreq ifr; 30150625c883SEric W. Biederman kuid_t owner; 30160625c883SEric W. Biederman kgid_t group; 301733dccbb0SHerbert Xu int sndbuf; 3018d9d52b51SMichael S. Tsirkin int vnet_hdr_sz; 30191cf8e410SMichael S. Tsirkin int le; 3020f271b2ccSMax Krasnyansky int ret; 302183c1f36fSSabrina Dubroca bool do_notify = false; 30221da177e4SLinus Torvalds 3023f2780d6dSKirill Tkhai if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || 3024f2780d6dSKirill Tkhai (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) { 302550857e2aSArnd Bergmann if (copy_from_user(&ifr, argp, ifreq_len)) 30261da177e4SLinus Torvalds return -EFAULT; 30278bbb1813SDavid S. Miller } else { 3028a117dacdSMathias Krause memset(&ifr, 0, sizeof(ifr)); 30298bbb1813SDavid S. Miller } 3030631ab46bSEric W. Biederman if (cmd == TUNGETFEATURES) { 3031631ab46bSEric W. Biederman /* Currently this just means: "what IFF flags are valid?". 3032631ab46bSEric W. Biederman * This is needed because we never checked for invalid flags on 3033031f5e03SMichael S. Tsirkin * TUNSETIFF. 3034031f5e03SMichael S. Tsirkin */ 3035031f5e03SMichael S. Tsirkin return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES, 3036631ab46bSEric W. 
Biederman (unsigned int __user*)argp); 3037f663706aSKirill Tkhai } else if (cmd == TUNSETQUEUE) { 3038cde8b15fSJason Wang return tun_set_queue(file, &ifr); 3039f663706aSKirill Tkhai } else if (cmd == SIOCGSKNS) { 3040f663706aSKirill Tkhai if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 3041f663706aSKirill Tkhai return -EPERM; 3042f663706aSKirill Tkhai return open_related_ns(&net->ns, get_net_ns); 3043f663706aSKirill Tkhai } 3044631ab46bSEric W. Biederman 3045c8d68e6bSJason Wang ret = 0; 3046876bfd4dSHerbert Xu rtnl_lock(); 3047876bfd4dSHerbert Xu 30489484dc74Syuan linyu tun = tun_get(tfile); 30490f16bc13SGao Feng if (cmd == TUNSETIFF) { 30500f16bc13SGao Feng ret = -EEXIST; 30510f16bc13SGao Feng if (tun) 30520f16bc13SGao Feng goto unlock; 30530f16bc13SGao Feng 30541da177e4SLinus Torvalds ifr.ifr_name[IFNAMSIZ-1] = '\0'; 30551da177e4SLinus Torvalds 3056f2780d6dSKirill Tkhai ret = tun_set_iff(net, file, &ifr); 30571da177e4SLinus Torvalds 3058876bfd4dSHerbert Xu if (ret) 3059876bfd4dSHerbert Xu goto unlock; 30601da177e4SLinus Torvalds 306150857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 3062876bfd4dSHerbert Xu ret = -EFAULT; 3063876bfd4dSHerbert Xu goto unlock; 30641da177e4SLinus Torvalds } 3065fb7589a1SPavel Emelyanov if (cmd == TUNSETIFINDEX) { 3066fb7589a1SPavel Emelyanov ret = -EPERM; 3067fb7589a1SPavel Emelyanov if (tun) 3068fb7589a1SPavel Emelyanov goto unlock; 3069fb7589a1SPavel Emelyanov 3070fb7589a1SPavel Emelyanov ret = -EFAULT; 3071fb7589a1SPavel Emelyanov if (copy_from_user(&ifindex, argp, sizeof(ifindex))) 3072fb7589a1SPavel Emelyanov goto unlock; 3073fb7589a1SPavel Emelyanov 3074fb7589a1SPavel Emelyanov ret = 0; 3075fb7589a1SPavel Emelyanov tfile->ifindex = ifindex; 3076fb7589a1SPavel Emelyanov goto unlock; 3077fb7589a1SPavel Emelyanov } 30781da177e4SLinus Torvalds 3079876bfd4dSHerbert Xu ret = -EBADFD; 30801da177e4SLinus Torvalds if (!tun) 3081876bfd4dSHerbert Xu goto unlock; 30821da177e4SLinus Torvalds 30833424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd); 30841da177e4SLinus Torvalds 30850c3e0e3bSKirill Tkhai net = dev_net(tun->dev); 3086631ab46bSEric W. Biederman ret = 0; 30871da177e4SLinus Torvalds switch (cmd) { 3088e3b99556SMark McLoughlin case TUNGETIFF: 308912132768SKirill Tkhai tun_get_iff(tun, &ifr); 3090e3b99556SMark McLoughlin 30913d407a80SPavel Emelyanov if (tfile->detached) 30923d407a80SPavel Emelyanov ifr.ifr_flags |= IFF_DETACH_QUEUE; 3093849c9b6fSPavel Emelyanov if (!tfile->socket.sk->sk_filter) 3094849c9b6fSPavel Emelyanov ifr.ifr_flags |= IFF_NOFILTER; 30953d407a80SPavel Emelyanov 309650857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 3097631ab46bSEric W. Biederman ret = -EFAULT; 3098e3b99556SMark McLoughlin break; 3099e3b99556SMark McLoughlin 31001da177e4SLinus Torvalds case TUNSETNOCSUM: 31011da177e4SLinus Torvalds /* Disable/Enable checksum */ 31021da177e4SLinus Torvalds 310388255375SMichał Mirosław /* [unimplemented] */ 31043424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n", 31056b8a66eeSJoe Perches arg ? "disabled" : "enabled"); 31061da177e4SLinus Torvalds break; 31071da177e4SLinus Torvalds 31081da177e4SLinus Torvalds case TUNSETPERSIST: 310954f968d6SJason Wang /* Disable/Enable persist mode. Keep an extra reference to the 311054f968d6SJason Wang * module to prevent the module being unprobed. 311154f968d6SJason Wang */ 311240630b82SMichael S. Tsirkin if (arg && !(tun->flags & IFF_PERSIST)) { 311340630b82SMichael S. 
Tsirkin tun->flags |= IFF_PERSIST; 311454f968d6SJason Wang __module_get(THIS_MODULE); 311583c1f36fSSabrina Dubroca do_notify = true; 3116dd38bd85SJason Wang } 311740630b82SMichael S. Tsirkin if (!arg && (tun->flags & IFF_PERSIST)) { 311840630b82SMichael S. Tsirkin tun->flags &= ~IFF_PERSIST; 311954f968d6SJason Wang module_put(THIS_MODULE); 312083c1f36fSSabrina Dubroca do_notify = true; 312154f968d6SJason Wang } 31221da177e4SLinus Torvalds 31233424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "persist %s\n", 31246b8a66eeSJoe Perches arg ? "enabled" : "disabled"); 31251da177e4SLinus Torvalds break; 31261da177e4SLinus Torvalds 31271da177e4SLinus Torvalds case TUNSETOWNER: 31281da177e4SLinus Torvalds /* Set owner of the device */ 31290625c883SEric W. Biederman owner = make_kuid(current_user_ns(), arg); 31300625c883SEric W. Biederman if (!uid_valid(owner)) { 31310625c883SEric W. Biederman ret = -EINVAL; 31320625c883SEric W. Biederman break; 31330625c883SEric W. Biederman } 31340625c883SEric W. Biederman tun->owner = owner; 313583c1f36fSSabrina Dubroca do_notify = true; 31363424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "owner set to %u\n", 31370625c883SEric W. Biederman from_kuid(&init_user_ns, tun->owner)); 31381da177e4SLinus Torvalds break; 31391da177e4SLinus Torvalds 31408c644623SGuido Guenther case TUNSETGROUP: 31418c644623SGuido Guenther /* Set group of the device */ 31420625c883SEric W. Biederman group = make_kgid(current_user_ns(), arg); 31430625c883SEric W. Biederman if (!gid_valid(group)) { 31440625c883SEric W. Biederman ret = -EINVAL; 31450625c883SEric W. Biederman break; 31460625c883SEric W. Biederman } 31470625c883SEric W. Biederman tun->group = group; 314883c1f36fSSabrina Dubroca do_notify = true; 31493424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "group set to %u\n", 31500625c883SEric W. Biederman from_kgid(&init_user_ns, tun->group)); 31518c644623SGuido Guenther break; 31528c644623SGuido Guenther 3153ff4cc3acSMike Kershaw case TUNSETLINK: 3154ff4cc3acSMike Kershaw /* Only allow setting the type when the interface is down */ 3155ff4cc3acSMike Kershaw if (tun->dev->flags & IFF_UP) { 31563424170fSMichal Kubecek netif_info(tun, drv, tun->dev, 31576b8a66eeSJoe Perches "Linktype set failed because interface is up\n"); 315848abfe05SDavid S. Miller ret = -EBUSY; 3159ff4cc3acSMike Kershaw } else { 3160ff4cc3acSMike Kershaw tun->dev->type = (int) arg; 31613424170fSMichal Kubecek netif_info(tun, drv, tun->dev, "linktype set to %d\n", 31626b8a66eeSJoe Perches tun->dev->type); 316348abfe05SDavid S. Miller ret = 0; 3164ff4cc3acSMike Kershaw } 3165631ab46bSEric W. Biederman break; 3166ff4cc3acSMike Kershaw 31671da177e4SLinus Torvalds case TUNSETDEBUG: 31683424170fSMichal Kubecek tun->msg_enable = (u32)arg; 31691da177e4SLinus Torvalds break; 31703424170fSMichal Kubecek 31715228ddc9SRusty Russell case TUNSETOFFLOAD: 317288255375SMichał Mirosław ret = set_offload(tun, arg); 3173631ab46bSEric W. Biederman break; 31745228ddc9SRusty Russell 3175f271b2ccSMax Krasnyansky case TUNSETTXFILTER: 3176f271b2ccSMax Krasnyansky /* Can be set only for TAPs */ 3177631ab46bSEric W. Biederman ret = -EINVAL; 317840630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 3179631ab46bSEric W. Biederman break; 3180c0e5a8c2SHarvey Harrison ret = update_filter(&tun->txflt, (void __user *)arg); 3181631ab46bSEric W. 
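		/* Userspace side of TUNSETTXFILTER, for reference (illustrative
		 * sketch, not part of this driver): the argument is a
		 * struct tun_filter from <linux/if_tun.h> followed by
		 * "count" exact MAC addresses, e.g.
		 *
		 *	struct {
		 *		struct tun_filter base;
		 *		__u8 addr[2][ETH_ALEN];
		 *	} filt = {
		 *		.base = { .flags = TUN_FLT_ALLMULTI, .count = 2 },
		 *	};
		 *	// fill filt.addr[] with the allowed unicast MACs, then:
		 *	ioctl(tap_fd, TUNSETTXFILTER, &filt);
		 */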
Biederman break; 31821da177e4SLinus Torvalds 31831da177e4SLinus Torvalds case SIOCGIFHWADDR: 3184b595076aSUwe Kleine-König /* Get hw address */ 3185f271b2ccSMax Krasnyansky memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); 3186f271b2ccSMax Krasnyansky ifr.ifr_hwaddr.sa_family = tun->dev->type; 318750857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 3188631ab46bSEric W. Biederman ret = -EFAULT; 3189631ab46bSEric W. Biederman break; 31901da177e4SLinus Torvalds 31911da177e4SLinus Torvalds case SIOCSIFHWADDR: 3192f271b2ccSMax Krasnyansky /* Set hw address */ 31933a37a963SPetr Machata ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL); 3194631ab46bSEric W. Biederman break; 319533dccbb0SHerbert Xu 319633dccbb0SHerbert Xu case TUNGETSNDBUF: 319754f968d6SJason Wang sndbuf = tfile->socket.sk->sk_sndbuf; 319833dccbb0SHerbert Xu if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) 319933dccbb0SHerbert Xu ret = -EFAULT; 320033dccbb0SHerbert Xu break; 320133dccbb0SHerbert Xu 320233dccbb0SHerbert Xu case TUNSETSNDBUF: 320333dccbb0SHerbert Xu if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { 320433dccbb0SHerbert Xu ret = -EFAULT; 320533dccbb0SHerbert Xu break; 320633dccbb0SHerbert Xu } 320793161922SCraig Gallek if (sndbuf <= 0) { 320893161922SCraig Gallek ret = -EINVAL; 320993161922SCraig Gallek break; 321093161922SCraig Gallek } 321133dccbb0SHerbert Xu 3212c8d68e6bSJason Wang tun->sndbuf = sndbuf; 3213c8d68e6bSJason Wang tun_set_sndbuf(tun); 321433dccbb0SHerbert Xu break; 321533dccbb0SHerbert Xu 3216d9d52b51SMichael S. Tsirkin case TUNGETVNETHDRSZ: 3217d9d52b51SMichael S. Tsirkin vnet_hdr_sz = tun->vnet_hdr_sz; 3218d9d52b51SMichael S. Tsirkin if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) 3219d9d52b51SMichael S. Tsirkin ret = -EFAULT; 3220d9d52b51SMichael S. Tsirkin break; 3221d9d52b51SMichael S. Tsirkin 3222d9d52b51SMichael S. Tsirkin case TUNSETVNETHDRSZ: 3223d9d52b51SMichael S. Tsirkin if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { 3224d9d52b51SMichael S. Tsirkin ret = -EFAULT; 3225d9d52b51SMichael S. Tsirkin break; 3226d9d52b51SMichael S. Tsirkin } 3227d9d52b51SMichael S. Tsirkin if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { 3228d9d52b51SMichael S. Tsirkin ret = -EINVAL; 3229d9d52b51SMichael S. Tsirkin break; 3230d9d52b51SMichael S. Tsirkin } 3231d9d52b51SMichael S. Tsirkin 3232d9d52b51SMichael S. Tsirkin tun->vnet_hdr_sz = vnet_hdr_sz; 3233d9d52b51SMichael S. Tsirkin break; 3234d9d52b51SMichael S. Tsirkin 32351cf8e410SMichael S. Tsirkin case TUNGETVNETLE: 32361cf8e410SMichael S. Tsirkin le = !!(tun->flags & TUN_VNET_LE); 32371cf8e410SMichael S. Tsirkin if (put_user(le, (int __user *)argp)) 32381cf8e410SMichael S. Tsirkin ret = -EFAULT; 32391cf8e410SMichael S. Tsirkin break; 32401cf8e410SMichael S. Tsirkin 32411cf8e410SMichael S. Tsirkin case TUNSETVNETLE: 32421cf8e410SMichael S. Tsirkin if (get_user(le, (int __user *)argp)) { 32431cf8e410SMichael S. Tsirkin ret = -EFAULT; 32441cf8e410SMichael S. Tsirkin break; 32451cf8e410SMichael S. Tsirkin } 32461cf8e410SMichael S. Tsirkin if (le) 32471cf8e410SMichael S. Tsirkin tun->flags |= TUN_VNET_LE; 32481cf8e410SMichael S. Tsirkin else 32491cf8e410SMichael S. Tsirkin tun->flags &= ~TUN_VNET_LE; 32501cf8e410SMichael S. Tsirkin break; 32511cf8e410SMichael S. 
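	/* How userspace typically drives the vnet header knobs above
	 * (illustrative sketch, not part of this driver): negotiate
	 * IFF_VNET_HDR at TUNSETIFF time, then e.g.
	 *
	 *	int hdr_sz = sizeof(struct virtio_net_hdr_v1);	// 12 bytes
	 *	int le = 1;
	 *	ioctl(fd, TUNSETVNETHDRSZ, &hdr_sz);
	 *	ioctl(fd, TUNSETVNETLE, &le);	// force little-endian header fields
	 *
	 * TUNSETVNETBE below is the mirror image for the legacy cross-endian
	 * virtio case and depends on CONFIG_TUN_VNET_CROSS_LE.
	 */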
Tsirkin 32528b8e658bSGreg Kurz case TUNGETVNETBE: 32538b8e658bSGreg Kurz ret = tun_get_vnet_be(tun, argp); 32548b8e658bSGreg Kurz break; 32558b8e658bSGreg Kurz 32568b8e658bSGreg Kurz case TUNSETVNETBE: 32578b8e658bSGreg Kurz ret = tun_set_vnet_be(tun, argp); 32588b8e658bSGreg Kurz break; 32598b8e658bSGreg Kurz 326099405162SMichael S. Tsirkin case TUNATTACHFILTER: 326199405162SMichael S. Tsirkin /* Can be set only for TAPs */ 326299405162SMichael S. Tsirkin ret = -EINVAL; 326340630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 326499405162SMichael S. Tsirkin break; 326599405162SMichael S. Tsirkin ret = -EFAULT; 326654f968d6SJason Wang if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) 326799405162SMichael S. Tsirkin break; 326899405162SMichael S. Tsirkin 3269c8d68e6bSJason Wang ret = tun_attach_filter(tun); 327099405162SMichael S. Tsirkin break; 327199405162SMichael S. Tsirkin 327299405162SMichael S. Tsirkin case TUNDETACHFILTER: 327399405162SMichael S. Tsirkin /* Can be set only for TAPs */ 327499405162SMichael S. Tsirkin ret = -EINVAL; 327540630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 327699405162SMichael S. Tsirkin break; 3277c8d68e6bSJason Wang ret = 0; 3278c8d68e6bSJason Wang tun_detach_filter(tun, tun->numqueues); 327999405162SMichael S. Tsirkin break; 328099405162SMichael S. Tsirkin 328176975e9cSPavel Emelyanov case TUNGETFILTER: 328276975e9cSPavel Emelyanov ret = -EINVAL; 328340630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 328476975e9cSPavel Emelyanov break; 328576975e9cSPavel Emelyanov ret = -EFAULT; 328676975e9cSPavel Emelyanov if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) 328776975e9cSPavel Emelyanov break; 328876975e9cSPavel Emelyanov ret = 0; 328976975e9cSPavel Emelyanov break; 329076975e9cSPavel Emelyanov 329196f84061SJason Wang case TUNSETSTEERINGEBPF: 3292cd5681d7SJason Wang ret = tun_set_ebpf(tun, &tun->steering_prog, argp); 329396f84061SJason Wang break; 329496f84061SJason Wang 3295aff3d70aSJason Wang case TUNSETFILTEREBPF: 3296aff3d70aSJason Wang ret = tun_set_ebpf(tun, &tun->filter_prog, argp); 3297aff3d70aSJason Wang break; 3298aff3d70aSJason Wang 329926d31925SNicolas Dichtel case TUNSETCARRIER: 330026d31925SNicolas Dichtel ret = -EFAULT; 330126d31925SNicolas Dichtel if (copy_from_user(&carrier, argp, sizeof(carrier))) 330226d31925SNicolas Dichtel goto unlock; 330326d31925SNicolas Dichtel 330426d31925SNicolas Dichtel ret = tun_net_change_carrier(tun->dev, (bool)carrier); 330526d31925SNicolas Dichtel break; 330626d31925SNicolas Dichtel 33070c3e0e3bSKirill Tkhai case TUNGETDEVNETNS: 33080c3e0e3bSKirill Tkhai ret = -EPERM; 33090c3e0e3bSKirill Tkhai if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 33100c3e0e3bSKirill Tkhai goto unlock; 33110c3e0e3bSKirill Tkhai ret = open_related_ns(&net->ns, get_net_ns); 33120c3e0e3bSKirill Tkhai break; 33130c3e0e3bSKirill Tkhai 33141da177e4SLinus Torvalds default: 3315631ab46bSEric W. Biederman ret = -EINVAL; 3316631ab46bSEric W. Biederman break; 3317ee289b64SJoe Perches } 33181da177e4SLinus Torvalds 331983c1f36fSSabrina Dubroca if (do_notify) 332083c1f36fSSabrina Dubroca netdev_state_change(tun->dev); 332183c1f36fSSabrina Dubroca 3322876bfd4dSHerbert Xu unlock: 3323876bfd4dSHerbert Xu rtnl_unlock(); 3324876bfd4dSHerbert Xu if (tun) 3325631ab46bSEric W. Biederman tun_put(tun); 3326631ab46bSEric W. 
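	/* Typical offload negotiation against the dispatcher above, for
	 * reference (illustrative sketch, not part of this driver): a
	 * virtio-net style backend usually enables checksum and TSO on the
	 * tap fd with something like
	 *
	 *	ioctl(fd, TUNSETOFFLOAD,
	 *	      TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_TSO_ECN);
	 *
	 * set_offload() maps those TUN_F_* bits onto NETIF_F_* features and
	 * rejects any bit it does not understand.
	 */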
Biederman return ret; 33271da177e4SLinus Torvalds } 33281da177e4SLinus Torvalds 332950857e2aSArnd Bergmann static long tun_chr_ioctl(struct file *file, 333050857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 333150857e2aSArnd Bergmann { 333250857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); 333350857e2aSArnd Bergmann } 333450857e2aSArnd Bergmann 333550857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 333650857e2aSArnd Bergmann static long tun_chr_compat_ioctl(struct file *file, 333750857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 333850857e2aSArnd Bergmann { 333950857e2aSArnd Bergmann switch (cmd) { 334050857e2aSArnd Bergmann case TUNSETIFF: 334150857e2aSArnd Bergmann case TUNGETIFF: 334250857e2aSArnd Bergmann case TUNSETTXFILTER: 334350857e2aSArnd Bergmann case TUNGETSNDBUF: 334450857e2aSArnd Bergmann case TUNSETSNDBUF: 334550857e2aSArnd Bergmann case SIOCGIFHWADDR: 334650857e2aSArnd Bergmann case SIOCSIFHWADDR: 334750857e2aSArnd Bergmann arg = (unsigned long)compat_ptr(arg); 334850857e2aSArnd Bergmann break; 334950857e2aSArnd Bergmann default: 335050857e2aSArnd Bergmann arg = (compat_ulong_t)arg; 335150857e2aSArnd Bergmann break; 335250857e2aSArnd Bergmann } 335350857e2aSArnd Bergmann 335450857e2aSArnd Bergmann /* 335550857e2aSArnd Bergmann * compat_ifreq is shorter than ifreq, so we must not access beyond 335650857e2aSArnd Bergmann * the end of that structure. All fields that are used in this 335750857e2aSArnd Bergmann * driver are compatible though, we don't need to convert the 335850857e2aSArnd Bergmann * contents. 335950857e2aSArnd Bergmann */ 336050857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); 336150857e2aSArnd Bergmann } 336250857e2aSArnd Bergmann #endif /* CONFIG_COMPAT */ 336350857e2aSArnd Bergmann 33641da177e4SLinus Torvalds static int tun_chr_fasync(int fd, struct file *file, int on) 33651da177e4SLinus Torvalds { 336654f968d6SJason Wang struct tun_file *tfile = file->private_data; 33671da177e4SLinus Torvalds int ret; 33681da177e4SLinus Torvalds 336954f968d6SJason Wang if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) 33709d319522SJonathan Corbet goto out; 33711da177e4SLinus Torvalds 33721da177e4SLinus Torvalds if (on) { 337301919134SEric W. Biederman __f_setown(file, task_pid(current), PIDTYPE_TGID, 0); 337454f968d6SJason Wang tfile->flags |= TUN_FASYNC; 33751da177e4SLinus Torvalds } else 337654f968d6SJason Wang tfile->flags &= ~TUN_FASYNC; 33779d319522SJonathan Corbet ret = 0; 33789d319522SJonathan Corbet out: 33799d319522SJonathan Corbet return ret; 33801da177e4SLinus Torvalds } 33811da177e4SLinus Torvalds 33821da177e4SLinus Torvalds static int tun_chr_open(struct inode *inode, struct file * file) 33831da177e4SLinus Torvalds { 3384140e807dSEric W. Biederman struct net *net = current->nsproxy->net_ns; 3385631ab46bSEric W. Biederman struct tun_file *tfile; 3386deed49fbSThomas Gleixner 3387140e807dSEric W. Biederman tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, 338811aa9c28SEric W. Biederman &tun_proto, 0); 3389631ab46bSEric W. Biederman if (!tfile) 3390631ab46bSEric W. 
Biederman return -ENOMEM; 3391b196d88aSJason Wang if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) { 3392b196d88aSJason Wang sk_free(&tfile->sk); 3393b196d88aSJason Wang return -ENOMEM; 3394b196d88aSJason Wang } 3395b196d88aSJason Wang 3396c7256f57SEric Dumazet mutex_init(&tfile->napi_mutex); 3397c956674bSMonam Agarwal RCU_INIT_POINTER(tfile->tun, NULL); 339854f968d6SJason Wang tfile->flags = 0; 3399fb7589a1SPavel Emelyanov tfile->ifindex = 0; 340054f968d6SJason Wang 3401333f7909SAl Viro init_waitqueue_head(&tfile->socket.wq.wait); 340254f968d6SJason Wang 340354f968d6SJason Wang tfile->socket.file = file; 340454f968d6SJason Wang tfile->socket.ops = &tun_socket_ops; 340554f968d6SJason Wang 340654f968d6SJason Wang sock_init_data(&tfile->socket, &tfile->sk); 340754f968d6SJason Wang 340854f968d6SJason Wang tfile->sk.sk_write_space = tun_sock_write_space; 340954f968d6SJason Wang tfile->sk.sk_sndbuf = INT_MAX; 341054f968d6SJason Wang 3411631ab46bSEric W. Biederman file->private_data = tfile; 34124008e97fSJason Wang INIT_LIST_HEAD(&tfile->next); 341354f968d6SJason Wang 341419a6afb2SJason Wang sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); 341519a6afb2SJason Wang 34161da177e4SLinus Torvalds return 0; 34171da177e4SLinus Torvalds } 34181da177e4SLinus Torvalds 34191da177e4SLinus Torvalds static int tun_chr_close(struct inode *inode, struct file *file) 34201da177e4SLinus Torvalds { 3421631ab46bSEric W. Biederman struct tun_file *tfile = file->private_data; 34221da177e4SLinus Torvalds 3423c8d68e6bSJason Wang tun_detach(tfile, true); 34241da177e4SLinus Torvalds 34251da177e4SLinus Torvalds return 0; 34261da177e4SLinus Torvalds } 34271da177e4SLinus Torvalds 342893e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 34299484dc74Syuan linyu static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) 343093e14b6dSMasatake YAMATO { 34319484dc74Syuan linyu struct tun_file *tfile = file->private_data; 343293e14b6dSMasatake YAMATO struct tun_struct *tun; 343393e14b6dSMasatake YAMATO struct ifreq ifr; 343493e14b6dSMasatake YAMATO 343593e14b6dSMasatake YAMATO memset(&ifr, 0, sizeof(ifr)); 343693e14b6dSMasatake YAMATO 343793e14b6dSMasatake YAMATO rtnl_lock(); 34389484dc74Syuan linyu tun = tun_get(tfile); 343993e14b6dSMasatake YAMATO if (tun) 344012132768SKirill Tkhai tun_get_iff(tun, &ifr); 344193e14b6dSMasatake YAMATO rtnl_unlock(); 344293e14b6dSMasatake YAMATO 344393e14b6dSMasatake YAMATO if (tun) 344493e14b6dSMasatake YAMATO tun_put(tun); 344593e14b6dSMasatake YAMATO 3446a3816ab0SJoe Perches seq_printf(m, "iff:\t%s\n", ifr.ifr_name); 344793e14b6dSMasatake YAMATO } 344893e14b6dSMasatake YAMATO #endif 344993e14b6dSMasatake YAMATO 3450d54b1fdbSArjan van de Ven static const struct file_operations tun_fops = { 34511da177e4SLinus Torvalds .owner = THIS_MODULE, 34521da177e4SLinus Torvalds .llseek = no_llseek, 34539b067034SAl Viro .read_iter = tun_chr_read_iter, 3454f5ff53b4SAl Viro .write_iter = tun_chr_write_iter, 34551da177e4SLinus Torvalds .poll = tun_chr_poll, 3456876bfd4dSHerbert Xu .unlocked_ioctl = tun_chr_ioctl, 345750857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 345850857e2aSArnd Bergmann .compat_ioctl = tun_chr_compat_ioctl, 345950857e2aSArnd Bergmann #endif 34601da177e4SLinus Torvalds .open = tun_chr_open, 34611da177e4SLinus Torvalds .release = tun_chr_close, 346293e14b6dSMasatake YAMATO .fasync = tun_chr_fasync, 346393e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 346493e14b6dSMasatake YAMATO .show_fdinfo = tun_chr_show_fdinfo, 346593e14b6dSMasatake YAMATO #endif 34661da177e4SLinus Torvalds }; 34671da177e4SLinus 
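/* For reference, the canonical way userspace reaches the file operations
 * above (illustrative sketch, not part of this driver; error handling
 * trimmed):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	int tun_alloc(char *name)	// hypothetical helper
 *	{
 *		struct ifreq ifr;
 *		int fd = open("/dev/net/tun", O_RDWR);
 *
 *		if (fd < 0)
 *			return fd;
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TAP | IFF_NO_PI;	// or IFF_TUN
 *		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
 *		if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		strcpy(name, ifr.ifr_name);	// kernel may have picked "tapN"
 *		return fd;
 *	}
 *
 * After TUNSETIFF succeeds, read()/write() on the fd carry frames for the
 * netdevice, and the remaining TUN* ioctls in this file apply to it.
 */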
Torvalds 34681da177e4SLinus Torvalds static struct miscdevice tun_miscdev = { 34691da177e4SLinus Torvalds .minor = TUN_MINOR, 34701da177e4SLinus Torvalds .name = "tun", 3471e454cea2SKay Sievers .nodename = "net/tun", 34721da177e4SLinus Torvalds .fops = &tun_fops, 34731da177e4SLinus Torvalds }; 34741da177e4SLinus Torvalds 34751da177e4SLinus Torvalds /* ethtool interface */ 34761da177e4SLinus Torvalds 34774e24f2ddSChas Williams static void tun_default_link_ksettings(struct net_device *dev, 347829ccc49dSPhilippe Reynes struct ethtool_link_ksettings *cmd) 34791da177e4SLinus Torvalds { 348029ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, supported); 348129ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, advertising); 348229ccc49dSPhilippe Reynes cmd->base.speed = SPEED_10; 348329ccc49dSPhilippe Reynes cmd->base.duplex = DUPLEX_FULL; 348429ccc49dSPhilippe Reynes cmd->base.port = PORT_TP; 348529ccc49dSPhilippe Reynes cmd->base.phy_address = 0; 348629ccc49dSPhilippe Reynes cmd->base.autoneg = AUTONEG_DISABLE; 34874e24f2ddSChas Williams } 34884e24f2ddSChas Williams 34894e24f2ddSChas Williams static int tun_get_link_ksettings(struct net_device *dev, 34904e24f2ddSChas Williams struct ethtool_link_ksettings *cmd) 34914e24f2ddSChas Williams { 34924e24f2ddSChas Williams struct tun_struct *tun = netdev_priv(dev); 34934e24f2ddSChas Williams 34944e24f2ddSChas Williams memcpy(cmd, &tun->link_ksettings, sizeof(*cmd)); 34954e24f2ddSChas Williams return 0; 34964e24f2ddSChas Williams } 34974e24f2ddSChas Williams 34984e24f2ddSChas Williams static int tun_set_link_ksettings(struct net_device *dev, 34994e24f2ddSChas Williams const struct ethtool_link_ksettings *cmd) 35004e24f2ddSChas Williams { 35014e24f2ddSChas Williams struct tun_struct *tun = netdev_priv(dev); 35024e24f2ddSChas Williams 35034e24f2ddSChas Williams memcpy(&tun->link_ksettings, cmd, sizeof(*cmd)); 35041da177e4SLinus Torvalds return 0; 35051da177e4SLinus Torvalds } 35061da177e4SLinus Torvalds 35071da177e4SLinus Torvalds static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 35081da177e4SLinus Torvalds { 35091da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 35101da177e4SLinus Torvalds 351133a5ba14SRick Jones strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 351233a5ba14SRick Jones strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 35131da177e4SLinus Torvalds 35141da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 351540630b82SMichael S. Tsirkin case IFF_TUN: 351633a5ba14SRick Jones strlcpy(info->bus_info, "tun", sizeof(info->bus_info)); 35171da177e4SLinus Torvalds break; 351840630b82SMichael S. 
Tsirkin case IFF_TAP: 351933a5ba14SRick Jones strlcpy(info->bus_info, "tap", sizeof(info->bus_info)); 35201da177e4SLinus Torvalds break; 35211da177e4SLinus Torvalds } 35221da177e4SLinus Torvalds } 35231da177e4SLinus Torvalds 35241da177e4SLinus Torvalds static u32 tun_get_msglevel(struct net_device *dev) 35251da177e4SLinus Torvalds { 35261da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 35273424170fSMichal Kubecek 35283424170fSMichal Kubecek return tun->msg_enable; 35291da177e4SLinus Torvalds } 35301da177e4SLinus Torvalds 35311da177e4SLinus Torvalds static void tun_set_msglevel(struct net_device *dev, u32 value) 35321da177e4SLinus Torvalds { 35331da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 35343424170fSMichal Kubecek 35353424170fSMichal Kubecek tun->msg_enable = value; 35361da177e4SLinus Torvalds } 35371da177e4SLinus Torvalds 35385503fcecSJason Wang static int tun_get_coalesce(struct net_device *dev, 35395503fcecSJason Wang struct ethtool_coalesce *ec) 35405503fcecSJason Wang { 35415503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 35425503fcecSJason Wang 35435503fcecSJason Wang ec->rx_max_coalesced_frames = tun->rx_batched; 35445503fcecSJason Wang 35455503fcecSJason Wang return 0; 35465503fcecSJason Wang } 35475503fcecSJason Wang 35485503fcecSJason Wang static int tun_set_coalesce(struct net_device *dev, 35495503fcecSJason Wang struct ethtool_coalesce *ec) 35505503fcecSJason Wang { 35515503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 35525503fcecSJason Wang 35535503fcecSJason Wang if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) 35545503fcecSJason Wang tun->rx_batched = NAPI_POLL_WEIGHT; 35555503fcecSJason Wang else 35565503fcecSJason Wang tun->rx_batched = ec->rx_max_coalesced_frames; 35575503fcecSJason Wang 35585503fcecSJason Wang return 0; 35595503fcecSJason Wang } 35605503fcecSJason Wang 35617282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops = { 3562e5ad00b3SJakub Kicinski .supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES, 35631da177e4SLinus Torvalds .get_drvinfo = tun_get_drvinfo, 35641da177e4SLinus Torvalds .get_msglevel = tun_get_msglevel, 35651da177e4SLinus Torvalds .set_msglevel = tun_set_msglevel, 3566bee31369SNolan Leake .get_link = ethtool_op_get_link, 3567eda29772SRichard Cochran .get_ts_info = ethtool_op_get_ts_info, 35685503fcecSJason Wang .get_coalesce = tun_get_coalesce, 35695503fcecSJason Wang .set_coalesce = tun_set_coalesce, 357029ccc49dSPhilippe Reynes .get_link_ksettings = tun_get_link_ksettings, 35714e24f2ddSChas Williams .set_link_ksettings = tun_set_link_ksettings, 35721da177e4SLinus Torvalds }; 35731da177e4SLinus Torvalds 35741576d986SJason Wang static int tun_queue_resize(struct tun_struct *tun) 35751576d986SJason Wang { 35761576d986SJason Wang struct net_device *dev = tun->dev; 35771576d986SJason Wang struct tun_file *tfile; 35785990a305SJason Wang struct ptr_ring **rings; 35791576d986SJason Wang int n = tun->numqueues + tun->numdisabled; 35801576d986SJason Wang int ret, i; 35811576d986SJason Wang 35825990a305SJason Wang rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); 35835990a305SJason Wang if (!rings) 35841576d986SJason Wang return -ENOMEM; 35851576d986SJason Wang 35861576d986SJason Wang for (i = 0; i < tun->numqueues; i++) { 35871576d986SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 35885990a305SJason Wang rings[i] = &tfile->tx_ring; 35891576d986SJason Wang } 35901576d986SJason Wang list_for_each_entry(tfile, &tun->disabled, next) 35915990a305SJason 
Wang rings[i++] = &tfile->tx_ring; 35921576d986SJason Wang 35935990a305SJason Wang ret = ptr_ring_resize_multiple(rings, n, 35945990a305SJason Wang dev->tx_queue_len, GFP_KERNEL, 3595fc72d1d5SJason Wang tun_ptr_free); 35961576d986SJason Wang 35975990a305SJason Wang kfree(rings); 35981576d986SJason Wang return ret; 35991576d986SJason Wang } 36001576d986SJason Wang 36011576d986SJason Wang static int tun_device_event(struct notifier_block *unused, 36021576d986SJason Wang unsigned long event, void *ptr) 36031576d986SJason Wang { 36041576d986SJason Wang struct net_device *dev = netdev_notifier_info_to_dev(ptr); 36051576d986SJason Wang struct tun_struct *tun = netdev_priv(dev); 360672b319dcSFei Li int i; 36071576d986SJason Wang 360886dfb4acSCraig Gallek if (dev->rtnl_link_ops != &tun_link_ops) 360986dfb4acSCraig Gallek return NOTIFY_DONE; 361086dfb4acSCraig Gallek 36111576d986SJason Wang switch (event) { 36121576d986SJason Wang case NETDEV_CHANGE_TX_QUEUE_LEN: 36131576d986SJason Wang if (tun_queue_resize(tun)) 36141576d986SJason Wang return NOTIFY_BAD; 36151576d986SJason Wang break; 361672b319dcSFei Li case NETDEV_UP: 361772b319dcSFei Li for (i = 0; i < tun->numqueues; i++) { 361872b319dcSFei Li struct tun_file *tfile; 361972b319dcSFei Li 362072b319dcSFei Li tfile = rtnl_dereference(tun->tfiles[i]); 362172b319dcSFei Li tfile->socket.sk->sk_write_space(tfile->socket.sk); 362272b319dcSFei Li } 362372b319dcSFei Li break; 36241576d986SJason Wang default: 36251576d986SJason Wang break; 36261576d986SJason Wang } 36271576d986SJason Wang 36281576d986SJason Wang return NOTIFY_DONE; 36291576d986SJason Wang } 36301576d986SJason Wang 36311576d986SJason Wang static struct notifier_block tun_notifier_block __read_mostly = { 36321576d986SJason Wang .notifier_call = tun_device_event, 36331576d986SJason Wang }; 363479d17604SPavel Emelyanov 36351da177e4SLinus Torvalds static int __init tun_init(void) 36361da177e4SLinus Torvalds { 36371da177e4SLinus Torvalds int ret = 0; 36381da177e4SLinus Torvalds 36396b8a66eeSJoe Perches pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 36401da177e4SLinus Torvalds 3641f019a7a5SEric W. Biederman ret = rtnl_link_register(&tun_link_ops); 364279d17604SPavel Emelyanov if (ret) { 36436b8a66eeSJoe Perches pr_err("Can't register link_ops\n"); 3644f019a7a5SEric W. Biederman goto err_linkops; 364579d17604SPavel Emelyanov } 364679d17604SPavel Emelyanov 36471da177e4SLinus Torvalds ret = misc_register(&tun_miscdev); 364879d17604SPavel Emelyanov if (ret) { 36496b8a66eeSJoe Perches pr_err("Can't register misc device %d\n", TUN_MINOR); 365079d17604SPavel Emelyanov goto err_misc; 365179d17604SPavel Emelyanov } 36521576d986SJason Wang 36535edfbd3cSTonghao Zhang ret = register_netdevice_notifier(&tun_notifier_block); 36545edfbd3cSTonghao Zhang if (ret) { 36555edfbd3cSTonghao Zhang pr_err("Can't register netdevice notifier\n"); 36565edfbd3cSTonghao Zhang goto err_notifier; 36575edfbd3cSTonghao Zhang } 36585edfbd3cSTonghao Zhang 365979d17604SPavel Emelyanov return 0; 36605edfbd3cSTonghao Zhang 36615edfbd3cSTonghao Zhang err_notifier: 36625edfbd3cSTonghao Zhang misc_deregister(&tun_miscdev); 366379d17604SPavel Emelyanov err_misc: 3664f019a7a5SEric W. Biederman rtnl_link_unregister(&tun_link_ops); 3665f019a7a5SEric W. 
err_linkops:
	return ret;
}

static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
	unregister_netdevice_notifier(&tun_notifier_block);
}

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;
	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);

struct ptr_ring *tun_get_tx_ring(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->tx_ring;
}
EXPORT_SYMBOL_GPL(tun_get_tx_ring);

module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");
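/* How the two exports above are consumed in-kernel (illustrative sketch,
 * loosely modelled on what a vhost-net style backend does; not a complete
 * or verbatim caller):
 *
 *	struct file *f = fget(fd);		// tap fd handed over by userspace
 *	struct socket *sock = tun_get_socket(f);
 *
 *	if (IS_ERR(sock)) {
 *		fput(f);
 *		return PTR_ERR(sock);
 *	}
 *	// Transmit/receive via sock->ops->sendmsg()/recvmsg(), i.e.
 *	// tun_sendmsg()/tun_recvmsg() above.  A batch of XDP buffers can be
 *	// passed in one call through msg_control with a struct tun_msg_ctl
 *	// of type TUN_MSG_PTR.  tun_get_tx_ring() exposes the same queue as
 *	// a ptr_ring for callers that want to consume it directly.
 *	// The caller must keep its reference on "f" while the socket is in use.
 */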