/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>

/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (tun->debug)						\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (debug == 2)						\
		printk(level fmt, ##args);			\
} while (0)
#else
#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (0)							\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (0)							\
		printk(level fmt, ##args);			\
} while (0)
#endif

#define TUN_HEADROOM 256
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE     0x80000000
#define TUN_VNET_BE     0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and
 * tap_filter) needed to serve as one transmit queue of the tuntap device.
 * The sock_fprog and tap_filter are kept in tun_struct since they are used
 * for filtering on the netdevice, not on a specific queue (at least I didn't
 * see the requirement for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to
 * the other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
	int xdp_pending_pkts;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated;
};

#define TUN_NUM_FLOW_ENTRIES 1024

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved into tun_file, the socket filter, sndbuf and
 * vnet header size are restored when a file is attached to a persistent
 * device, in order to preserve the behaviour of a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int            numqueues;
	unsigned int 		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

bool tun_is_xdp_buff(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}
EXPORT_SYMBOL(tun_is_xdp_buff);

void *tun_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_xdp_to_ptr);

void *tun_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_ptr_to_xdp);

static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en)
{
	tfile->napi_enabled = napi_en;
	if (napi_en) {
		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
		mutex_init(&tfile->napi_mutex);
	}
}

static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_struct *tun)
{
	return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
}

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}
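
/* Usage sketch (illustrative only, not part of the driver): userspace that
 * wants the virtio-net header in little-endian byte order regardless of
 * guest/host endianness can request it per device with
 *
 *	int one = 1;
 *	ioctl(tun_fd, TUNSETVNETLE, &one);
 *
 * TUNSETVNETBE/TUNGETVNETBE are honoured only when the kernel is built with
 * CONFIG_TUN_VNET_CROSS_LE, as the #ifdef block above shows; otherwise they
 * return -EINVAL.
 */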

static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & 0x3ff;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}
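
/* Note: tun_hashfn() keeps only the low 10 bits of the rxhash, so the bucket
 * index always stays below TUN_NUM_FLOW_ENTRIES (1024) without a modulo.
 * Idle entries are reclaimed by the garbage-collection timer below once they
 * are older than tun->ageing_time (TUN_FLOW_EXPIRE by default).
 */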

static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	if (!rxhash)
		return;
	else
		head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	/* There is a small chance of out-of-order delivery while a flow is
	 * switching queues; it is not worth optimizing for. */
	if (tun->numqueues == 1 || tfile->detached)
		goto unlock;

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

unlock:
	rcu_read_unlock();
}

/**
 * Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash first. The reason we do not
 * check the rx queue number is that some cards (e.g. 82599) choose the rx
 * queue based on the tx queue where the last packet of the flow was sent.
 * As the userspace application moves between processors, we may end up with
 * a different rx queue number here. If we cannot get an rxhash, the recorded
 * rx queue number may still help.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	if (txq) {
		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
		if (e) {
			tun_flow_save_rps_rxhash(e, txq);
			txq = e->queue_index;
		} else
			/* use multiply and shift instead of expensive divide */
			txq = ((u64)txq * numqueues) >> 32;
	} else if (likely(skb_rx_queue_recorded(skb))) {
		txq = skb_get_rx_queue(skb);
		while (unlikely(txq >= numqueues))
			txq -= numqueues;
	}

	return txq;
}

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u16 ret = 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % tun->numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}
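
/* Illustrative arithmetic for the multiply-and-shift used above: treating
 * the 32-bit hash as a fraction of 2^32, ((u64)hash * numqueues) >> 32
 * scales it into [0, numqueues) without a divide.  For example, hash
 * 0x80000000 with numqueues == 4 gives (0x80000000ULL * 4) >> 32 == 2.
 */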

static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

static void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_buff(ptr)) {
		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);

		put_page(virt_to_head_page(xdp->data));
	} else {
		__skb_array_destroy_skb(ptr);
	}
}

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}
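
/* The tx_ring drained above can hold two kinds of pointers: ordinary
 * sk_buffs queued by tun_net_xmit() and xdp_buffs queued by tun_xdp_xmit().
 * The latter are tagged with TUN_XDP_FLAG in the low pointer bit (see
 * tun_xdp_to_ptr() above), which is how tun_ptr_free() tells them apart and
 * releases the right kind of object.
 */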

static void tun_cleanup_tx_ring(struct tun_file *tfile)
{
	if (tfile->tx_ring.queue) {
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
	}
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tun, tfile);
		tun_napi_del(tun, tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		tun_cleanup_tx_ring(tfile);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	rtnl_lock();
	__tun_detach(tfile, clean);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tun, tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tun, tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
		tun_cleanup_tx_ring(tfile);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
		tun_cleanup_tx_ring(tfile);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to the persistent device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (!err)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index);
		if (err < 0)
			goto out;
		err = 0;
	}

	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi);
	}

	tun_set_real_num_queues(tun);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

out:
	return err;
}
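
/* Usage sketch (illustrative only, not part of the driver): a multiqueue
 * tap device is built by opening /dev/net/tun once per queue and issuing
 * TUNSETIFF with the same name ("tap0" below is just an example) and
 * IFF_MULTI_QUEUE on each fd:
 *
 *	struct ifreq ifr;
 *	memset(&ifr, 0, sizeof(ifr));
 *	strcpy(ifr.ifr_name, "tap0");
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
 *	ioctl(fd, TUNSETIFF, &ifr);
 *
 * Individual queues can later be detached and re-attached with TUNSETQUEUE
 * and IFF_DETACH_QUEUE/IFF_ATTACH_QUEUE, which ends up in __tun_detach()
 * and tun_attach() above.
 */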

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks. Which is
	 * perfectly safe. We disable it first and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}
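
/* Layout note for the filter built above: the first FLT_EXACT_COUNT (8)
 * addresses are matched exactly; any remaining addresses must be multicast
 * and are folded into the 64-bit hash bitmap in mask[], indexed by the top
 * six bits of ether_crc().  Userspace installs the table with the
 * TUNSETTXFILTER ioctl, passing a struct tun_filter immediately followed by
 * the address list.
 */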

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	netif_tx_start_all_queues(dev);

	for (i = 0; i < tun->numqueues; i++) {
		struct tun_file *tfile;

		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_write_space(tfile->socket.sk);
	}

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		if (rxhash) {
			struct tun_flow_entry *e;
			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
					  rxhash);
			if (e)
				tun_flow_save_rps_rxhash(e, rxhash);
		}
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}
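
/* The steering and filter programs consulted by tun_ebpf_select_queue() and
 * run_ebpf_filter() are optional and installed from userspace; in this
 * generation of the driver that is done with the TUNSETSTEERINGEBPF and
 * TUNSETFILTEREBPF ioctls (names per the uapi linux/if_tun.h of the matching
 * tree), each taking a pointer to the fd of a loaded socket-filter BPF
 * program.
 */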

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (txq >= tun->numqueues)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	len = run_ebpf_filter(tun, skb, len);

	/* Trim extra bytes since we may insert vlan proto & TCI
	 * in tun_put_user().
	 */
	len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0;
	if (len <= 0 || pskb_trim(skb, len))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time.
	 */
	skb_orphan(skb);

	nf_reset(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tun_poll_controller(struct net_device *dev)
{
	/*
	 * Tun only receives frames when:
	 * 1) the char device endpoint gets data from user space
	 * 2) the tun socket gets a sendmsg call from user space
	 * If NAPI is not enabled, since both of those are synchronous
	 * operations, we are guaranteed never to have pending data when we
	 * poll for it so there is nothing to do here but return.
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole
	 * If NAPI is enabled, however, we need to schedule polling for all
	 * queues unless we are using napi_gro_frags(), which we call in
	 * process context and not in NAPI context.
	 */
	struct tun_struct *tun = netdev_priv(dev);

	if (tun->flags & IFF_NAPI) {
		struct tun_file *tfile;
		int i;

		if (tun_napi_frags_enabled(tun))
			return;

		rcu_read_lock();
		for (i = 0; i < tun->numqueues; i++) {
			tfile = rcu_dereference(tun->tfiles[i]);
			if (tfile->napi_enabled)
				napi_schedule(&tfile->napi);
		}
		rcu_read_unlock();
	}
	return;
}
#endif

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}
txbytes; 1210608b9977SPaolo Abeni unsigned int start; 1211608b9977SPaolo Abeni 1212608b9977SPaolo Abeni p = per_cpu_ptr(tun->pcpu_stats, i); 1213608b9977SPaolo Abeni do { 1214608b9977SPaolo Abeni start = u64_stats_fetch_begin(&p->syncp); 1215608b9977SPaolo Abeni rxpackets = p->rx_packets; 1216608b9977SPaolo Abeni rxbytes = p->rx_bytes; 1217608b9977SPaolo Abeni txpackets = p->tx_packets; 1218608b9977SPaolo Abeni txbytes = p->tx_bytes; 1219608b9977SPaolo Abeni } while (u64_stats_fetch_retry(&p->syncp, start)); 1220608b9977SPaolo Abeni 1221608b9977SPaolo Abeni stats->rx_packets += rxpackets; 1222608b9977SPaolo Abeni stats->rx_bytes += rxbytes; 1223608b9977SPaolo Abeni stats->tx_packets += txpackets; 1224608b9977SPaolo Abeni stats->tx_bytes += txbytes; 1225608b9977SPaolo Abeni 1226608b9977SPaolo Abeni /* u32 counters */ 1227608b9977SPaolo Abeni rx_dropped += p->rx_dropped; 1228608b9977SPaolo Abeni rx_frame_errors += p->rx_frame_errors; 1229608b9977SPaolo Abeni tx_dropped += p->tx_dropped; 1230608b9977SPaolo Abeni } 1231608b9977SPaolo Abeni stats->rx_dropped = rx_dropped; 1232608b9977SPaolo Abeni stats->rx_frame_errors = rx_frame_errors; 1233608b9977SPaolo Abeni stats->tx_dropped = tx_dropped; 1234608b9977SPaolo Abeni } 1235608b9977SPaolo Abeni 1236761876c8SJason Wang static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, 1237761876c8SJason Wang struct netlink_ext_ack *extack) 1238761876c8SJason Wang { 1239761876c8SJason Wang struct tun_struct *tun = netdev_priv(dev); 1240761876c8SJason Wang struct bpf_prog *old_prog; 1241761876c8SJason Wang 1242761876c8SJason Wang old_prog = rtnl_dereference(tun->xdp_prog); 1243761876c8SJason Wang rcu_assign_pointer(tun->xdp_prog, prog); 1244761876c8SJason Wang if (old_prog) 1245761876c8SJason Wang bpf_prog_put(old_prog); 1246761876c8SJason Wang 1247761876c8SJason Wang return 0; 1248761876c8SJason Wang } 1249761876c8SJason Wang 1250761876c8SJason Wang static u32 tun_xdp_query(struct net_device *dev) 1251761876c8SJason Wang { 1252761876c8SJason Wang struct tun_struct *tun = netdev_priv(dev); 1253761876c8SJason Wang const struct bpf_prog *xdp_prog; 1254761876c8SJason Wang 1255761876c8SJason Wang xdp_prog = rtnl_dereference(tun->xdp_prog); 1256761876c8SJason Wang if (xdp_prog) 1257761876c8SJason Wang return xdp_prog->aux->id; 1258761876c8SJason Wang 1259761876c8SJason Wang return 0; 1260761876c8SJason Wang } 1261761876c8SJason Wang 1262f4e63525SJakub Kicinski static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) 1263761876c8SJason Wang { 1264761876c8SJason Wang switch (xdp->command) { 1265761876c8SJason Wang case XDP_SETUP_PROG: 1266761876c8SJason Wang return tun_xdp_set(dev, xdp->prog, xdp->extack); 1267761876c8SJason Wang case XDP_QUERY_PROG: 1268761876c8SJason Wang xdp->prog_id = tun_xdp_query(dev); 1269761876c8SJason Wang xdp->prog_attached = !!xdp->prog_id; 1270761876c8SJason Wang return 0; 1271761876c8SJason Wang default: 1272761876c8SJason Wang return -EINVAL; 1273761876c8SJason Wang } 1274761876c8SJason Wang } 1275761876c8SJason Wang 1276758e43b7SStephen Hemminger static const struct net_device_ops tun_netdev_ops = { 1277c70f1829SEric W. 
Biederman .ndo_uninit = tun_net_uninit, 1278758e43b7SStephen Hemminger .ndo_open = tun_net_open, 1279758e43b7SStephen Hemminger .ndo_stop = tun_net_close, 128000829823SStephen Hemminger .ndo_start_xmit = tun_net_xmit, 128188255375SMichał Mirosław .ndo_fix_features = tun_net_fix_features, 1282c8d68e6bSJason Wang .ndo_select_queue = tun_select_queue, 1283bebd097aSNeil Horman #ifdef CONFIG_NET_POLL_CONTROLLER 1284bebd097aSNeil Horman .ndo_poll_controller = tun_poll_controller, 1285bebd097aSNeil Horman #endif 1286eaea34b2SPaolo Abeni .ndo_set_rx_headroom = tun_set_headroom, 1287608b9977SPaolo Abeni .ndo_get_stats64 = tun_net_get_stats64, 1288758e43b7SStephen Hemminger }; 1289758e43b7SStephen Hemminger 1290fc72d1d5SJason Wang static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) 1291fc72d1d5SJason Wang { 1292fc72d1d5SJason Wang struct tun_struct *tun = netdev_priv(dev); 1293fc72d1d5SJason Wang struct xdp_buff *buff = xdp->data_hard_start; 1294fc72d1d5SJason Wang int headroom = xdp->data - xdp->data_hard_start; 1295fc72d1d5SJason Wang struct tun_file *tfile; 1296fc72d1d5SJason Wang u32 numqueues; 1297fc72d1d5SJason Wang int ret = 0; 1298fc72d1d5SJason Wang 1299fc72d1d5SJason Wang /* Ensure headroom is available and buff is properly aligned */ 1300fc72d1d5SJason Wang if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp))) 1301fc72d1d5SJason Wang return -ENOSPC; 1302fc72d1d5SJason Wang 1303fc72d1d5SJason Wang *buff = *xdp; 1304fc72d1d5SJason Wang 1305fc72d1d5SJason Wang rcu_read_lock(); 1306fc72d1d5SJason Wang 1307fc72d1d5SJason Wang numqueues = READ_ONCE(tun->numqueues); 1308fc72d1d5SJason Wang if (!numqueues) { 1309fc72d1d5SJason Wang ret = -ENOSPC; 1310fc72d1d5SJason Wang goto out; 1311fc72d1d5SJason Wang } 1312fc72d1d5SJason Wang 1313fc72d1d5SJason Wang tfile = rcu_dereference(tun->tfiles[smp_processor_id() % 1314fc72d1d5SJason Wang numqueues]); 1315fc72d1d5SJason Wang /* Encode the XDP flag into the lowest bit for the consumer to distinguish 1316fc72d1d5SJason Wang * an XDP buffer from an sk_buff.
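 *
 * Conceptually the tagging looks like the sketch below; the real
 * helpers (tun_xdp_to_ptr(), tun_ptr_to_xdp(), tun_is_xdp_buff()) are
 * defined earlier in this file, and the 0x1UL flag value is assumed
 * here purely for illustration.  The buffers are at least word
 * aligned, so bit 0 of the pointer is free to carry the type:
 *
 *	void *tagged = (void *)((unsigned long)xdp | 0x1UL);
 *	bool is_xdp = (unsigned long)tagged & 0x1UL;
 *	struct xdp_buff *back =
 *		(struct xdp_buff *)((unsigned long)tagged & ~0x1UL);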
1317fc72d1d5SJason Wang */ 1318fc72d1d5SJason Wang if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) { 1319fc72d1d5SJason Wang this_cpu_inc(tun->pcpu_stats->tx_dropped); 1320fc72d1d5SJason Wang ret = -ENOSPC; 1321fc72d1d5SJason Wang } 1322fc72d1d5SJason Wang 1323fc72d1d5SJason Wang out: 1324fc72d1d5SJason Wang rcu_read_unlock(); 1325fc72d1d5SJason Wang return ret; 1326fc72d1d5SJason Wang } 1327fc72d1d5SJason Wang 1328fc72d1d5SJason Wang static void tun_xdp_flush(struct net_device *dev) 1329fc72d1d5SJason Wang { 1330fc72d1d5SJason Wang struct tun_struct *tun = netdev_priv(dev); 1331fc72d1d5SJason Wang struct tun_file *tfile; 1332fc72d1d5SJason Wang u32 numqueues; 1333fc72d1d5SJason Wang 1334fc72d1d5SJason Wang rcu_read_lock(); 1335fc72d1d5SJason Wang 1336fc72d1d5SJason Wang numqueues = READ_ONCE(tun->numqueues); 1337fc72d1d5SJason Wang if (!numqueues) 1338fc72d1d5SJason Wang goto out; 1339fc72d1d5SJason Wang 1340fc72d1d5SJason Wang tfile = rcu_dereference(tun->tfiles[smp_processor_id() % 1341fc72d1d5SJason Wang numqueues]); 1342fc72d1d5SJason Wang /* Notify and wake up reader process */ 1343fc72d1d5SJason Wang if (tfile->flags & TUN_FASYNC) 1344fc72d1d5SJason Wang kill_fasync(&tfile->fasync, SIGIO, POLL_IN); 1345fc72d1d5SJason Wang tfile->socket.sk->sk_data_ready(tfile->socket.sk); 1346fc72d1d5SJason Wang 1347fc72d1d5SJason Wang out: 1348fc72d1d5SJason Wang rcu_read_unlock(); 1349fc72d1d5SJason Wang } 1350fc72d1d5SJason Wang 1351758e43b7SStephen Hemminger static const struct net_device_ops tap_netdev_ops = { 1352c70f1829SEric W. Biederman .ndo_uninit = tun_net_uninit, 1353758e43b7SStephen Hemminger .ndo_open = tun_net_open, 1354758e43b7SStephen Hemminger .ndo_stop = tun_net_close, 135500829823SStephen Hemminger .ndo_start_xmit = tun_net_xmit, 135688255375SMichał Mirosław .ndo_fix_features = tun_net_fix_features, 1357afc4b13dSJiri Pirko .ndo_set_rx_mode = tun_net_mclist, 1358758e43b7SStephen Hemminger .ndo_set_mac_address = eth_mac_addr, 1359758e43b7SStephen Hemminger .ndo_validate_addr = eth_validate_addr, 1360c8d68e6bSJason Wang .ndo_select_queue = tun_select_queue, 1361bebd097aSNeil Horman #ifdef CONFIG_NET_POLL_CONTROLLER 1362bebd097aSNeil Horman .ndo_poll_controller = tun_poll_controller, 1363bebd097aSNeil Horman #endif 13645e52796aSToshiaki Makita .ndo_features_check = passthru_features_check, 1365eaea34b2SPaolo Abeni .ndo_set_rx_headroom = tun_set_headroom, 1366608b9977SPaolo Abeni .ndo_get_stats64 = tun_net_get_stats64, 1367f4e63525SJakub Kicinski .ndo_bpf = tun_xdp, 1368fc72d1d5SJason Wang .ndo_xdp_xmit = tun_xdp_xmit, 1369fc72d1d5SJason Wang .ndo_xdp_flush = tun_xdp_flush, 1370758e43b7SStephen Hemminger }; 1371758e43b7SStephen Hemminger 1372944a1376SPavel Emelyanov static void tun_flow_init(struct tun_struct *tun) 137396442e42SJason Wang { 137496442e42SJason Wang int i; 137596442e42SJason Wang 137696442e42SJason Wang for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) 137796442e42SJason Wang INIT_HLIST_HEAD(&tun->flows[i]); 137896442e42SJason Wang 137996442e42SJason Wang tun->ageing_time = TUN_FLOW_EXPIRE; 1380e99e88a9SKees Cook timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0); 1381e99e88a9SKees Cook mod_timer(&tun->flow_gc_timer, 1382e99e88a9SKees Cook round_jiffies_up(jiffies + tun->ageing_time)); 138396442e42SJason Wang } 138496442e42SJason Wang 138596442e42SJason Wang static void tun_flow_uninit(struct tun_struct *tun) 138696442e42SJason Wang { 138796442e42SJason Wang del_timer_sync(&tun->flow_gc_timer); 138896442e42SJason Wang tun_flow_flush(tun); 138996442e42SJason 
Wang } 139096442e42SJason Wang 139191572088SJarod Wilson #define MIN_MTU 68 139291572088SJarod Wilson #define MAX_MTU 65535 139391572088SJarod Wilson 13941da177e4SLinus Torvalds /* Initialize net device. */ 13951da177e4SLinus Torvalds static void tun_net_init(struct net_device *dev) 13961da177e4SLinus Torvalds { 13971da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 13981da177e4SLinus Torvalds 13991da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 140040630b82SMichael S. Tsirkin case IFF_TUN: 1401758e43b7SStephen Hemminger dev->netdev_ops = &tun_netdev_ops; 1402758e43b7SStephen Hemminger 14031da177e4SLinus Torvalds /* Point-to-Point TUN Device */ 14041da177e4SLinus Torvalds dev->hard_header_len = 0; 14051da177e4SLinus Torvalds dev->addr_len = 0; 14061da177e4SLinus Torvalds dev->mtu = 1500; 14071da177e4SLinus Torvalds 14081da177e4SLinus Torvalds /* Zero header length */ 14091da177e4SLinus Torvalds dev->type = ARPHRD_NONE; 14101da177e4SLinus Torvalds dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 14111da177e4SLinus Torvalds break; 14121da177e4SLinus Torvalds 141340630b82SMichael S. Tsirkin case IFF_TAP: 14147a0a9608SKusanagi Kouichi dev->netdev_ops = &tap_netdev_ops; 14151da177e4SLinus Torvalds /* Ethernet TAP Device */ 14161da177e4SLinus Torvalds ether_setup(dev); 1417550fd08cSNeil Horman dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1418a676847bSstephen hemminger dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 141936226a8dSBrian Braunstein 1420f2cedb63SDanny Kukawka eth_hw_addr_random(dev); 142136226a8dSBrian Braunstein 14221da177e4SLinus Torvalds break; 14231da177e4SLinus Torvalds } 142491572088SJarod Wilson 142591572088SJarod Wilson dev->min_mtu = MIN_MTU; 142691572088SJarod Wilson dev->max_mtu = MAX_MTU - dev->hard_header_len; 14271da177e4SLinus Torvalds } 14281da177e4SLinus Torvalds 14291da177e4SLinus Torvalds /* Character device part */ 14301da177e4SLinus Torvalds 14311da177e4SLinus Torvalds /* Poll */ 1432afc9a42bSAl Viro static __poll_t tun_chr_poll(struct file *file, poll_table *wait) 14331da177e4SLinus Torvalds { 1434b2430de3SEric W. Biederman struct tun_file *tfile = file->private_data; 14359484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 14363c8a9c63SMariusz Kozlowski struct sock *sk; 1437afc9a42bSAl Viro __poll_t mask = 0; 14381da177e4SLinus Torvalds 14391da177e4SLinus Torvalds if (!tun) 1440a9a08845SLinus Torvalds return EPOLLERR; 14411da177e4SLinus Torvalds 144254f968d6SJason Wang sk = tfile->socket.sk; 14433c8a9c63SMariusz Kozlowski 14446b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_chr_poll\n"); 14451da177e4SLinus Torvalds 14469e641bdcSXi Wang poll_wait(file, sk_sleep(sk), wait); 14471da177e4SLinus Torvalds 14485990a305SJason Wang if (!ptr_ring_empty(&tfile->tx_ring)) 1449a9a08845SLinus Torvalds mask |= EPOLLIN | EPOLLRDNORM; 14501da177e4SLinus Torvalds 1451b20e2d54SHannes Frederic Sowa if (tun->dev->flags & IFF_UP && 1452b20e2d54SHannes Frederic Sowa (sock_writeable(sk) || 14539cd3e072SEric Dumazet (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 1454b20e2d54SHannes Frederic Sowa sock_writeable(sk)))) 1455a9a08845SLinus Torvalds mask |= EPOLLOUT | EPOLLWRNORM; 145633dccbb0SHerbert Xu 1457c70f1829SEric W. Biederman if (tun->dev->reg_state != NETREG_REGISTERED) 1458a9a08845SLinus Torvalds mask = EPOLLERR; 1459c70f1829SEric W. Biederman 1460631ab46bSEric W. 
Biederman tun_put(tun); 14611da177e4SLinus Torvalds return mask; 14621da177e4SLinus Torvalds } 14631da177e4SLinus Torvalds 146490e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, 146590e33d45SPetar Penkov size_t len, 146690e33d45SPetar Penkov const struct iov_iter *it) 146790e33d45SPetar Penkov { 146890e33d45SPetar Penkov struct sk_buff *skb; 146990e33d45SPetar Penkov size_t linear; 147090e33d45SPetar Penkov int err; 147190e33d45SPetar Penkov int i; 147290e33d45SPetar Penkov 147390e33d45SPetar Penkov if (it->nr_segs > MAX_SKB_FRAGS + 1) 147490e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 147590e33d45SPetar Penkov 147690e33d45SPetar Penkov local_bh_disable(); 147790e33d45SPetar Penkov skb = napi_get_frags(&tfile->napi); 147890e33d45SPetar Penkov local_bh_enable(); 147990e33d45SPetar Penkov if (!skb) 148090e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 148190e33d45SPetar Penkov 148290e33d45SPetar Penkov linear = iov_iter_single_seg_count(it); 148390e33d45SPetar Penkov err = __skb_grow(skb, linear); 148490e33d45SPetar Penkov if (err) 148590e33d45SPetar Penkov goto free; 148690e33d45SPetar Penkov 148790e33d45SPetar Penkov skb->len = len; 148890e33d45SPetar Penkov skb->data_len = len - linear; 148990e33d45SPetar Penkov skb->truesize += skb->data_len; 149090e33d45SPetar Penkov 149190e33d45SPetar Penkov for (i = 1; i < it->nr_segs; i++) { 1492*43a08e0fSEric Dumazet struct page_frag *pfrag = &current->task_frag; 149390e33d45SPetar Penkov size_t fragsz = it->iov[i].iov_len; 149490e33d45SPetar Penkov 149590e33d45SPetar Penkov if (fragsz == 0 || fragsz > PAGE_SIZE) { 149690e33d45SPetar Penkov err = -EINVAL; 149790e33d45SPetar Penkov goto free; 149890e33d45SPetar Penkov } 149990e33d45SPetar Penkov 1500*43a08e0fSEric Dumazet if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) { 150190e33d45SPetar Penkov err = -ENOMEM; 150290e33d45SPetar Penkov goto free; 150390e33d45SPetar Penkov } 150490e33d45SPetar Penkov 1505*43a08e0fSEric Dumazet skb_fill_page_desc(skb, i - 1, pfrag->page, 1506*43a08e0fSEric Dumazet pfrag->offset, fragsz); 1507*43a08e0fSEric Dumazet page_ref_inc(pfrag->page); 1508*43a08e0fSEric Dumazet pfrag->offset += fragsz; 150990e33d45SPetar Penkov } 151090e33d45SPetar Penkov 151190e33d45SPetar Penkov return skb; 151290e33d45SPetar Penkov free: 151390e33d45SPetar Penkov /* frees skb and all frags allocated with napi_alloc_frag() */ 151490e33d45SPetar Penkov napi_free_frags(&tfile->napi); 151590e33d45SPetar Penkov return ERR_PTR(err); 151690e33d45SPetar Penkov } 151790e33d45SPetar Penkov 1518f42157cbSRusty Russell /* prepad is the amount to reserve at front. len is length after that. 1519f42157cbSRusty Russell * linear is a hint as to how much to copy (usually headers). */ 152054f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, 152133dccbb0SHerbert Xu size_t prepad, size_t len, 152233dccbb0SHerbert Xu size_t linear, int noblock) 1523f42157cbSRusty Russell { 152454f968d6SJason Wang struct sock *sk = tfile->socket.sk; 1525f42157cbSRusty Russell struct sk_buff *skb; 152633dccbb0SHerbert Xu int err; 1527f42157cbSRusty Russell 1528f42157cbSRusty Russell /* Under a page? Don't bother with paged skb.
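 *
 * Worked example (assuming PAGE_SIZE == 4096): for a typical 1500 byte
 * frame with a small prepad, prepad + len fits under a page, so linear
 * is bumped to len and sock_alloc_send_pskb() below builds a fully
 * linear skb: head = prepad + 1500, data_len = 0.  For a large packet,
 * say len = 16384 with a linear hint of 128, only the hinted header
 * bytes stay in the linear area and the rest is attached as paged
 * data: head = prepad + 128, data_len = 16384 - 128 = 16256.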
*/ 15290eca93bcSHerbert Xu if (prepad + len < PAGE_SIZE || !linear) 153033dccbb0SHerbert Xu linear = len; 1531f42157cbSRusty Russell 153233dccbb0SHerbert Xu skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 153328d64271SEric Dumazet &err, 0); 1534f42157cbSRusty Russell if (!skb) 153533dccbb0SHerbert Xu return ERR_PTR(err); 1536f42157cbSRusty Russell 1537f42157cbSRusty Russell skb_reserve(skb, prepad); 1538f42157cbSRusty Russell skb_put(skb, linear); 153933dccbb0SHerbert Xu skb->data_len = len - linear; 154033dccbb0SHerbert Xu skb->len += len - linear; 1541f42157cbSRusty Russell 1542f42157cbSRusty Russell return skb; 1543f42157cbSRusty Russell } 1544f42157cbSRusty Russell 15455503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, 15465503fcecSJason Wang struct sk_buff *skb, int more) 15475503fcecSJason Wang { 15485503fcecSJason Wang struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 15495503fcecSJason Wang struct sk_buff_head process_queue; 15505503fcecSJason Wang u32 rx_batched = tun->rx_batched; 15515503fcecSJason Wang bool rcv = false; 15525503fcecSJason Wang 15535503fcecSJason Wang if (!rx_batched || (!more && skb_queue_empty(queue))) { 15545503fcecSJason Wang local_bh_disable(); 15555503fcecSJason Wang netif_receive_skb(skb); 15565503fcecSJason Wang local_bh_enable(); 15575503fcecSJason Wang return; 15585503fcecSJason Wang } 15595503fcecSJason Wang 15605503fcecSJason Wang spin_lock(&queue->lock); 15615503fcecSJason Wang if (!more || skb_queue_len(queue) == rx_batched) { 15625503fcecSJason Wang __skb_queue_head_init(&process_queue); 15635503fcecSJason Wang skb_queue_splice_tail_init(queue, &process_queue); 15645503fcecSJason Wang rcv = true; 15655503fcecSJason Wang } else { 15665503fcecSJason Wang __skb_queue_tail(queue, skb); 15675503fcecSJason Wang } 15685503fcecSJason Wang spin_unlock(&queue->lock); 15695503fcecSJason Wang 15705503fcecSJason Wang if (rcv) { 15715503fcecSJason Wang struct sk_buff *nskb; 15725503fcecSJason Wang 15735503fcecSJason Wang local_bh_disable(); 15745503fcecSJason Wang while ((nskb = __skb_dequeue(&process_queue))) 15755503fcecSJason Wang netif_receive_skb(nskb); 15765503fcecSJason Wang netif_receive_skb(skb); 15775503fcecSJason Wang local_bh_enable(); 15785503fcecSJason Wang } 15795503fcecSJason Wang } 15805503fcecSJason Wang 158166ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, 158266ccbc9cSJason Wang int len, int noblock, bool zerocopy) 158366ccbc9cSJason Wang { 158466ccbc9cSJason Wang if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 158566ccbc9cSJason Wang return false; 158666ccbc9cSJason Wang 158766ccbc9cSJason Wang if (tfile->socket.sk->sk_sndbuf != INT_MAX) 158866ccbc9cSJason Wang return false; 158966ccbc9cSJason Wang 159066ccbc9cSJason Wang if (!noblock) 159166ccbc9cSJason Wang return false; 159266ccbc9cSJason Wang 159366ccbc9cSJason Wang if (zerocopy) 159466ccbc9cSJason Wang return false; 159566ccbc9cSJason Wang 159666ccbc9cSJason Wang if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + 159766ccbc9cSJason Wang SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) 159866ccbc9cSJason Wang return false; 159966ccbc9cSJason Wang 160066ccbc9cSJason Wang return true; 160166ccbc9cSJason Wang } 160266ccbc9cSJason Wang 1603761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun, 1604761876c8SJason Wang struct tun_file *tfile, 160566ccbc9cSJason Wang struct iov_iter *from, 1606761876c8SJason Wang struct virtio_net_hdr *hdr, 
16071cfe6e93SJason Wang int len, int *skb_xdp) 160866ccbc9cSJason Wang { 16090bbd7dadSEric Dumazet struct page_frag *alloc_frag = &current->task_frag; 161066ccbc9cSJason Wang struct sk_buff *skb; 1611761876c8SJason Wang struct bpf_prog *xdp_prog; 16127df13219SJason Wang int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1613761876c8SJason Wang unsigned int delta = 0; 161466ccbc9cSJason Wang char *buf; 161566ccbc9cSJason Wang size_t copied; 1616761876c8SJason Wang bool xdp_xmit = false; 16177df13219SJason Wang int err, pad = TUN_RX_PAD; 16187df13219SJason Wang 16197df13219SJason Wang rcu_read_lock(); 16207df13219SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 16217df13219SJason Wang if (xdp_prog) 16227df13219SJason Wang pad += TUN_HEADROOM; 16237df13219SJason Wang buflen += SKB_DATA_ALIGN(len + pad); 16247df13219SJason Wang rcu_read_unlock(); 162566ccbc9cSJason Wang 162663b9ab65SJason Wang alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES); 162766ccbc9cSJason Wang if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) 162866ccbc9cSJason Wang return ERR_PTR(-ENOMEM); 162966ccbc9cSJason Wang 163066ccbc9cSJason Wang buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 163166ccbc9cSJason Wang copied = copy_page_from_iter(alloc_frag->page, 16327df13219SJason Wang alloc_frag->offset + pad, 163366ccbc9cSJason Wang len, from); 163466ccbc9cSJason Wang if (copied != len) 163566ccbc9cSJason Wang return ERR_PTR(-EFAULT); 163666ccbc9cSJason Wang 16377df13219SJason Wang /* There's a small window where XDP may be set after the check 16387df13219SJason Wang * of xdp_prog above; this should be rare and for simplicity 16397df13219SJason Wang * we do XDP on the skb in case the headroom is not enough. 16407df13219SJason Wang */ 16417df13219SJason Wang if (hdr->gso_type || !xdp_prog) 16421cfe6e93SJason Wang *skb_xdp = 1; 1643761876c8SJason Wang else 16441cfe6e93SJason Wang *skb_xdp = 0; 164566ccbc9cSJason Wang 1646761876c8SJason Wang rcu_read_lock(); 1647761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 16481cfe6e93SJason Wang if (xdp_prog && !*skb_xdp) { 1649761876c8SJason Wang struct xdp_buff xdp; 1650761876c8SJason Wang void *orig_data; 1651761876c8SJason Wang u32 act; 1652761876c8SJason Wang 1653761876c8SJason Wang xdp.data_hard_start = buf; 16547df13219SJason Wang xdp.data = buf + pad; 1655de8f3a83SDaniel Borkmann xdp_set_data_meta_invalid(&xdp); 1656761876c8SJason Wang xdp.data_end = xdp.data + len; 16578bf5c4eeSJesper Dangaard Brouer xdp.rxq = &tfile->xdp_rxq; 1658761876c8SJason Wang orig_data = xdp.data; 1659761876c8SJason Wang act = bpf_prog_run_xdp(xdp_prog, &xdp); 1660761876c8SJason Wang 1661761876c8SJason Wang switch (act) { 1662761876c8SJason Wang case XDP_REDIRECT: 1663761876c8SJason Wang get_page(alloc_frag->page); 1664761876c8SJason Wang alloc_frag->offset += buflen; 1665762c330dSJason Wang ++tfile->xdp_pending_pkts; 1666761876c8SJason Wang err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); 1667761876c8SJason Wang if (err) 1668761876c8SJason Wang goto err_redirect; 1669654d5738SXin Long rcu_read_unlock(); 1670761876c8SJason Wang return NULL; 1671761876c8SJason Wang case XDP_TX: 1672761876c8SJason Wang xdp_xmit = true; 1673761876c8SJason Wang /* fall through */ 1674761876c8SJason Wang case XDP_PASS: 1675761876c8SJason Wang delta = orig_data - xdp.data; 1676761876c8SJason Wang break; 1677761876c8SJason Wang default: 1678761876c8SJason Wang bpf_warn_invalid_xdp_action(act); 1679761876c8SJason Wang /* fall through */ 1680761876c8SJason
Wang case XDP_ABORTED: 1681761876c8SJason Wang trace_xdp_exception(tun->dev, xdp_prog, act); 1682761876c8SJason Wang /* fall through */ 1683761876c8SJason Wang case XDP_DROP: 1684761876c8SJason Wang goto err_xdp; 1685761876c8SJason Wang } 1686761876c8SJason Wang } 1687761876c8SJason Wang 1688761876c8SJason Wang skb = build_skb(buf, buflen); 1689761876c8SJason Wang if (!skb) { 1690761876c8SJason Wang rcu_read_unlock(); 1691761876c8SJason Wang return ERR_PTR(-ENOMEM); 1692761876c8SJason Wang } 1693761876c8SJason Wang 16947df13219SJason Wang skb_reserve(skb, pad - delta); 1695761876c8SJason Wang skb_put(skb, len + delta); 169666ccbc9cSJason Wang get_page(alloc_frag->page); 169766ccbc9cSJason Wang alloc_frag->offset += buflen; 169866ccbc9cSJason Wang 1699761876c8SJason Wang if (xdp_xmit) { 1700761876c8SJason Wang skb->dev = tun->dev; 1701761876c8SJason Wang generic_xdp_tx(skb, xdp_prog); 1702654d5738SXin Long rcu_read_unlock(); 1703761876c8SJason Wang return NULL; 1704761876c8SJason Wang } 1705761876c8SJason Wang 1706761876c8SJason Wang rcu_read_unlock(); 1707761876c8SJason Wang 170866ccbc9cSJason Wang return skb; 1709761876c8SJason Wang 1710761876c8SJason Wang err_redirect: 1711761876c8SJason Wang put_page(alloc_frag->page); 1712761876c8SJason Wang err_xdp: 1713761876c8SJason Wang rcu_read_unlock(); 1714761876c8SJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped); 1715761876c8SJason Wang return NULL; 171666ccbc9cSJason Wang } 171766ccbc9cSJason Wang 17181da177e4SLinus Torvalds /* Get packet from user space buffer */ 171954f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, 1720f5ff53b4SAl Viro void *msg_control, struct iov_iter *from, 17215503fcecSJason Wang int noblock, bool more) 17221da177e4SLinus Torvalds { 172309640e63SHarvey Harrison struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; 17241da177e4SLinus Torvalds struct sk_buff *skb; 1725f5ff53b4SAl Viro size_t total_len = iov_iter_count(from); 1726eaea34b2SPaolo Abeni size_t len = total_len, align = tun->align, linear; 1727f43798c2SRusty Russell struct virtio_net_hdr gso = { 0 }; 1728608b9977SPaolo Abeni struct tun_pcpu_stats *stats; 172996f8d9ecSJason Wang int good_linear; 17300690899bSMichael S. Tsirkin int copylen; 17310690899bSMichael S. Tsirkin bool zerocopy = false; 17320690899bSMichael S. Tsirkin int err; 173396f84061SJason Wang u32 rxhash = 0; 17341cfe6e93SJason Wang int skb_xdp = 1; 173590e33d45SPetar Penkov bool frags = tun_napi_frags_enabled(tun); 17361da177e4SLinus Torvalds 17371bd4978aSEric Dumazet if (!(tun->dev->flags & IFF_UP)) 17381bd4978aSEric Dumazet return -EIO; 17391bd4978aSEric Dumazet 174040630b82SMichael S. Tsirkin if (!(tun->flags & IFF_NO_PI)) { 174115718ea0SDan Carpenter if (len < sizeof(pi)) 17421da177e4SLinus Torvalds return -EINVAL; 174315718ea0SDan Carpenter len -= sizeof(pi); 17441da177e4SLinus Torvalds 1745cbbd26b8SAl Viro if (!copy_from_iter_full(&pi, sizeof(pi), from)) 17461da177e4SLinus Torvalds return -EFAULT; 17471da177e4SLinus Torvalds } 17481da177e4SLinus Torvalds 174940630b82SMichael S. 
Tsirkin if (tun->flags & IFF_VNET_HDR) { 1750e1edab87SWillem de Bruijn int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 1751e1edab87SWillem de Bruijn 1752e1edab87SWillem de Bruijn if (len < vnet_hdr_sz) 1753f43798c2SRusty Russell return -EINVAL; 1754e1edab87SWillem de Bruijn len -= vnet_hdr_sz; 1755f43798c2SRusty Russell 1756cbbd26b8SAl Viro if (!copy_from_iter_full(&gso, sizeof(gso), from)) 1757f43798c2SRusty Russell return -EFAULT; 1758f43798c2SRusty Russell 17594909122fSHerbert Xu if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 176056f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len)) 176156f0dcc5SMichael S. Tsirkin gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2); 17624909122fSHerbert Xu 176356f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > len) 1764f43798c2SRusty Russell return -EINVAL; 1765e1edab87SWillem de Bruijn iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); 1766f43798c2SRusty Russell } 1767f43798c2SRusty Russell 176840630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { 1769a504b86eSstephen hemminger align += NET_IP_ALIGN; 17700eca93bcSHerbert Xu if (unlikely(len < ETH_HLEN || 177156f0dcc5SMichael S. Tsirkin (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN))) 1772e01bf1c8SRusty Russell return -EINVAL; 1773e01bf1c8SRusty Russell } 17741da177e4SLinus Torvalds 177596f8d9ecSJason Wang good_linear = SKB_MAX_HEAD(align); 177696f8d9ecSJason Wang 177788529176SJason Wang if (msg_control) { 1778f5ff53b4SAl Viro struct iov_iter i = *from; 1779f5ff53b4SAl Viro 178088529176SJason Wang /* There are 256 bytes to be copied into the skb, so there is 178188529176SJason Wang * enough room for the skb head to expand in case it is needed. 17820690899bSMichael S. Tsirkin * The rest of the buffer is mapped from userspace. 17830690899bSMichael S. Tsirkin */ 178456f0dcc5SMichael S. Tsirkin copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN; 178596f8d9ecSJason Wang if (copylen > good_linear) 178696f8d9ecSJason Wang copylen = good_linear; 17873dd5c330SJason Wang linear = copylen; 1788f5ff53b4SAl Viro iov_iter_advance(&i, copylen); 1789f5ff53b4SAl Viro if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) 179088529176SJason Wang zerocopy = true; 179188529176SJason Wang } 179288529176SJason Wang 179390e33d45SPetar Penkov if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { 17941cfe6e93SJason Wang /* For a packet that is not easy to process 17951cfe6e93SJason Wang * (e.g. a gso or jumbo packet), we will run XDP after 17961cfe6e93SJason Wang * the skb has been created, using the generic XDP routine. 17971cfe6e93SJason Wang */ 17981cfe6e93SJason Wang skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); 179966ccbc9cSJason Wang if (IS_ERR(skb)) { 180066ccbc9cSJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped); 180166ccbc9cSJason Wang return PTR_ERR(skb); 180266ccbc9cSJason Wang } 1803761876c8SJason Wang if (!skb) 1804761876c8SJason Wang return total_len; 180566ccbc9cSJason Wang } else { 180688529176SJason Wang if (!zerocopy) { 18070690899bSMichael S. Tsirkin copylen = len; 180856f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) 180996f8d9ecSJason Wang linear = good_linear; 181096f8d9ecSJason Wang else 181156f0dcc5SMichael S. Tsirkin linear = tun16_to_cpu(tun, gso.hdr_len); 18123dd5c330SJason Wang } 18130690899bSMichael S.
Tsirkin 181490e33d45SPetar Penkov if (frags) { 181590e33d45SPetar Penkov mutex_lock(&tfile->napi_mutex); 181690e33d45SPetar Penkov skb = tun_napi_alloc_frags(tfile, copylen, from); 181790e33d45SPetar Penkov /* tun_napi_alloc_frags() enforces a layout for the skb. 181890e33d45SPetar Penkov * If zerocopy is enabled, then this layout will be 181990e33d45SPetar Penkov * overwritten by zerocopy_sg_from_iter(). 182090e33d45SPetar Penkov */ 182190e33d45SPetar Penkov zerocopy = false; 182290e33d45SPetar Penkov } else { 182390e33d45SPetar Penkov skb = tun_alloc_skb(tfile, align, copylen, linear, 182490e33d45SPetar Penkov noblock); 182590e33d45SPetar Penkov } 182690e33d45SPetar Penkov 182733dccbb0SHerbert Xu if (IS_ERR(skb)) { 182833dccbb0SHerbert Xu if (PTR_ERR(skb) != -EAGAIN) 1829608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 183090e33d45SPetar Penkov if (frags) 183190e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 183233dccbb0SHerbert Xu return PTR_ERR(skb); 18331da177e4SLinus Torvalds } 18341da177e4SLinus Torvalds 18350690899bSMichael S. Tsirkin if (zerocopy) 1836f5ff53b4SAl Viro err = zerocopy_sg_from_iter(skb, from); 1837af1cc7a2SJason Wang else 1838f5ff53b4SAl Viro err = skb_copy_datagram_from_iter(skb, 0, from, len); 18390690899bSMichael S. Tsirkin 18400690899bSMichael S. Tsirkin if (err) { 1841608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 18428f22757eSDave Jones kfree_skb(skb); 184390e33d45SPetar Penkov if (frags) { 184490e33d45SPetar Penkov tfile->napi.skb = NULL; 184590e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 184690e33d45SPetar Penkov } 184790e33d45SPetar Penkov 18481da177e4SLinus Torvalds return -EFAULT; 18498f22757eSDave Jones } 185066ccbc9cSJason Wang } 18511da177e4SLinus Torvalds 18523e9e40e7SJarno Rajahalme if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { 1853df10db98SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_frame_errors); 1854df10db98SPaolo Abeni kfree_skb(skb); 185590e33d45SPetar Penkov if (frags) { 185690e33d45SPetar Penkov tfile->napi.skb = NULL; 185790e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 185890e33d45SPetar Penkov } 185990e33d45SPetar Penkov 1860df10db98SPaolo Abeni return -EINVAL; 1861df10db98SPaolo Abeni } 1862df10db98SPaolo Abeni 18631da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 186440630b82SMichael S. Tsirkin case IFF_TUN: 186540630b82SMichael S. Tsirkin if (tun->flags & IFF_NO_PI) { 18662580c4c1SAlexander Potapenko u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; 18672580c4c1SAlexander Potapenko 18682580c4c1SAlexander Potapenko switch (ip_version) { 18692580c4c1SAlexander Potapenko case 4: 1870f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IP); 1871f09f7ee2SAng Way Chuang break; 18722580c4c1SAlexander Potapenko case 6: 1873f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IPV6); 1874f09f7ee2SAng Way Chuang break; 1875f09f7ee2SAng Way Chuang default: 1876608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 1877f09f7ee2SAng Way Chuang kfree_skb(skb); 1878f09f7ee2SAng Way Chuang return -EINVAL; 1879f09f7ee2SAng Way Chuang } 1880f09f7ee2SAng Way Chuang } 1881f09f7ee2SAng Way Chuang 1882459a98edSArnaldo Carvalho de Melo skb_reset_mac_header(skb); 18831da177e4SLinus Torvalds skb->protocol = pi.proto; 18844c13eb66SArnaldo Carvalho de Melo skb->dev = tun->dev; 18851da177e4SLinus Torvalds break; 188640630b82SMichael S. 
Tsirkin case IFF_TAP: 188790e33d45SPetar Penkov if (!frags) 18881da177e4SLinus Torvalds skb->protocol = eth_type_trans(skb, tun->dev); 18891da177e4SLinus Torvalds break; 18906403eab1SJoe Perches } 18911da177e4SLinus Torvalds 18920690899bSMichael S. Tsirkin /* copy skb_ubuf_info for callback when skb has no error */ 18930690899bSMichael S. Tsirkin if (zerocopy) { 18940690899bSMichael S. Tsirkin skb_shinfo(skb)->destructor_arg = msg_control; 18950690899bSMichael S. Tsirkin skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1896c9af6db4SPravin B Shelar skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1897af1cc7a2SJason Wang } else if (msg_control) { 1898af1cc7a2SJason Wang struct ubuf_info *uarg = msg_control; 1899af1cc7a2SJason Wang uarg->callback(uarg, false); 19000690899bSMichael S. Tsirkin } 19010690899bSMichael S. Tsirkin 190272f65107SVlad Yasevich skb_reset_network_header(skb); 190340893fd0SJason Wang skb_probe_transport_header(skb, 0); 190438502af7SJason Wang 19051cfe6e93SJason Wang if (skb_xdp) { 1906761876c8SJason Wang struct bpf_prog *xdp_prog; 1907761876c8SJason Wang int ret; 1908761876c8SJason Wang 1909761876c8SJason Wang rcu_read_lock(); 1910761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 1911761876c8SJason Wang if (xdp_prog) { 1912761876c8SJason Wang ret = do_xdp_generic(xdp_prog, skb); 1913761876c8SJason Wang if (ret != XDP_PASS) { 1914761876c8SJason Wang rcu_read_unlock(); 1915761876c8SJason Wang return total_len; 1916761876c8SJason Wang } 1917761876c8SJason Wang } 1918761876c8SJason Wang rcu_read_unlock(); 1919761876c8SJason Wang } 1920761876c8SJason Wang 192196f84061SJason Wang rcu_read_lock(); 192296f84061SJason Wang if (!rcu_dereference(tun->steering_prog)) 1923feec084aSJason Wang rxhash = __skb_get_hash_symmetric(skb); 192496f84061SJason Wang rcu_read_unlock(); 192594317099SPetar Penkov 192690e33d45SPetar Penkov if (frags) { 192790e33d45SPetar Penkov /* Exercise flow dissector code path. 
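 *
 * IFF_NAPI_FRAGS largely exists so userspace (for instance a fuzzer)
 * can feed arbitrary, fragmented frames through napi_gro_frags() and
 * the flow dissector.  eth_get_headlen() below asks the dissector how
 * many bytes of protocol headers the frame carries; for a plain TCPv4
 * frame with no options that is typically ETH_HLEN + 20 + 20 = 54
 * bytes.  The comparison that follows is a sanity check: if the
 * reported header length exceeds what is actually linear in the skb,
 * the frame is treated as malformed and dropped.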
*/ 192890e33d45SPetar Penkov u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb)); 192990e33d45SPetar Penkov 1930010f245bSEric Dumazet if (unlikely(headlen > skb_headlen(skb))) { 193190e33d45SPetar Penkov this_cpu_inc(tun->pcpu_stats->rx_dropped); 193290e33d45SPetar Penkov napi_free_frags(&tfile->napi); 193390e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 193490e33d45SPetar Penkov WARN_ON(1); 193590e33d45SPetar Penkov return -ENOMEM; 193690e33d45SPetar Penkov } 193790e33d45SPetar Penkov 193890e33d45SPetar Penkov local_bh_disable(); 193990e33d45SPetar Penkov napi_gro_frags(&tfile->napi); 194090e33d45SPetar Penkov local_bh_enable(); 194190e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 1942aec72f33SEric Dumazet } else if (tfile->napi_enabled) { 194394317099SPetar Penkov struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 194494317099SPetar Penkov int queue_len; 194594317099SPetar Penkov 194694317099SPetar Penkov spin_lock_bh(&queue->lock); 194794317099SPetar Penkov __skb_queue_tail(queue, skb); 194894317099SPetar Penkov queue_len = skb_queue_len(queue); 194994317099SPetar Penkov spin_unlock(&queue->lock); 195094317099SPetar Penkov 195194317099SPetar Penkov if (!more || queue_len > NAPI_POLL_WEIGHT) 195294317099SPetar Penkov napi_schedule(&tfile->napi); 195394317099SPetar Penkov 195494317099SPetar Penkov local_bh_enable(); 195594317099SPetar Penkov } else if (!IS_ENABLED(CONFIG_4KSTACKS)) { 19565503fcecSJason Wang tun_rx_batched(tun, tfile, skb, more); 195794317099SPetar Penkov } else { 19581da177e4SLinus Torvalds netif_rx_ni(skb); 195994317099SPetar Penkov } 19601da177e4SLinus Torvalds 1961608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats); 1962608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp); 1963608b9977SPaolo Abeni stats->rx_packets++; 1964608b9977SPaolo Abeni stats->rx_bytes += len; 1965608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp); 1966608b9977SPaolo Abeni put_cpu_ptr(stats); 19671da177e4SLinus Torvalds 196896f84061SJason Wang if (rxhash) 19699e85722dSJason Wang tun_flow_update(tun, rxhash, tfile); 197096f84061SJason Wang 19710690899bSMichael S. Tsirkin return total_len; 19721da177e4SLinus Torvalds } 19731da177e4SLinus Torvalds 1974f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) 19751da177e4SLinus Torvalds { 197633dccbb0SHerbert Xu struct file *file = iocb->ki_filp; 197754f968d6SJason Wang struct tun_file *tfile = file->private_data; 19789484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 1979631ab46bSEric W. Biederman ssize_t result; 19801da177e4SLinus Torvalds 19811da177e4SLinus Torvalds if (!tun) 19821da177e4SLinus Torvalds return -EBADFD; 19831da177e4SLinus Torvalds 19845503fcecSJason Wang result = tun_get_user(tun, tfile, NULL, from, 19855503fcecSJason Wang file->f_flags & O_NONBLOCK, false); 1986631ab46bSEric W. Biederman 1987762c330dSJason Wang if (tfile->xdp_pending_pkts) { 1988762c330dSJason Wang tfile->xdp_pending_pkts = 0; 1989762c330dSJason Wang xdp_do_flush_map(); 1990762c330dSJason Wang } 1991762c330dSJason Wang 1992631ab46bSEric W. Biederman tun_put(tun); 1993631ab46bSEric W. 
Biederman return result; 19941da177e4SLinus Torvalds } 19951da177e4SLinus Torvalds 1996fc72d1d5SJason Wang static ssize_t tun_put_user_xdp(struct tun_struct *tun, 1997fc72d1d5SJason Wang struct tun_file *tfile, 1998fc72d1d5SJason Wang struct xdp_buff *xdp, 1999fc72d1d5SJason Wang struct iov_iter *iter) 2000fc72d1d5SJason Wang { 2001fc72d1d5SJason Wang int vnet_hdr_sz = 0; 2002fc72d1d5SJason Wang size_t size = xdp->data_end - xdp->data; 2003fc72d1d5SJason Wang struct tun_pcpu_stats *stats; 2004fc72d1d5SJason Wang size_t ret; 2005fc72d1d5SJason Wang 2006fc72d1d5SJason Wang if (tun->flags & IFF_VNET_HDR) { 2007fc72d1d5SJason Wang struct virtio_net_hdr gso = { 0 }; 2008fc72d1d5SJason Wang 2009fc72d1d5SJason Wang vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 2010fc72d1d5SJason Wang if (unlikely(iov_iter_count(iter) < vnet_hdr_sz)) 2011fc72d1d5SJason Wang return -EINVAL; 2012fc72d1d5SJason Wang if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) != 2013fc72d1d5SJason Wang sizeof(gso))) 2014fc72d1d5SJason Wang return -EFAULT; 2015fc72d1d5SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 2016fc72d1d5SJason Wang } 2017fc72d1d5SJason Wang 2018fc72d1d5SJason Wang ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz; 2019fc72d1d5SJason Wang 2020fc72d1d5SJason Wang stats = get_cpu_ptr(tun->pcpu_stats); 2021fc72d1d5SJason Wang u64_stats_update_begin(&stats->syncp); 2022fc72d1d5SJason Wang stats->tx_packets++; 2023fc72d1d5SJason Wang stats->tx_bytes += ret; 2024fc72d1d5SJason Wang u64_stats_update_end(&stats->syncp); 2025fc72d1d5SJason Wang put_cpu_ptr(tun->pcpu_stats); 2026fc72d1d5SJason Wang 2027fc72d1d5SJason Wang return ret; 2028fc72d1d5SJason Wang } 2029fc72d1d5SJason Wang 20301da177e4SLinus Torvalds /* Put packet to the user space buffer */ 20316f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun, 203254f968d6SJason Wang struct tun_file *tfile, 20331da177e4SLinus Torvalds struct sk_buff *skb, 2034e0b46d0eSHerbert Xu struct iov_iter *iter) 20351da177e4SLinus Torvalds { 20361da177e4SLinus Torvalds struct tun_pi pi = { 0, skb->protocol }; 2037608b9977SPaolo Abeni struct tun_pcpu_stats *stats; 2038e0b46d0eSHerbert Xu ssize_t total; 20398c847d25SJason Wang int vlan_offset = 0; 2040a8f9bfdfSHerbert Xu int vlan_hlen = 0; 20412eb783c4SHerbert Xu int vnet_hdr_sz = 0; 2042a8f9bfdfSHerbert Xu 2043df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) 2044a8f9bfdfSHerbert Xu vlan_hlen = VLAN_HLEN; 20451da177e4SLinus Torvalds 204640630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR) 2047e1edab87SWillem de Bruijn vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 20481da177e4SLinus Torvalds 2049e0b46d0eSHerbert Xu total = skb->len + vlan_hlen + vnet_hdr_sz; 2050e0b46d0eSHerbert Xu 205140630b82SMichael S. 
Tsirkin if (!(tun->flags & IFF_NO_PI)) { 2052e0b46d0eSHerbert Xu if (iov_iter_count(iter) < sizeof(pi)) 20531da177e4SLinus Torvalds return -EINVAL; 20541da177e4SLinus Torvalds 2055e0b46d0eSHerbert Xu total += sizeof(pi); 2056e0b46d0eSHerbert Xu if (iov_iter_count(iter) < total) { 20571da177e4SLinus Torvalds /* Packet will be stripped */ 20581da177e4SLinus Torvalds pi.flags |= TUN_PKT_STRIP; 20591da177e4SLinus Torvalds } 20601da177e4SLinus Torvalds 2061e0b46d0eSHerbert Xu if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi)) 20621da177e4SLinus Torvalds return -EFAULT; 20631da177e4SLinus Torvalds } 20641da177e4SLinus Torvalds 20652eb783c4SHerbert Xu if (vnet_hdr_sz) { 20669403cd7cSJarno Rajahalme struct virtio_net_hdr gso; 206734166093SMike Rapoport 2068e0b46d0eSHerbert Xu if (iov_iter_count(iter) < vnet_hdr_sz) 2069f43798c2SRusty Russell return -EINVAL; 2070f43798c2SRusty Russell 20713e9e40e7SJarno Rajahalme if (virtio_net_hdr_from_skb(skb, &gso, 20726391a448SJason Wang tun_is_little_endian(tun), true)) { 2073f43798c2SRusty Russell struct skb_shared_info *sinfo = skb_shinfo(skb); 20746b8a66eeSJoe Perches pr_err("unexpected GSO type: " 2075ef3db4a5SMichael S. Tsirkin "0x%x, gso_size %d, hdr_len %d\n", 207656f0dcc5SMichael S. Tsirkin sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), 207756f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.hdr_len)); 2078ef3db4a5SMichael S. Tsirkin print_hex_dump(KERN_ERR, "tun: ", 2079ef3db4a5SMichael S. Tsirkin DUMP_PREFIX_NONE, 2080ef3db4a5SMichael S. Tsirkin 16, 1, skb->head, 208156f0dcc5SMichael S. Tsirkin min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); 2082ef3db4a5SMichael S. Tsirkin WARN_ON_ONCE(1); 2083ef3db4a5SMichael S. Tsirkin return -EINVAL; 2084ef3db4a5SMichael S. Tsirkin } 2085f43798c2SRusty Russell 2086e0b46d0eSHerbert Xu if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) 2087f43798c2SRusty Russell return -EFAULT; 20888c847d25SJason Wang 20898c847d25SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 2090f43798c2SRusty Russell } 2091f43798c2SRusty Russell 2092a8f9bfdfSHerbert Xu if (vlan_hlen) { 2093e0b46d0eSHerbert Xu int ret; 2094aff3d70aSJason Wang struct veth veth; 20951da177e4SLinus Torvalds 20966680ec68SJason Wang veth.h_vlan_proto = skb->vlan_proto; 2097df8a39deSJiri Pirko veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); 20981da177e4SLinus Torvalds 20996680ec68SJason Wang vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 21006680ec68SJason Wang 2101e0b46d0eSHerbert Xu ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); 2102e0b46d0eSHerbert Xu if (ret || !iov_iter_count(iter)) 21036680ec68SJason Wang goto done; 21046680ec68SJason Wang 2105e0b46d0eSHerbert Xu ret = copy_to_iter(&veth, sizeof(veth), iter); 2106e0b46d0eSHerbert Xu if (ret != sizeof(veth) || !iov_iter_count(iter)) 21076680ec68SJason Wang goto done; 21086680ec68SJason Wang } 21096680ec68SJason Wang 2110e0b46d0eSHerbert Xu skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); 21116680ec68SJason Wang 21126680ec68SJason Wang done: 2113608b9977SPaolo Abeni /* caller is in process context */ 2114608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats); 2115608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp); 2116608b9977SPaolo Abeni stats->tx_packets++; 2117608b9977SPaolo Abeni stats->tx_bytes += skb->len + vlan_hlen; 2118608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp); 2119608b9977SPaolo Abeni put_cpu_ptr(tun->pcpu_stats); 21201da177e4SLinus Torvalds 21211da177e4SLinus Torvalds return total; 21221da177e4SLinus
Torvalds } 21231da177e4SLinus Torvalds 2124fc72d1d5SJason Wang static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) 21251576d986SJason Wang { 21261576d986SJason Wang DECLARE_WAITQUEUE(wait, current); 2127fc72d1d5SJason Wang void *ptr = NULL; 2128f48cc6b2SJason Wang int error = 0; 21291576d986SJason Wang 2130fc72d1d5SJason Wang ptr = ptr_ring_consume(&tfile->tx_ring); 2131fc72d1d5SJason Wang if (ptr) 21321576d986SJason Wang goto out; 21331576d986SJason Wang if (noblock) { 2134f48cc6b2SJason Wang error = -EAGAIN; 21351576d986SJason Wang goto out; 21361576d986SJason Wang } 21371576d986SJason Wang 21381576d986SJason Wang add_wait_queue(&tfile->wq.wait, &wait); 21391576d986SJason Wang current->state = TASK_INTERRUPTIBLE; 21401576d986SJason Wang 21411576d986SJason Wang while (1) { 2142fc72d1d5SJason Wang ptr = ptr_ring_consume(&tfile->tx_ring); 2143fc72d1d5SJason Wang if (ptr) 21441576d986SJason Wang break; 21451576d986SJason Wang if (signal_pending(current)) { 2146f48cc6b2SJason Wang error = -ERESTARTSYS; 21471576d986SJason Wang break; 21481576d986SJason Wang } 21491576d986SJason Wang if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { 2150f48cc6b2SJason Wang error = -EFAULT; 21511576d986SJason Wang break; 21521576d986SJason Wang } 21531576d986SJason Wang 21541576d986SJason Wang schedule(); 21551576d986SJason Wang } 21561576d986SJason Wang 21571576d986SJason Wang current->state = TASK_RUNNING; 21581576d986SJason Wang remove_wait_queue(&tfile->wq.wait, &wait); 21591576d986SJason Wang 21601576d986SJason Wang out: 2161f48cc6b2SJason Wang *err = error; 2162fc72d1d5SJason Wang return ptr; 21631576d986SJason Wang } 21641576d986SJason Wang 216554f968d6SJason Wang static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, 21669b067034SAl Viro struct iov_iter *to, 2167fc72d1d5SJason Wang int noblock, void *ptr) 21681da177e4SLinus Torvalds { 21699b067034SAl Viro ssize_t ret; 21701576d986SJason Wang int err; 21711da177e4SLinus Torvalds 21723872baf6SRami Rosen tun_debug(KERN_INFO, tun, "tun_do_read\n"); 21731da177e4SLinus Torvalds 2174c33ee15bSWei Xu if (!iov_iter_count(to)) { 2175fc72d1d5SJason Wang tun_ptr_free(ptr); 21769b067034SAl Viro return 0; 2177c33ee15bSWei Xu } 21781da177e4SLinus Torvalds 2179fc72d1d5SJason Wang if (!ptr) { 21801576d986SJason Wang /* Read frames from ring */ 2181fc72d1d5SJason Wang ptr = tun_ring_recv(tfile, noblock, &err); 2182fc72d1d5SJason Wang if (!ptr) 2183957f094fSAlex Gartrell return err; 2184ac77cfd4SJason Wang } 2185e0b46d0eSHerbert Xu 2186fc72d1d5SJason Wang if (tun_is_xdp_buff(ptr)) { 2187fc72d1d5SJason Wang struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); 2188fc72d1d5SJason Wang 2189fc72d1d5SJason Wang ret = tun_put_user_xdp(tun, tfile, xdp, to); 2190fc72d1d5SJason Wang put_page(virt_to_head_page(xdp->data)); 2191fc72d1d5SJason Wang } else { 2192fc72d1d5SJason Wang struct sk_buff *skb = ptr; 2193fc72d1d5SJason Wang 21949b067034SAl Viro ret = tun_put_user(tun, tfile, skb, to); 2195f51a5e82SJason Wang if (unlikely(ret < 0)) 21961da177e4SLinus Torvalds kfree_skb(skb); 2197f51a5e82SJason Wang else 2198f51a5e82SJason Wang consume_skb(skb); 2199fc72d1d5SJason Wang } 22001da177e4SLinus Torvalds 220105c2828cSMichael S. Tsirkin return ret; 220205c2828cSMichael S. Tsirkin } 220305c2828cSMichael S. Tsirkin 22049b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) 220505c2828cSMichael S. Tsirkin { 220605c2828cSMichael S. Tsirkin struct file *file = iocb->ki_filp; 220705c2828cSMichael S. 
Tsirkin struct tun_file *tfile = file->private_data; 22089484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 22099b067034SAl Viro ssize_t len = iov_iter_count(to), ret; 221005c2828cSMichael S. Tsirkin 221105c2828cSMichael S. Tsirkin if (!tun) 221205c2828cSMichael S. Tsirkin return -EBADFD; 2213ac77cfd4SJason Wang ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL); 221442404c09SDavid S. Miller ret = min_t(ssize_t, ret, len); 2215d0b7da8aSZhi Yong Wu if (ret > 0) 2216d0b7da8aSZhi Yong Wu iocb->ki_pos = ret; 2217631ab46bSEric W. Biederman tun_put(tun); 22181da177e4SLinus Torvalds return ret; 22191da177e4SLinus Torvalds } 22201da177e4SLinus Torvalds 2221cd5681d7SJason Wang static void tun_prog_free(struct rcu_head *rcu) 222296f84061SJason Wang { 2223cd5681d7SJason Wang struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu); 222496f84061SJason Wang 222596f84061SJason Wang bpf_prog_destroy(prog->prog); 222696f84061SJason Wang kfree(prog); 222796f84061SJason Wang } 222896f84061SJason Wang 22299d6474e4SJason Wang static int __tun_set_ebpf(struct tun_struct *tun, 22309d6474e4SJason Wang struct tun_prog __rcu **prog_p, 223196f84061SJason Wang struct bpf_prog *prog) 223296f84061SJason Wang { 2233cd5681d7SJason Wang struct tun_prog *old, *new = NULL; 223496f84061SJason Wang 223596f84061SJason Wang if (prog) { 223696f84061SJason Wang new = kmalloc(sizeof(*new), GFP_KERNEL); 223796f84061SJason Wang if (!new) 223896f84061SJason Wang return -ENOMEM; 223996f84061SJason Wang new->prog = prog; 224096f84061SJason Wang } 224196f84061SJason Wang 2242124da8f6SJason Wang spin_lock_bh(&tun->lock); 2243cd5681d7SJason Wang old = rcu_dereference_protected(*prog_p, 2244124da8f6SJason Wang lockdep_is_held(&tun->lock)); 2245cd5681d7SJason Wang rcu_assign_pointer(*prog_p, new); 2246124da8f6SJason Wang spin_unlock_bh(&tun->lock); 224796f84061SJason Wang 224896f84061SJason Wang if (old) 2249cd5681d7SJason Wang call_rcu(&old->rcu, tun_prog_free); 225096f84061SJason Wang 225196f84061SJason Wang return 0; 225296f84061SJason Wang } 225396f84061SJason Wang 225496442e42SJason Wang static void tun_free_netdev(struct net_device *dev) 225596442e42SJason Wang { 225696442e42SJason Wang struct tun_struct *tun = netdev_priv(dev); 225796442e42SJason Wang 22584008e97fSJason Wang BUG_ON(!(list_empty(&tun->disabled))); 2259608b9977SPaolo Abeni free_percpu(tun->pcpu_stats); 226096442e42SJason Wang tun_flow_uninit(tun); 22615dbbaf2dSPaul Moore security_tun_dev_free_security(tun->security); 2262cd5681d7SJason Wang __tun_set_ebpf(tun, &tun->steering_prog, NULL); 2263aff3d70aSJason Wang __tun_set_ebpf(tun, &tun->filter_prog, NULL); 226496442e42SJason Wang } 226596442e42SJason Wang 22661da177e4SLinus Torvalds static void tun_setup(struct net_device *dev) 22671da177e4SLinus Torvalds { 22681da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 22691da177e4SLinus Torvalds 22700625c883SEric W. Biederman tun->owner = INVALID_UID; 22710625c883SEric W. Biederman tun->group = INVALID_GID; 22721da177e4SLinus Torvalds 22731da177e4SLinus Torvalds dev->ethtool_ops = &tun_ethtool_ops; 2274cf124db5SDavid S. Miller dev->needs_free_netdev = true; 2275cf124db5SDavid S. Miller dev->priv_destructor = tun_free_netdev; 2276016adb72SJason Wang /* We prefer our own queue length */ 2277016adb72SJason Wang dev->tx_queue_len = TUN_READQ_SIZE; 22781da177e4SLinus Torvalds } 22791da177e4SLinus Torvalds 2280f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting tun or tap 2281f019a7a5SEric W. 
Biederman * device with netlink. 2282f019a7a5SEric W. Biederman */ 2283a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[], 2284a8b8a889SMatthias Schiffer struct netlink_ext_ack *extack) 2285f019a7a5SEric W. Biederman { 2286f019a7a5SEric W. Biederman return -EINVAL; 2287f019a7a5SEric W. Biederman } 2288f019a7a5SEric W. Biederman 2289f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = { 2290f019a7a5SEric W. Biederman .kind = DRV_NAME, 2291f019a7a5SEric W. Biederman .priv_size = sizeof(struct tun_struct), 2292f019a7a5SEric W. Biederman .setup = tun_setup, 2293f019a7a5SEric W. Biederman .validate = tun_validate, 2294f019a7a5SEric W. Biederman }; 2295f019a7a5SEric W. Biederman 229633dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk) 229733dccbb0SHerbert Xu { 229854f968d6SJason Wang struct tun_file *tfile; 229943815482SEric Dumazet wait_queue_head_t *wqueue; 230033dccbb0SHerbert Xu 230133dccbb0SHerbert Xu if (!sock_writeable(sk)) 230233dccbb0SHerbert Xu return; 230333dccbb0SHerbert Xu 23049cd3e072SEric Dumazet if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) 230533dccbb0SHerbert Xu return; 230633dccbb0SHerbert Xu 230743815482SEric Dumazet wqueue = sk_sleep(sk); 230843815482SEric Dumazet if (wqueue && waitqueue_active(wqueue)) 2309a9a08845SLinus Torvalds wake_up_interruptible_sync_poll(wqueue, EPOLLOUT | 2310a9a08845SLinus Torvalds EPOLLWRNORM | EPOLLWRBAND); 2311c722c625SHerbert Xu 231254f968d6SJason Wang tfile = container_of(sk, struct tun_file, sk); 231354f968d6SJason Wang kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); 231433dccbb0SHerbert Xu } 231533dccbb0SHerbert Xu 23161b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) 231705c2828cSMichael S. Tsirkin { 231854f968d6SJason Wang int ret; 231954f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 23209484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 232154f968d6SJason Wang 232254f968d6SJason Wang if (!tun) 232354f968d6SJason Wang return -EBADFD; 2324f5ff53b4SAl Viro 2325c0371da6SAl Viro ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, 23265503fcecSJason Wang m->msg_flags & MSG_DONTWAIT, 23275503fcecSJason Wang m->msg_flags & MSG_MORE); 2328762c330dSJason Wang 2329762c330dSJason Wang if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT || 2330762c330dSJason Wang !(m->msg_flags & MSG_MORE)) { 2331762c330dSJason Wang tfile->xdp_pending_pkts = 0; 2332762c330dSJason Wang xdp_do_flush_map(); 2333762c330dSJason Wang } 2334762c330dSJason Wang 233554f968d6SJason Wang tun_put(tun); 233654f968d6SJason Wang return ret; 233705c2828cSMichael S. Tsirkin } 233805c2828cSMichael S. Tsirkin 23391b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, 234005c2828cSMichael S. Tsirkin int flags) 234105c2828cSMichael S. Tsirkin { 234254f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 23439484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 2344fc72d1d5SJason Wang void *ptr = m->msg_control; 234505c2828cSMichael S. 
Tsirkin int ret; 234654f968d6SJason Wang 2347c33ee15bSWei Xu if (!tun) { 2348c33ee15bSWei Xu ret = -EBADFD; 2349fc72d1d5SJason Wang goto out_free; 2350c33ee15bSWei Xu } 235154f968d6SJason Wang 2352eda29772SRichard Cochran if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { 23533811ae76SGao feng ret = -EINVAL; 2354c33ee15bSWei Xu goto out_put_tun; 23553811ae76SGao feng } 2356eda29772SRichard Cochran if (flags & MSG_ERRQUEUE) { 2357eda29772SRichard Cochran ret = sock_recv_errqueue(sock->sk, m, total_len, 2358eda29772SRichard Cochran SOL_PACKET, TUN_TX_TIMESTAMP); 2359eda29772SRichard Cochran goto out; 2360eda29772SRichard Cochran } 2361fc72d1d5SJason Wang ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr); 236287897931SAlex Gartrell if (ret > (ssize_t)total_len) { 236342404c09SDavid S. Miller m->msg_flags |= MSG_TRUNC; 236442404c09SDavid S. Miller ret = flags & MSG_TRUNC ? ret : total_len; 236542404c09SDavid S. Miller } 23663811ae76SGao feng out: 236754f968d6SJason Wang tun_put(tun); 236805c2828cSMichael S. Tsirkin return ret; 2369c33ee15bSWei Xu 2370c33ee15bSWei Xu out_put_tun: 2371c33ee15bSWei Xu tun_put(tun); 2372fc72d1d5SJason Wang out_free: 2373fc72d1d5SJason Wang tun_ptr_free(ptr); 2374c33ee15bSWei Xu return ret; 237505c2828cSMichael S. Tsirkin } 237605c2828cSMichael S. Tsirkin 2377fc72d1d5SJason Wang static int tun_ptr_peek_len(void *ptr) 2378fc72d1d5SJason Wang { 2379fc72d1d5SJason Wang if (likely(ptr)) { 2380fc72d1d5SJason Wang if (tun_is_xdp_buff(ptr)) { 2381fc72d1d5SJason Wang struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); 2382fc72d1d5SJason Wang 2383fc72d1d5SJason Wang return xdp->data_end - xdp->data; 2384fc72d1d5SJason Wang } 2385fc72d1d5SJason Wang return __skb_array_len_with_tag(ptr); 2386fc72d1d5SJason Wang } else { 2387fc72d1d5SJason Wang return 0; 2388fc72d1d5SJason Wang } 2389fc72d1d5SJason Wang } 2390fc72d1d5SJason Wang 23911576d986SJason Wang static int tun_peek_len(struct socket *sock) 23921576d986SJason Wang { 23931576d986SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 23941576d986SJason Wang struct tun_struct *tun; 23951576d986SJason Wang int ret = 0; 23961576d986SJason Wang 23979484dc74Syuan linyu tun = tun_get(tfile); 23981576d986SJason Wang if (!tun) 23991576d986SJason Wang return 0; 24001576d986SJason Wang 2401fc72d1d5SJason Wang ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len); 24021576d986SJason Wang tun_put(tun); 24031576d986SJason Wang 24041576d986SJason Wang return ret; 24051576d986SJason Wang } 24061576d986SJason Wang 240705c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */ 240805c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = { 24091576d986SJason Wang .peek_len = tun_peek_len, 241005c2828cSMichael S. Tsirkin .sendmsg = tun_sendmsg, 241105c2828cSMichael S. Tsirkin .recvmsg = tun_recvmsg, 241205c2828cSMichael S. Tsirkin }; 241305c2828cSMichael S. Tsirkin 241433dccbb0SHerbert Xu static struct proto tun_proto = { 241533dccbb0SHerbert Xu .name = "tun", 241633dccbb0SHerbert Xu .owner = THIS_MODULE, 241754f968d6SJason Wang .obj_size = sizeof(struct tun_file), 241833dccbb0SHerbert Xu }; 2419f019a7a5SEric W. Biederman 2420980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun) 2421980c9e8cSDavid Woodhouse { 2422031f5e03SMichael S. 
Tsirkin return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); 2423980c9e8cSDavid Woodhouse } 2424980c9e8cSDavid Woodhouse 2425980c9e8cSDavid Woodhouse static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr, 2426980c9e8cSDavid Woodhouse char *buf) 2427980c9e8cSDavid Woodhouse { 2428980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2429980c9e8cSDavid Woodhouse return sprintf(buf, "0x%x\n", tun_flags(tun)); 2430980c9e8cSDavid Woodhouse } 2431980c9e8cSDavid Woodhouse 2432980c9e8cSDavid Woodhouse static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, 2433980c9e8cSDavid Woodhouse char *buf) 2434980c9e8cSDavid Woodhouse { 2435980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 24360625c883SEric W. Biederman return uid_valid(tun->owner)? 24370625c883SEric W. Biederman sprintf(buf, "%u\n", 24380625c883SEric W. Biederman from_kuid_munged(current_user_ns(), tun->owner)): 24390625c883SEric W. Biederman sprintf(buf, "-1\n"); 2440980c9e8cSDavid Woodhouse } 2441980c9e8cSDavid Woodhouse 2442980c9e8cSDavid Woodhouse static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, 2443980c9e8cSDavid Woodhouse char *buf) 2444980c9e8cSDavid Woodhouse { 2445980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 24460625c883SEric W. Biederman return gid_valid(tun->group) ? 24470625c883SEric W. Biederman sprintf(buf, "%u\n", 24480625c883SEric W. Biederman from_kgid_munged(current_user_ns(), tun->group)): 24490625c883SEric W. Biederman sprintf(buf, "-1\n"); 2450980c9e8cSDavid Woodhouse } 2451980c9e8cSDavid Woodhouse 2452980c9e8cSDavid Woodhouse static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); 2453980c9e8cSDavid Woodhouse static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL); 2454980c9e8cSDavid Woodhouse static DEVICE_ATTR(group, 0444, tun_show_group, NULL); 2455980c9e8cSDavid Woodhouse 2456c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = { 2457c4d33e24STakashi Iwai &dev_attr_tun_flags.attr, 2458c4d33e24STakashi Iwai &dev_attr_owner.attr, 2459c4d33e24STakashi Iwai &dev_attr_group.attr, 2460c4d33e24STakashi Iwai NULL 2461c4d33e24STakashi Iwai }; 2462c4d33e24STakashi Iwai 2463c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = { 2464c4d33e24STakashi Iwai .attrs = tun_dev_attrs 2465c4d33e24STakashi Iwai }; 2466c4d33e24STakashi Iwai 2467d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 24681da177e4SLinus Torvalds { 24691da177e4SLinus Torvalds struct tun_struct *tun; 247054f968d6SJason Wang struct tun_file *tfile = file->private_data; 24711da177e4SLinus Torvalds struct net_device *dev; 24721da177e4SLinus Torvalds int err; 24731da177e4SLinus Torvalds 24747c0c3b1aSJason Wang if (tfile->detached) 24757c0c3b1aSJason Wang return -EINVAL; 24767c0c3b1aSJason Wang 247790e33d45SPetar Penkov if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { 247890e33d45SPetar Penkov if (!capable(CAP_NET_ADMIN)) 247990e33d45SPetar Penkov return -EPERM; 248090e33d45SPetar Penkov 248190e33d45SPetar Penkov if (!(ifr->ifr_flags & IFF_NAPI) || 248290e33d45SPetar Penkov (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) 248390e33d45SPetar Penkov return -EINVAL; 248490e33d45SPetar Penkov } 248590e33d45SPetar Penkov 248674a3e5a7SEric W. Biederman dev = __dev_get_by_name(net, ifr->ifr_name); 248774a3e5a7SEric W. 
Biederman if (dev) { 2488f85ba780SDavid Woodhouse if (ifr->ifr_flags & IFF_TUN_EXCL) 2489f85ba780SDavid Woodhouse return -EBUSY; 249074a3e5a7SEric W. Biederman if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) 249174a3e5a7SEric W. Biederman tun = netdev_priv(dev); 249274a3e5a7SEric W. Biederman else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) 249374a3e5a7SEric W. Biederman tun = netdev_priv(dev); 249474a3e5a7SEric W. Biederman else 249574a3e5a7SEric W. Biederman return -EINVAL; 249674a3e5a7SEric W. Biederman 24978e6d91aeSJason Wang if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != 249840630b82SMichael S. Tsirkin !!(tun->flags & IFF_MULTI_QUEUE)) 24998e6d91aeSJason Wang return -EINVAL; 25008e6d91aeSJason Wang 2501cde8b15fSJason Wang if (tun_not_capable(tun)) 25022b980dbdSPaul Moore return -EPERM; 25035dbbaf2dSPaul Moore err = security_tun_dev_open(tun->security); 25042b980dbdSPaul Moore if (err < 0) 25052b980dbdSPaul Moore return err; 25062b980dbdSPaul Moore 250794317099SPetar Penkov err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, 250894317099SPetar Penkov ifr->ifr_flags & IFF_NAPI); 2509a7385ba2SEric W. Biederman if (err < 0) 2510a7385ba2SEric W. Biederman return err; 25114008e97fSJason Wang 251240630b82SMichael S. Tsirkin if (tun->flags & IFF_MULTI_QUEUE && 2513e8dbad66SJason Wang (tun->numqueues + tun->numdisabled > 1)) { 2514e8dbad66SJason Wang /* One or more queue has already been attached, no need 2515e8dbad66SJason Wang * to initialize the device again. 2516e8dbad66SJason Wang */ 2517e8dbad66SJason Wang return 0; 2518e8dbad66SJason Wang } 251986a264abSDavid Howells } 25201da177e4SLinus Torvalds else { 25211da177e4SLinus Torvalds char *name; 25221da177e4SLinus Torvalds unsigned long flags = 0; 2523edfb6a14SJason Wang int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? 2524edfb6a14SJason Wang MAX_TAP_QUEUES : 1; 25251da177e4SLinus Torvalds 2526c260b772SEric W. Biederman if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2527ca6bb5d7SDavid Woodhouse return -EPERM; 25282b980dbdSPaul Moore err = security_tun_dev_create(); 25292b980dbdSPaul Moore if (err < 0) 25302b980dbdSPaul Moore return err; 2531ca6bb5d7SDavid Woodhouse 25321da177e4SLinus Torvalds /* Set dev type */ 25331da177e4SLinus Torvalds if (ifr->ifr_flags & IFF_TUN) { 25341da177e4SLinus Torvalds /* TUN device */ 253540630b82SMichael S. Tsirkin flags |= IFF_TUN; 25361da177e4SLinus Torvalds name = "tun%d"; 25371da177e4SLinus Torvalds } else if (ifr->ifr_flags & IFF_TAP) { 25381da177e4SLinus Torvalds /* TAP device */ 253940630b82SMichael S. Tsirkin flags |= IFF_TAP; 25401da177e4SLinus Torvalds name = "tap%d"; 25411da177e4SLinus Torvalds } else 254236989b90SKusanagi Kouichi return -EINVAL; 25431da177e4SLinus Torvalds 25441da177e4SLinus Torvalds if (*ifr->ifr_name) 25451da177e4SLinus Torvalds name = ifr->ifr_name; 25461da177e4SLinus Torvalds 2547c8d68e6bSJason Wang dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, 2548c835a677STom Gundersen NET_NAME_UNKNOWN, tun_setup, queues, 2549c835a677STom Gundersen queues); 2550edfb6a14SJason Wang 25511da177e4SLinus Torvalds if (!dev) 25521da177e4SLinus Torvalds return -ENOMEM; 25530ad646c8SCong Wang err = dev_get_valid_name(net, dev, name); 25545c25f65fSJulien Gomes if (err < 0) 25550ad646c8SCong Wang goto err_free_dev; 25561da177e4SLinus Torvalds 2557fc54c658SPavel Emelyanov dev_net_set(dev, net); 2558f019a7a5SEric W. 
Biederman dev->rtnl_link_ops = &tun_link_ops; 2559fb7589a1SPavel Emelyanov dev->ifindex = tfile->ifindex; 2560c4d33e24STakashi Iwai dev->sysfs_groups[0] = &tun_attr_group; 2561758e43b7SStephen Hemminger 25621da177e4SLinus Torvalds tun = netdev_priv(dev); 25631da177e4SLinus Torvalds tun->dev = dev; 25641da177e4SLinus Torvalds tun->flags = flags; 2565f271b2ccSMax Krasnyansky tun->txflt.count = 0; 2566d9d52b51SMichael S. Tsirkin tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); 25671da177e4SLinus Torvalds 2568eaea34b2SPaolo Abeni tun->align = NET_SKB_PAD; 256954f968d6SJason Wang tun->filter_attached = false; 257054f968d6SJason Wang tun->sndbuf = tfile->socket.sk->sk_sndbuf; 25715503fcecSJason Wang tun->rx_batched = 0; 257296f84061SJason Wang RCU_INIT_POINTER(tun->steering_prog, NULL); 257333dccbb0SHerbert Xu 2574608b9977SPaolo Abeni tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); 2575608b9977SPaolo Abeni if (!tun->pcpu_stats) { 2576608b9977SPaolo Abeni err = -ENOMEM; 2577608b9977SPaolo Abeni goto err_free_dev; 2578608b9977SPaolo Abeni } 2579608b9977SPaolo Abeni 258096442e42SJason Wang spin_lock_init(&tun->lock); 258196442e42SJason Wang 25825dbbaf2dSPaul Moore err = security_tun_dev_alloc_security(&tun->security); 25835dbbaf2dSPaul Moore if (err < 0) 2584608b9977SPaolo Abeni goto err_free_stat; 25852b980dbdSPaul Moore 25861da177e4SLinus Torvalds tun_net_init(dev); 2587944a1376SPavel Emelyanov tun_flow_init(tun); 258896442e42SJason Wang 258988255375SMichał Mirosław dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | 25906680ec68SJason Wang TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | 25916680ec68SJason Wang NETIF_F_HW_VLAN_STAG_TX; 25922a2bbf17SPaolo Abeni dev->features = dev->hw_features | NETIF_F_LLTX; 25936671b224SFernando Luis Vazquez Cao dev->vlan_features = dev->features & 25946671b224SFernando Luis Vazquez Cao ~(NETIF_F_HW_VLAN_CTAG_TX | 25956671b224SFernando Luis Vazquez Cao NETIF_F_HW_VLAN_STAG_TX); 259688255375SMichał Mirosław 25974008e97fSJason Wang INIT_LIST_HEAD(&tun->disabled); 259894317099SPetar Penkov err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI); 2599eb0fb363SJason Wang if (err < 0) 2600662ca437SJason Wang goto err_free_flow; 2601eb0fb363SJason Wang 26021da177e4SLinus Torvalds err = register_netdevice(tun->dev); 26031da177e4SLinus Torvalds if (err < 0) 2604662ca437SJason Wang goto err_detach; 2605af668b3cSMichael S. Tsirkin } 2606980c9e8cSDavid Woodhouse 2607eb0fb363SJason Wang netif_carrier_on(tun->dev); 26081da177e4SLinus Torvalds 26096b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_set_iff\n"); 26101da177e4SLinus Torvalds 2611031f5e03SMichael S. Tsirkin tun->flags = (tun->flags & ~TUN_FEATURES) | 2612031f5e03SMichael S. Tsirkin (ifr->ifr_flags & TUN_FEATURES); 2613c8d68e6bSJason Wang 2614e35259a9SMax Krasnyansky /* Make sure persistent devices do not get stuck in 2615e35259a9SMax Krasnyansky * xoff state. 
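 * (A persistent interface keeps its netdevice, and therefore its tx queue
 * state, across the detach of its last file; a queue stopped before that
 * detach would never be woken on its own, so every queue is woken here on
 * reattach to let transmission resume.)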
2616e35259a9SMax Krasnyansky */ 2617e35259a9SMax Krasnyansky if (netif_running(tun->dev)) 2618c8d68e6bSJason Wang netif_tx_wake_all_queues(tun->dev); 2619e35259a9SMax Krasnyansky 26201da177e4SLinus Torvalds strcpy(ifr->ifr_name, tun->dev->name); 26211da177e4SLinus Torvalds return 0; 26221da177e4SLinus Torvalds 2623662ca437SJason Wang err_detach: 2624662ca437SJason Wang tun_detach_all(dev); 2625ff244c6bSEric Dumazet /* register_netdevice() already called tun_free_netdev() */ 2626ff244c6bSEric Dumazet goto err_free_dev; 2627ff244c6bSEric Dumazet 2628662ca437SJason Wang err_free_flow: 2629662ca437SJason Wang tun_flow_uninit(tun); 2630662ca437SJason Wang security_tun_dev_free_security(tun->security); 2631608b9977SPaolo Abeni err_free_stat: 2632608b9977SPaolo Abeni free_percpu(tun->pcpu_stats); 26331da177e4SLinus Torvalds err_free_dev: 26341da177e4SLinus Torvalds free_netdev(dev); 26351da177e4SLinus Torvalds return err; 26361da177e4SLinus Torvalds } 26371da177e4SLinus Torvalds 26389ce99cf6SRami Rosen static void tun_get_iff(struct net *net, struct tun_struct *tun, 2639876bfd4dSHerbert Xu struct ifreq *ifr) 2640e3b99556SMark McLoughlin { 26416b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_get_iff\n"); 2642e3b99556SMark McLoughlin 2643e3b99556SMark McLoughlin strcpy(ifr->ifr_name, tun->dev->name); 2644e3b99556SMark McLoughlin 2645980c9e8cSDavid Woodhouse ifr->ifr_flags = tun_flags(tun); 2646e3b99556SMark McLoughlin 2647e3b99556SMark McLoughlin } 2648e3b99556SMark McLoughlin 26495228ddc9SRusty Russell /* This is like a cut-down ethtool ops, except done via tun fd so no 26505228ddc9SRusty Russell * privs required. */ 265188255375SMichał Mirosław static int set_offload(struct tun_struct *tun, unsigned long arg) 26525228ddc9SRusty Russell { 2653c8f44affSMichał Mirosław netdev_features_t features = 0; 26545228ddc9SRusty Russell 26555228ddc9SRusty Russell if (arg & TUN_F_CSUM) { 265688255375SMichał Mirosław features |= NETIF_F_HW_CSUM; 26575228ddc9SRusty Russell arg &= ~TUN_F_CSUM; 26585228ddc9SRusty Russell 26595228ddc9SRusty Russell if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { 26605228ddc9SRusty Russell if (arg & TUN_F_TSO_ECN) { 26615228ddc9SRusty Russell features |= NETIF_F_TSO_ECN; 26625228ddc9SRusty Russell arg &= ~TUN_F_TSO_ECN; 26635228ddc9SRusty Russell } 26645228ddc9SRusty Russell if (arg & TUN_F_TSO4) 26655228ddc9SRusty Russell features |= NETIF_F_TSO; 26665228ddc9SRusty Russell if (arg & TUN_F_TSO6) 26675228ddc9SRusty Russell features |= NETIF_F_TSO6; 26685228ddc9SRusty Russell arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 26695228ddc9SRusty Russell } 26700c19f846SWillem de Bruijn 26710c19f846SWillem de Bruijn arg &= ~TUN_F_UFO; 26725228ddc9SRusty Russell } 26735228ddc9SRusty Russell 26745228ddc9SRusty Russell /* This gives the user a way to test for new features in future by 26755228ddc9SRusty Russell * trying to set them. 
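 * Anything still left in arg at this point is either a flag this kernel
 * does not know about or one that is not valid in the requested
 * combination, so the call fails with -EINVAL instead of the unknown bits
 * being silently ignored.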
*/ 26765228ddc9SRusty Russell if (arg) 26775228ddc9SRusty Russell return -EINVAL; 26785228ddc9SRusty Russell 267988255375SMichał Mirosław tun->set_features = features; 268009050957SYaroslav Isakov tun->dev->wanted_features &= ~TUN_USER_FEATURES; 268109050957SYaroslav Isakov tun->dev->wanted_features |= features; 268288255375SMichał Mirosław netdev_update_features(tun->dev); 26835228ddc9SRusty Russell 26845228ddc9SRusty Russell return 0; 26855228ddc9SRusty Russell } 26865228ddc9SRusty Russell 2687c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n) 2688c8d68e6bSJason Wang { 2689c8d68e6bSJason Wang int i; 2690c8d68e6bSJason Wang struct tun_file *tfile; 2691c8d68e6bSJason Wang 2692c8d68e6bSJason Wang for (i = 0; i < n; i++) { 2693b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 26948ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 26958ced425eSHannes Frederic Sowa sk_detach_filter(tfile->socket.sk); 26968ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2697c8d68e6bSJason Wang } 2698c8d68e6bSJason Wang 2699c8d68e6bSJason Wang tun->filter_attached = false; 2700c8d68e6bSJason Wang } 2701c8d68e6bSJason Wang 2702c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun) 2703c8d68e6bSJason Wang { 2704c8d68e6bSJason Wang int i, ret = 0; 2705c8d68e6bSJason Wang struct tun_file *tfile; 2706c8d68e6bSJason Wang 2707c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2708b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 27098ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 27108ced425eSHannes Frederic Sowa ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); 27118ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2712c8d68e6bSJason Wang if (ret) { 2713c8d68e6bSJason Wang tun_detach_filter(tun, i); 2714c8d68e6bSJason Wang return ret; 2715c8d68e6bSJason Wang } 2716c8d68e6bSJason Wang } 2717c8d68e6bSJason Wang 2718c8d68e6bSJason Wang tun->filter_attached = true; 2719c8d68e6bSJason Wang return ret; 2720c8d68e6bSJason Wang } 2721c8d68e6bSJason Wang 2722c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun) 2723c8d68e6bSJason Wang { 2724c8d68e6bSJason Wang struct tun_file *tfile; 2725c8d68e6bSJason Wang int i; 2726c8d68e6bSJason Wang 2727c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2728b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 2729c8d68e6bSJason Wang tfile->socket.sk->sk_sndbuf = tun->sndbuf; 2730c8d68e6bSJason Wang } 2731c8d68e6bSJason Wang } 2732c8d68e6bSJason Wang 2733cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr) 2734cde8b15fSJason Wang { 2735cde8b15fSJason Wang struct tun_file *tfile = file->private_data; 2736cde8b15fSJason Wang struct tun_struct *tun; 2737cde8b15fSJason Wang int ret = 0; 2738cde8b15fSJason Wang 2739cde8b15fSJason Wang rtnl_lock(); 2740cde8b15fSJason Wang 2741cde8b15fSJason Wang if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { 27424008e97fSJason Wang tun = tfile->detached; 27435dbbaf2dSPaul Moore if (!tun) { 2744cde8b15fSJason Wang ret = -EINVAL; 27455dbbaf2dSPaul Moore goto unlock; 27465dbbaf2dSPaul Moore } 27475dbbaf2dSPaul Moore ret = security_tun_dev_attach_queue(tun->security); 27485dbbaf2dSPaul Moore if (ret < 0) 27495dbbaf2dSPaul Moore goto unlock; 275094317099SPetar Penkov ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI); 27514008e97fSJason Wang } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 2752b8deabd3SJason Wang tun = rtnl_dereference(tfile->tun); 275340630b82SMichael S. 
Tsirkin if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) 27544008e97fSJason Wang ret = -EINVAL; 2755cde8b15fSJason Wang else 27564008e97fSJason Wang __tun_detach(tfile, false); 27574008e97fSJason Wang } else 2758cde8b15fSJason Wang ret = -EINVAL; 2759cde8b15fSJason Wang 27605dbbaf2dSPaul Moore unlock: 2761cde8b15fSJason Wang rtnl_unlock(); 2762cde8b15fSJason Wang return ret; 2763cde8b15fSJason Wang } 2764cde8b15fSJason Wang 2765cd5681d7SJason Wang static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p, 2766cd5681d7SJason Wang void __user *data) 276796f84061SJason Wang { 276896f84061SJason Wang struct bpf_prog *prog; 276996f84061SJason Wang int fd; 277096f84061SJason Wang 277196f84061SJason Wang if (copy_from_user(&fd, data, sizeof(fd))) 277296f84061SJason Wang return -EFAULT; 277396f84061SJason Wang 277496f84061SJason Wang if (fd == -1) { 277596f84061SJason Wang prog = NULL; 277696f84061SJason Wang } else { 277796f84061SJason Wang prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); 277896f84061SJason Wang if (IS_ERR(prog)) 277996f84061SJason Wang return PTR_ERR(prog); 278096f84061SJason Wang } 278196f84061SJason Wang 2782cd5681d7SJason Wang return __tun_set_ebpf(tun, prog_p, prog); 278396f84061SJason Wang } 278496f84061SJason Wang 278550857e2aSArnd Bergmann static long __tun_chr_ioctl(struct file *file, unsigned int cmd, 278650857e2aSArnd Bergmann unsigned long arg, int ifreq_len) 27871da177e4SLinus Torvalds { 278836b50babSEric W. Biederman struct tun_file *tfile = file->private_data; 2789631ab46bSEric W. Biederman struct tun_struct *tun; 27901da177e4SLinus Torvalds void __user* argp = (void __user*)arg; 27911da177e4SLinus Torvalds struct ifreq ifr; 27920625c883SEric W. Biederman kuid_t owner; 27930625c883SEric W. Biederman kgid_t group; 279433dccbb0SHerbert Xu int sndbuf; 2795d9d52b51SMichael S. Tsirkin int vnet_hdr_sz; 2796fb7589a1SPavel Emelyanov unsigned int ifindex; 27971cf8e410SMichael S. Tsirkin int le; 2798f271b2ccSMax Krasnyansky int ret; 27991da177e4SLinus Torvalds 280020861f26SGao Feng if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == SOCK_IOC_TYPE) { 280150857e2aSArnd Bergmann if (copy_from_user(&ifr, argp, ifreq_len)) 28021da177e4SLinus Torvalds return -EFAULT; 28038bbb1813SDavid S. Miller } else { 2804a117dacdSMathias Krause memset(&ifr, 0, sizeof(ifr)); 28058bbb1813SDavid S. Miller } 2806631ab46bSEric W. Biederman if (cmd == TUNGETFEATURES) { 2807631ab46bSEric W. Biederman /* Currently this just means: "what IFF flags are valid?". 2808631ab46bSEric W. Biederman * This is needed because we never checked for invalid flags on 2809031f5e03SMichael S. Tsirkin * TUNSETIFF. 2810031f5e03SMichael S. Tsirkin */ 2811031f5e03SMichael S. Tsirkin return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES, 2812631ab46bSEric W. Biederman (unsigned int __user*)argp); 2813cde8b15fSJason Wang } else if (cmd == TUNSETQUEUE) 2814cde8b15fSJason Wang return tun_set_queue(file, &ifr); 2815631ab46bSEric W. Biederman 2816c8d68e6bSJason Wang ret = 0; 2817876bfd4dSHerbert Xu rtnl_lock(); 2818876bfd4dSHerbert Xu 28199484dc74Syuan linyu tun = tun_get(tfile); 28200f16bc13SGao Feng if (cmd == TUNSETIFF) { 28210f16bc13SGao Feng ret = -EEXIST; 28220f16bc13SGao Feng if (tun) 28230f16bc13SGao Feng goto unlock; 28240f16bc13SGao Feng 28251da177e4SLinus Torvalds ifr.ifr_name[IFNAMSIZ-1] = '\0'; 28261da177e4SLinus Torvalds 2827140e807dSEric W. 
Biederman ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr); 28281da177e4SLinus Torvalds 2829876bfd4dSHerbert Xu if (ret) 2830876bfd4dSHerbert Xu goto unlock; 28311da177e4SLinus Torvalds 283250857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 2833876bfd4dSHerbert Xu ret = -EFAULT; 2834876bfd4dSHerbert Xu goto unlock; 28351da177e4SLinus Torvalds } 2836fb7589a1SPavel Emelyanov if (cmd == TUNSETIFINDEX) { 2837fb7589a1SPavel Emelyanov ret = -EPERM; 2838fb7589a1SPavel Emelyanov if (tun) 2839fb7589a1SPavel Emelyanov goto unlock; 2840fb7589a1SPavel Emelyanov 2841fb7589a1SPavel Emelyanov ret = -EFAULT; 2842fb7589a1SPavel Emelyanov if (copy_from_user(&ifindex, argp, sizeof(ifindex))) 2843fb7589a1SPavel Emelyanov goto unlock; 2844fb7589a1SPavel Emelyanov 2845fb7589a1SPavel Emelyanov ret = 0; 2846fb7589a1SPavel Emelyanov tfile->ifindex = ifindex; 2847fb7589a1SPavel Emelyanov goto unlock; 2848fb7589a1SPavel Emelyanov } 28491da177e4SLinus Torvalds 2850876bfd4dSHerbert Xu ret = -EBADFD; 28511da177e4SLinus Torvalds if (!tun) 2852876bfd4dSHerbert Xu goto unlock; 28531da177e4SLinus Torvalds 28541e588338SJason Wang tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd); 28551da177e4SLinus Torvalds 2856631ab46bSEric W. Biederman ret = 0; 28571da177e4SLinus Torvalds switch (cmd) { 2858e3b99556SMark McLoughlin case TUNGETIFF: 28599ce99cf6SRami Rosen tun_get_iff(current->nsproxy->net_ns, tun, &ifr); 2860e3b99556SMark McLoughlin 28613d407a80SPavel Emelyanov if (tfile->detached) 28623d407a80SPavel Emelyanov ifr.ifr_flags |= IFF_DETACH_QUEUE; 2863849c9b6fSPavel Emelyanov if (!tfile->socket.sk->sk_filter) 2864849c9b6fSPavel Emelyanov ifr.ifr_flags |= IFF_NOFILTER; 28653d407a80SPavel Emelyanov 286650857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 2867631ab46bSEric W. Biederman ret = -EFAULT; 2868e3b99556SMark McLoughlin break; 2869e3b99556SMark McLoughlin 28701da177e4SLinus Torvalds case TUNSETNOCSUM: 28711da177e4SLinus Torvalds /* Disable/Enable checksum */ 28721da177e4SLinus Torvalds 287388255375SMichał Mirosław /* [unimplemented] */ 287488255375SMichał Mirosław tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n", 28756b8a66eeSJoe Perches arg ? "disabled" : "enabled"); 28761da177e4SLinus Torvalds break; 28771da177e4SLinus Torvalds 28781da177e4SLinus Torvalds case TUNSETPERSIST: 287954f968d6SJason Wang /* Disable/Enable persist mode. Keep an extra reference to the 288054f968d6SJason Wang * module to prevent the module being unprobed. 288154f968d6SJason Wang */ 288240630b82SMichael S. Tsirkin if (arg && !(tun->flags & IFF_PERSIST)) { 288340630b82SMichael S. Tsirkin tun->flags |= IFF_PERSIST; 288454f968d6SJason Wang __module_get(THIS_MODULE); 2885dd38bd85SJason Wang } 288640630b82SMichael S. Tsirkin if (!arg && (tun->flags & IFF_PERSIST)) { 288740630b82SMichael S. Tsirkin tun->flags &= ~IFF_PERSIST; 288854f968d6SJason Wang module_put(THIS_MODULE); 288954f968d6SJason Wang } 28901da177e4SLinus Torvalds 28916b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "persist %s\n", 28926b8a66eeSJoe Perches arg ? "enabled" : "disabled"); 28931da177e4SLinus Torvalds break; 28941da177e4SLinus Torvalds 28951da177e4SLinus Torvalds case TUNSETOWNER: 28961da177e4SLinus Torvalds /* Set owner of the device */ 28970625c883SEric W. Biederman owner = make_kuid(current_user_ns(), arg); 28980625c883SEric W. Biederman if (!uid_valid(owner)) { 28990625c883SEric W. Biederman ret = -EINVAL; 29000625c883SEric W. Biederman break; 29010625c883SEric W. Biederman } 29020625c883SEric W. 
Biederman tun->owner = owner; 29031e588338SJason Wang tun_debug(KERN_INFO, tun, "owner set to %u\n", 29040625c883SEric W. Biederman from_kuid(&init_user_ns, tun->owner)); 29051da177e4SLinus Torvalds break; 29061da177e4SLinus Torvalds 29078c644623SGuido Guenther case TUNSETGROUP: 29088c644623SGuido Guenther /* Set group of the device */ 29090625c883SEric W. Biederman group = make_kgid(current_user_ns(), arg); 29100625c883SEric W. Biederman if (!gid_valid(group)) { 29110625c883SEric W. Biederman ret = -EINVAL; 29120625c883SEric W. Biederman break; 29130625c883SEric W. Biederman } 29140625c883SEric W. Biederman tun->group = group; 29151e588338SJason Wang tun_debug(KERN_INFO, tun, "group set to %u\n", 29160625c883SEric W. Biederman from_kgid(&init_user_ns, tun->group)); 29178c644623SGuido Guenther break; 29188c644623SGuido Guenther 2919ff4cc3acSMike Kershaw case TUNSETLINK: 2920ff4cc3acSMike Kershaw /* Only allow setting the type when the interface is down */ 2921ff4cc3acSMike Kershaw if (tun->dev->flags & IFF_UP) { 29226b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, 29236b8a66eeSJoe Perches "Linktype set failed because interface is up\n"); 292448abfe05SDavid S. Miller ret = -EBUSY; 2925ff4cc3acSMike Kershaw } else { 2926ff4cc3acSMike Kershaw tun->dev->type = (int) arg; 29276b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "linktype set to %d\n", 29286b8a66eeSJoe Perches tun->dev->type); 292948abfe05SDavid S. Miller ret = 0; 2930ff4cc3acSMike Kershaw } 2931631ab46bSEric W. Biederman break; 2932ff4cc3acSMike Kershaw 29331da177e4SLinus Torvalds #ifdef TUN_DEBUG 29341da177e4SLinus Torvalds case TUNSETDEBUG: 29351da177e4SLinus Torvalds tun->debug = arg; 29361da177e4SLinus Torvalds break; 29371da177e4SLinus Torvalds #endif 29385228ddc9SRusty Russell case TUNSETOFFLOAD: 293988255375SMichał Mirosław ret = set_offload(tun, arg); 2940631ab46bSEric W. Biederman break; 29415228ddc9SRusty Russell 2942f271b2ccSMax Krasnyansky case TUNSETTXFILTER: 2943f271b2ccSMax Krasnyansky /* Can be set only for TAPs */ 2944631ab46bSEric W. Biederman ret = -EINVAL; 294540630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 2946631ab46bSEric W. Biederman break; 2947c0e5a8c2SHarvey Harrison ret = update_filter(&tun->txflt, (void __user *)arg); 2948631ab46bSEric W. Biederman break; 29491da177e4SLinus Torvalds 29501da177e4SLinus Torvalds case SIOCGIFHWADDR: 2951b595076aSUwe Kleine-König /* Get hw address */ 2952f271b2ccSMax Krasnyansky memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); 2953f271b2ccSMax Krasnyansky ifr.ifr_hwaddr.sa_family = tun->dev->type; 295450857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 2955631ab46bSEric W. Biederman ret = -EFAULT; 2956631ab46bSEric W. Biederman break; 29571da177e4SLinus Torvalds 29581da177e4SLinus Torvalds case SIOCSIFHWADDR: 2959f271b2ccSMax Krasnyansky /* Set hw address */ 29606b8a66eeSJoe Perches tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n", 29616b8a66eeSJoe Perches ifr.ifr_hwaddr.sa_data); 296240102371SKim B. Heino 296340102371SKim B. Heino ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 2964631ab46bSEric W. 
Biederman break; 296533dccbb0SHerbert Xu 296633dccbb0SHerbert Xu case TUNGETSNDBUF: 296754f968d6SJason Wang sndbuf = tfile->socket.sk->sk_sndbuf; 296833dccbb0SHerbert Xu if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) 296933dccbb0SHerbert Xu ret = -EFAULT; 297033dccbb0SHerbert Xu break; 297133dccbb0SHerbert Xu 297233dccbb0SHerbert Xu case TUNSETSNDBUF: 297333dccbb0SHerbert Xu if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { 297433dccbb0SHerbert Xu ret = -EFAULT; 297533dccbb0SHerbert Xu break; 297633dccbb0SHerbert Xu } 297793161922SCraig Gallek if (sndbuf <= 0) { 297893161922SCraig Gallek ret = -EINVAL; 297993161922SCraig Gallek break; 298093161922SCraig Gallek } 298133dccbb0SHerbert Xu 2982c8d68e6bSJason Wang tun->sndbuf = sndbuf; 2983c8d68e6bSJason Wang tun_set_sndbuf(tun); 298433dccbb0SHerbert Xu break; 298533dccbb0SHerbert Xu 2986d9d52b51SMichael S. Tsirkin case TUNGETVNETHDRSZ: 2987d9d52b51SMichael S. Tsirkin vnet_hdr_sz = tun->vnet_hdr_sz; 2988d9d52b51SMichael S. Tsirkin if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) 2989d9d52b51SMichael S. Tsirkin ret = -EFAULT; 2990d9d52b51SMichael S. Tsirkin break; 2991d9d52b51SMichael S. Tsirkin 2992d9d52b51SMichael S. Tsirkin case TUNSETVNETHDRSZ: 2993d9d52b51SMichael S. Tsirkin if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { 2994d9d52b51SMichael S. Tsirkin ret = -EFAULT; 2995d9d52b51SMichael S. Tsirkin break; 2996d9d52b51SMichael S. Tsirkin } 2997d9d52b51SMichael S. Tsirkin if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { 2998d9d52b51SMichael S. Tsirkin ret = -EINVAL; 2999d9d52b51SMichael S. Tsirkin break; 3000d9d52b51SMichael S. Tsirkin } 3001d9d52b51SMichael S. Tsirkin 3002d9d52b51SMichael S. Tsirkin tun->vnet_hdr_sz = vnet_hdr_sz; 3003d9d52b51SMichael S. Tsirkin break; 3004d9d52b51SMichael S. Tsirkin 30051cf8e410SMichael S. Tsirkin case TUNGETVNETLE: 30061cf8e410SMichael S. Tsirkin le = !!(tun->flags & TUN_VNET_LE); 30071cf8e410SMichael S. Tsirkin if (put_user(le, (int __user *)argp)) 30081cf8e410SMichael S. Tsirkin ret = -EFAULT; 30091cf8e410SMichael S. Tsirkin break; 30101cf8e410SMichael S. Tsirkin 30111cf8e410SMichael S. Tsirkin case TUNSETVNETLE: 30121cf8e410SMichael S. Tsirkin if (get_user(le, (int __user *)argp)) { 30131cf8e410SMichael S. Tsirkin ret = -EFAULT; 30141cf8e410SMichael S. Tsirkin break; 30151cf8e410SMichael S. Tsirkin } 30161cf8e410SMichael S. Tsirkin if (le) 30171cf8e410SMichael S. Tsirkin tun->flags |= TUN_VNET_LE; 30181cf8e410SMichael S. Tsirkin else 30191cf8e410SMichael S. Tsirkin tun->flags &= ~TUN_VNET_LE; 30201cf8e410SMichael S. Tsirkin break; 30211cf8e410SMichael S. Tsirkin 30228b8e658bSGreg Kurz case TUNGETVNETBE: 30238b8e658bSGreg Kurz ret = tun_get_vnet_be(tun, argp); 30248b8e658bSGreg Kurz break; 30258b8e658bSGreg Kurz 30268b8e658bSGreg Kurz case TUNSETVNETBE: 30278b8e658bSGreg Kurz ret = tun_set_vnet_be(tun, argp); 30288b8e658bSGreg Kurz break; 30298b8e658bSGreg Kurz 303099405162SMichael S. Tsirkin case TUNATTACHFILTER: 303199405162SMichael S. Tsirkin /* Can be set only for TAPs */ 303299405162SMichael S. Tsirkin ret = -EINVAL; 303340630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 303499405162SMichael S. Tsirkin break; 303599405162SMichael S. Tsirkin ret = -EFAULT; 303654f968d6SJason Wang if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) 303799405162SMichael S. Tsirkin break; 303899405162SMichael S. Tsirkin 3039c8d68e6bSJason Wang ret = tun_attach_filter(tun); 304099405162SMichael S. Tsirkin break; 304199405162SMichael S. 
Tsirkin 304299405162SMichael S. Tsirkin case TUNDETACHFILTER: 304399405162SMichael S. Tsirkin /* Can be set only for TAPs */ 304499405162SMichael S. Tsirkin ret = -EINVAL; 304540630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 304699405162SMichael S. Tsirkin break; 3047c8d68e6bSJason Wang ret = 0; 3048c8d68e6bSJason Wang tun_detach_filter(tun, tun->numqueues); 304999405162SMichael S. Tsirkin break; 305099405162SMichael S. Tsirkin 305176975e9cSPavel Emelyanov case TUNGETFILTER: 305276975e9cSPavel Emelyanov ret = -EINVAL; 305340630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 305476975e9cSPavel Emelyanov break; 305576975e9cSPavel Emelyanov ret = -EFAULT; 305676975e9cSPavel Emelyanov if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) 305776975e9cSPavel Emelyanov break; 305876975e9cSPavel Emelyanov ret = 0; 305976975e9cSPavel Emelyanov break; 306076975e9cSPavel Emelyanov 306196f84061SJason Wang case TUNSETSTEERINGEBPF: 3062cd5681d7SJason Wang ret = tun_set_ebpf(tun, &tun->steering_prog, argp); 306396f84061SJason Wang break; 306496f84061SJason Wang 3065aff3d70aSJason Wang case TUNSETFILTEREBPF: 3066aff3d70aSJason Wang ret = tun_set_ebpf(tun, &tun->filter_prog, argp); 3067aff3d70aSJason Wang break; 3068aff3d70aSJason Wang 30691da177e4SLinus Torvalds default: 3070631ab46bSEric W. Biederman ret = -EINVAL; 3071631ab46bSEric W. Biederman break; 3072ee289b64SJoe Perches } 30731da177e4SLinus Torvalds 3074876bfd4dSHerbert Xu unlock: 3075876bfd4dSHerbert Xu rtnl_unlock(); 3076876bfd4dSHerbert Xu if (tun) 3077631ab46bSEric W. Biederman tun_put(tun); 3078631ab46bSEric W. Biederman return ret; 30791da177e4SLinus Torvalds } 30801da177e4SLinus Torvalds 308150857e2aSArnd Bergmann static long tun_chr_ioctl(struct file *file, 308250857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 308350857e2aSArnd Bergmann { 308450857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); 308550857e2aSArnd Bergmann } 308650857e2aSArnd Bergmann 308750857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 308850857e2aSArnd Bergmann static long tun_chr_compat_ioctl(struct file *file, 308950857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 309050857e2aSArnd Bergmann { 309150857e2aSArnd Bergmann switch (cmd) { 309250857e2aSArnd Bergmann case TUNSETIFF: 309350857e2aSArnd Bergmann case TUNGETIFF: 309450857e2aSArnd Bergmann case TUNSETTXFILTER: 309550857e2aSArnd Bergmann case TUNGETSNDBUF: 309650857e2aSArnd Bergmann case TUNSETSNDBUF: 309750857e2aSArnd Bergmann case SIOCGIFHWADDR: 309850857e2aSArnd Bergmann case SIOCSIFHWADDR: 309950857e2aSArnd Bergmann arg = (unsigned long)compat_ptr(arg); 310050857e2aSArnd Bergmann break; 310150857e2aSArnd Bergmann default: 310250857e2aSArnd Bergmann arg = (compat_ulong_t)arg; 310350857e2aSArnd Bergmann break; 310450857e2aSArnd Bergmann } 310550857e2aSArnd Bergmann 310650857e2aSArnd Bergmann /* 310750857e2aSArnd Bergmann * compat_ifreq is shorter than ifreq, so we must not access beyond 310850857e2aSArnd Bergmann * the end of that structure. All fields that are used in this 310950857e2aSArnd Bergmann * driver are compatible though, we don't need to convert the 311050857e2aSArnd Bergmann * contents. 
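 * Passing sizeof(struct compat_ifreq) as ifreq_len below makes
 * __tun_chr_ioctl() copy only that many bytes to and from user space,
 * which is what keeps the shorter 32-bit layout safe to use directly.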
311150857e2aSArnd Bergmann */ 311250857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); 311350857e2aSArnd Bergmann } 311450857e2aSArnd Bergmann #endif /* CONFIG_COMPAT */ 311550857e2aSArnd Bergmann 31161da177e4SLinus Torvalds static int tun_chr_fasync(int fd, struct file *file, int on) 31171da177e4SLinus Torvalds { 311854f968d6SJason Wang struct tun_file *tfile = file->private_data; 31191da177e4SLinus Torvalds int ret; 31201da177e4SLinus Torvalds 312154f968d6SJason Wang if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) 31229d319522SJonathan Corbet goto out; 31231da177e4SLinus Torvalds 31241da177e4SLinus Torvalds if (on) { 3125e0b93eddSJeff Layton __f_setown(file, task_pid(current), PIDTYPE_PID, 0); 312654f968d6SJason Wang tfile->flags |= TUN_FASYNC; 31271da177e4SLinus Torvalds } else 312854f968d6SJason Wang tfile->flags &= ~TUN_FASYNC; 31299d319522SJonathan Corbet ret = 0; 31309d319522SJonathan Corbet out: 31319d319522SJonathan Corbet return ret; 31321da177e4SLinus Torvalds } 31331da177e4SLinus Torvalds 31341da177e4SLinus Torvalds static int tun_chr_open(struct inode *inode, struct file * file) 31351da177e4SLinus Torvalds { 3136140e807dSEric W. Biederman struct net *net = current->nsproxy->net_ns; 3137631ab46bSEric W. Biederman struct tun_file *tfile; 3138deed49fbSThomas Gleixner 31396b8a66eeSJoe Perches DBG1(KERN_INFO, "tunX: tun_chr_open\n"); 3140631ab46bSEric W. Biederman 3141140e807dSEric W. Biederman tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, 314211aa9c28SEric W. Biederman &tun_proto, 0); 3143631ab46bSEric W. Biederman if (!tfile) 3144631ab46bSEric W. Biederman return -ENOMEM; 3145c956674bSMonam Agarwal RCU_INIT_POINTER(tfile->tun, NULL); 314654f968d6SJason Wang tfile->flags = 0; 3147fb7589a1SPavel Emelyanov tfile->ifindex = 0; 314854f968d6SJason Wang 314954f968d6SJason Wang init_waitqueue_head(&tfile->wq.wait); 31509e641bdcSXi Wang RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq); 315154f968d6SJason Wang 315254f968d6SJason Wang tfile->socket.file = file; 315354f968d6SJason Wang tfile->socket.ops = &tun_socket_ops; 315454f968d6SJason Wang 315554f968d6SJason Wang sock_init_data(&tfile->socket, &tfile->sk); 315654f968d6SJason Wang 315754f968d6SJason Wang tfile->sk.sk_write_space = tun_sock_write_space; 315854f968d6SJason Wang tfile->sk.sk_sndbuf = INT_MAX; 315954f968d6SJason Wang 3160631ab46bSEric W. Biederman file->private_data = tfile; 31614008e97fSJason Wang INIT_LIST_HEAD(&tfile->next); 316254f968d6SJason Wang 316319a6afb2SJason Wang sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); 316419a6afb2SJason Wang 31658565d26bSDavid S. Miller memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); 3166762c330dSJason Wang tfile->xdp_pending_pkts = 0; 31674df0bfc7SCong Wang 31681da177e4SLinus Torvalds return 0; 31691da177e4SLinus Torvalds } 31701da177e4SLinus Torvalds 31711da177e4SLinus Torvalds static int tun_chr_close(struct inode *inode, struct file *file) 31721da177e4SLinus Torvalds { 3173631ab46bSEric W. 
Biederman struct tun_file *tfile = file->private_data; 31741da177e4SLinus Torvalds 3175c8d68e6bSJason Wang tun_detach(tfile, true); 31761da177e4SLinus Torvalds 31771da177e4SLinus Torvalds return 0; 31781da177e4SLinus Torvalds } 31791da177e4SLinus Torvalds 318093e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 31819484dc74Syuan linyu static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) 318293e14b6dSMasatake YAMATO { 31839484dc74Syuan linyu struct tun_file *tfile = file->private_data; 318493e14b6dSMasatake YAMATO struct tun_struct *tun; 318593e14b6dSMasatake YAMATO struct ifreq ifr; 318693e14b6dSMasatake YAMATO 318793e14b6dSMasatake YAMATO memset(&ifr, 0, sizeof(ifr)); 318893e14b6dSMasatake YAMATO 318993e14b6dSMasatake YAMATO rtnl_lock(); 31909484dc74Syuan linyu tun = tun_get(tfile); 319193e14b6dSMasatake YAMATO if (tun) 319293e14b6dSMasatake YAMATO tun_get_iff(current->nsproxy->net_ns, tun, &ifr); 319393e14b6dSMasatake YAMATO rtnl_unlock(); 319493e14b6dSMasatake YAMATO 319593e14b6dSMasatake YAMATO if (tun) 319693e14b6dSMasatake YAMATO tun_put(tun); 319793e14b6dSMasatake YAMATO 3198a3816ab0SJoe Perches seq_printf(m, "iff:\t%s\n", ifr.ifr_name); 319993e14b6dSMasatake YAMATO } 320093e14b6dSMasatake YAMATO #endif 320193e14b6dSMasatake YAMATO 3202d54b1fdbSArjan van de Ven static const struct file_operations tun_fops = { 32031da177e4SLinus Torvalds .owner = THIS_MODULE, 32041da177e4SLinus Torvalds .llseek = no_llseek, 32059b067034SAl Viro .read_iter = tun_chr_read_iter, 3206f5ff53b4SAl Viro .write_iter = tun_chr_write_iter, 32071da177e4SLinus Torvalds .poll = tun_chr_poll, 3208876bfd4dSHerbert Xu .unlocked_ioctl = tun_chr_ioctl, 320950857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 321050857e2aSArnd Bergmann .compat_ioctl = tun_chr_compat_ioctl, 321150857e2aSArnd Bergmann #endif 32121da177e4SLinus Torvalds .open = tun_chr_open, 32131da177e4SLinus Torvalds .release = tun_chr_close, 321493e14b6dSMasatake YAMATO .fasync = tun_chr_fasync, 321593e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 321693e14b6dSMasatake YAMATO .show_fdinfo = tun_chr_show_fdinfo, 321793e14b6dSMasatake YAMATO #endif 32181da177e4SLinus Torvalds }; 32191da177e4SLinus Torvalds 32201da177e4SLinus Torvalds static struct miscdevice tun_miscdev = { 32211da177e4SLinus Torvalds .minor = TUN_MINOR, 32221da177e4SLinus Torvalds .name = "tun", 3223e454cea2SKay Sievers .nodename = "net/tun", 32241da177e4SLinus Torvalds .fops = &tun_fops, 32251da177e4SLinus Torvalds }; 32261da177e4SLinus Torvalds 32271da177e4SLinus Torvalds /* ethtool interface */ 32281da177e4SLinus Torvalds 322929ccc49dSPhilippe Reynes static int tun_get_link_ksettings(struct net_device *dev, 323029ccc49dSPhilippe Reynes struct ethtool_link_ksettings *cmd) 32311da177e4SLinus Torvalds { 323229ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, supported); 323329ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, advertising); 323429ccc49dSPhilippe Reynes cmd->base.speed = SPEED_10; 323529ccc49dSPhilippe Reynes cmd->base.duplex = DUPLEX_FULL; 323629ccc49dSPhilippe Reynes cmd->base.port = PORT_TP; 323729ccc49dSPhilippe Reynes cmd->base.phy_address = 0; 323829ccc49dSPhilippe Reynes cmd->base.autoneg = AUTONEG_DISABLE; 32391da177e4SLinus Torvalds return 0; 32401da177e4SLinus Torvalds } 32411da177e4SLinus Torvalds 32421da177e4SLinus Torvalds static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 32431da177e4SLinus Torvalds { 32441da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 32451da177e4SLinus 
Torvalds 324633a5ba14SRick Jones strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 324733a5ba14SRick Jones strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 32481da177e4SLinus Torvalds 32491da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 325040630b82SMichael S. Tsirkin case IFF_TUN: 325133a5ba14SRick Jones strlcpy(info->bus_info, "tun", sizeof(info->bus_info)); 32521da177e4SLinus Torvalds break; 325340630b82SMichael S. Tsirkin case IFF_TAP: 325433a5ba14SRick Jones strlcpy(info->bus_info, "tap", sizeof(info->bus_info)); 32551da177e4SLinus Torvalds break; 32561da177e4SLinus Torvalds } 32571da177e4SLinus Torvalds } 32581da177e4SLinus Torvalds 32591da177e4SLinus Torvalds static u32 tun_get_msglevel(struct net_device *dev) 32601da177e4SLinus Torvalds { 32611da177e4SLinus Torvalds #ifdef TUN_DEBUG 32621da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 32631da177e4SLinus Torvalds return tun->debug; 32641da177e4SLinus Torvalds #else 32651da177e4SLinus Torvalds return -EOPNOTSUPP; 32661da177e4SLinus Torvalds #endif 32671da177e4SLinus Torvalds } 32681da177e4SLinus Torvalds 32691da177e4SLinus Torvalds static void tun_set_msglevel(struct net_device *dev, u32 value) 32701da177e4SLinus Torvalds { 32711da177e4SLinus Torvalds #ifdef TUN_DEBUG 32721da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 32731da177e4SLinus Torvalds tun->debug = value; 32741da177e4SLinus Torvalds #endif 32751da177e4SLinus Torvalds } 32761da177e4SLinus Torvalds 32775503fcecSJason Wang static int tun_get_coalesce(struct net_device *dev, 32785503fcecSJason Wang struct ethtool_coalesce *ec) 32795503fcecSJason Wang { 32805503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 32815503fcecSJason Wang 32825503fcecSJason Wang ec->rx_max_coalesced_frames = tun->rx_batched; 32835503fcecSJason Wang 32845503fcecSJason Wang return 0; 32855503fcecSJason Wang } 32865503fcecSJason Wang 32875503fcecSJason Wang static int tun_set_coalesce(struct net_device *dev, 32885503fcecSJason Wang struct ethtool_coalesce *ec) 32895503fcecSJason Wang { 32905503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 32915503fcecSJason Wang 32925503fcecSJason Wang if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) 32935503fcecSJason Wang tun->rx_batched = NAPI_POLL_WEIGHT; 32945503fcecSJason Wang else 32955503fcecSJason Wang tun->rx_batched = ec->rx_max_coalesced_frames; 32965503fcecSJason Wang 32975503fcecSJason Wang return 0; 32985503fcecSJason Wang } 32995503fcecSJason Wang 33007282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops = { 33011da177e4SLinus Torvalds .get_drvinfo = tun_get_drvinfo, 33021da177e4SLinus Torvalds .get_msglevel = tun_get_msglevel, 33031da177e4SLinus Torvalds .set_msglevel = tun_set_msglevel, 3304bee31369SNolan Leake .get_link = ethtool_op_get_link, 3305eda29772SRichard Cochran .get_ts_info = ethtool_op_get_ts_info, 33065503fcecSJason Wang .get_coalesce = tun_get_coalesce, 33075503fcecSJason Wang .set_coalesce = tun_set_coalesce, 330829ccc49dSPhilippe Reynes .get_link_ksettings = tun_get_link_ksettings, 33091da177e4SLinus Torvalds }; 33101da177e4SLinus Torvalds 33111576d986SJason Wang static int tun_queue_resize(struct tun_struct *tun) 33121576d986SJason Wang { 33131576d986SJason Wang struct net_device *dev = tun->dev; 33141576d986SJason Wang struct tun_file *tfile; 33155990a305SJason Wang struct ptr_ring **rings; 33161576d986SJason Wang int n = tun->numqueues + tun->numdisabled; 33171576d986SJason Wang int ret, i; 33181576d986SJason Wang 
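/* Resize the tx_ring of every queue, attached and detached alike, to the
 * device's new tx_queue_len in a single call; entries that no longer fit
 * are released through tun_ptr_free().
 */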
33195990a305SJason Wang rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); 33205990a305SJason Wang if (!rings) 33211576d986SJason Wang return -ENOMEM; 33221576d986SJason Wang 33231576d986SJason Wang for (i = 0; i < tun->numqueues; i++) { 33241576d986SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 33255990a305SJason Wang rings[i] = &tfile->tx_ring; 33261576d986SJason Wang } 33271576d986SJason Wang list_for_each_entry(tfile, &tun->disabled, next) 33285990a305SJason Wang rings[i++] = &tfile->tx_ring; 33291576d986SJason Wang 33305990a305SJason Wang ret = ptr_ring_resize_multiple(rings, n, 33315990a305SJason Wang dev->tx_queue_len, GFP_KERNEL, 3332fc72d1d5SJason Wang tun_ptr_free); 33331576d986SJason Wang 33345990a305SJason Wang kfree(rings); 33351576d986SJason Wang return ret; 33361576d986SJason Wang } 33371576d986SJason Wang 33381576d986SJason Wang static int tun_device_event(struct notifier_block *unused, 33391576d986SJason Wang unsigned long event, void *ptr) 33401576d986SJason Wang { 33411576d986SJason Wang struct net_device *dev = netdev_notifier_info_to_dev(ptr); 33421576d986SJason Wang struct tun_struct *tun = netdev_priv(dev); 33431576d986SJason Wang 334486dfb4acSCraig Gallek if (dev->rtnl_link_ops != &tun_link_ops) 334586dfb4acSCraig Gallek return NOTIFY_DONE; 334686dfb4acSCraig Gallek 33471576d986SJason Wang switch (event) { 33481576d986SJason Wang case NETDEV_CHANGE_TX_QUEUE_LEN: 33491576d986SJason Wang if (tun_queue_resize(tun)) 33501576d986SJason Wang return NOTIFY_BAD; 33511576d986SJason Wang break; 33521576d986SJason Wang default: 33531576d986SJason Wang break; 33541576d986SJason Wang } 33551576d986SJason Wang 33561576d986SJason Wang return NOTIFY_DONE; 33571576d986SJason Wang } 33581576d986SJason Wang 33591576d986SJason Wang static struct notifier_block tun_notifier_block __read_mostly = { 33601576d986SJason Wang .notifier_call = tun_device_event, 33611576d986SJason Wang }; 336279d17604SPavel Emelyanov 33631da177e4SLinus Torvalds static int __init tun_init(void) 33641da177e4SLinus Torvalds { 33651da177e4SLinus Torvalds int ret = 0; 33661da177e4SLinus Torvalds 33676b8a66eeSJoe Perches pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 33681da177e4SLinus Torvalds 3369f019a7a5SEric W. Biederman ret = rtnl_link_register(&tun_link_ops); 337079d17604SPavel Emelyanov if (ret) { 33716b8a66eeSJoe Perches pr_err("Can't register link_ops\n"); 3372f019a7a5SEric W. Biederman goto err_linkops; 337379d17604SPavel Emelyanov } 337479d17604SPavel Emelyanov 33751da177e4SLinus Torvalds ret = misc_register(&tun_miscdev); 337679d17604SPavel Emelyanov if (ret) { 33776b8a66eeSJoe Perches pr_err("Can't register misc device %d\n", TUN_MINOR); 337879d17604SPavel Emelyanov goto err_misc; 337979d17604SPavel Emelyanov } 33801576d986SJason Wang 33815edfbd3cSTonghao Zhang ret = register_netdevice_notifier(&tun_notifier_block); 33825edfbd3cSTonghao Zhang if (ret) { 33835edfbd3cSTonghao Zhang pr_err("Can't register netdevice notifier\n"); 33845edfbd3cSTonghao Zhang goto err_notifier; 33855edfbd3cSTonghao Zhang } 33865edfbd3cSTonghao Zhang 338779d17604SPavel Emelyanov return 0; 33885edfbd3cSTonghao Zhang 33895edfbd3cSTonghao Zhang err_notifier: 33905edfbd3cSTonghao Zhang misc_deregister(&tun_miscdev); 339179d17604SPavel Emelyanov err_misc: 3392f019a7a5SEric W. Biederman rtnl_link_unregister(&tun_link_ops); 3393f019a7a5SEric W. 
Biederman err_linkops: 33941da177e4SLinus Torvalds return ret; 33951da177e4SLinus Torvalds } 33961da177e4SLinus Torvalds 33971da177e4SLinus Torvalds static void tun_cleanup(void) 33981da177e4SLinus Torvalds { 33991da177e4SLinus Torvalds misc_deregister(&tun_miscdev); 3400f019a7a5SEric W. Biederman rtnl_link_unregister(&tun_link_ops); 34011576d986SJason Wang unregister_netdevice_notifier(&tun_notifier_block); 34021da177e4SLinus Torvalds } 34031da177e4SLinus Torvalds 340405c2828cSMichael S. Tsirkin /* Get an underlying socket object from tun file. Returns error unless file is 340505c2828cSMichael S. Tsirkin * attached to a device. The returned object works like a packet socket, it 340605c2828cSMichael S. Tsirkin * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for 340705c2828cSMichael S. Tsirkin * holding a reference to the file for as long as the socket is in use. */ 340805c2828cSMichael S. Tsirkin struct socket *tun_get_socket(struct file *file) 340905c2828cSMichael S. Tsirkin { 34106e914fc7SJason Wang struct tun_file *tfile; 341105c2828cSMichael S. Tsirkin if (file->f_op != &tun_fops) 341205c2828cSMichael S. Tsirkin return ERR_PTR(-EINVAL); 34136e914fc7SJason Wang tfile = file->private_data; 34146e914fc7SJason Wang if (!tfile) 341505c2828cSMichael S. Tsirkin return ERR_PTR(-EBADFD); 341654f968d6SJason Wang return &tfile->socket; 341705c2828cSMichael S. Tsirkin } 341805c2828cSMichael S. Tsirkin EXPORT_SYMBOL_GPL(tun_get_socket); 341905c2828cSMichael S. Tsirkin 34205990a305SJason Wang struct ptr_ring *tun_get_tx_ring(struct file *file) 342183339c6bSJason Wang { 342283339c6bSJason Wang struct tun_file *tfile; 342383339c6bSJason Wang 342483339c6bSJason Wang if (file->f_op != &tun_fops) 342583339c6bSJason Wang return ERR_PTR(-EINVAL); 342683339c6bSJason Wang tfile = file->private_data; 342783339c6bSJason Wang if (!tfile) 342883339c6bSJason Wang return ERR_PTR(-EBADFD); 34295990a305SJason Wang return &tfile->tx_ring; 343083339c6bSJason Wang } 34315990a305SJason Wang EXPORT_SYMBOL_GPL(tun_get_tx_ring); 343283339c6bSJason Wang 34331da177e4SLinus Torvalds module_init(tun_init); 34341da177e4SLinus Torvalds module_exit(tun_cleanup); 34351da177e4SLinus Torvalds MODULE_DESCRIPTION(DRV_DESCRIPTION); 34361da177e4SLinus Torvalds MODULE_AUTHOR(DRV_COPYRIGHT); 34371da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 34381da177e4SLinus Torvalds MODULE_ALIAS_MISCDEV(TUN_MINOR); 3439578454ffSKay Sievers MODULE_ALIAS("devname:net/tun"); 3440
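For context, the TUNSETIFF path handled above is normally driven from user space through the /dev/net/tun misc device registered by tun_miscdev. The sketch below is illustrative only and not part of the driver; the helper name tun_alloc_example and the choice of IFF_TUN | IFF_NO_PI are assumptions made for the example.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Illustrative user-space sketch: create (or attach to) a tun interface and
 * return a file descriptor whose read()/write() carry raw IP packets.
 * "name" must point to a buffer of at least IFNAMSIZ bytes (or be NULL).
 */
static int tun_alloc_example(char *name)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return fd;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	/* layer-3 device, no packet-info header */
	if (name && *name)
		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}

	/* tun_set_iff() copied back the name it actually allocated ("tun%d"). */
	if (name)
		strcpy(name, ifr.ifr_name);

	return fd;
}

Opening the node again with IFF_MULTI_QUEUE set in ifr_flags, or issuing TUNSETQUEUE with IFF_ATTACH_QUEUE, exercises the multi-queue attach paths shown earlier in this file.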