/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */
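
/*
 * Illustrative only, not part of the driver: a minimal userspace sketch of
 * the character-device API this file implements.  The names below are the
 * standard ones from <linux/if_tun.h>; error handling is omitted.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	int tun_alloc(const char *name)
 *	{
 *		struct ifreq ifr;
 *		int fd = open("/dev/net/tun", O_RDWR);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	// or IFF_TAP
 *		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
 *		ioctl(fd, TUNSETIFF, &ifr);		// creates/attaches the device
 *		return fd;				// read()/write() raw packets
 *	}
 */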

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (tun->debug)						\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (debug == 2)						\
		printk(level fmt, ##args);			\
} while (0)
#else
#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (0)							\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (0)							\
		printk(level fmt, ##args);			\
} while (0)
#endif

#define TUN_HEADROOM 256
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE     0x80000000
#define TUN_VNET_BE     0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and tap_filter)
 * to serve as one transmit queue for the tuntap device. The sock_fprog and
 * tap_filter are kept in tun_struct since they are used for filtering on the
 * netdevice, not on a specific queue (at least I didn't see the requirement
 * for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

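/*
 * Illustrative only: how userspace ends up with several tun_files bound to
 * one device.  Each queue is a separate open of /dev/net/tun with
 * IFF_MULTI_QUEUE; TUNSETQUEUE with IFF_DETACH_QUEUE/IFF_ATTACH_QUEUE parks
 * a queue in tun->disabled and brings it back (see tun_detach()/tun_attach()
 * below).  Error handling omitted.
 *
 *	struct ifreq ifr;
 *	int q0 = open("/dev/net/tun", O_RDWR);
 *	int q1 = open("/dev/net/tun", O_RDWR);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
 *	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);
 *	ioctl(q0, TUNSETIFF, &ifr);		// creates tap0, first queue
 *	ioctl(q1, TUNSETIFF, &ifr);		// attaches a second queue
 *
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;	// park queue 1 in tun->disabled
 *	ioctl(q1, TUNSETQUEUE, &ifr);
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;	// and bring it back
 *	ioctl(q1, TUNSETQUEUE, &ifr);
 */
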
struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved into tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when the file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int            numqueues;
	unsigned int 		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

bool tun_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}
EXPORT_SYMBOL(tun_is_xdp_frame);

void *tun_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_xdp_to_ptr);

void *tun_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_ptr_to_xdp);

static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}

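/*
 * Note added for orientation (my reading of the flags above, not original
 * text): IFF_NAPI makes packets written from userspace go through the
 * per-queue NAPI context (tun_napi_poll() feeds napi_gro_receive()) instead
 * of netif_rx().  IFF_NAPI_FRAGS additionally lets userspace submit a frame
 * as a batch of fragments assembled under tfile->napi_mutex; it is only
 * meaningful together with IFF_NAPI and is mainly used for exercising the
 * GRO/flow-dissector paths in testing.
 */
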
#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}

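/*
 * Worked example (mine, not original text): the virtio_net_hdr that precedes
 * each frame when IFF_VNET_HDR is set uses "legacy virtio" endianness, i.e.
 * guest-native byte order.  A big-endian host talking to a little-endian
 * legacy guest forces LE with the TUNSETVNETLE ioctl (TUN_VNET_LE above);
 * with CONFIG_TUN_VNET_CROSS_LE the reverse is possible via TUNSETVNETBE.
 * So on a big-endian host with TUN_VNET_LE set, tun16_to_cpu(tun,
 * hdr->gso_size) byte-swaps, while without the flag it is a no-op.
 */
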
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	if (!rxhash)
		return;
	else
		head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

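/*
 * Summary note (mine, for orientation): tun_flow_update() is called from the
 * userspace-write path and records "the flow with this rxhash last came in
 * on queue N" in tun->flows[tun_hashfn(rxhash)], where tun_hashfn() is simply
 * rxhash & (TUN_NUM_FLOW_ENTRIES - 1); e.g. rxhash 0x12345678 selects bucket
 * 0x12345678 & 0x3ff = 0x278.  Entries idle for TUN_FLOW_EXPIRE (3 * HZ) are
 * reclaimed by tun_flow_cleanup() above, and tun_automq_select_queue() below
 * uses the table to keep a flow's transmit traffic on the queue it arrived
 * from.
 */
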
/**
 * Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash first. The reason we do not
 * check rxq no. is that some cards (e.g. 82599) choose the rxq based on the
 * txq where the last packet of the flow went out. As the userspace
 * application moves between processors, we may get a different rxq no. here.
 * If we could not get an rxhash, then we hope the rxq no. may help here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	if (txq) {
		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
		if (e) {
			tun_flow_save_rps_rxhash(e, txq);
			txq = e->queue_index;
		} else
			/* use multiply and shift instead of expensive divide */
			txq = ((u64)txq * numqueues) >> 32;
	} else if (likely(skb_rx_queue_recorded(skb))) {
		txq = skb_get_rx_queue(skb);
		while (unlikely(txq >= numqueues))
			txq -= numqueues;
	}

	return txq;
}

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u16 ret = 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % tun->numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}

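/*
 * Worked example (mine) for the "multiply and shift instead of expensive
 * divide" trick in tun_automq_select_queue(): for a 32-bit hash h and
 * numqueues q, ((u64)h * q) >> 32 scales h from the range [0, 2^32) down to
 * [0, q) without a division.  E.g. h = 0x80000000 and q = 4 gives
 * (0x80000000ULL * 4) >> 32 = 2, so the middle of the hash space lands in
 * the middle queue.
 */
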
static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to the persistent device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (!err)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	tun_set_real_num_queues(tun);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

out:
	return err;
}

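/*
 * Side note (mine): the sock_fprog re-attach above is what makes classic BPF
 * filters survive on persistent devices.  Userspace installs the filter with
 * the TUNATTACHFILTER ioctl (and removes it with TUNDETACHFILTER); the
 * program is remembered in tun->fprog, so when a persistent device
 * (TUNSETPERSIST) is re-opened later, each newly attached queue gets the
 * same filter applied to its socket again.
 */
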
static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks, which is
	 * perfectly safe. We disable it first and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

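/*
 * Illustrative only: the tap_filter above is programmed from userspace with
 * the TUNSETTXFILTER ioctl.  The payload is a struct tun_filter followed by
 * "count" MAC addresses; the first FLT_EXACT_COUNT (8) become exact matches,
 * any further addresses must be multicast and only set bits in the hash mask
 * (see update_filter() above).  Error handling omitted; "fd" is a tap fd
 * obtained as in the earlier sketch.
 *
 *	char buf[sizeof(struct tun_filter) + 2 * ETH_ALEN];
 *	struct tun_filter *f = (struct tun_filter *)buf;
 *	static const unsigned char addrs[2][ETH_ALEN] = {
 *		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
 *	};
 *
 *	memset(buf, 0, sizeof(buf));
 *	f->count = 2;				// f->flags = TUN_FLT_ALLMULTI
 *	memcpy(f->addr, addrs, sizeof(addrs));	//   would accept all multicast
 *	ioctl(fd, TUNSETTXFILTER, f);
 */
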
/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	netif_tx_start_all_queues(dev);

	for (i = 0; i < tun->numqueues; i++) {
		struct tun_file *tfile;

		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_write_space(tfile->socket.sk);
	}

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		if (rxhash) {
			struct tun_flow_entry *e;
			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
					  rxhash);
			if (e)
				tun_flow_save_rps_rxhash(e, rxhash);
		}
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (txq >= tun->numqueues)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * The filter can be enabled only for TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0 || pskb_trim(skb, len))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for an indefinite time.
	 */
	skb_orphan(skb);

	nf_reset(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

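/*
 * Note (mine): a packet queued to tfile->tx_ring here is what a userspace
 * read() on the fd returns.  Unless IFF_NO_PI was requested, the frame is
 * preceded by a struct tun_pi { __u16 flags; __be16 proto; }, and with
 * IFF_VNET_HDR a virtio_net_hdr of tun->vnet_hdr_sz bytes comes first so
 * GSO/checksum-offload state survives the trip through userspace.  Writes in
 * the opposite direction use the same framing.
 */
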
static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}

static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_pcpu_stats *p;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(tun->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets	= p->rx_packets;
			rxbytes		= p->rx_bytes;
			txpackets	= p->tx_packets;
			txbytes		= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;

		/* u32 counters */
		rx_dropped	+= p->rx_dropped;
		rx_frame_errors	+= p->rx_frame_errors;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped  = rx_dropped;
	stats->rx_frame_errors = rx_frame_errors;
	stats->tx_dropped = tx_dropped;
}

static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct bpf_prog *old_prog;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}

static u32 tun_xdp_query(struct net_device *dev)
{
*tun = netdev_priv(dev); 1221761876c8SJason Wang const struct bpf_prog *xdp_prog; 1222761876c8SJason Wang 1223761876c8SJason Wang xdp_prog = rtnl_dereference(tun->xdp_prog); 1224761876c8SJason Wang if (xdp_prog) 1225761876c8SJason Wang return xdp_prog->aux->id; 1226761876c8SJason Wang 1227761876c8SJason Wang return 0; 1228761876c8SJason Wang } 1229761876c8SJason Wang 1230f4e63525SJakub Kicinski static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) 1231761876c8SJason Wang { 1232761876c8SJason Wang switch (xdp->command) { 1233761876c8SJason Wang case XDP_SETUP_PROG: 1234761876c8SJason Wang return tun_xdp_set(dev, xdp->prog, xdp->extack); 1235761876c8SJason Wang case XDP_QUERY_PROG: 1236761876c8SJason Wang xdp->prog_id = tun_xdp_query(dev); 1237761876c8SJason Wang return 0; 1238761876c8SJason Wang default: 1239761876c8SJason Wang return -EINVAL; 1240761876c8SJason Wang } 1241761876c8SJason Wang } 1242761876c8SJason Wang 1243758e43b7SStephen Hemminger static const struct net_device_ops tun_netdev_ops = { 1244c70f1829SEric W. Biederman .ndo_uninit = tun_net_uninit, 1245758e43b7SStephen Hemminger .ndo_open = tun_net_open, 1246758e43b7SStephen Hemminger .ndo_stop = tun_net_close, 124700829823SStephen Hemminger .ndo_start_xmit = tun_net_xmit, 124888255375SMichał Mirosław .ndo_fix_features = tun_net_fix_features, 1249c8d68e6bSJason Wang .ndo_select_queue = tun_select_queue, 1250eaea34b2SPaolo Abeni .ndo_set_rx_headroom = tun_set_headroom, 1251608b9977SPaolo Abeni .ndo_get_stats64 = tun_net_get_stats64, 1252758e43b7SStephen Hemminger }; 1253758e43b7SStephen Hemminger 12540c9d917bSJesper Dangaard Brouer static void __tun_xdp_flush_tfile(struct tun_file *tfile) 12550c9d917bSJesper Dangaard Brouer { 12560c9d917bSJesper Dangaard Brouer /* Notify and wake up reader process */ 12570c9d917bSJesper Dangaard Brouer if (tfile->flags & TUN_FASYNC) 12580c9d917bSJesper Dangaard Brouer kill_fasync(&tfile->fasync, SIGIO, POLL_IN); 12590c9d917bSJesper Dangaard Brouer tfile->socket.sk->sk_data_ready(tfile->socket.sk); 12600c9d917bSJesper Dangaard Brouer } 12610c9d917bSJesper Dangaard Brouer 126242b33468SJesper Dangaard Brouer static int tun_xdp_xmit(struct net_device *dev, int n, 126342b33468SJesper Dangaard Brouer struct xdp_frame **frames, u32 flags) 1264fc72d1d5SJason Wang { 1265fc72d1d5SJason Wang struct tun_struct *tun = netdev_priv(dev); 1266fc72d1d5SJason Wang struct tun_file *tfile; 1267fc72d1d5SJason Wang u32 numqueues; 1268735fc405SJesper Dangaard Brouer int drops = 0; 1269735fc405SJesper Dangaard Brouer int cnt = n; 1270735fc405SJesper Dangaard Brouer int i; 1271fc72d1d5SJason Wang 12720c9d917bSJesper Dangaard Brouer if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 127342b33468SJesper Dangaard Brouer return -EINVAL; 127442b33468SJesper Dangaard Brouer 1275fc72d1d5SJason Wang rcu_read_lock(); 1276fc72d1d5SJason Wang 1277fc72d1d5SJason Wang numqueues = READ_ONCE(tun->numqueues); 1278fc72d1d5SJason Wang if (!numqueues) { 1279735fc405SJesper Dangaard Brouer rcu_read_unlock(); 1280735fc405SJesper Dangaard Brouer return -ENXIO; /* Caller will free/return all frames */ 1281fc72d1d5SJason Wang } 1282fc72d1d5SJason Wang 1283fc72d1d5SJason Wang tfile = rcu_dereference(tun->tfiles[smp_processor_id() % 1284fc72d1d5SJason Wang numqueues]); 1285735fc405SJesper Dangaard Brouer 1286735fc405SJesper Dangaard Brouer spin_lock(&tfile->tx_ring.producer_lock); 1287735fc405SJesper Dangaard Brouer for (i = 0; i < n; i++) { 1288735fc405SJesper Dangaard Brouer struct xdp_frame *xdp = frames[i]; 1289fc72d1d5SJason Wang /* 
Encode the XDP flag into the lowest bit so the consumer can distinguish an
1290fc72d1d5SJason Wang * XDP buffer from an sk_buff.
1291fc72d1d5SJason Wang */
1292735fc405SJesper Dangaard Brouer void *frame = tun_xdp_to_ptr(xdp);
1293fc72d1d5SJason Wang
1294735fc405SJesper Dangaard Brouer if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
1295735fc405SJesper Dangaard Brouer this_cpu_inc(tun->pcpu_stats->tx_dropped);
1296735fc405SJesper Dangaard Brouer xdp_return_frame_rx_napi(xdp);
1297735fc405SJesper Dangaard Brouer drops++;
1298735fc405SJesper Dangaard Brouer }
1299735fc405SJesper Dangaard Brouer }
1300735fc405SJesper Dangaard Brouer spin_unlock(&tfile->tx_ring.producer_lock);
1301735fc405SJesper Dangaard Brouer
13020c9d917bSJesper Dangaard Brouer if (flags & XDP_XMIT_FLUSH)
13030c9d917bSJesper Dangaard Brouer __tun_xdp_flush_tfile(tfile);
13040c9d917bSJesper Dangaard Brouer
1305fc72d1d5SJason Wang rcu_read_unlock();
1306735fc405SJesper Dangaard Brouer return cnt - drops;
1307fc72d1d5SJason Wang }
1308fc72d1d5SJason Wang
130944fa2dbdSJesper Dangaard Brouer static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
131044fa2dbdSJesper Dangaard Brouer {
131144fa2dbdSJesper Dangaard Brouer struct xdp_frame *frame = convert_to_xdp_frame(xdp);
131244fa2dbdSJesper Dangaard Brouer
131344fa2dbdSJesper Dangaard Brouer if (unlikely(!frame))
131444fa2dbdSJesper Dangaard Brouer return -EOVERFLOW;
131544fa2dbdSJesper Dangaard Brouer
131642421a56SJesper Dangaard Brouer return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
1317fc72d1d5SJason Wang }
1318fc72d1d5SJason Wang
1319758e43b7SStephen Hemminger static const struct net_device_ops tap_netdev_ops = {
1320c70f1829SEric W. Biederman .ndo_uninit = tun_net_uninit,
1321758e43b7SStephen Hemminger .ndo_open = tun_net_open,
1322758e43b7SStephen Hemminger .ndo_stop = tun_net_close,
132300829823SStephen Hemminger .ndo_start_xmit = tun_net_xmit,
132488255375SMichał Mirosław .ndo_fix_features = tun_net_fix_features,
1325afc4b13dSJiri Pirko .ndo_set_rx_mode = tun_net_mclist,
1326758e43b7SStephen Hemminger .ndo_set_mac_address = eth_mac_addr,
1327758e43b7SStephen Hemminger .ndo_validate_addr = eth_validate_addr,
1328c8d68e6bSJason Wang .ndo_select_queue = tun_select_queue,
13295e52796aSToshiaki Makita .ndo_features_check = passthru_features_check,
1330eaea34b2SPaolo Abeni .ndo_set_rx_headroom = tun_set_headroom,
1331608b9977SPaolo Abeni .ndo_get_stats64 = tun_net_get_stats64,
1332f4e63525SJakub Kicinski .ndo_bpf = tun_xdp,
1333fc72d1d5SJason Wang .ndo_xdp_xmit = tun_xdp_xmit,
1334758e43b7SStephen Hemminger };
1335758e43b7SStephen Hemminger
1336944a1376SPavel Emelyanov static void tun_flow_init(struct tun_struct *tun)
133796442e42SJason Wang {
133896442e42SJason Wang int i;
133996442e42SJason Wang
134096442e42SJason Wang for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
134196442e42SJason Wang INIT_HLIST_HEAD(&tun->flows[i]);
134296442e42SJason Wang
134396442e42SJason Wang tun->ageing_time = TUN_FLOW_EXPIRE;
1344e99e88a9SKees Cook timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1345e99e88a9SKees Cook mod_timer(&tun->flow_gc_timer,
1346e99e88a9SKees Cook round_jiffies_up(jiffies + tun->ageing_time));
134796442e42SJason Wang }
134896442e42SJason Wang
134996442e42SJason Wang static void tun_flow_uninit(struct tun_struct *tun)
135096442e42SJason Wang {
135196442e42SJason Wang del_timer_sync(&tun->flow_gc_timer);
135296442e42SJason Wang tun_flow_flush(tun);
135396442e42SJason Wang }
135496442e42SJason Wang
135591572088SJarod Wilson #define MIN_MTU 68
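/* Editor's note (illustrative sketch, not part of the driver): MIN_MTU
 * matches the historical IPv4 minimum of RFC 791, and MAX_MTU is the
 * 16-bit IP total-length ceiling. tun_net_init() below subtracts the
 * link header, so an Ethernet TAP device (hard_header_len == 14) tops
 * out at an MTU of 65521. A minimal userspace probe, assuming an
 * existing device named "tap0" (name and values are illustrative only):
 *
 *	struct ifreq ifr = { 0 };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);
 *	ifr.ifr_mtu = 65521;		// 65535 - 14: should be accepted
 *	ioctl(fd, SIOCSIFMTU, &ifr);
 *	ifr.ifr_mtu = 65522;		// above max_mtu: expect EINVAL
 *	ioctl(fd, SIOCSIFMTU, &ifr);
 */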
135691572088SJarod Wilson #define MAX_MTU 65535
135791572088SJarod Wilson
13581da177e4SLinus Torvalds /* Initialize net device. */
13591da177e4SLinus Torvalds static void tun_net_init(struct net_device *dev)
13601da177e4SLinus Torvalds {
13611da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev);
13621da177e4SLinus Torvalds
13631da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) {
136440630b82SMichael S. Tsirkin case IFF_TUN:
1365758e43b7SStephen Hemminger dev->netdev_ops = &tun_netdev_ops;
1366758e43b7SStephen Hemminger
13671da177e4SLinus Torvalds /* Point-to-Point TUN Device */
13681da177e4SLinus Torvalds dev->hard_header_len = 0;
13691da177e4SLinus Torvalds dev->addr_len = 0;
13701da177e4SLinus Torvalds dev->mtu = 1500;
13711da177e4SLinus Torvalds
13721da177e4SLinus Torvalds /* Zero header length */
13731da177e4SLinus Torvalds dev->type = ARPHRD_NONE;
13741da177e4SLinus Torvalds dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
13751da177e4SLinus Torvalds break;
13761da177e4SLinus Torvalds
137740630b82SMichael S. Tsirkin case IFF_TAP:
13787a0a9608SKusanagi Kouichi dev->netdev_ops = &tap_netdev_ops;
13791da177e4SLinus Torvalds /* Ethernet TAP Device */
13801da177e4SLinus Torvalds ether_setup(dev);
1381550fd08cSNeil Horman dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1382a676847bSstephen hemminger dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
138336226a8dSBrian Braunstein
1384f2cedb63SDanny Kukawka eth_hw_addr_random(dev);
138536226a8dSBrian Braunstein
13861da177e4SLinus Torvalds break;
13871da177e4SLinus Torvalds }
138891572088SJarod Wilson
138991572088SJarod Wilson dev->min_mtu = MIN_MTU;
139091572088SJarod Wilson dev->max_mtu = MAX_MTU - dev->hard_header_len;
13911da177e4SLinus Torvalds }
13921da177e4SLinus Torvalds
13932f3ab622SJason Wang static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
13942f3ab622SJason Wang {
13952f3ab622SJason Wang struct sock *sk = tfile->socket.sk;
13962f3ab622SJason Wang
13972f3ab622SJason Wang return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
13982f3ab622SJason Wang }
13992f3ab622SJason Wang
14001da177e4SLinus Torvalds /* Character device part */
14011da177e4SLinus Torvalds
14021da177e4SLinus Torvalds /* Poll */
1403afc9a42bSAl Viro static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
14041da177e4SLinus Torvalds {
1405b2430de3SEric W. Biederman struct tun_file *tfile = file->private_data;
14069484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile);
14073c8a9c63SMariusz Kozlowski struct sock *sk;
1408afc9a42bSAl Viro __poll_t mask = 0;
14091da177e4SLinus Torvalds
14101da177e4SLinus Torvalds if (!tun)
1411a9a08845SLinus Torvalds return EPOLLERR;
14121da177e4SLinus Torvalds
141354f968d6SJason Wang sk = tfile->socket.sk;
14143c8a9c63SMariusz Kozlowski
14156b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
14161da177e4SLinus Torvalds
14179e641bdcSXi Wang poll_wait(file, sk_sleep(sk), wait);
14181da177e4SLinus Torvalds
14195990a305SJason Wang if (!ptr_ring_empty(&tfile->tx_ring))
1420a9a08845SLinus Torvalds mask |= EPOLLIN | EPOLLRDNORM;
14211da177e4SLinus Torvalds
14222f3ab622SJason Wang /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
14232f3ab622SJason Wang * guarantee EPOLLOUT to be raised by either here or
14242f3ab622SJason Wang * tun_sock_write_space(). Then the process can get a notification
14252f3ab622SJason Wang * after it writes to a down device and hits -EIO.
14262f3ab622SJason Wang */
14272f3ab622SJason Wang if (tun_sock_writeable(tun, tfile) ||
14289cd3e072SEric Dumazet (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
14292f3ab622SJason Wang tun_sock_writeable(tun, tfile)))
1430a9a08845SLinus Torvalds mask |= EPOLLOUT | EPOLLWRNORM;
143133dccbb0SHerbert Xu
1432c70f1829SEric W. Biederman if (tun->dev->reg_state != NETREG_REGISTERED)
1433a9a08845SLinus Torvalds mask = EPOLLERR;
1434c70f1829SEric W. Biederman
1435631ab46bSEric W. Biederman tun_put(tun);
14361da177e4SLinus Torvalds return mask;
14371da177e4SLinus Torvalds }
14381da177e4SLinus Torvalds
143990e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
144090e33d45SPetar Penkov size_t len,
144190e33d45SPetar Penkov const struct iov_iter *it)
144290e33d45SPetar Penkov {
144390e33d45SPetar Penkov struct sk_buff *skb;
144490e33d45SPetar Penkov size_t linear;
144590e33d45SPetar Penkov int err;
144690e33d45SPetar Penkov int i;
144790e33d45SPetar Penkov
144890e33d45SPetar Penkov if (it->nr_segs > MAX_SKB_FRAGS + 1)
144990e33d45SPetar Penkov return ERR_PTR(-ENOMEM);
145090e33d45SPetar Penkov
145190e33d45SPetar Penkov local_bh_disable();
145290e33d45SPetar Penkov skb = napi_get_frags(&tfile->napi);
145390e33d45SPetar Penkov local_bh_enable();
145490e33d45SPetar Penkov if (!skb)
145590e33d45SPetar Penkov return ERR_PTR(-ENOMEM);
145690e33d45SPetar Penkov
145790e33d45SPetar Penkov linear = iov_iter_single_seg_count(it);
145890e33d45SPetar Penkov err = __skb_grow(skb, linear);
145990e33d45SPetar Penkov if (err)
146090e33d45SPetar Penkov goto free;
146190e33d45SPetar Penkov
146290e33d45SPetar Penkov skb->len = len;
146390e33d45SPetar Penkov skb->data_len = len - linear;
146490e33d45SPetar Penkov skb->truesize += skb->data_len;
146590e33d45SPetar Penkov
146690e33d45SPetar Penkov for (i = 1; i < it->nr_segs; i++) {
146743a08e0fSEric Dumazet struct page_frag *pfrag = &current->task_frag;
146890e33d45SPetar Penkov size_t fragsz = it->iov[i].iov_len;
146990e33d45SPetar Penkov
147090e33d45SPetar Penkov if (fragsz == 0 || fragsz > PAGE_SIZE) {
147190e33d45SPetar Penkov err = -EINVAL;
147290e33d45SPetar Penkov goto free;
147390e33d45SPetar Penkov }
147490e33d45SPetar Penkov
147543a08e0fSEric Dumazet if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
147690e33d45SPetar Penkov err = -ENOMEM;
147790e33d45SPetar Penkov goto free;
147890e33d45SPetar Penkov }
147990e33d45SPetar Penkov
148043a08e0fSEric Dumazet skb_fill_page_desc(skb, i - 1, pfrag->page,
148143a08e0fSEric Dumazet pfrag->offset, fragsz);
148243a08e0fSEric Dumazet page_ref_inc(pfrag->page);
148343a08e0fSEric Dumazet pfrag->offset += fragsz;
148490e33d45SPetar Penkov }
148590e33d45SPetar Penkov
148690e33d45SPetar Penkov return skb;
148790e33d45SPetar Penkov free:
148890e33d45SPetar Penkov /* frees skb and all frags allocated with napi_alloc_frag() */
148990e33d45SPetar Penkov napi_free_frags(&tfile->napi);
149090e33d45SPetar Penkov return ERR_PTR(err);
149190e33d45SPetar Penkov }
149290e33d45SPetar Penkov
1493f42157cbSRusty Russell /* prepad is the amount to reserve at front. len is length after that.
1494f42157cbSRusty Russell * linear is a hint as to how much to copy (usually headers).
*/ 149554f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, 149633dccbb0SHerbert Xu size_t prepad, size_t len, 149733dccbb0SHerbert Xu size_t linear, int noblock) 1498f42157cbSRusty Russell { 149954f968d6SJason Wang struct sock *sk = tfile->socket.sk; 1500f42157cbSRusty Russell struct sk_buff *skb; 150133dccbb0SHerbert Xu int err; 1502f42157cbSRusty Russell 1503f42157cbSRusty Russell /* Under a page? Don't bother with paged skb. */ 15040eca93bcSHerbert Xu if (prepad + len < PAGE_SIZE || !linear) 150533dccbb0SHerbert Xu linear = len; 1506f42157cbSRusty Russell 150733dccbb0SHerbert Xu skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 150828d64271SEric Dumazet &err, 0); 1509f42157cbSRusty Russell if (!skb) 151033dccbb0SHerbert Xu return ERR_PTR(err); 1511f42157cbSRusty Russell 1512f42157cbSRusty Russell skb_reserve(skb, prepad); 1513f42157cbSRusty Russell skb_put(skb, linear); 151433dccbb0SHerbert Xu skb->data_len = len - linear; 151533dccbb0SHerbert Xu skb->len += len - linear; 1516f42157cbSRusty Russell 1517f42157cbSRusty Russell return skb; 1518f42157cbSRusty Russell } 1519f42157cbSRusty Russell 15205503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, 15215503fcecSJason Wang struct sk_buff *skb, int more) 15225503fcecSJason Wang { 15235503fcecSJason Wang struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 15245503fcecSJason Wang struct sk_buff_head process_queue; 15255503fcecSJason Wang u32 rx_batched = tun->rx_batched; 15265503fcecSJason Wang bool rcv = false; 15275503fcecSJason Wang 15285503fcecSJason Wang if (!rx_batched || (!more && skb_queue_empty(queue))) { 15295503fcecSJason Wang local_bh_disable(); 15305503fcecSJason Wang netif_receive_skb(skb); 15315503fcecSJason Wang local_bh_enable(); 15325503fcecSJason Wang return; 15335503fcecSJason Wang } 15345503fcecSJason Wang 15355503fcecSJason Wang spin_lock(&queue->lock); 15365503fcecSJason Wang if (!more || skb_queue_len(queue) == rx_batched) { 15375503fcecSJason Wang __skb_queue_head_init(&process_queue); 15385503fcecSJason Wang skb_queue_splice_tail_init(queue, &process_queue); 15395503fcecSJason Wang rcv = true; 15405503fcecSJason Wang } else { 15415503fcecSJason Wang __skb_queue_tail(queue, skb); 15425503fcecSJason Wang } 15435503fcecSJason Wang spin_unlock(&queue->lock); 15445503fcecSJason Wang 15455503fcecSJason Wang if (rcv) { 15465503fcecSJason Wang struct sk_buff *nskb; 15475503fcecSJason Wang 15485503fcecSJason Wang local_bh_disable(); 15495503fcecSJason Wang while ((nskb = __skb_dequeue(&process_queue))) 15505503fcecSJason Wang netif_receive_skb(nskb); 15515503fcecSJason Wang netif_receive_skb(skb); 15525503fcecSJason Wang local_bh_enable(); 15535503fcecSJason Wang } 15545503fcecSJason Wang } 15555503fcecSJason Wang 155666ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, 155766ccbc9cSJason Wang int len, int noblock, bool zerocopy) 155866ccbc9cSJason Wang { 155966ccbc9cSJason Wang if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 156066ccbc9cSJason Wang return false; 156166ccbc9cSJason Wang 156266ccbc9cSJason Wang if (tfile->socket.sk->sk_sndbuf != INT_MAX) 156366ccbc9cSJason Wang return false; 156466ccbc9cSJason Wang 156566ccbc9cSJason Wang if (!noblock) 156666ccbc9cSJason Wang return false; 156766ccbc9cSJason Wang 156866ccbc9cSJason Wang if (zerocopy) 156966ccbc9cSJason Wang return false; 157066ccbc9cSJason Wang 157166ccbc9cSJason Wang if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + 
157266ccbc9cSJason Wang SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
157366ccbc9cSJason Wang return false;
157466ccbc9cSJason Wang
157566ccbc9cSJason Wang return true;
157666ccbc9cSJason Wang }
157766ccbc9cSJason Wang
1578761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1579761876c8SJason Wang struct tun_file *tfile,
158066ccbc9cSJason Wang struct iov_iter *from,
1581761876c8SJason Wang struct virtio_net_hdr *hdr,
15821cfe6e93SJason Wang int len, int *skb_xdp)
158366ccbc9cSJason Wang {
15840bbd7dadSEric Dumazet struct page_frag *alloc_frag = &current->task_frag;
158566ccbc9cSJason Wang struct sk_buff *skb;
1586761876c8SJason Wang struct bpf_prog *xdp_prog;
15877df13219SJason Wang int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1588761876c8SJason Wang unsigned int delta = 0;
158966ccbc9cSJason Wang char *buf;
159066ccbc9cSJason Wang size_t copied;
15917df13219SJason Wang int err, pad = TUN_RX_PAD;
15927df13219SJason Wang
15937df13219SJason Wang rcu_read_lock();
15947df13219SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog);
15957df13219SJason Wang if (xdp_prog)
15967df13219SJason Wang pad += TUN_HEADROOM;
15977df13219SJason Wang buflen += SKB_DATA_ALIGN(len + pad);
15987df13219SJason Wang rcu_read_unlock();
159966ccbc9cSJason Wang
160063b9ab65SJason Wang alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
160166ccbc9cSJason Wang if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
160266ccbc9cSJason Wang return ERR_PTR(-ENOMEM);
160366ccbc9cSJason Wang
160466ccbc9cSJason Wang buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
160566ccbc9cSJason Wang copied = copy_page_from_iter(alloc_frag->page,
16067df13219SJason Wang alloc_frag->offset + pad,
160766ccbc9cSJason Wang len, from);
160866ccbc9cSJason Wang if (copied != len)
160966ccbc9cSJason Wang return ERR_PTR(-EFAULT);
161066ccbc9cSJason Wang
16117df13219SJason Wang /* There's a small window where an XDP program may be attached after
16127df13219SJason Wang * the check of xdp_prog above. This should be rare and, for simplicity,
16137df13219SJason Wang * we do XDP on the skb in case the reserved headroom is not enough.
16147df13219SJason Wang */ 16157df13219SJason Wang if (hdr->gso_type || !xdp_prog) 16161cfe6e93SJason Wang *skb_xdp = 1; 1617761876c8SJason Wang else 16181cfe6e93SJason Wang *skb_xdp = 0; 161966ccbc9cSJason Wang 16206547e387SToshiaki Makita local_bh_disable(); 1621761876c8SJason Wang rcu_read_lock(); 1622761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 16231cfe6e93SJason Wang if (xdp_prog && !*skb_xdp) { 1624761876c8SJason Wang struct xdp_buff xdp; 1625761876c8SJason Wang void *orig_data; 1626761876c8SJason Wang u32 act; 1627761876c8SJason Wang 1628761876c8SJason Wang xdp.data_hard_start = buf; 16297df13219SJason Wang xdp.data = buf + pad; 1630de8f3a83SDaniel Borkmann xdp_set_data_meta_invalid(&xdp); 1631761876c8SJason Wang xdp.data_end = xdp.data + len; 16328bf5c4eeSJesper Dangaard Brouer xdp.rxq = &tfile->xdp_rxq; 1633761876c8SJason Wang orig_data = xdp.data; 1634761876c8SJason Wang act = bpf_prog_run_xdp(xdp_prog, &xdp); 1635761876c8SJason Wang 1636761876c8SJason Wang switch (act) { 1637761876c8SJason Wang case XDP_REDIRECT: 1638761876c8SJason Wang get_page(alloc_frag->page); 1639761876c8SJason Wang alloc_frag->offset += buflen; 1640761876c8SJason Wang err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); 16411bb4f2e8SJason Wang xdp_do_flush_map(); 1642761876c8SJason Wang if (err) 1643761876c8SJason Wang goto err_redirect; 1644654d5738SXin Long rcu_read_unlock(); 16456547e387SToshiaki Makita local_bh_enable(); 1646761876c8SJason Wang return NULL; 1647761876c8SJason Wang case XDP_TX: 164859655a5bSJason Wang get_page(alloc_frag->page); 164959655a5bSJason Wang alloc_frag->offset += buflen; 16506e8cfd6dSToshiaki Makita if (tun_xdp_tx(tun->dev, &xdp) < 0) 165159655a5bSJason Wang goto err_redirect; 165259655a5bSJason Wang rcu_read_unlock(); 16536547e387SToshiaki Makita local_bh_enable(); 165459655a5bSJason Wang return NULL; 1655761876c8SJason Wang case XDP_PASS: 1656761876c8SJason Wang delta = orig_data - xdp.data; 16578fb58f1eSNikita V. Shirokov len = xdp.data_end - xdp.data; 1658761876c8SJason Wang break; 1659761876c8SJason Wang default: 1660761876c8SJason Wang bpf_warn_invalid_xdp_action(act); 1661761876c8SJason Wang /* fall through */ 1662761876c8SJason Wang case XDP_ABORTED: 1663761876c8SJason Wang trace_xdp_exception(tun->dev, xdp_prog, act); 1664761876c8SJason Wang /* fall through */ 1665761876c8SJason Wang case XDP_DROP: 1666761876c8SJason Wang goto err_xdp; 1667761876c8SJason Wang } 1668761876c8SJason Wang } 1669761876c8SJason Wang 1670761876c8SJason Wang skb = build_skb(buf, buflen); 1671761876c8SJason Wang if (!skb) { 1672761876c8SJason Wang rcu_read_unlock(); 16736547e387SToshiaki Makita local_bh_enable(); 1674761876c8SJason Wang return ERR_PTR(-ENOMEM); 1675761876c8SJason Wang } 1676761876c8SJason Wang 16777df13219SJason Wang skb_reserve(skb, pad - delta); 16788fb58f1eSNikita V. 
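/* Editor's sketch of the arithmetic above (made-up numbers, not part of
 * the driver): suppose pad is 256 and the XDP program grew the packet
 * head with bpf_xdp_adjust_head(&xdp, -16). Then:
 *
 *	delta = orig_data - xdp.data;		// 16: data moved down
 *	len = xdp.data_end - xdp.data;		// grew by those 16 bytes
 *	skb_reserve(skb, pad - delta);		// reserve only 240
 *	skb_put(skb, len);			// cover the pushed header too
 */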
Shirokov skb_put(skb, len); 167966ccbc9cSJason Wang get_page(alloc_frag->page); 168066ccbc9cSJason Wang alloc_frag->offset += buflen; 168166ccbc9cSJason Wang 1682761876c8SJason Wang rcu_read_unlock(); 16836547e387SToshiaki Makita local_bh_enable(); 1684761876c8SJason Wang 168566ccbc9cSJason Wang return skb; 1686761876c8SJason Wang 1687761876c8SJason Wang err_redirect: 1688761876c8SJason Wang put_page(alloc_frag->page); 1689761876c8SJason Wang err_xdp: 1690761876c8SJason Wang rcu_read_unlock(); 16916547e387SToshiaki Makita local_bh_enable(); 1692761876c8SJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped); 1693761876c8SJason Wang return NULL; 169466ccbc9cSJason Wang } 169566ccbc9cSJason Wang 16961da177e4SLinus Torvalds /* Get packet from user space buffer */ 169754f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, 1698f5ff53b4SAl Viro void *msg_control, struct iov_iter *from, 16995503fcecSJason Wang int noblock, bool more) 17001da177e4SLinus Torvalds { 170109640e63SHarvey Harrison struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; 17021da177e4SLinus Torvalds struct sk_buff *skb; 1703f5ff53b4SAl Viro size_t total_len = iov_iter_count(from); 1704eaea34b2SPaolo Abeni size_t len = total_len, align = tun->align, linear; 1705f43798c2SRusty Russell struct virtio_net_hdr gso = { 0 }; 1706608b9977SPaolo Abeni struct tun_pcpu_stats *stats; 170796f8d9ecSJason Wang int good_linear; 17080690899bSMichael S. Tsirkin int copylen; 17090690899bSMichael S. Tsirkin bool zerocopy = false; 17100690899bSMichael S. Tsirkin int err; 171196f84061SJason Wang u32 rxhash = 0; 17121cfe6e93SJason Wang int skb_xdp = 1; 1713*af3fb24eSEric Dumazet bool frags = tun_napi_frags_enabled(tfile); 17141da177e4SLinus Torvalds 17151bd4978aSEric Dumazet if (!(tun->dev->flags & IFF_UP)) 17161bd4978aSEric Dumazet return -EIO; 17171bd4978aSEric Dumazet 171840630b82SMichael S. Tsirkin if (!(tun->flags & IFF_NO_PI)) { 171915718ea0SDan Carpenter if (len < sizeof(pi)) 17201da177e4SLinus Torvalds return -EINVAL; 172115718ea0SDan Carpenter len -= sizeof(pi); 17221da177e4SLinus Torvalds 1723cbbd26b8SAl Viro if (!copy_from_iter_full(&pi, sizeof(pi), from)) 17241da177e4SLinus Torvalds return -EFAULT; 17251da177e4SLinus Torvalds } 17261da177e4SLinus Torvalds 172740630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR) { 1728e1edab87SWillem de Bruijn int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 1729e1edab87SWillem de Bruijn 1730e1edab87SWillem de Bruijn if (len < vnet_hdr_sz) 1731f43798c2SRusty Russell return -EINVAL; 1732e1edab87SWillem de Bruijn len -= vnet_hdr_sz; 1733f43798c2SRusty Russell 1734cbbd26b8SAl Viro if (!copy_from_iter_full(&gso, sizeof(gso), from)) 1735f43798c2SRusty Russell return -EFAULT; 1736f43798c2SRusty Russell 17374909122fSHerbert Xu if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 173856f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len)) 173956f0dcc5SMichael S. Tsirkin gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2); 17404909122fSHerbert Xu 174156f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > len) 1742f43798c2SRusty Russell return -EINVAL; 1743e1edab87SWillem de Bruijn iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); 1744f43798c2SRusty Russell } 1745f43798c2SRusty Russell 174640630b82SMichael S. 
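Tsirkin
/* Editor's sketch (not part of the driver): with IFF_VNET_HDR set, each
 * write() or sendmsg() on the tun fd must lead with a virtio_net_hdr,
 * padded out to the configured vnet_hdr_sz (the iov_iter_advance() above
 * skips the padding). A minimal writer, assuming a fd opened with
 * IFF_TAP | IFF_NO_PI | IFF_VNET_HDR and the default header size, where
 * "frame" is a hypothetical raw Ethernet frame of frame_len bytes:
 *
 *	struct virtio_net_hdr gso = { 0 };	// no offloads requested
 *	struct iovec iov[2] = {
 *		{ &gso, sizeof(gso) },
 *		{ frame, frame_len },
 *	};
 *
 *	writev(fd, iov, 2);
 */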
if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1747a504b86eSstephen hemminger align += NET_IP_ALIGN;
17480eca93bcSHerbert Xu if (unlikely(len < ETH_HLEN ||
174956f0dcc5SMichael S. Tsirkin (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1750e01bf1c8SRusty Russell return -EINVAL;
1751e01bf1c8SRusty Russell }
17521da177e4SLinus Torvalds
175396f8d9ecSJason Wang good_linear = SKB_MAX_HEAD(align);
175496f8d9ecSJason Wang
175588529176SJason Wang if (msg_control) {
1756f5ff53b4SAl Viro struct iov_iter i = *from;
1757f5ff53b4SAl Viro
175888529176SJason Wang /* There are 256 bytes to be copied into the skb, so there is
175988529176SJason Wang * enough room to expand the head in case it is needed.
17600690899bSMichael S. Tsirkin * The rest of the buffer is mapped from userspace.
17610690899bSMichael S. Tsirkin */
176256f0dcc5SMichael S. Tsirkin copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
176396f8d9ecSJason Wang if (copylen > good_linear)
176496f8d9ecSJason Wang copylen = good_linear;
17653dd5c330SJason Wang linear = copylen;
1766f5ff53b4SAl Viro iov_iter_advance(&i, copylen);
1767f5ff53b4SAl Viro if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
176888529176SJason Wang zerocopy = true;
176988529176SJason Wang }
177088529176SJason Wang
177190e33d45SPetar Penkov if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
17721cfe6e93SJason Wang /* For packets that are not easy to process here
17731cfe6e93SJason Wang * (e.g. GSO or jumbo packets), XDP is done later, after
17741cfe6e93SJason Wang * the skb has been created, via the generic XDP routine.
17751cfe6e93SJason Wang */
17761cfe6e93SJason Wang skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
177766ccbc9cSJason Wang if (IS_ERR(skb)) {
177866ccbc9cSJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped);
177966ccbc9cSJason Wang return PTR_ERR(skb);
178066ccbc9cSJason Wang }
1781761876c8SJason Wang if (!skb)
1782761876c8SJason Wang return total_len;
178366ccbc9cSJason Wang } else {
178488529176SJason Wang if (!zerocopy) {
17850690899bSMichael S. Tsirkin copylen = len;
178656f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
178796f8d9ecSJason Wang linear = good_linear;
178896f8d9ecSJason Wang else
178956f0dcc5SMichael S. Tsirkin linear = tun16_to_cpu(tun, gso.hdr_len);
17903dd5c330SJason Wang }
17910690899bSMichael S. Tsirkin
179290e33d45SPetar Penkov if (frags) {
179390e33d45SPetar Penkov mutex_lock(&tfile->napi_mutex);
179490e33d45SPetar Penkov skb = tun_napi_alloc_frags(tfile, copylen, from);
179590e33d45SPetar Penkov /* tun_napi_alloc_frags() enforces a layout for the skb.
179690e33d45SPetar Penkov * If zerocopy is enabled, then this layout will be
179790e33d45SPetar Penkov * overwritten by zerocopy_sg_from_iter().
179890e33d45SPetar Penkov */
179990e33d45SPetar Penkov zerocopy = false;
180090e33d45SPetar Penkov } else {
180190e33d45SPetar Penkov skb = tun_alloc_skb(tfile, align, copylen, linear,
180290e33d45SPetar Penkov noblock);
180390e33d45SPetar Penkov }
180490e33d45SPetar Penkov
180533dccbb0SHerbert Xu if (IS_ERR(skb)) {
180633dccbb0SHerbert Xu if (PTR_ERR(skb) != -EAGAIN)
1807608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped);
180890e33d45SPetar Penkov if (frags)
180990e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex);
181033dccbb0SHerbert Xu return PTR_ERR(skb);
18111da177e4SLinus Torvalds }
18121da177e4SLinus Torvalds
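/* Editor's recap of the three allocation paths above (illustrative):
 * tun_build_skb() handles small linear packets that may go through XDP;
 * tun_napi_alloc_frags() serves IFF_NAPI_FRAGS and exercises GRO; and
 * tun_alloc_skb() is the default sock_alloc_send_pskb() path. For
 * example, a 200-byte non-blocking TAP write with the default sndbuf
 * (INT_MAX) and no zerocopy takes tun_build_skb(), since on a 4 KiB
 * page system
 *
 *	SKB_DATA_ALIGN(200 + TUN_RX_PAD) +
 *	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *
 * stays well under PAGE_SIZE.
 */
18130690899bSMichael S.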
Tsirkin if (zerocopy) 1814f5ff53b4SAl Viro err = zerocopy_sg_from_iter(skb, from); 1815af1cc7a2SJason Wang else 1816f5ff53b4SAl Viro err = skb_copy_datagram_from_iter(skb, 0, from, len); 18170690899bSMichael S. Tsirkin 18180690899bSMichael S. Tsirkin if (err) { 1819608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 18208f22757eSDave Jones kfree_skb(skb); 182190e33d45SPetar Penkov if (frags) { 182290e33d45SPetar Penkov tfile->napi.skb = NULL; 182390e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 182490e33d45SPetar Penkov } 182590e33d45SPetar Penkov 18261da177e4SLinus Torvalds return -EFAULT; 18278f22757eSDave Jones } 182866ccbc9cSJason Wang } 18291da177e4SLinus Torvalds 18303e9e40e7SJarno Rajahalme if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { 1831df10db98SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_frame_errors); 1832df10db98SPaolo Abeni kfree_skb(skb); 183390e33d45SPetar Penkov if (frags) { 183490e33d45SPetar Penkov tfile->napi.skb = NULL; 183590e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 183690e33d45SPetar Penkov } 183790e33d45SPetar Penkov 1838df10db98SPaolo Abeni return -EINVAL; 1839df10db98SPaolo Abeni } 1840df10db98SPaolo Abeni 18411da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 184240630b82SMichael S. Tsirkin case IFF_TUN: 184340630b82SMichael S. Tsirkin if (tun->flags & IFF_NO_PI) { 18442580c4c1SAlexander Potapenko u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; 18452580c4c1SAlexander Potapenko 18462580c4c1SAlexander Potapenko switch (ip_version) { 18472580c4c1SAlexander Potapenko case 4: 1848f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IP); 1849f09f7ee2SAng Way Chuang break; 18502580c4c1SAlexander Potapenko case 6: 1851f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IPV6); 1852f09f7ee2SAng Way Chuang break; 1853f09f7ee2SAng Way Chuang default: 1854608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 1855f09f7ee2SAng Way Chuang kfree_skb(skb); 1856f09f7ee2SAng Way Chuang return -EINVAL; 1857f09f7ee2SAng Way Chuang } 1858f09f7ee2SAng Way Chuang } 1859f09f7ee2SAng Way Chuang 1860459a98edSArnaldo Carvalho de Melo skb_reset_mac_header(skb); 18611da177e4SLinus Torvalds skb->protocol = pi.proto; 18624c13eb66SArnaldo Carvalho de Melo skb->dev = tun->dev; 18631da177e4SLinus Torvalds break; 186440630b82SMichael S. Tsirkin case IFF_TAP: 186590e33d45SPetar Penkov if (!frags) 18661da177e4SLinus Torvalds skb->protocol = eth_type_trans(skb, tun->dev); 18671da177e4SLinus Torvalds break; 18686403eab1SJoe Perches } 18691da177e4SLinus Torvalds 18700690899bSMichael S. Tsirkin /* copy skb_ubuf_info for callback when skb has no error */ 18710690899bSMichael S. Tsirkin if (zerocopy) { 18720690899bSMichael S. Tsirkin skb_shinfo(skb)->destructor_arg = msg_control; 18730690899bSMichael S. Tsirkin skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1874c9af6db4SPravin B Shelar skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1875af1cc7a2SJason Wang } else if (msg_control) { 1876af1cc7a2SJason Wang struct ubuf_info *uarg = msg_control; 1877af1cc7a2SJason Wang uarg->callback(uarg, false); 18780690899bSMichael S. Tsirkin } 18790690899bSMichael S. 
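Tsirkin
/* Editor's note on the msg_control contract above (sketch): an in-kernel
 * sender such as vhost-net passes a struct ubuf_info through msg_control
 * so guest buffers can be reclaimed. On the zerocopy path the skb keeps
 * references to those pages and the callback fires when the skb is
 * freed; on the copy path the data has already been duplicated, so
 * uarg->callback(uarg, false) releases the buffers immediately.
 */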
188072f65107SVlad Yasevich skb_reset_network_header(skb);
188140893fd0SJason Wang skb_probe_transport_header(skb, 0);
188238502af7SJason Wang
18831cfe6e93SJason Wang if (skb_xdp) {
1884761876c8SJason Wang struct bpf_prog *xdp_prog;
1885761876c8SJason Wang int ret;
1886761876c8SJason Wang
18876547e387SToshiaki Makita local_bh_disable();
1888761876c8SJason Wang rcu_read_lock();
1889761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog);
1890761876c8SJason Wang if (xdp_prog) {
1891761876c8SJason Wang ret = do_xdp_generic(xdp_prog, skb);
1892761876c8SJason Wang if (ret != XDP_PASS) {
1893761876c8SJason Wang rcu_read_unlock();
18946547e387SToshiaki Makita local_bh_enable();
1895761876c8SJason Wang return total_len;
1896761876c8SJason Wang }
1897761876c8SJason Wang }
1898761876c8SJason Wang rcu_read_unlock();
18996547e387SToshiaki Makita local_bh_enable();
1900761876c8SJason Wang }
1901761876c8SJason Wang
1902cf1a1e07SPaolo Abeni /* Compute the costly rx hash only if needed for flow updates.
1903cf1a1e07SPaolo Abeni * We may get a very small possibility of out-of-order delivery
1904cf1a1e07SPaolo Abeni * during switching; it is not worth optimizing for.
1905cf1a1e07SPaolo Abeni */
1906cf1a1e07SPaolo Abeni if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1907cf1a1e07SPaolo Abeni !tfile->detached)
1908feec084aSJason Wang rxhash = __skb_get_hash_symmetric(skb);
190994317099SPetar Penkov
191090e33d45SPetar Penkov if (frags) {
191190e33d45SPetar Penkov /* Exercise flow dissector code path. */
191290e33d45SPetar Penkov u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
191390e33d45SPetar Penkov
1914010f245bSEric Dumazet if (unlikely(headlen > skb_headlen(skb))) {
191590e33d45SPetar Penkov this_cpu_inc(tun->pcpu_stats->rx_dropped);
191690e33d45SPetar Penkov napi_free_frags(&tfile->napi);
191790e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex);
191890e33d45SPetar Penkov WARN_ON(1);
191990e33d45SPetar Penkov return -ENOMEM;
192090e33d45SPetar Penkov }
192190e33d45SPetar Penkov
192290e33d45SPetar Penkov local_bh_disable();
192390e33d45SPetar Penkov napi_gro_frags(&tfile->napi);
192490e33d45SPetar Penkov local_bh_enable();
192590e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex);
1926aec72f33SEric Dumazet } else if (tfile->napi_enabled) {
192794317099SPetar Penkov struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
192894317099SPetar Penkov int queue_len;
192994317099SPetar Penkov
193094317099SPetar Penkov spin_lock_bh(&queue->lock);
193194317099SPetar Penkov __skb_queue_tail(queue, skb);
193294317099SPetar Penkov queue_len = skb_queue_len(queue);
193394317099SPetar Penkov spin_unlock(&queue->lock);
193494317099SPetar Penkov
193594317099SPetar Penkov if (!more || queue_len > NAPI_POLL_WEIGHT)
193694317099SPetar Penkov napi_schedule(&tfile->napi);
193794317099SPetar Penkov
193894317099SPetar Penkov local_bh_enable();
193994317099SPetar Penkov } else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
19405503fcecSJason Wang tun_rx_batched(tun, tfile, skb, more);
194194317099SPetar Penkov } else {
19421da177e4SLinus Torvalds netif_rx_ni(skb);
194394317099SPetar Penkov }
19441da177e4SLinus Torvalds
1945608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats);
1946608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp);
1947608b9977SPaolo Abeni stats->rx_packets++;
1948608b9977SPaolo Abeni stats->rx_bytes += len;
1949608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp);
1950608b9977SPaolo Abeni put_cpu_ptr(stats);
19511da177e4SLinus Torvalds
195296f84061SJason Wang if (rxhash)
19539e85722dSJason Wang tun_flow_update(tun, rxhash, tfile); 195496f84061SJason Wang 19550690899bSMichael S. Tsirkin return total_len; 19561da177e4SLinus Torvalds } 19571da177e4SLinus Torvalds 1958f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) 19591da177e4SLinus Torvalds { 196033dccbb0SHerbert Xu struct file *file = iocb->ki_filp; 196154f968d6SJason Wang struct tun_file *tfile = file->private_data; 19629484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 1963631ab46bSEric W. Biederman ssize_t result; 19641da177e4SLinus Torvalds 19651da177e4SLinus Torvalds if (!tun) 19661da177e4SLinus Torvalds return -EBADFD; 19671da177e4SLinus Torvalds 19685503fcecSJason Wang result = tun_get_user(tun, tfile, NULL, from, 19695503fcecSJason Wang file->f_flags & O_NONBLOCK, false); 1970631ab46bSEric W. Biederman 1971631ab46bSEric W. Biederman tun_put(tun); 1972631ab46bSEric W. Biederman return result; 19731da177e4SLinus Torvalds } 19741da177e4SLinus Torvalds 1975fc72d1d5SJason Wang static ssize_t tun_put_user_xdp(struct tun_struct *tun, 1976fc72d1d5SJason Wang struct tun_file *tfile, 19771ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdp_frame, 1978fc72d1d5SJason Wang struct iov_iter *iter) 1979fc72d1d5SJason Wang { 1980fc72d1d5SJason Wang int vnet_hdr_sz = 0; 19811ffcbc85SJesper Dangaard Brouer size_t size = xdp_frame->len; 1982fc72d1d5SJason Wang struct tun_pcpu_stats *stats; 1983fc72d1d5SJason Wang size_t ret; 1984fc72d1d5SJason Wang 1985fc72d1d5SJason Wang if (tun->flags & IFF_VNET_HDR) { 1986fc72d1d5SJason Wang struct virtio_net_hdr gso = { 0 }; 1987fc72d1d5SJason Wang 1988fc72d1d5SJason Wang vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 1989fc72d1d5SJason Wang if (unlikely(iov_iter_count(iter) < vnet_hdr_sz)) 1990fc72d1d5SJason Wang return -EINVAL; 1991fc72d1d5SJason Wang if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) != 1992fc72d1d5SJason Wang sizeof(gso))) 1993fc72d1d5SJason Wang return -EFAULT; 1994fc72d1d5SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 1995fc72d1d5SJason Wang } 1996fc72d1d5SJason Wang 19971ffcbc85SJesper Dangaard Brouer ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz; 1998fc72d1d5SJason Wang 1999fc72d1d5SJason Wang stats = get_cpu_ptr(tun->pcpu_stats); 2000fc72d1d5SJason Wang u64_stats_update_begin(&stats->syncp); 2001fc72d1d5SJason Wang stats->tx_packets++; 2002fc72d1d5SJason Wang stats->tx_bytes += ret; 2003fc72d1d5SJason Wang u64_stats_update_end(&stats->syncp); 2004fc72d1d5SJason Wang put_cpu_ptr(tun->pcpu_stats); 2005fc72d1d5SJason Wang 2006fc72d1d5SJason Wang return ret; 2007fc72d1d5SJason Wang } 2008fc72d1d5SJason Wang 20091da177e4SLinus Torvalds /* Put packet to the user space buffer */ 20106f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun, 201154f968d6SJason Wang struct tun_file *tfile, 20121da177e4SLinus Torvalds struct sk_buff *skb, 2013e0b46d0eSHerbert Xu struct iov_iter *iter) 20141da177e4SLinus Torvalds { 20151da177e4SLinus Torvalds struct tun_pi pi = { 0, skb->protocol }; 2016608b9977SPaolo Abeni struct tun_pcpu_stats *stats; 2017e0b46d0eSHerbert Xu ssize_t total; 20188c847d25SJason Wang int vlan_offset = 0; 2019a8f9bfdfSHerbert Xu int vlan_hlen = 0; 20202eb783c4SHerbert Xu int vnet_hdr_sz = 0; 2021a8f9bfdfSHerbert Xu 2022df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) 2023a8f9bfdfSHerbert Xu vlan_hlen = VLAN_HLEN; 20241da177e4SLinus Torvalds 202540630b82SMichael S. 
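Tsirkin
/* Editor's sketch of the read-side framing produced below (TAP device,
 * IFF_VNET_HDR on, IFF_NO_PI off; names are illustrative): userspace
 * receives, in order, a tun_pi header, a virtio_net_hdr padded to
 * vnet_hdr_sz, and the frame with any VLAN tag reinserted inline. A
 * matching reader, assuming the default vnet header size:
 *
 *	struct tun_pi pi;
 *	struct virtio_net_hdr gso;
 *	char frame[65536];
 *	struct iovec iov[3] = {
 *		{ &pi, sizeof(pi) },
 *		{ &gso, sizeof(gso) },
 *		{ frame, sizeof(frame) },
 *	};
 *	ssize_t n = readv(fd, iov, 3);
 *
 *	if (n > 0 && (pi.flags & TUN_PKT_STRIP))
 *		;	// frame[] was too small, the packet was truncated
 */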
if (tun->flags & IFF_VNET_HDR)
2026e1edab87SWillem de Bruijn vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
20271da177e4SLinus Torvalds
2028e0b46d0eSHerbert Xu total = skb->len + vlan_hlen + vnet_hdr_sz;
2029e0b46d0eSHerbert Xu
203040630b82SMichael S. Tsirkin if (!(tun->flags & IFF_NO_PI)) {
2031e0b46d0eSHerbert Xu if (iov_iter_count(iter) < sizeof(pi))
20321da177e4SLinus Torvalds return -EINVAL;
20331da177e4SLinus Torvalds
2034e0b46d0eSHerbert Xu total += sizeof(pi);
2035e0b46d0eSHerbert Xu if (iov_iter_count(iter) < total) {
20361da177e4SLinus Torvalds /* Packet will be stripped */
20371da177e4SLinus Torvalds pi.flags |= TUN_PKT_STRIP;
20381da177e4SLinus Torvalds }
20391da177e4SLinus Torvalds
2040e0b46d0eSHerbert Xu if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
20411da177e4SLinus Torvalds return -EFAULT;
20421da177e4SLinus Torvalds }
20431da177e4SLinus Torvalds
20442eb783c4SHerbert Xu if (vnet_hdr_sz) {
20459403cd7cSJarno Rajahalme struct virtio_net_hdr gso;
204634166093SMike Rapoport
2047e0b46d0eSHerbert Xu if (iov_iter_count(iter) < vnet_hdr_sz)
2048f43798c2SRusty Russell return -EINVAL;
2049f43798c2SRusty Russell
20503e9e40e7SJarno Rajahalme if (virtio_net_hdr_from_skb(skb, &gso,
2051fd3a8862SWillem de Bruijn tun_is_little_endian(tun), true,
2052fd3a8862SWillem de Bruijn vlan_hlen)) {
2053f43798c2SRusty Russell struct skb_shared_info *sinfo = skb_shinfo(skb);
20546b8a66eeSJoe Perches pr_err("unexpected GSO type: "
2055ef3db4a5SMichael S. Tsirkin "0x%x, gso_size %d, hdr_len %d\n",
205656f0dcc5SMichael S. Tsirkin sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
205756f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.hdr_len));
2058ef3db4a5SMichael S. Tsirkin print_hex_dump(KERN_ERR, "tun: ",
2059ef3db4a5SMichael S. Tsirkin DUMP_PREFIX_NONE,
2060ef3db4a5SMichael S. Tsirkin 16, 1, skb->head,
206156f0dcc5SMichael S. Tsirkin min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2062ef3db4a5SMichael S. Tsirkin WARN_ON_ONCE(1);
2063ef3db4a5SMichael S. Tsirkin return -EINVAL;
2064ef3db4a5SMichael S.
Tsirkin } 2065f43798c2SRusty Russell 2066e0b46d0eSHerbert Xu if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) 2067f43798c2SRusty Russell return -EFAULT; 20688c847d25SJason Wang 20698c847d25SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 2070f43798c2SRusty Russell } 2071f43798c2SRusty Russell 2072a8f9bfdfSHerbert Xu if (vlan_hlen) { 2073e0b46d0eSHerbert Xu int ret; 2074aff3d70aSJason Wang struct veth veth; 20751da177e4SLinus Torvalds 20766680ec68SJason Wang veth.h_vlan_proto = skb->vlan_proto; 2077df8a39deSJiri Pirko veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); 20781da177e4SLinus Torvalds 20796680ec68SJason Wang vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 20806680ec68SJason Wang 2081e0b46d0eSHerbert Xu ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); 2082e0b46d0eSHerbert Xu if (ret || !iov_iter_count(iter)) 20836680ec68SJason Wang goto done; 20846680ec68SJason Wang 2085e0b46d0eSHerbert Xu ret = copy_to_iter(&veth, sizeof(veth), iter); 2086e0b46d0eSHerbert Xu if (ret != sizeof(veth) || !iov_iter_count(iter)) 20876680ec68SJason Wang goto done; 20886680ec68SJason Wang } 20896680ec68SJason Wang 2090e0b46d0eSHerbert Xu skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); 20916680ec68SJason Wang 20926680ec68SJason Wang done: 2093608b9977SPaolo Abeni /* caller is in process context, */ 2094608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats); 2095608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp); 2096608b9977SPaolo Abeni stats->tx_packets++; 2097608b9977SPaolo Abeni stats->tx_bytes += skb->len + vlan_hlen; 2098608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp); 2099608b9977SPaolo Abeni put_cpu_ptr(tun->pcpu_stats); 21001da177e4SLinus Torvalds 21011da177e4SLinus Torvalds return total; 21021da177e4SLinus Torvalds } 21031da177e4SLinus Torvalds 2104fc72d1d5SJason Wang static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) 21051576d986SJason Wang { 21061576d986SJason Wang DECLARE_WAITQUEUE(wait, current); 2107fc72d1d5SJason Wang void *ptr = NULL; 2108f48cc6b2SJason Wang int error = 0; 21091576d986SJason Wang 2110fc72d1d5SJason Wang ptr = ptr_ring_consume(&tfile->tx_ring); 2111fc72d1d5SJason Wang if (ptr) 21121576d986SJason Wang goto out; 21131576d986SJason Wang if (noblock) { 2114f48cc6b2SJason Wang error = -EAGAIN; 21151576d986SJason Wang goto out; 21161576d986SJason Wang } 21171576d986SJason Wang 21181576d986SJason Wang add_wait_queue(&tfile->wq.wait, &wait); 21191576d986SJason Wang current->state = TASK_INTERRUPTIBLE; 21201576d986SJason Wang 21211576d986SJason Wang while (1) { 2122fc72d1d5SJason Wang ptr = ptr_ring_consume(&tfile->tx_ring); 2123fc72d1d5SJason Wang if (ptr) 21241576d986SJason Wang break; 21251576d986SJason Wang if (signal_pending(current)) { 2126f48cc6b2SJason Wang error = -ERESTARTSYS; 21271576d986SJason Wang break; 21281576d986SJason Wang } 21291576d986SJason Wang if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { 2130f48cc6b2SJason Wang error = -EFAULT; 21311576d986SJason Wang break; 21321576d986SJason Wang } 21331576d986SJason Wang 21341576d986SJason Wang schedule(); 21351576d986SJason Wang } 21361576d986SJason Wang 21371576d986SJason Wang current->state = TASK_RUNNING; 21381576d986SJason Wang remove_wait_queue(&tfile->wq.wait, &wait); 21391576d986SJason Wang 21401576d986SJason Wang out: 2141f48cc6b2SJason Wang *err = error; 2142fc72d1d5SJason Wang return ptr; 21431576d986SJason Wang } 21441576d986SJason Wang 214554f968d6SJason Wang static ssize_t tun_do_read(struct 
tun_struct *tun, struct tun_file *tfile, 21469b067034SAl Viro struct iov_iter *to, 2147fc72d1d5SJason Wang int noblock, void *ptr) 21481da177e4SLinus Torvalds { 21499b067034SAl Viro ssize_t ret; 21501576d986SJason Wang int err; 21511da177e4SLinus Torvalds 21523872baf6SRami Rosen tun_debug(KERN_INFO, tun, "tun_do_read\n"); 21531da177e4SLinus Torvalds 2154c33ee15bSWei Xu if (!iov_iter_count(to)) { 2155fc72d1d5SJason Wang tun_ptr_free(ptr); 21569b067034SAl Viro return 0; 2157c33ee15bSWei Xu } 21581da177e4SLinus Torvalds 2159fc72d1d5SJason Wang if (!ptr) { 21601576d986SJason Wang /* Read frames from ring */ 2161fc72d1d5SJason Wang ptr = tun_ring_recv(tfile, noblock, &err); 2162fc72d1d5SJason Wang if (!ptr) 2163957f094fSAlex Gartrell return err; 2164ac77cfd4SJason Wang } 2165e0b46d0eSHerbert Xu 21661ffcbc85SJesper Dangaard Brouer if (tun_is_xdp_frame(ptr)) { 21671ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); 2168fc72d1d5SJason Wang 21691ffcbc85SJesper Dangaard Brouer ret = tun_put_user_xdp(tun, tfile, xdpf, to); 217003993094SJesper Dangaard Brouer xdp_return_frame(xdpf); 2171fc72d1d5SJason Wang } else { 2172fc72d1d5SJason Wang struct sk_buff *skb = ptr; 2173fc72d1d5SJason Wang 21749b067034SAl Viro ret = tun_put_user(tun, tfile, skb, to); 2175f51a5e82SJason Wang if (unlikely(ret < 0)) 21761da177e4SLinus Torvalds kfree_skb(skb); 2177f51a5e82SJason Wang else 2178f51a5e82SJason Wang consume_skb(skb); 2179fc72d1d5SJason Wang } 21801da177e4SLinus Torvalds 218105c2828cSMichael S. Tsirkin return ret; 218205c2828cSMichael S. Tsirkin } 218305c2828cSMichael S. Tsirkin 21849b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) 218505c2828cSMichael S. Tsirkin { 218605c2828cSMichael S. Tsirkin struct file *file = iocb->ki_filp; 218705c2828cSMichael S. Tsirkin struct tun_file *tfile = file->private_data; 21889484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 21899b067034SAl Viro ssize_t len = iov_iter_count(to), ret; 219005c2828cSMichael S. Tsirkin 219105c2828cSMichael S. Tsirkin if (!tun) 219205c2828cSMichael S. Tsirkin return -EBADFD; 2193ac77cfd4SJason Wang ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL); 219442404c09SDavid S. Miller ret = min_t(ssize_t, ret, len); 2195d0b7da8aSZhi Yong Wu if (ret > 0) 2196d0b7da8aSZhi Yong Wu iocb->ki_pos = ret; 2197631ab46bSEric W. 
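/* Editor's note (illustrative): the clamp above means read() returns at
 * most the supplied iov size; poll() may also wake a reader spuriously,
 * in which case a non-blocking read yields -EAGAIN. A typical userspace
 * loop, with tunfd and buf assumed from setup:
 *
 *	struct pollfd pfd = { .fd = tunfd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		ssize_t n = read(tunfd, buf, sizeof(buf));
 *		if (n < 0 && errno == EAGAIN)
 *			continue;	// spurious wakeup
 *		// process n bytes of buf here
 *	}
 */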
Biederman tun_put(tun); 21981da177e4SLinus Torvalds return ret; 21991da177e4SLinus Torvalds } 22001da177e4SLinus Torvalds 2201cd5681d7SJason Wang static void tun_prog_free(struct rcu_head *rcu) 220296f84061SJason Wang { 2203cd5681d7SJason Wang struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu); 220496f84061SJason Wang 220596f84061SJason Wang bpf_prog_destroy(prog->prog); 220696f84061SJason Wang kfree(prog); 220796f84061SJason Wang } 220896f84061SJason Wang 22099d6474e4SJason Wang static int __tun_set_ebpf(struct tun_struct *tun, 22109d6474e4SJason Wang struct tun_prog __rcu **prog_p, 221196f84061SJason Wang struct bpf_prog *prog) 221296f84061SJason Wang { 2213cd5681d7SJason Wang struct tun_prog *old, *new = NULL; 221496f84061SJason Wang 221596f84061SJason Wang if (prog) { 221696f84061SJason Wang new = kmalloc(sizeof(*new), GFP_KERNEL); 221796f84061SJason Wang if (!new) 221896f84061SJason Wang return -ENOMEM; 221996f84061SJason Wang new->prog = prog; 222096f84061SJason Wang } 222196f84061SJason Wang 2222124da8f6SJason Wang spin_lock_bh(&tun->lock); 2223cd5681d7SJason Wang old = rcu_dereference_protected(*prog_p, 2224124da8f6SJason Wang lockdep_is_held(&tun->lock)); 2225cd5681d7SJason Wang rcu_assign_pointer(*prog_p, new); 2226124da8f6SJason Wang spin_unlock_bh(&tun->lock); 222796f84061SJason Wang 222896f84061SJason Wang if (old) 2229cd5681d7SJason Wang call_rcu(&old->rcu, tun_prog_free); 223096f84061SJason Wang 223196f84061SJason Wang return 0; 223296f84061SJason Wang } 223396f84061SJason Wang 223496442e42SJason Wang static void tun_free_netdev(struct net_device *dev) 223596442e42SJason Wang { 223696442e42SJason Wang struct tun_struct *tun = netdev_priv(dev); 223796442e42SJason Wang 22384008e97fSJason Wang BUG_ON(!(list_empty(&tun->disabled))); 2239608b9977SPaolo Abeni free_percpu(tun->pcpu_stats); 224096442e42SJason Wang tun_flow_uninit(tun); 22415dbbaf2dSPaul Moore security_tun_dev_free_security(tun->security); 2242cd5681d7SJason Wang __tun_set_ebpf(tun, &tun->steering_prog, NULL); 2243aff3d70aSJason Wang __tun_set_ebpf(tun, &tun->filter_prog, NULL); 224496442e42SJason Wang } 224596442e42SJason Wang 22461da177e4SLinus Torvalds static void tun_setup(struct net_device *dev) 22471da177e4SLinus Torvalds { 22481da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 22491da177e4SLinus Torvalds 22500625c883SEric W. Biederman tun->owner = INVALID_UID; 22510625c883SEric W. Biederman tun->group = INVALID_GID; 22524e24f2ddSChas Williams tun_default_link_ksettings(dev, &tun->link_ksettings); 22531da177e4SLinus Torvalds 22541da177e4SLinus Torvalds dev->ethtool_ops = &tun_ethtool_ops; 2255cf124db5SDavid S. Miller dev->needs_free_netdev = true; 2256cf124db5SDavid S. Miller dev->priv_destructor = tun_free_netdev; 2257016adb72SJason Wang /* We prefer our own queue length */ 2258016adb72SJason Wang dev->tx_queue_len = TUN_READQ_SIZE; 22591da177e4SLinus Torvalds } 22601da177e4SLinus Torvalds 2261f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting tun or tap 2262f019a7a5SEric W. Biederman * device with netlink. 2263f019a7a5SEric W. Biederman */ 2264a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[], 2265a8b8a889SMatthias Schiffer struct netlink_ext_ack *extack) 2266f019a7a5SEric W. Biederman { 2267f019a7a5SEric W. Biederman return -EINVAL; 2268f019a7a5SEric W. Biederman } 2269f019a7a5SEric W. 
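/* Editor's example (not part of the driver): because tun_validate()
 * rejects every attribute set, tun/tap links cannot be created over
 * netlink; they are created with TUNSETIFF and only deleted or dumped
 * via netlink. A minimal creation sketch, illustrative names only:
 *
 *	struct ifreq ifr = { 0 };
 *	int fd = open("/dev/net/tun", O_RDWR);
 *
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
 *	strncpy(ifr.ifr_name, "tap%d", IFNAMSIZ - 1);
 *	if (ioctl(fd, TUNSETIFF, &ifr) == 0)
 *		printf("created %s\n", ifr.ifr_name);	// kernel fills in name
 */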
Biederman 22701ec010e7SSabrina Dubroca static size_t tun_get_size(const struct net_device *dev) 22711ec010e7SSabrina Dubroca { 22721ec010e7SSabrina Dubroca BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t)); 22731ec010e7SSabrina Dubroca BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t)); 22741ec010e7SSabrina Dubroca 22751ec010e7SSabrina Dubroca return nla_total_size(sizeof(uid_t)) + /* OWNER */ 22761ec010e7SSabrina Dubroca nla_total_size(sizeof(gid_t)) + /* GROUP */ 22771ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* TYPE */ 22781ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* PI */ 22791ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* VNET_HDR */ 22801ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* PERSIST */ 22811ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */ 22821ec010e7SSabrina Dubroca nla_total_size(sizeof(u32)) + /* NUM_QUEUES */ 22831ec010e7SSabrina Dubroca nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */ 22841ec010e7SSabrina Dubroca 0; 22851ec010e7SSabrina Dubroca } 22861ec010e7SSabrina Dubroca 22871ec010e7SSabrina Dubroca static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev) 22881ec010e7SSabrina Dubroca { 22891ec010e7SSabrina Dubroca struct tun_struct *tun = netdev_priv(dev); 22901ec010e7SSabrina Dubroca 22911ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK)) 22921ec010e7SSabrina Dubroca goto nla_put_failure; 22931ec010e7SSabrina Dubroca if (uid_valid(tun->owner) && 22941ec010e7SSabrina Dubroca nla_put_u32(skb, IFLA_TUN_OWNER, 22951ec010e7SSabrina Dubroca from_kuid_munged(current_user_ns(), tun->owner))) 22961ec010e7SSabrina Dubroca goto nla_put_failure; 22971ec010e7SSabrina Dubroca if (gid_valid(tun->group) && 22981ec010e7SSabrina Dubroca nla_put_u32(skb, IFLA_TUN_GROUP, 22991ec010e7SSabrina Dubroca from_kgid_munged(current_user_ns(), tun->group))) 23001ec010e7SSabrina Dubroca goto nla_put_failure; 23011ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI))) 23021ec010e7SSabrina Dubroca goto nla_put_failure; 23031ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR))) 23041ec010e7SSabrina Dubroca goto nla_put_failure; 23051ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST))) 23061ec010e7SSabrina Dubroca goto nla_put_failure; 23071ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE, 23081ec010e7SSabrina Dubroca !!(tun->flags & IFF_MULTI_QUEUE))) 23091ec010e7SSabrina Dubroca goto nla_put_failure; 23101ec010e7SSabrina Dubroca if (tun->flags & IFF_MULTI_QUEUE) { 23111ec010e7SSabrina Dubroca if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues)) 23121ec010e7SSabrina Dubroca goto nla_put_failure; 23131ec010e7SSabrina Dubroca if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES, 23141ec010e7SSabrina Dubroca tun->numdisabled)) 23151ec010e7SSabrina Dubroca goto nla_put_failure; 23161ec010e7SSabrina Dubroca } 23171ec010e7SSabrina Dubroca 23181ec010e7SSabrina Dubroca return 0; 23191ec010e7SSabrina Dubroca 23201ec010e7SSabrina Dubroca nla_put_failure: 23211ec010e7SSabrina Dubroca return -EMSGSIZE; 23221ec010e7SSabrina Dubroca } 23231ec010e7SSabrina Dubroca 2324f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = { 2325f019a7a5SEric W. Biederman .kind = DRV_NAME, 2326f019a7a5SEric W. Biederman .priv_size = sizeof(struct tun_struct), 2327f019a7a5SEric W. Biederman .setup = tun_setup, 2328f019a7a5SEric W. 
Biederman .validate = tun_validate, 23291ec010e7SSabrina Dubroca .get_size = tun_get_size, 23301ec010e7SSabrina Dubroca .fill_info = tun_fill_info, 2331f019a7a5SEric W. Biederman }; 2332f019a7a5SEric W. Biederman 233333dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk) 233433dccbb0SHerbert Xu { 233554f968d6SJason Wang struct tun_file *tfile; 233643815482SEric Dumazet wait_queue_head_t *wqueue; 233733dccbb0SHerbert Xu 233833dccbb0SHerbert Xu if (!sock_writeable(sk)) 233933dccbb0SHerbert Xu return; 234033dccbb0SHerbert Xu 23419cd3e072SEric Dumazet if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) 234233dccbb0SHerbert Xu return; 234333dccbb0SHerbert Xu 234443815482SEric Dumazet wqueue = sk_sleep(sk); 234543815482SEric Dumazet if (wqueue && waitqueue_active(wqueue)) 2346a9a08845SLinus Torvalds wake_up_interruptible_sync_poll(wqueue, EPOLLOUT | 2347a9a08845SLinus Torvalds EPOLLWRNORM | EPOLLWRBAND); 2348c722c625SHerbert Xu 234954f968d6SJason Wang tfile = container_of(sk, struct tun_file, sk); 235054f968d6SJason Wang kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); 235133dccbb0SHerbert Xu } 235233dccbb0SHerbert Xu 23531b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) 235405c2828cSMichael S. Tsirkin { 235554f968d6SJason Wang int ret; 235654f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 23579484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 235854f968d6SJason Wang 235954f968d6SJason Wang if (!tun) 236054f968d6SJason Wang return -EBADFD; 2361f5ff53b4SAl Viro 2362c0371da6SAl Viro ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, 23635503fcecSJason Wang m->msg_flags & MSG_DONTWAIT, 23645503fcecSJason Wang m->msg_flags & MSG_MORE); 236554f968d6SJason Wang tun_put(tun); 236654f968d6SJason Wang return ret; 236705c2828cSMichael S. Tsirkin } 236805c2828cSMichael S. Tsirkin 23691b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, 237005c2828cSMichael S. Tsirkin int flags) 237105c2828cSMichael S. Tsirkin { 237254f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 23739484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 2374fc72d1d5SJason Wang void *ptr = m->msg_control; 237505c2828cSMichael S. Tsirkin int ret; 237654f968d6SJason Wang 2377c33ee15bSWei Xu if (!tun) { 2378c33ee15bSWei Xu ret = -EBADFD; 2379fc72d1d5SJason Wang goto out_free; 2380c33ee15bSWei Xu } 238154f968d6SJason Wang 2382eda29772SRichard Cochran if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { 23833811ae76SGao feng ret = -EINVAL; 2384c33ee15bSWei Xu goto out_put_tun; 23853811ae76SGao feng } 2386eda29772SRichard Cochran if (flags & MSG_ERRQUEUE) { 2387eda29772SRichard Cochran ret = sock_recv_errqueue(sock->sk, m, total_len, 2388eda29772SRichard Cochran SOL_PACKET, TUN_TX_TIMESTAMP); 2389eda29772SRichard Cochran goto out; 2390eda29772SRichard Cochran } 2391fc72d1d5SJason Wang ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr); 239287897931SAlex Gartrell if (ret > (ssize_t)total_len) { 239342404c09SDavid S. Miller m->msg_flags |= MSG_TRUNC; 239442404c09SDavid S. Miller ret = flags & MSG_TRUNC ? ret : total_len; 239542404c09SDavid S. Miller } 23963811ae76SGao feng out: 239754f968d6SJason Wang tun_put(tun); 239805c2828cSMichael S. 
Tsirkin return ret; 2399c33ee15bSWei Xu 2400c33ee15bSWei Xu out_put_tun: 2401c33ee15bSWei Xu tun_put(tun); 2402fc72d1d5SJason Wang out_free: 2403fc72d1d5SJason Wang tun_ptr_free(ptr); 2404c33ee15bSWei Xu return ret; 240505c2828cSMichael S. Tsirkin } 240605c2828cSMichael S. Tsirkin 2407fc72d1d5SJason Wang static int tun_ptr_peek_len(void *ptr) 2408fc72d1d5SJason Wang { 2409fc72d1d5SJason Wang if (likely(ptr)) { 24101ffcbc85SJesper Dangaard Brouer if (tun_is_xdp_frame(ptr)) { 24111ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); 2412fc72d1d5SJason Wang 24131ffcbc85SJesper Dangaard Brouer return xdpf->len; 2414fc72d1d5SJason Wang } 2415fc72d1d5SJason Wang return __skb_array_len_with_tag(ptr); 2416fc72d1d5SJason Wang } else { 2417fc72d1d5SJason Wang return 0; 2418fc72d1d5SJason Wang } 2419fc72d1d5SJason Wang } 2420fc72d1d5SJason Wang 24211576d986SJason Wang static int tun_peek_len(struct socket *sock) 24221576d986SJason Wang { 24231576d986SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 24241576d986SJason Wang struct tun_struct *tun; 24251576d986SJason Wang int ret = 0; 24261576d986SJason Wang 24279484dc74Syuan linyu tun = tun_get(tfile); 24281576d986SJason Wang if (!tun) 24291576d986SJason Wang return 0; 24301576d986SJason Wang 2431fc72d1d5SJason Wang ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len); 24321576d986SJason Wang tun_put(tun); 24331576d986SJason Wang 24341576d986SJason Wang return ret; 24351576d986SJason Wang } 24361576d986SJason Wang 243705c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */ 243805c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = { 24391576d986SJason Wang .peek_len = tun_peek_len, 244005c2828cSMichael S. Tsirkin .sendmsg = tun_sendmsg, 244105c2828cSMichael S. Tsirkin .recvmsg = tun_recvmsg, 244205c2828cSMichael S. Tsirkin }; 244305c2828cSMichael S. Tsirkin 244433dccbb0SHerbert Xu static struct proto tun_proto = { 244533dccbb0SHerbert Xu .name = "tun", 244633dccbb0SHerbert Xu .owner = THIS_MODULE, 244754f968d6SJason Wang .obj_size = sizeof(struct tun_file), 244833dccbb0SHerbert Xu }; 2449f019a7a5SEric W. Biederman 2450980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun) 2451980c9e8cSDavid Woodhouse { 2452031f5e03SMichael S. Tsirkin return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); 2453980c9e8cSDavid Woodhouse } 2454980c9e8cSDavid Woodhouse 2455980c9e8cSDavid Woodhouse static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr, 2456980c9e8cSDavid Woodhouse char *buf) 2457980c9e8cSDavid Woodhouse { 2458980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2459980c9e8cSDavid Woodhouse return sprintf(buf, "0x%x\n", tun_flags(tun)); 2460980c9e8cSDavid Woodhouse } 2461980c9e8cSDavid Woodhouse 2462980c9e8cSDavid Woodhouse static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, 2463980c9e8cSDavid Woodhouse char *buf) 2464980c9e8cSDavid Woodhouse { 2465980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 24660625c883SEric W. Biederman return uid_valid(tun->owner)? 24670625c883SEric W. Biederman sprintf(buf, "%u\n", 24680625c883SEric W. Biederman from_kuid_munged(current_user_ns(), tun->owner)): 24690625c883SEric W. 
Biederman sprintf(buf, "-1\n"); 2470980c9e8cSDavid Woodhouse } 2471980c9e8cSDavid Woodhouse 2472980c9e8cSDavid Woodhouse static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, 2473980c9e8cSDavid Woodhouse char *buf) 2474980c9e8cSDavid Woodhouse { 2475980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 24760625c883SEric W. Biederman return gid_valid(tun->group) ? 24770625c883SEric W. Biederman sprintf(buf, "%u\n", 24780625c883SEric W. Biederman from_kgid_munged(current_user_ns(), tun->group)): 24790625c883SEric W. Biederman sprintf(buf, "-1\n"); 2480980c9e8cSDavid Woodhouse } 2481980c9e8cSDavid Woodhouse 2482980c9e8cSDavid Woodhouse static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); 2483980c9e8cSDavid Woodhouse static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL); 2484980c9e8cSDavid Woodhouse static DEVICE_ATTR(group, 0444, tun_show_group, NULL); 2485980c9e8cSDavid Woodhouse 2486c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = { 2487c4d33e24STakashi Iwai &dev_attr_tun_flags.attr, 2488c4d33e24STakashi Iwai &dev_attr_owner.attr, 2489c4d33e24STakashi Iwai &dev_attr_group.attr, 2490c4d33e24STakashi Iwai NULL 2491c4d33e24STakashi Iwai }; 2492c4d33e24STakashi Iwai 2493c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = { 2494c4d33e24STakashi Iwai .attrs = tun_dev_attrs 2495c4d33e24STakashi Iwai }; 2496c4d33e24STakashi Iwai 2497d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 24981da177e4SLinus Torvalds { 24991da177e4SLinus Torvalds struct tun_struct *tun; 250054f968d6SJason Wang struct tun_file *tfile = file->private_data; 25011da177e4SLinus Torvalds struct net_device *dev; 25021da177e4SLinus Torvalds int err; 25031da177e4SLinus Torvalds 25047c0c3b1aSJason Wang if (tfile->detached) 25057c0c3b1aSJason Wang return -EINVAL; 25067c0c3b1aSJason Wang 250790e33d45SPetar Penkov if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { 250890e33d45SPetar Penkov if (!capable(CAP_NET_ADMIN)) 250990e33d45SPetar Penkov return -EPERM; 251090e33d45SPetar Penkov 251190e33d45SPetar Penkov if (!(ifr->ifr_flags & IFF_NAPI) || 251290e33d45SPetar Penkov (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) 251390e33d45SPetar Penkov return -EINVAL; 251490e33d45SPetar Penkov } 251590e33d45SPetar Penkov 251674a3e5a7SEric W. Biederman dev = __dev_get_by_name(net, ifr->ifr_name); 251774a3e5a7SEric W. Biederman if (dev) { 2518f85ba780SDavid Woodhouse if (ifr->ifr_flags & IFF_TUN_EXCL) 2519f85ba780SDavid Woodhouse return -EBUSY; 252074a3e5a7SEric W. Biederman if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) 252174a3e5a7SEric W. Biederman tun = netdev_priv(dev); 252274a3e5a7SEric W. Biederman else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) 252374a3e5a7SEric W. Biederman tun = netdev_priv(dev); 252474a3e5a7SEric W. Biederman else 252574a3e5a7SEric W. Biederman return -EINVAL; 252674a3e5a7SEric W. Biederman 25278e6d91aeSJason Wang if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != 252840630b82SMichael S. 
Tsirkin !!(tun->flags & IFF_MULTI_QUEUE)) 25298e6d91aeSJason Wang return -EINVAL; 25308e6d91aeSJason Wang 2531cde8b15fSJason Wang if (tun_not_capable(tun)) 25322b980dbdSPaul Moore return -EPERM; 25335dbbaf2dSPaul Moore err = security_tun_dev_open(tun->security); 25342b980dbdSPaul Moore if (err < 0) 25352b980dbdSPaul Moore return err; 25362b980dbdSPaul Moore 253794317099SPetar Penkov err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, 2538*af3fb24eSEric Dumazet ifr->ifr_flags & IFF_NAPI, 2539*af3fb24eSEric Dumazet ifr->ifr_flags & IFF_NAPI_FRAGS); 2540a7385ba2SEric W. Biederman if (err < 0) 2541a7385ba2SEric W. Biederman return err; 25424008e97fSJason Wang 254340630b82SMichael S. Tsirkin if (tun->flags & IFF_MULTI_QUEUE && 2544e8dbad66SJason Wang (tun->numqueues + tun->numdisabled > 1)) { 2545e8dbad66SJason Wang /* One or more queue has already been attached, no need 2546e8dbad66SJason Wang * to initialize the device again. 2547e8dbad66SJason Wang */ 254883c1f36fSSabrina Dubroca netdev_state_change(dev); 2549e8dbad66SJason Wang return 0; 2550e8dbad66SJason Wang } 25519fffc5c6SSabrina Dubroca 25529fffc5c6SSabrina Dubroca tun->flags = (tun->flags & ~TUN_FEATURES) | 25539fffc5c6SSabrina Dubroca (ifr->ifr_flags & TUN_FEATURES); 255483c1f36fSSabrina Dubroca 255583c1f36fSSabrina Dubroca netdev_state_change(dev); 255683c1f36fSSabrina Dubroca } else { 25571da177e4SLinus Torvalds char *name; 25581da177e4SLinus Torvalds unsigned long flags = 0; 2559edfb6a14SJason Wang int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? 2560edfb6a14SJason Wang MAX_TAP_QUEUES : 1; 25611da177e4SLinus Torvalds 2562c260b772SEric W. Biederman if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2563ca6bb5d7SDavid Woodhouse return -EPERM; 25642b980dbdSPaul Moore err = security_tun_dev_create(); 25652b980dbdSPaul Moore if (err < 0) 25662b980dbdSPaul Moore return err; 2567ca6bb5d7SDavid Woodhouse 25681da177e4SLinus Torvalds /* Set dev type */ 25691da177e4SLinus Torvalds if (ifr->ifr_flags & IFF_TUN) { 25701da177e4SLinus Torvalds /* TUN device */ 257140630b82SMichael S. Tsirkin flags |= IFF_TUN; 25721da177e4SLinus Torvalds name = "tun%d"; 25731da177e4SLinus Torvalds } else if (ifr->ifr_flags & IFF_TAP) { 25741da177e4SLinus Torvalds /* TAP device */ 257540630b82SMichael S. Tsirkin flags |= IFF_TAP; 25761da177e4SLinus Torvalds name = "tap%d"; 25771da177e4SLinus Torvalds } else 257836989b90SKusanagi Kouichi return -EINVAL; 25791da177e4SLinus Torvalds 25801da177e4SLinus Torvalds if (*ifr->ifr_name) 25811da177e4SLinus Torvalds name = ifr->ifr_name; 25821da177e4SLinus Torvalds 2583c8d68e6bSJason Wang dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, 2584c835a677STom Gundersen NET_NAME_UNKNOWN, tun_setup, queues, 2585c835a677STom Gundersen queues); 2586edfb6a14SJason Wang 25871da177e4SLinus Torvalds if (!dev) 25881da177e4SLinus Torvalds return -ENOMEM; 25890ad646c8SCong Wang err = dev_get_valid_name(net, dev, name); 25905c25f65fSJulien Gomes if (err < 0) 25910ad646c8SCong Wang goto err_free_dev; 25921da177e4SLinus Torvalds 2593fc54c658SPavel Emelyanov dev_net_set(dev, net); 2594f019a7a5SEric W. Biederman dev->rtnl_link_ops = &tun_link_ops; 2595fb7589a1SPavel Emelyanov dev->ifindex = tfile->ifindex; 2596c4d33e24STakashi Iwai dev->sysfs_groups[0] = &tun_attr_group; 2597758e43b7SStephen Hemminger 25981da177e4SLinus Torvalds tun = netdev_priv(dev); 25991da177e4SLinus Torvalds tun->dev = dev; 26001da177e4SLinus Torvalds tun->flags = flags; 2601f271b2ccSMax Krasnyansky tun->txflt.count = 0; 2602d9d52b51SMichael S. 
Tsirkin tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); 26031da177e4SLinus Torvalds 2604eaea34b2SPaolo Abeni tun->align = NET_SKB_PAD; 260554f968d6SJason Wang tun->filter_attached = false; 260654f968d6SJason Wang tun->sndbuf = tfile->socket.sk->sk_sndbuf; 26075503fcecSJason Wang tun->rx_batched = 0; 260896f84061SJason Wang RCU_INIT_POINTER(tun->steering_prog, NULL); 260933dccbb0SHerbert Xu 2610608b9977SPaolo Abeni tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); 2611608b9977SPaolo Abeni if (!tun->pcpu_stats) { 2612608b9977SPaolo Abeni err = -ENOMEM; 2613608b9977SPaolo Abeni goto err_free_dev; 2614608b9977SPaolo Abeni } 2615608b9977SPaolo Abeni 261696442e42SJason Wang spin_lock_init(&tun->lock); 261796442e42SJason Wang 26185dbbaf2dSPaul Moore err = security_tun_dev_alloc_security(&tun->security); 26195dbbaf2dSPaul Moore if (err < 0) 2620608b9977SPaolo Abeni goto err_free_stat; 26212b980dbdSPaul Moore 26221da177e4SLinus Torvalds tun_net_init(dev); 2623944a1376SPavel Emelyanov tun_flow_init(tun); 262496442e42SJason Wang 262588255375SMichał Mirosław dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | 26266680ec68SJason Wang TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | 26276680ec68SJason Wang NETIF_F_HW_VLAN_STAG_TX; 26282a2bbf17SPaolo Abeni dev->features = dev->hw_features | NETIF_F_LLTX; 26296671b224SFernando Luis Vazquez Cao dev->vlan_features = dev->features & 26306671b224SFernando Luis Vazquez Cao ~(NETIF_F_HW_VLAN_CTAG_TX | 26316671b224SFernando Luis Vazquez Cao NETIF_F_HW_VLAN_STAG_TX); 263288255375SMichał Mirosław 26339fffc5c6SSabrina Dubroca tun->flags = (tun->flags & ~TUN_FEATURES) | 26349fffc5c6SSabrina Dubroca (ifr->ifr_flags & TUN_FEATURES); 26359fffc5c6SSabrina Dubroca 26364008e97fSJason Wang INIT_LIST_HEAD(&tun->disabled); 2637*af3fb24eSEric Dumazet err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI, 2638*af3fb24eSEric Dumazet ifr->ifr_flags & IFF_NAPI_FRAGS); 2639eb0fb363SJason Wang if (err < 0) 2640662ca437SJason Wang goto err_free_flow; 2641eb0fb363SJason Wang 26421da177e4SLinus Torvalds err = register_netdevice(tun->dev); 26431da177e4SLinus Torvalds if (err < 0) 2644662ca437SJason Wang goto err_detach; 2645af668b3cSMichael S. Tsirkin } 2646980c9e8cSDavid Woodhouse 2647eb0fb363SJason Wang netif_carrier_on(tun->dev); 26481da177e4SLinus Torvalds 26496b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_set_iff\n"); 26501da177e4SLinus Torvalds 2651e35259a9SMax Krasnyansky /* Make sure persistent devices do not get stuck in 2652e35259a9SMax Krasnyansky * xoff state. 
2653e35259a9SMax Krasnyansky */ 2654e35259a9SMax Krasnyansky if (netif_running(tun->dev)) 2655c8d68e6bSJason Wang netif_tx_wake_all_queues(tun->dev); 2656e35259a9SMax Krasnyansky 26571da177e4SLinus Torvalds strcpy(ifr->ifr_name, tun->dev->name); 26581da177e4SLinus Torvalds return 0; 26591da177e4SLinus Torvalds 2660662ca437SJason Wang err_detach: 2661662ca437SJason Wang tun_detach_all(dev); 2662ff244c6bSEric Dumazet /* register_netdevice() already called tun_free_netdev() */ 2663ff244c6bSEric Dumazet goto err_free_dev; 2664ff244c6bSEric Dumazet 2665662ca437SJason Wang err_free_flow: 2666662ca437SJason Wang tun_flow_uninit(tun); 2667662ca437SJason Wang security_tun_dev_free_security(tun->security); 2668608b9977SPaolo Abeni err_free_stat: 2669608b9977SPaolo Abeni free_percpu(tun->pcpu_stats); 26701da177e4SLinus Torvalds err_free_dev: 26711da177e4SLinus Torvalds free_netdev(dev); 26721da177e4SLinus Torvalds return err; 26731da177e4SLinus Torvalds } 26741da177e4SLinus Torvalds 26759ce99cf6SRami Rosen static void tun_get_iff(struct net *net, struct tun_struct *tun, 2676876bfd4dSHerbert Xu struct ifreq *ifr) 2677e3b99556SMark McLoughlin { 26786b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_get_iff\n"); 2679e3b99556SMark McLoughlin 2680e3b99556SMark McLoughlin strcpy(ifr->ifr_name, tun->dev->name); 2681e3b99556SMark McLoughlin 2682980c9e8cSDavid Woodhouse ifr->ifr_flags = tun_flags(tun); 2683e3b99556SMark McLoughlin 2684e3b99556SMark McLoughlin } 2685e3b99556SMark McLoughlin 26865228ddc9SRusty Russell /* This is like a cut-down ethtool ops, except done via tun fd so no 26875228ddc9SRusty Russell * privs required. */ 268888255375SMichał Mirosław static int set_offload(struct tun_struct *tun, unsigned long arg) 26895228ddc9SRusty Russell { 2690c8f44affSMichał Mirosław netdev_features_t features = 0; 26915228ddc9SRusty Russell 26925228ddc9SRusty Russell if (arg & TUN_F_CSUM) { 269388255375SMichał Mirosław features |= NETIF_F_HW_CSUM; 26945228ddc9SRusty Russell arg &= ~TUN_F_CSUM; 26955228ddc9SRusty Russell 26965228ddc9SRusty Russell if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { 26975228ddc9SRusty Russell if (arg & TUN_F_TSO_ECN) { 26985228ddc9SRusty Russell features |= NETIF_F_TSO_ECN; 26995228ddc9SRusty Russell arg &= ~TUN_F_TSO_ECN; 27005228ddc9SRusty Russell } 27015228ddc9SRusty Russell if (arg & TUN_F_TSO4) 27025228ddc9SRusty Russell features |= NETIF_F_TSO; 27035228ddc9SRusty Russell if (arg & TUN_F_TSO6) 27045228ddc9SRusty Russell features |= NETIF_F_TSO6; 27055228ddc9SRusty Russell arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 27065228ddc9SRusty Russell } 27070c19f846SWillem de Bruijn 27080c19f846SWillem de Bruijn arg &= ~TUN_F_UFO; 27095228ddc9SRusty Russell } 27105228ddc9SRusty Russell 27115228ddc9SRusty Russell /* This gives the user a way to test for new features in future by 27125228ddc9SRusty Russell * trying to set them. 
*/ 27135228ddc9SRusty Russell if (arg) 27145228ddc9SRusty Russell return -EINVAL; 27155228ddc9SRusty Russell 271688255375SMichał Mirosław tun->set_features = features; 271709050957SYaroslav Isakov tun->dev->wanted_features &= ~TUN_USER_FEATURES; 271809050957SYaroslav Isakov tun->dev->wanted_features |= features; 271988255375SMichał Mirosław netdev_update_features(tun->dev); 27205228ddc9SRusty Russell 27215228ddc9SRusty Russell return 0; 27225228ddc9SRusty Russell } 27235228ddc9SRusty Russell 2724c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n) 2725c8d68e6bSJason Wang { 2726c8d68e6bSJason Wang int i; 2727c8d68e6bSJason Wang struct tun_file *tfile; 2728c8d68e6bSJason Wang 2729c8d68e6bSJason Wang for (i = 0; i < n; i++) { 2730b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 27318ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 27328ced425eSHannes Frederic Sowa sk_detach_filter(tfile->socket.sk); 27338ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2734c8d68e6bSJason Wang } 2735c8d68e6bSJason Wang 2736c8d68e6bSJason Wang tun->filter_attached = false; 2737c8d68e6bSJason Wang } 2738c8d68e6bSJason Wang 2739c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun) 2740c8d68e6bSJason Wang { 2741c8d68e6bSJason Wang int i, ret = 0; 2742c8d68e6bSJason Wang struct tun_file *tfile; 2743c8d68e6bSJason Wang 2744c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2745b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 27468ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 27478ced425eSHannes Frederic Sowa ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); 27488ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2749c8d68e6bSJason Wang if (ret) { 2750c8d68e6bSJason Wang tun_detach_filter(tun, i); 2751c8d68e6bSJason Wang return ret; 2752c8d68e6bSJason Wang } 2753c8d68e6bSJason Wang } 2754c8d68e6bSJason Wang 2755c8d68e6bSJason Wang tun->filter_attached = true; 2756c8d68e6bSJason Wang return ret; 2757c8d68e6bSJason Wang } 2758c8d68e6bSJason Wang 2759c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun) 2760c8d68e6bSJason Wang { 2761c8d68e6bSJason Wang struct tun_file *tfile; 2762c8d68e6bSJason Wang int i; 2763c8d68e6bSJason Wang 2764c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2765b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 2766c8d68e6bSJason Wang tfile->socket.sk->sk_sndbuf = tun->sndbuf; 2767c8d68e6bSJason Wang } 2768c8d68e6bSJason Wang } 2769c8d68e6bSJason Wang 2770cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr) 2771cde8b15fSJason Wang { 2772cde8b15fSJason Wang struct tun_file *tfile = file->private_data; 2773cde8b15fSJason Wang struct tun_struct *tun; 2774cde8b15fSJason Wang int ret = 0; 2775cde8b15fSJason Wang 2776cde8b15fSJason Wang rtnl_lock(); 2777cde8b15fSJason Wang 2778cde8b15fSJason Wang if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { 27794008e97fSJason Wang tun = tfile->detached; 27805dbbaf2dSPaul Moore if (!tun) { 2781cde8b15fSJason Wang ret = -EINVAL; 27825dbbaf2dSPaul Moore goto unlock; 27835dbbaf2dSPaul Moore } 27845dbbaf2dSPaul Moore ret = security_tun_dev_attach_queue(tun->security); 27855dbbaf2dSPaul Moore if (ret < 0) 27865dbbaf2dSPaul Moore goto unlock; 2787*af3fb24eSEric Dumazet ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, 2788*af3fb24eSEric Dumazet tun->flags & IFF_NAPI_FRAGS); 27894008e97fSJason Wang } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 2790b8deabd3SJason Wang tun = 
rtnl_dereference(tfile->tun); 279140630b82SMichael S. Tsirkin if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) 27924008e97fSJason Wang ret = -EINVAL; 2793cde8b15fSJason Wang else 27944008e97fSJason Wang __tun_detach(tfile, false); 27954008e97fSJason Wang } else 2796cde8b15fSJason Wang ret = -EINVAL; 2797cde8b15fSJason Wang 279883c1f36fSSabrina Dubroca if (ret >= 0) 279983c1f36fSSabrina Dubroca netdev_state_change(tun->dev); 280083c1f36fSSabrina Dubroca 28015dbbaf2dSPaul Moore unlock: 2802cde8b15fSJason Wang rtnl_unlock(); 2803cde8b15fSJason Wang return ret; 2804cde8b15fSJason Wang } 2805cde8b15fSJason Wang 2806cd5681d7SJason Wang static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p, 2807cd5681d7SJason Wang void __user *data) 280896f84061SJason Wang { 280996f84061SJason Wang struct bpf_prog *prog; 281096f84061SJason Wang int fd; 281196f84061SJason Wang 281296f84061SJason Wang if (copy_from_user(&fd, data, sizeof(fd))) 281396f84061SJason Wang return -EFAULT; 281496f84061SJason Wang 281596f84061SJason Wang if (fd == -1) { 281696f84061SJason Wang prog = NULL; 281796f84061SJason Wang } else { 281896f84061SJason Wang prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); 281996f84061SJason Wang if (IS_ERR(prog)) 282096f84061SJason Wang return PTR_ERR(prog); 282196f84061SJason Wang } 282296f84061SJason Wang 2823cd5681d7SJason Wang return __tun_set_ebpf(tun, prog_p, prog); 282496f84061SJason Wang } 282596f84061SJason Wang 282650857e2aSArnd Bergmann static long __tun_chr_ioctl(struct file *file, unsigned int cmd, 282750857e2aSArnd Bergmann unsigned long arg, int ifreq_len) 28281da177e4SLinus Torvalds { 282936b50babSEric W. Biederman struct tun_file *tfile = file->private_data; 2830f663706aSKirill Tkhai struct net *net = sock_net(&tfile->sk); 2831631ab46bSEric W. Biederman struct tun_struct *tun; 28321da177e4SLinus Torvalds void __user* argp = (void __user*)arg; 28331da177e4SLinus Torvalds struct ifreq ifr; 28340625c883SEric W. Biederman kuid_t owner; 28350625c883SEric W. Biederman kgid_t group; 283633dccbb0SHerbert Xu int sndbuf; 2837d9d52b51SMichael S. Tsirkin int vnet_hdr_sz; 2838fb7589a1SPavel Emelyanov unsigned int ifindex; 28391cf8e410SMichael S. Tsirkin int le; 2840f271b2ccSMax Krasnyansky int ret; 284183c1f36fSSabrina Dubroca bool do_notify = false; 28421da177e4SLinus Torvalds 2843f2780d6dSKirill Tkhai if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || 2844f2780d6dSKirill Tkhai (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) { 284550857e2aSArnd Bergmann if (copy_from_user(&ifr, argp, ifreq_len)) 28461da177e4SLinus Torvalds return -EFAULT; 28478bbb1813SDavid S. Miller } else { 2848a117dacdSMathias Krause memset(&ifr, 0, sizeof(ifr)); 28498bbb1813SDavid S. Miller } 2850631ab46bSEric W. Biederman if (cmd == TUNGETFEATURES) { 2851631ab46bSEric W. Biederman /* Currently this just means: "what IFF flags are valid?". 2852631ab46bSEric W. Biederman * This is needed because we never checked for invalid flags on 2853031f5e03SMichael S. Tsirkin * TUNSETIFF. 2854031f5e03SMichael S. Tsirkin */ 2855031f5e03SMichael S. Tsirkin return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES, 2856631ab46bSEric W. 
Biederman (unsigned int __user*)argp); 2857f663706aSKirill Tkhai } else if (cmd == TUNSETQUEUE) { 2858cde8b15fSJason Wang return tun_set_queue(file, &ifr); 2859f663706aSKirill Tkhai } else if (cmd == SIOCGSKNS) { 2860f663706aSKirill Tkhai if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2861f663706aSKirill Tkhai return -EPERM; 2862f663706aSKirill Tkhai return open_related_ns(&net->ns, get_net_ns); 2863f663706aSKirill Tkhai } 2864631ab46bSEric W. Biederman 2865c8d68e6bSJason Wang ret = 0; 2866876bfd4dSHerbert Xu rtnl_lock(); 2867876bfd4dSHerbert Xu 28689484dc74Syuan linyu tun = tun_get(tfile); 28690f16bc13SGao Feng if (cmd == TUNSETIFF) { 28700f16bc13SGao Feng ret = -EEXIST; 28710f16bc13SGao Feng if (tun) 28720f16bc13SGao Feng goto unlock; 28730f16bc13SGao Feng 28741da177e4SLinus Torvalds ifr.ifr_name[IFNAMSIZ-1] = '\0'; 28751da177e4SLinus Torvalds 2876f2780d6dSKirill Tkhai ret = tun_set_iff(net, file, &ifr); 28771da177e4SLinus Torvalds 2878876bfd4dSHerbert Xu if (ret) 2879876bfd4dSHerbert Xu goto unlock; 28801da177e4SLinus Torvalds 288150857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 2882876bfd4dSHerbert Xu ret = -EFAULT; 2883876bfd4dSHerbert Xu goto unlock; 28841da177e4SLinus Torvalds } 2885fb7589a1SPavel Emelyanov if (cmd == TUNSETIFINDEX) { 2886fb7589a1SPavel Emelyanov ret = -EPERM; 2887fb7589a1SPavel Emelyanov if (tun) 2888fb7589a1SPavel Emelyanov goto unlock; 2889fb7589a1SPavel Emelyanov 2890fb7589a1SPavel Emelyanov ret = -EFAULT; 2891fb7589a1SPavel Emelyanov if (copy_from_user(&ifindex, argp, sizeof(ifindex))) 2892fb7589a1SPavel Emelyanov goto unlock; 2893fb7589a1SPavel Emelyanov 2894fb7589a1SPavel Emelyanov ret = 0; 2895fb7589a1SPavel Emelyanov tfile->ifindex = ifindex; 2896fb7589a1SPavel Emelyanov goto unlock; 2897fb7589a1SPavel Emelyanov } 28981da177e4SLinus Torvalds 2899876bfd4dSHerbert Xu ret = -EBADFD; 29001da177e4SLinus Torvalds if (!tun) 2901876bfd4dSHerbert Xu goto unlock; 29021da177e4SLinus Torvalds 29031e588338SJason Wang tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd); 29041da177e4SLinus Torvalds 2905631ab46bSEric W. Biederman ret = 0; 29061da177e4SLinus Torvalds switch (cmd) { 2907e3b99556SMark McLoughlin case TUNGETIFF: 29089ce99cf6SRami Rosen tun_get_iff(current->nsproxy->net_ns, tun, &ifr); 2909e3b99556SMark McLoughlin 29103d407a80SPavel Emelyanov if (tfile->detached) 29113d407a80SPavel Emelyanov ifr.ifr_flags |= IFF_DETACH_QUEUE; 2912849c9b6fSPavel Emelyanov if (!tfile->socket.sk->sk_filter) 2913849c9b6fSPavel Emelyanov ifr.ifr_flags |= IFF_NOFILTER; 29143d407a80SPavel Emelyanov 291550857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 2916631ab46bSEric W. Biederman ret = -EFAULT; 2917e3b99556SMark McLoughlin break; 2918e3b99556SMark McLoughlin 29191da177e4SLinus Torvalds case TUNSETNOCSUM: 29201da177e4SLinus Torvalds /* Disable/Enable checksum */ 29211da177e4SLinus Torvalds 292288255375SMichał Mirosław /* [unimplemented] */ 292388255375SMichał Mirosław tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n", 29246b8a66eeSJoe Perches arg ? "disabled" : "enabled"); 29251da177e4SLinus Torvalds break; 29261da177e4SLinus Torvalds 29271da177e4SLinus Torvalds case TUNSETPERSIST: 292854f968d6SJason Wang /* Disable/Enable persist mode. Keep an extra reference to the 292954f968d6SJason Wang * module to prevent the module being unprobed. 293054f968d6SJason Wang */ 293140630b82SMichael S. Tsirkin if (arg && !(tun->flags & IFF_PERSIST)) { 293240630b82SMichael S. 
Tsirkin tun->flags |= IFF_PERSIST; 293354f968d6SJason Wang __module_get(THIS_MODULE); 293483c1f36fSSabrina Dubroca do_notify = true; 2935dd38bd85SJason Wang } 293640630b82SMichael S. Tsirkin if (!arg && (tun->flags & IFF_PERSIST)) { 293740630b82SMichael S. Tsirkin tun->flags &= ~IFF_PERSIST; 293854f968d6SJason Wang module_put(THIS_MODULE); 293983c1f36fSSabrina Dubroca do_notify = true; 294054f968d6SJason Wang } 29411da177e4SLinus Torvalds 29426b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "persist %s\n", 29436b8a66eeSJoe Perches arg ? "enabled" : "disabled"); 29441da177e4SLinus Torvalds break; 29451da177e4SLinus Torvalds 29461da177e4SLinus Torvalds case TUNSETOWNER: 29471da177e4SLinus Torvalds /* Set owner of the device */ 29480625c883SEric W. Biederman owner = make_kuid(current_user_ns(), arg); 29490625c883SEric W. Biederman if (!uid_valid(owner)) { 29500625c883SEric W. Biederman ret = -EINVAL; 29510625c883SEric W. Biederman break; 29520625c883SEric W. Biederman } 29530625c883SEric W. Biederman tun->owner = owner; 295483c1f36fSSabrina Dubroca do_notify = true; 29551e588338SJason Wang tun_debug(KERN_INFO, tun, "owner set to %u\n", 29560625c883SEric W. Biederman from_kuid(&init_user_ns, tun->owner)); 29571da177e4SLinus Torvalds break; 29581da177e4SLinus Torvalds 29598c644623SGuido Guenther case TUNSETGROUP: 29608c644623SGuido Guenther /* Set group of the device */ 29610625c883SEric W. Biederman group = make_kgid(current_user_ns(), arg); 29620625c883SEric W. Biederman if (!gid_valid(group)) { 29630625c883SEric W. Biederman ret = -EINVAL; 29640625c883SEric W. Biederman break; 29650625c883SEric W. Biederman } 29660625c883SEric W. Biederman tun->group = group; 296783c1f36fSSabrina Dubroca do_notify = true; 29681e588338SJason Wang tun_debug(KERN_INFO, tun, "group set to %u\n", 29690625c883SEric W. Biederman from_kgid(&init_user_ns, tun->group)); 29708c644623SGuido Guenther break; 29718c644623SGuido Guenther 2972ff4cc3acSMike Kershaw case TUNSETLINK: 2973ff4cc3acSMike Kershaw /* Only allow setting the type when the interface is down */ 2974ff4cc3acSMike Kershaw if (tun->dev->flags & IFF_UP) { 29756b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, 29766b8a66eeSJoe Perches "Linktype set failed because interface is up\n"); 297748abfe05SDavid S. Miller ret = -EBUSY; 2978ff4cc3acSMike Kershaw } else { 2979ff4cc3acSMike Kershaw tun->dev->type = (int) arg; 29806b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "linktype set to %d\n", 29816b8a66eeSJoe Perches tun->dev->type); 298248abfe05SDavid S. Miller ret = 0; 2983ff4cc3acSMike Kershaw } 2984631ab46bSEric W. Biederman break; 2985ff4cc3acSMike Kershaw 29861da177e4SLinus Torvalds #ifdef TUN_DEBUG 29871da177e4SLinus Torvalds case TUNSETDEBUG: 29881da177e4SLinus Torvalds tun->debug = arg; 29891da177e4SLinus Torvalds break; 29901da177e4SLinus Torvalds #endif 29915228ddc9SRusty Russell case TUNSETOFFLOAD: 299288255375SMichał Mirosław ret = set_offload(tun, arg); 2993631ab46bSEric W. Biederman break; 29945228ddc9SRusty Russell 2995f271b2ccSMax Krasnyansky case TUNSETTXFILTER: 2996f271b2ccSMax Krasnyansky /* Can be set only for TAPs */ 2997631ab46bSEric W. Biederman ret = -EINVAL; 299840630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 2999631ab46bSEric W. Biederman break; 3000c0e5a8c2SHarvey Harrison ret = update_filter(&tun->txflt, (void __user *)arg); 3001631ab46bSEric W. 
Biederman break; 30021da177e4SLinus Torvalds 30031da177e4SLinus Torvalds case SIOCGIFHWADDR: 3004b595076aSUwe Kleine-König /* Get hw address */ 3005f271b2ccSMax Krasnyansky memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); 3006f271b2ccSMax Krasnyansky ifr.ifr_hwaddr.sa_family = tun->dev->type; 300750857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 3008631ab46bSEric W. Biederman ret = -EFAULT; 3009631ab46bSEric W. Biederman break; 30101da177e4SLinus Torvalds 30111da177e4SLinus Torvalds case SIOCSIFHWADDR: 3012f271b2ccSMax Krasnyansky /* Set hw address */ 30136b8a66eeSJoe Perches tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n", 30146b8a66eeSJoe Perches ifr.ifr_hwaddr.sa_data); 301540102371SKim B. Heino 301640102371SKim B. Heino ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 3017631ab46bSEric W. Biederman break; 301833dccbb0SHerbert Xu 301933dccbb0SHerbert Xu case TUNGETSNDBUF: 302054f968d6SJason Wang sndbuf = tfile->socket.sk->sk_sndbuf; 302133dccbb0SHerbert Xu if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) 302233dccbb0SHerbert Xu ret = -EFAULT; 302333dccbb0SHerbert Xu break; 302433dccbb0SHerbert Xu 302533dccbb0SHerbert Xu case TUNSETSNDBUF: 302633dccbb0SHerbert Xu if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { 302733dccbb0SHerbert Xu ret = -EFAULT; 302833dccbb0SHerbert Xu break; 302933dccbb0SHerbert Xu } 303093161922SCraig Gallek if (sndbuf <= 0) { 303193161922SCraig Gallek ret = -EINVAL; 303293161922SCraig Gallek break; 303393161922SCraig Gallek } 303433dccbb0SHerbert Xu 3035c8d68e6bSJason Wang tun->sndbuf = sndbuf; 3036c8d68e6bSJason Wang tun_set_sndbuf(tun); 303733dccbb0SHerbert Xu break; 303833dccbb0SHerbert Xu 3039d9d52b51SMichael S. Tsirkin case TUNGETVNETHDRSZ: 3040d9d52b51SMichael S. Tsirkin vnet_hdr_sz = tun->vnet_hdr_sz; 3041d9d52b51SMichael S. Tsirkin if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) 3042d9d52b51SMichael S. Tsirkin ret = -EFAULT; 3043d9d52b51SMichael S. Tsirkin break; 3044d9d52b51SMichael S. Tsirkin 3045d9d52b51SMichael S. Tsirkin case TUNSETVNETHDRSZ: 3046d9d52b51SMichael S. Tsirkin if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { 3047d9d52b51SMichael S. Tsirkin ret = -EFAULT; 3048d9d52b51SMichael S. Tsirkin break; 3049d9d52b51SMichael S. Tsirkin } 3050d9d52b51SMichael S. Tsirkin if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { 3051d9d52b51SMichael S. Tsirkin ret = -EINVAL; 3052d9d52b51SMichael S. Tsirkin break; 3053d9d52b51SMichael S. Tsirkin } 3054d9d52b51SMichael S. Tsirkin 3055d9d52b51SMichael S. Tsirkin tun->vnet_hdr_sz = vnet_hdr_sz; 3056d9d52b51SMichael S. Tsirkin break; 3057d9d52b51SMichael S. Tsirkin 30581cf8e410SMichael S. Tsirkin case TUNGETVNETLE: 30591cf8e410SMichael S. Tsirkin le = !!(tun->flags & TUN_VNET_LE); 30601cf8e410SMichael S. Tsirkin if (put_user(le, (int __user *)argp)) 30611cf8e410SMichael S. Tsirkin ret = -EFAULT; 30621cf8e410SMichael S. Tsirkin break; 30631cf8e410SMichael S. Tsirkin 30641cf8e410SMichael S. Tsirkin case TUNSETVNETLE: 30651cf8e410SMichael S. Tsirkin if (get_user(le, (int __user *)argp)) { 30661cf8e410SMichael S. Tsirkin ret = -EFAULT; 30671cf8e410SMichael S. Tsirkin break; 30681cf8e410SMichael S. Tsirkin } 30691cf8e410SMichael S. Tsirkin if (le) 30701cf8e410SMichael S. Tsirkin tun->flags |= TUN_VNET_LE; 30711cf8e410SMichael S. Tsirkin else 30721cf8e410SMichael S. Tsirkin tun->flags &= ~TUN_VNET_LE; 30731cf8e410SMichael S. Tsirkin break; 30741cf8e410SMichael S. 
Tsirkin 30758b8e658bSGreg Kurz case TUNGETVNETBE: 30768b8e658bSGreg Kurz ret = tun_get_vnet_be(tun, argp); 30778b8e658bSGreg Kurz break; 30788b8e658bSGreg Kurz 30798b8e658bSGreg Kurz case TUNSETVNETBE: 30808b8e658bSGreg Kurz ret = tun_set_vnet_be(tun, argp); 30818b8e658bSGreg Kurz break; 30828b8e658bSGreg Kurz 308399405162SMichael S. Tsirkin case TUNATTACHFILTER: 308499405162SMichael S. Tsirkin /* Can be set only for TAPs */ 308599405162SMichael S. Tsirkin ret = -EINVAL; 308640630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 308799405162SMichael S. Tsirkin break; 308899405162SMichael S. Tsirkin ret = -EFAULT; 308954f968d6SJason Wang if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) 309099405162SMichael S. Tsirkin break; 309199405162SMichael S. Tsirkin 3092c8d68e6bSJason Wang ret = tun_attach_filter(tun); 309399405162SMichael S. Tsirkin break; 309499405162SMichael S. Tsirkin 309599405162SMichael S. Tsirkin case TUNDETACHFILTER: 309699405162SMichael S. Tsirkin /* Can be set only for TAPs */ 309799405162SMichael S. Tsirkin ret = -EINVAL; 309840630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 309999405162SMichael S. Tsirkin break; 3100c8d68e6bSJason Wang ret = 0; 3101c8d68e6bSJason Wang tun_detach_filter(tun, tun->numqueues); 310299405162SMichael S. Tsirkin break; 310399405162SMichael S. Tsirkin 310476975e9cSPavel Emelyanov case TUNGETFILTER: 310576975e9cSPavel Emelyanov ret = -EINVAL; 310640630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 310776975e9cSPavel Emelyanov break; 310876975e9cSPavel Emelyanov ret = -EFAULT; 310976975e9cSPavel Emelyanov if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) 311076975e9cSPavel Emelyanov break; 311176975e9cSPavel Emelyanov ret = 0; 311276975e9cSPavel Emelyanov break; 311376975e9cSPavel Emelyanov 311496f84061SJason Wang case TUNSETSTEERINGEBPF: 3115cd5681d7SJason Wang ret = tun_set_ebpf(tun, &tun->steering_prog, argp); 311696f84061SJason Wang break; 311796f84061SJason Wang 3118aff3d70aSJason Wang case TUNSETFILTEREBPF: 3119aff3d70aSJason Wang ret = tun_set_ebpf(tun, &tun->filter_prog, argp); 3120aff3d70aSJason Wang break; 3121aff3d70aSJason Wang 31221da177e4SLinus Torvalds default: 3123631ab46bSEric W. Biederman ret = -EINVAL; 3124631ab46bSEric W. Biederman break; 3125ee289b64SJoe Perches } 31261da177e4SLinus Torvalds 312783c1f36fSSabrina Dubroca if (do_notify) 312883c1f36fSSabrina Dubroca netdev_state_change(tun->dev); 312983c1f36fSSabrina Dubroca 3130876bfd4dSHerbert Xu unlock: 3131876bfd4dSHerbert Xu rtnl_unlock(); 3132876bfd4dSHerbert Xu if (tun) 3133631ab46bSEric W. Biederman tun_put(tun); 3134631ab46bSEric W. 
Biederman return ret; 31351da177e4SLinus Torvalds } 31361da177e4SLinus Torvalds 313750857e2aSArnd Bergmann static long tun_chr_ioctl(struct file *file, 313850857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 313950857e2aSArnd Bergmann { 314050857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); 314150857e2aSArnd Bergmann } 314250857e2aSArnd Bergmann 314350857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 314450857e2aSArnd Bergmann static long tun_chr_compat_ioctl(struct file *file, 314550857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 314650857e2aSArnd Bergmann { 314750857e2aSArnd Bergmann switch (cmd) { 314850857e2aSArnd Bergmann case TUNSETIFF: 314950857e2aSArnd Bergmann case TUNGETIFF: 315050857e2aSArnd Bergmann case TUNSETTXFILTER: 315150857e2aSArnd Bergmann case TUNGETSNDBUF: 315250857e2aSArnd Bergmann case TUNSETSNDBUF: 315350857e2aSArnd Bergmann case SIOCGIFHWADDR: 315450857e2aSArnd Bergmann case SIOCSIFHWADDR: 315550857e2aSArnd Bergmann arg = (unsigned long)compat_ptr(arg); 315650857e2aSArnd Bergmann break; 315750857e2aSArnd Bergmann default: 315850857e2aSArnd Bergmann arg = (compat_ulong_t)arg; 315950857e2aSArnd Bergmann break; 316050857e2aSArnd Bergmann } 316150857e2aSArnd Bergmann 316250857e2aSArnd Bergmann /* 316350857e2aSArnd Bergmann * compat_ifreq is shorter than ifreq, so we must not access beyond 316450857e2aSArnd Bergmann * the end of that structure. All fields that are used in this 316550857e2aSArnd Bergmann * driver are compatible though, we don't need to convert the 316650857e2aSArnd Bergmann * contents. 316750857e2aSArnd Bergmann */ 316850857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); 316950857e2aSArnd Bergmann } 317050857e2aSArnd Bergmann #endif /* CONFIG_COMPAT */ 317150857e2aSArnd Bergmann 31721da177e4SLinus Torvalds static int tun_chr_fasync(int fd, struct file *file, int on) 31731da177e4SLinus Torvalds { 317454f968d6SJason Wang struct tun_file *tfile = file->private_data; 31751da177e4SLinus Torvalds int ret; 31761da177e4SLinus Torvalds 317754f968d6SJason Wang if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) 31789d319522SJonathan Corbet goto out; 31791da177e4SLinus Torvalds 31801da177e4SLinus Torvalds if (on) { 318101919134SEric W. Biederman __f_setown(file, task_pid(current), PIDTYPE_TGID, 0); 318254f968d6SJason Wang tfile->flags |= TUN_FASYNC; 31831da177e4SLinus Torvalds } else 318454f968d6SJason Wang tfile->flags &= ~TUN_FASYNC; 31859d319522SJonathan Corbet ret = 0; 31869d319522SJonathan Corbet out: 31879d319522SJonathan Corbet return ret; 31881da177e4SLinus Torvalds } 31891da177e4SLinus Torvalds 31901da177e4SLinus Torvalds static int tun_chr_open(struct inode *inode, struct file * file) 31911da177e4SLinus Torvalds { 3192140e807dSEric W. Biederman struct net *net = current->nsproxy->net_ns; 3193631ab46bSEric W. Biederman struct tun_file *tfile; 3194deed49fbSThomas Gleixner 31956b8a66eeSJoe Perches DBG1(KERN_INFO, "tunX: tun_chr_open\n"); 3196631ab46bSEric W. Biederman 3197140e807dSEric W. Biederman tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, 319811aa9c28SEric W. Biederman &tun_proto, 0); 3199631ab46bSEric W. Biederman if (!tfile) 3200631ab46bSEric W. 
Biederman return -ENOMEM; 3201b196d88aSJason Wang if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) { 3202b196d88aSJason Wang sk_free(&tfile->sk); 3203b196d88aSJason Wang return -ENOMEM; 3204b196d88aSJason Wang } 3205b196d88aSJason Wang 3206c7256f57SEric Dumazet mutex_init(&tfile->napi_mutex); 3207c956674bSMonam Agarwal RCU_INIT_POINTER(tfile->tun, NULL); 320854f968d6SJason Wang tfile->flags = 0; 3209fb7589a1SPavel Emelyanov tfile->ifindex = 0; 321054f968d6SJason Wang 321154f968d6SJason Wang init_waitqueue_head(&tfile->wq.wait); 32129e641bdcSXi Wang RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq); 321354f968d6SJason Wang 321454f968d6SJason Wang tfile->socket.file = file; 321554f968d6SJason Wang tfile->socket.ops = &tun_socket_ops; 321654f968d6SJason Wang 321754f968d6SJason Wang sock_init_data(&tfile->socket, &tfile->sk); 321854f968d6SJason Wang 321954f968d6SJason Wang tfile->sk.sk_write_space = tun_sock_write_space; 322054f968d6SJason Wang tfile->sk.sk_sndbuf = INT_MAX; 322154f968d6SJason Wang 3222631ab46bSEric W. Biederman file->private_data = tfile; 32234008e97fSJason Wang INIT_LIST_HEAD(&tfile->next); 322454f968d6SJason Wang 322519a6afb2SJason Wang sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); 322619a6afb2SJason Wang 32271da177e4SLinus Torvalds return 0; 32281da177e4SLinus Torvalds } 32291da177e4SLinus Torvalds 32301da177e4SLinus Torvalds static int tun_chr_close(struct inode *inode, struct file *file) 32311da177e4SLinus Torvalds { 3232631ab46bSEric W. Biederman struct tun_file *tfile = file->private_data; 32331da177e4SLinus Torvalds 3234c8d68e6bSJason Wang tun_detach(tfile, true); 32351da177e4SLinus Torvalds 32361da177e4SLinus Torvalds return 0; 32371da177e4SLinus Torvalds } 32381da177e4SLinus Torvalds 323993e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 32409484dc74Syuan linyu static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) 324193e14b6dSMasatake YAMATO { 32429484dc74Syuan linyu struct tun_file *tfile = file->private_data; 324393e14b6dSMasatake YAMATO struct tun_struct *tun; 324493e14b6dSMasatake YAMATO struct ifreq ifr; 324593e14b6dSMasatake YAMATO 324693e14b6dSMasatake YAMATO memset(&ifr, 0, sizeof(ifr)); 324793e14b6dSMasatake YAMATO 324893e14b6dSMasatake YAMATO rtnl_lock(); 32499484dc74Syuan linyu tun = tun_get(tfile); 325093e14b6dSMasatake YAMATO if (tun) 325193e14b6dSMasatake YAMATO tun_get_iff(current->nsproxy->net_ns, tun, &ifr); 325293e14b6dSMasatake YAMATO rtnl_unlock(); 325393e14b6dSMasatake YAMATO 325493e14b6dSMasatake YAMATO if (tun) 325593e14b6dSMasatake YAMATO tun_put(tun); 325693e14b6dSMasatake YAMATO 3257a3816ab0SJoe Perches seq_printf(m, "iff:\t%s\n", ifr.ifr_name); 325893e14b6dSMasatake YAMATO } 325993e14b6dSMasatake YAMATO #endif 326093e14b6dSMasatake YAMATO 3261d54b1fdbSArjan van de Ven static const struct file_operations tun_fops = { 32621da177e4SLinus Torvalds .owner = THIS_MODULE, 32631da177e4SLinus Torvalds .llseek = no_llseek, 32649b067034SAl Viro .read_iter = tun_chr_read_iter, 3265f5ff53b4SAl Viro .write_iter = tun_chr_write_iter, 32661da177e4SLinus Torvalds .poll = tun_chr_poll, 3267876bfd4dSHerbert Xu .unlocked_ioctl = tun_chr_ioctl, 326850857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 326950857e2aSArnd Bergmann .compat_ioctl = tun_chr_compat_ioctl, 327050857e2aSArnd Bergmann #endif 32711da177e4SLinus Torvalds .open = tun_chr_open, 32721da177e4SLinus Torvalds .release = tun_chr_close, 327393e14b6dSMasatake YAMATO .fasync = tun_chr_fasync, 327493e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 327593e14b6dSMasatake YAMATO .show_fdinfo = 
tun_chr_show_fdinfo, 327693e14b6dSMasatake YAMATO #endif 32771da177e4SLinus Torvalds }; 32781da177e4SLinus Torvalds 32791da177e4SLinus Torvalds static struct miscdevice tun_miscdev = { 32801da177e4SLinus Torvalds .minor = TUN_MINOR, 32811da177e4SLinus Torvalds .name = "tun", 3282e454cea2SKay Sievers .nodename = "net/tun", 32831da177e4SLinus Torvalds .fops = &tun_fops, 32841da177e4SLinus Torvalds }; 32851da177e4SLinus Torvalds 32861da177e4SLinus Torvalds /* ethtool interface */ 32871da177e4SLinus Torvalds 32884e24f2ddSChas Williams static void tun_default_link_ksettings(struct net_device *dev, 328929ccc49dSPhilippe Reynes struct ethtool_link_ksettings *cmd) 32901da177e4SLinus Torvalds { 329129ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, supported); 329229ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, advertising); 329329ccc49dSPhilippe Reynes cmd->base.speed = SPEED_10; 329429ccc49dSPhilippe Reynes cmd->base.duplex = DUPLEX_FULL; 329529ccc49dSPhilippe Reynes cmd->base.port = PORT_TP; 329629ccc49dSPhilippe Reynes cmd->base.phy_address = 0; 329729ccc49dSPhilippe Reynes cmd->base.autoneg = AUTONEG_DISABLE; 32984e24f2ddSChas Williams } 32994e24f2ddSChas Williams 33004e24f2ddSChas Williams static int tun_get_link_ksettings(struct net_device *dev, 33014e24f2ddSChas Williams struct ethtool_link_ksettings *cmd) 33024e24f2ddSChas Williams { 33034e24f2ddSChas Williams struct tun_struct *tun = netdev_priv(dev); 33044e24f2ddSChas Williams 33054e24f2ddSChas Williams memcpy(cmd, &tun->link_ksettings, sizeof(*cmd)); 33064e24f2ddSChas Williams return 0; 33074e24f2ddSChas Williams } 33084e24f2ddSChas Williams 33094e24f2ddSChas Williams static int tun_set_link_ksettings(struct net_device *dev, 33104e24f2ddSChas Williams const struct ethtool_link_ksettings *cmd) 33114e24f2ddSChas Williams { 33124e24f2ddSChas Williams struct tun_struct *tun = netdev_priv(dev); 33134e24f2ddSChas Williams 33144e24f2ddSChas Williams memcpy(&tun->link_ksettings, cmd, sizeof(*cmd)); 33151da177e4SLinus Torvalds return 0; 33161da177e4SLinus Torvalds } 33171da177e4SLinus Torvalds 33181da177e4SLinus Torvalds static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 33191da177e4SLinus Torvalds { 33201da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 33211da177e4SLinus Torvalds 332233a5ba14SRick Jones strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 332333a5ba14SRick Jones strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 33241da177e4SLinus Torvalds 33251da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 332640630b82SMichael S. Tsirkin case IFF_TUN: 332733a5ba14SRick Jones strlcpy(info->bus_info, "tun", sizeof(info->bus_info)); 33281da177e4SLinus Torvalds break; 332940630b82SMichael S. 
Tsirkin case IFF_TAP: 333033a5ba14SRick Jones strlcpy(info->bus_info, "tap", sizeof(info->bus_info)); 33311da177e4SLinus Torvalds break; 33321da177e4SLinus Torvalds } 33331da177e4SLinus Torvalds } 33341da177e4SLinus Torvalds 33351da177e4SLinus Torvalds static u32 tun_get_msglevel(struct net_device *dev) 33361da177e4SLinus Torvalds { 33371da177e4SLinus Torvalds #ifdef TUN_DEBUG 33381da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 33391da177e4SLinus Torvalds return tun->debug; 33401da177e4SLinus Torvalds #else 33411da177e4SLinus Torvalds return -EOPNOTSUPP; 33421da177e4SLinus Torvalds #endif 33431da177e4SLinus Torvalds } 33441da177e4SLinus Torvalds 33451da177e4SLinus Torvalds static void tun_set_msglevel(struct net_device *dev, u32 value) 33461da177e4SLinus Torvalds { 33471da177e4SLinus Torvalds #ifdef TUN_DEBUG 33481da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 33491da177e4SLinus Torvalds tun->debug = value; 33501da177e4SLinus Torvalds #endif 33511da177e4SLinus Torvalds } 33521da177e4SLinus Torvalds 33535503fcecSJason Wang static int tun_get_coalesce(struct net_device *dev, 33545503fcecSJason Wang struct ethtool_coalesce *ec) 33555503fcecSJason Wang { 33565503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 33575503fcecSJason Wang 33585503fcecSJason Wang ec->rx_max_coalesced_frames = tun->rx_batched; 33595503fcecSJason Wang 33605503fcecSJason Wang return 0; 33615503fcecSJason Wang } 33625503fcecSJason Wang 33635503fcecSJason Wang static int tun_set_coalesce(struct net_device *dev, 33645503fcecSJason Wang struct ethtool_coalesce *ec) 33655503fcecSJason Wang { 33665503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 33675503fcecSJason Wang 33685503fcecSJason Wang if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) 33695503fcecSJason Wang tun->rx_batched = NAPI_POLL_WEIGHT; 33705503fcecSJason Wang else 33715503fcecSJason Wang tun->rx_batched = ec->rx_max_coalesced_frames; 33725503fcecSJason Wang 33735503fcecSJason Wang return 0; 33745503fcecSJason Wang } 33755503fcecSJason Wang 33767282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops = { 33771da177e4SLinus Torvalds .get_drvinfo = tun_get_drvinfo, 33781da177e4SLinus Torvalds .get_msglevel = tun_get_msglevel, 33791da177e4SLinus Torvalds .set_msglevel = tun_set_msglevel, 3380bee31369SNolan Leake .get_link = ethtool_op_get_link, 3381eda29772SRichard Cochran .get_ts_info = ethtool_op_get_ts_info, 33825503fcecSJason Wang .get_coalesce = tun_get_coalesce, 33835503fcecSJason Wang .set_coalesce = tun_set_coalesce, 338429ccc49dSPhilippe Reynes .get_link_ksettings = tun_get_link_ksettings, 33854e24f2ddSChas Williams .set_link_ksettings = tun_set_link_ksettings, 33861da177e4SLinus Torvalds }; 33871da177e4SLinus Torvalds 33881576d986SJason Wang static int tun_queue_resize(struct tun_struct *tun) 33891576d986SJason Wang { 33901576d986SJason Wang struct net_device *dev = tun->dev; 33911576d986SJason Wang struct tun_file *tfile; 33925990a305SJason Wang struct ptr_ring **rings; 33931576d986SJason Wang int n = tun->numqueues + tun->numdisabled; 33941576d986SJason Wang int ret, i; 33951576d986SJason Wang 33965990a305SJason Wang rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); 33975990a305SJason Wang if (!rings) 33981576d986SJason Wang return -ENOMEM; 33991576d986SJason Wang 34001576d986SJason Wang for (i = 0; i < tun->numqueues; i++) { 34011576d986SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 34025990a305SJason Wang rings[i] = &tfile->tx_ring; 34031576d986SJason Wang } 
34041576d986SJason Wang list_for_each_entry(tfile, &tun->disabled, next) 34055990a305SJason Wang rings[i++] = &tfile->tx_ring; 34061576d986SJason Wang 34075990a305SJason Wang ret = ptr_ring_resize_multiple(rings, n, 34085990a305SJason Wang dev->tx_queue_len, GFP_KERNEL, 3409fc72d1d5SJason Wang tun_ptr_free); 34101576d986SJason Wang 34115990a305SJason Wang kfree(rings); 34121576d986SJason Wang return ret; 34131576d986SJason Wang } 34141576d986SJason Wang 34151576d986SJason Wang static int tun_device_event(struct notifier_block *unused, 34161576d986SJason Wang unsigned long event, void *ptr) 34171576d986SJason Wang { 34181576d986SJason Wang struct net_device *dev = netdev_notifier_info_to_dev(ptr); 34191576d986SJason Wang struct tun_struct *tun = netdev_priv(dev); 34201576d986SJason Wang 342186dfb4acSCraig Gallek if (dev->rtnl_link_ops != &tun_link_ops) 342286dfb4acSCraig Gallek return NOTIFY_DONE; 342386dfb4acSCraig Gallek 34241576d986SJason Wang switch (event) { 34251576d986SJason Wang case NETDEV_CHANGE_TX_QUEUE_LEN: 34261576d986SJason Wang if (tun_queue_resize(tun)) 34271576d986SJason Wang return NOTIFY_BAD; 34281576d986SJason Wang break; 34291576d986SJason Wang default: 34301576d986SJason Wang break; 34311576d986SJason Wang } 34321576d986SJason Wang 34331576d986SJason Wang return NOTIFY_DONE; 34341576d986SJason Wang } 34351576d986SJason Wang 34361576d986SJason Wang static struct notifier_block tun_notifier_block __read_mostly = { 34371576d986SJason Wang .notifier_call = tun_device_event, 34381576d986SJason Wang }; 343979d17604SPavel Emelyanov 34401da177e4SLinus Torvalds static int __init tun_init(void) 34411da177e4SLinus Torvalds { 34421da177e4SLinus Torvalds int ret = 0; 34431da177e4SLinus Torvalds 34446b8a66eeSJoe Perches pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 34451da177e4SLinus Torvalds 3446f019a7a5SEric W. Biederman ret = rtnl_link_register(&tun_link_ops); 344779d17604SPavel Emelyanov if (ret) { 34486b8a66eeSJoe Perches pr_err("Can't register link_ops\n"); 3449f019a7a5SEric W. Biederman goto err_linkops; 345079d17604SPavel Emelyanov } 345179d17604SPavel Emelyanov 34521da177e4SLinus Torvalds ret = misc_register(&tun_miscdev); 345379d17604SPavel Emelyanov if (ret) { 34546b8a66eeSJoe Perches pr_err("Can't register misc device %d\n", TUN_MINOR); 345579d17604SPavel Emelyanov goto err_misc; 345679d17604SPavel Emelyanov } 34571576d986SJason Wang 34585edfbd3cSTonghao Zhang ret = register_netdevice_notifier(&tun_notifier_block); 34595edfbd3cSTonghao Zhang if (ret) { 34605edfbd3cSTonghao Zhang pr_err("Can't register netdevice notifier\n"); 34615edfbd3cSTonghao Zhang goto err_notifier; 34625edfbd3cSTonghao Zhang } 34635edfbd3cSTonghao Zhang 346479d17604SPavel Emelyanov return 0; 34655edfbd3cSTonghao Zhang 34665edfbd3cSTonghao Zhang err_notifier: 34675edfbd3cSTonghao Zhang misc_deregister(&tun_miscdev); 346879d17604SPavel Emelyanov err_misc: 3469f019a7a5SEric W. Biederman rtnl_link_unregister(&tun_link_ops); 3470f019a7a5SEric W. Biederman err_linkops: 34711da177e4SLinus Torvalds return ret; 34721da177e4SLinus Torvalds } 34731da177e4SLinus Torvalds 34741da177e4SLinus Torvalds static void tun_cleanup(void) 34751da177e4SLinus Torvalds { 34761da177e4SLinus Torvalds misc_deregister(&tun_miscdev); 3477f019a7a5SEric W. Biederman rtnl_link_unregister(&tun_link_ops); 34781576d986SJason Wang unregister_netdevice_notifier(&tun_notifier_block); 34791da177e4SLinus Torvalds } 34801da177e4SLinus Torvalds 348105c2828cSMichael S. Tsirkin /* Get an underlying socket object from tun file. 
Returns error unless file is 348205c2828cSMichael S. Tsirkin * attached to a device. The returned object works like a packet socket, it 348305c2828cSMichael S. Tsirkin * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for 348405c2828cSMichael S. Tsirkin * holding a reference to the file for as long as the socket is in use. */ 348505c2828cSMichael S. Tsirkin struct socket *tun_get_socket(struct file *file) 348605c2828cSMichael S. Tsirkin { 34876e914fc7SJason Wang struct tun_file *tfile; 348805c2828cSMichael S. Tsirkin if (file->f_op != &tun_fops) 348905c2828cSMichael S. Tsirkin return ERR_PTR(-EINVAL); 34906e914fc7SJason Wang tfile = file->private_data; 34916e914fc7SJason Wang if (!tfile) 349205c2828cSMichael S. Tsirkin return ERR_PTR(-EBADFD); 349354f968d6SJason Wang return &tfile->socket; 349405c2828cSMichael S. Tsirkin } 349505c2828cSMichael S. Tsirkin EXPORT_SYMBOL_GPL(tun_get_socket); 349605c2828cSMichael S. Tsirkin 34975990a305SJason Wang struct ptr_ring *tun_get_tx_ring(struct file *file) 349883339c6bSJason Wang { 349983339c6bSJason Wang struct tun_file *tfile; 350083339c6bSJason Wang 350183339c6bSJason Wang if (file->f_op != &tun_fops) 350283339c6bSJason Wang return ERR_PTR(-EINVAL); 350383339c6bSJason Wang tfile = file->private_data; 350483339c6bSJason Wang if (!tfile) 350583339c6bSJason Wang return ERR_PTR(-EBADFD); 35065990a305SJason Wang return &tfile->tx_ring; 350783339c6bSJason Wang } 35085990a305SJason Wang EXPORT_SYMBOL_GPL(tun_get_tx_ring); 350983339c6bSJason Wang 35101da177e4SLinus Torvalds module_init(tun_init); 35111da177e4SLinus Torvalds module_exit(tun_cleanup); 35121da177e4SLinus Torvalds MODULE_DESCRIPTION(DRV_DESCRIPTION); 35131da177e4SLinus Torvalds MODULE_AUTHOR(DRV_COPYRIGHT); 35141da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 35151da177e4SLinus Torvalds MODULE_ALIAS_MISCDEV(TUN_MINOR); 3516578454ffSKay Sievers MODULE_ALIAS("devname:net/tun"); 3517
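/*
 * Illustrative userspace sketch (not part of the driver): roughly how the
 * TUNSETIFF and TUNSETPERSIST paths handled by __tun_chr_ioctl() and
 * tun_set_iff() above are typically exercised.  It uses only the public UAPI
 * from <linux/if.h> and <linux/if_tun.h>; the helper name tun_alloc_example()
 * and its error handling are illustrative assumptions, not code from this file.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int tun_alloc_example(char name[IFNAMSIZ], int persist)
{
	struct ifreq ifr;
	int fd;

	/* Character device registered by tun_miscdev above ("devname:net/tun"). */
	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	/* Request an L3 (tun) device without the packet information header. */
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
	if (name[0])
		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	/* Dispatched by __tun_chr_ioctl() into tun_set_iff(), which creates
	 * the interface or attaches to an existing persistent one.
	 */
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}

	/* Optionally keep the interface around after the fd is closed. */
	if (persist)
		ioctl(fd, TUNSETPERSIST, 1);

	/* The kernel copies the resolved interface name back into ifr. */
	strncpy(name, ifr.ifr_name, IFNAMSIZ - 1);
	name[IFNAMSIZ - 1] = '\0';

	/* read()/write() on fd now carry raw IP packets to and from the device. */
	return fd;
}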