/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (tun->debug)						\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (debug == 2)						\
		printk(level fmt, ##args);			\
} while (0)
#else
#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (0)							\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (0)							\
		printk(level fmt, ##args);			\
} while (0)
#endif

#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE     0x80000000
#define TUN_VNET_BE     0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and tap_filter)
 * to serve as one transmit queue for tuntap device. The sock_fprog and
 * tap_filter were kept in tun_struct since they were used for filtering for the
 * netdevice not for a specific queue (at least I didn't see the requirement for
 * this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_page {
	struct page *page;
	int count;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated ____cacheline_aligned_in_smp;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when the file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int		numqueues;
	unsigned int		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

bool tun_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}
EXPORT_SYMBOL(tun_is_xdp_frame);

void *tun_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_xdp_to_ptr);

void *tun_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_ptr_to_xdp);

static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

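/* NAPI poll callback: drain the skbs that were queued on the socket's
 * sk_write_queue, hand them to GRO, and complete NAPI once fewer than
 * @budget packets have been processed.
 */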
static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}

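/* Virtio-net header endianness: TUN_VNET_LE marks the vnet headers as
 * little-endian regardless of host byte order; with
 * CONFIG_TUN_VNET_CROSS_LE, TUN_VNET_BE can additionally override the
 * legacy native-endian default so a cross-endian peer can be supported.
 */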
#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}

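/* Flow tracking: tun->flows[] is a hash table keyed by the skb rxhash that
 * remembers which queue a flow was last seen on.  TUN_NUM_FLOW_ENTRIES is a
 * power of two, so tun_hashfn() can mask instead of using a modulo.
 */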
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}

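/* Flow GC timer callback: expire entries that have not been updated within
 * ageing_time and re-arm the timer for the earliest remaining expiry.
 */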
static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		if (e->queue_index != queue_index)
			e->queue_index = queue_index;
		if (e->updated != jiffies)
			e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

/**
 * Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash. The reason that
 * we do not check rxq no. is because some cards (e.g. 82599) choose
 * the rxq based on the txq where the last packet of the flow went out. As
 * the userspace application moves between processors, we may get a
 * different rxq no. here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
	if (e) {
		tun_flow_save_rps_rxhash(e, txq);
		txq = e->queue_index;
	} else {
		/* use multiply and shift instead of expensive divide */
		txq = ((u64)txq * numqueues) >> 32;
	}

	return txq;
}

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u32 numqueues;
	u16 ret = 0;

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues)
		return 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}

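/* Permission check: the caller must match the device's owner/group (when
 * set) or hold CAP_NET_ADMIN in the user namespace owning the device's
 * network namespace.
 */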
static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to the persistent device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (!err)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	if (rtnl_dereference(tun->xdp_prog))
		sock_set_flag(&tfile->sk, SOCK_XDP);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

	/* Publish tfile->tun and tun->tfiles only after we've fully
	 * initialized tfile; otherwise we risk using half-initialized
	 * object.
	 */
	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;
	tun_set_real_num_queues(tun);
out:
	return err;
}

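/* Take a reference on the tun device behind an open queue: the pointer is
 * read under RCU and a netdev refcount is taken; callers pair this with
 * tun_put().
 */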
static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks. Which is
	 * perfectly safe. We disable it first and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	netif_tx_start_all_queues(dev);

	for (i = 0; i < tun->numqueues; i++) {
		struct tun_file *tfile;

		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_write_space(tfile->socket.sk);
	}

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		struct tun_flow_entry *e;
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
		if (e)
			tun_flow_save_rps_rxhash(e, rxhash);
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (txq >= tun->numqueues)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

tun, "tun_net_xmit %d\n", skb->len); 10966e914fc7SJason Wang 1097c8d68e6bSJason Wang BUG_ON(!tfile); 1098c8d68e6bSJason Wang 1099f271b2ccSMax Krasnyansky /* Drop if the filter does not like it. 1100f271b2ccSMax Krasnyansky * This is a noop if the filter is disabled. 1101f271b2ccSMax Krasnyansky * Filter can be enabled only for the TAP devices. */ 1102f271b2ccSMax Krasnyansky if (!check_filter(&tun->txflt, skb)) 1103f271b2ccSMax Krasnyansky goto drop; 1104f271b2ccSMax Krasnyansky 110554f968d6SJason Wang if (tfile->socket.sk->sk_filter && 110654f968d6SJason Wang sk_filter(tfile->socket.sk, skb)) 110799405162SMichael S. Tsirkin goto drop; 110899405162SMichael S. Tsirkin 1109aff3d70aSJason Wang len = run_ebpf_filter(tun, skb, len); 111081c89507SBjørn Mork if (len == 0 || pskb_trim(skb, len)) 1111aff3d70aSJason Wang goto drop; 1112aff3d70aSJason Wang 11131f8b977aSWillem de Bruijn if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 11147bf66305SJason Wang goto drop; 11157bf66305SJason Wang 11167b996243SSoheil Hassas Yeganeh skb_tx_timestamp(skb); 1117eda29772SRichard Cochran 11180110d6f2SMichael S. Tsirkin /* Orphan the skb - required as we might hang on to it 11197bf66305SJason Wang * for indefinite time. 11207bf66305SJason Wang */ 11210110d6f2SMichael S. Tsirkin skb_orphan(skb); 11220110d6f2SMichael S. Tsirkin 1123f8af75f3SEric Dumazet nf_reset(skb); 1124f8af75f3SEric Dumazet 11255990a305SJason Wang if (ptr_ring_produce(&tfile->tx_ring, skb)) 11261576d986SJason Wang goto drop; 11271da177e4SLinus Torvalds 11281da177e4SLinus Torvalds /* Notify and wake up reader process */ 112954f968d6SJason Wang if (tfile->flags & TUN_FASYNC) 113054f968d6SJason Wang kill_fasync(&tfile->fasync, SIGIO, POLL_IN); 11319e641bdcSXi Wang tfile->socket.sk->sk_data_ready(tfile->socket.sk); 11326e914fc7SJason Wang 11336e914fc7SJason Wang rcu_read_unlock(); 11346ed10654SPatrick McHardy return NETDEV_TX_OK; 11351da177e4SLinus Torvalds 11361da177e4SLinus Torvalds drop: 1137608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->tx_dropped); 1138149d36f7SMichael S. Tsirkin skb_tx_error(skb); 11391da177e4SLinus Torvalds kfree_skb(skb); 11406e914fc7SJason Wang rcu_read_unlock(); 1141baeababbSJason Wang return NET_XMIT_DROP; 11421da177e4SLinus Torvalds } 11431da177e4SLinus Torvalds 1144f271b2ccSMax Krasnyansky static void tun_net_mclist(struct net_device *dev) 11451da177e4SLinus Torvalds { 1146f271b2ccSMax Krasnyansky /* 1147f271b2ccSMax Krasnyansky * This callback is supposed to deal with mc filter in 1148f271b2ccSMax Krasnyansky * _rx_ path and has nothing to do with the _tx_ path. 1149f271b2ccSMax Krasnyansky * In rx path we always accept everything userspace gives us. 
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}

static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_pcpu_stats *p;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(tun->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets	= p->rx_packets;
			rxbytes		= p->rx_bytes;
			txpackets	= p->tx_packets;
			txbytes		= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;

		/* u32 counters */
		rx_dropped	+= p->rx_dropped;
		rx_frame_errors	+= p->rx_frame_errors;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped  = rx_dropped;
	stats->rx_frame_errors = rx_frame_errors;
	stats->tx_dropped = tx_dropped;
}

static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	struct bpf_prog *old_prog;
	int i;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
1222e4a2a304SJason Wang if (prog) 1223e4a2a304SJason Wang sock_set_flag(&tfile->sk, SOCK_XDP); 1224e4a2a304SJason Wang else 1225e4a2a304SJason Wang sock_reset_flag(&tfile->sk, SOCK_XDP); 1226e4a2a304SJason Wang } 1227e4a2a304SJason Wang list_for_each_entry(tfile, &tun->disabled, next) { 1228e4a2a304SJason Wang if (prog) 1229e4a2a304SJason Wang sock_set_flag(&tfile->sk, SOCK_XDP); 1230e4a2a304SJason Wang else 1231e4a2a304SJason Wang sock_reset_flag(&tfile->sk, SOCK_XDP); 1232e4a2a304SJason Wang } 1233e4a2a304SJason Wang 1234761876c8SJason Wang return 0; 1235761876c8SJason Wang } 1236761876c8SJason Wang 1237761876c8SJason Wang static u32 tun_xdp_query(struct net_device *dev) 1238761876c8SJason Wang { 1239761876c8SJason Wang struct tun_struct *tun = netdev_priv(dev); 1240761876c8SJason Wang const struct bpf_prog *xdp_prog; 1241761876c8SJason Wang 1242761876c8SJason Wang xdp_prog = rtnl_dereference(tun->xdp_prog); 1243761876c8SJason Wang if (xdp_prog) 1244761876c8SJason Wang return xdp_prog->aux->id; 1245761876c8SJason Wang 1246761876c8SJason Wang return 0; 1247761876c8SJason Wang } 1248761876c8SJason Wang 1249f4e63525SJakub Kicinski static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) 1250761876c8SJason Wang { 1251761876c8SJason Wang switch (xdp->command) { 1252761876c8SJason Wang case XDP_SETUP_PROG: 1253761876c8SJason Wang return tun_xdp_set(dev, xdp->prog, xdp->extack); 1254761876c8SJason Wang case XDP_QUERY_PROG: 1255761876c8SJason Wang xdp->prog_id = tun_xdp_query(dev); 1256761876c8SJason Wang return 0; 1257761876c8SJason Wang default: 1258761876c8SJason Wang return -EINVAL; 1259761876c8SJason Wang } 1260761876c8SJason Wang } 1261761876c8SJason Wang 126226d31925SNicolas Dichtel static int tun_net_change_carrier(struct net_device *dev, bool new_carrier) 126326d31925SNicolas Dichtel { 126426d31925SNicolas Dichtel if (new_carrier) { 126526d31925SNicolas Dichtel struct tun_struct *tun = netdev_priv(dev); 126626d31925SNicolas Dichtel 126726d31925SNicolas Dichtel if (!tun->numqueues) 126826d31925SNicolas Dichtel return -EPERM; 126926d31925SNicolas Dichtel 127026d31925SNicolas Dichtel netif_carrier_on(dev); 127126d31925SNicolas Dichtel } else { 127226d31925SNicolas Dichtel netif_carrier_off(dev); 127326d31925SNicolas Dichtel } 127426d31925SNicolas Dichtel return 0; 127526d31925SNicolas Dichtel } 127626d31925SNicolas Dichtel 1277758e43b7SStephen Hemminger static const struct net_device_ops tun_netdev_ops = { 1278c70f1829SEric W. 
Biederman .ndo_uninit = tun_net_uninit, 1279758e43b7SStephen Hemminger .ndo_open = tun_net_open, 1280758e43b7SStephen Hemminger .ndo_stop = tun_net_close, 128100829823SStephen Hemminger .ndo_start_xmit = tun_net_xmit, 128288255375SMichał Mirosław .ndo_fix_features = tun_net_fix_features, 1283c8d68e6bSJason Wang .ndo_select_queue = tun_select_queue, 1284eaea34b2SPaolo Abeni .ndo_set_rx_headroom = tun_set_headroom, 1285608b9977SPaolo Abeni .ndo_get_stats64 = tun_net_get_stats64, 128626d31925SNicolas Dichtel .ndo_change_carrier = tun_net_change_carrier, 1287758e43b7SStephen Hemminger }; 1288758e43b7SStephen Hemminger 12890c9d917bSJesper Dangaard Brouer static void __tun_xdp_flush_tfile(struct tun_file *tfile) 12900c9d917bSJesper Dangaard Brouer { 12910c9d917bSJesper Dangaard Brouer /* Notify and wake up reader process */ 12920c9d917bSJesper Dangaard Brouer if (tfile->flags & TUN_FASYNC) 12930c9d917bSJesper Dangaard Brouer kill_fasync(&tfile->fasync, SIGIO, POLL_IN); 12940c9d917bSJesper Dangaard Brouer tfile->socket.sk->sk_data_ready(tfile->socket.sk); 12950c9d917bSJesper Dangaard Brouer } 12960c9d917bSJesper Dangaard Brouer 129742b33468SJesper Dangaard Brouer static int tun_xdp_xmit(struct net_device *dev, int n, 129842b33468SJesper Dangaard Brouer struct xdp_frame **frames, u32 flags) 1299fc72d1d5SJason Wang { 1300fc72d1d5SJason Wang struct tun_struct *tun = netdev_priv(dev); 1301fc72d1d5SJason Wang struct tun_file *tfile; 1302fc72d1d5SJason Wang u32 numqueues; 1303735fc405SJesper Dangaard Brouer int drops = 0; 1304735fc405SJesper Dangaard Brouer int cnt = n; 1305735fc405SJesper Dangaard Brouer int i; 1306fc72d1d5SJason Wang 13070c9d917bSJesper Dangaard Brouer if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 130842b33468SJesper Dangaard Brouer return -EINVAL; 130942b33468SJesper Dangaard Brouer 1310fc72d1d5SJason Wang rcu_read_lock(); 1311fc72d1d5SJason Wang 1312fc72d1d5SJason Wang numqueues = READ_ONCE(tun->numqueues); 1313fc72d1d5SJason Wang if (!numqueues) { 1314735fc405SJesper Dangaard Brouer rcu_read_unlock(); 1315735fc405SJesper Dangaard Brouer return -ENXIO; /* Caller will free/return all frames */ 1316fc72d1d5SJason Wang } 1317fc72d1d5SJason Wang 1318fc72d1d5SJason Wang tfile = rcu_dereference(tun->tfiles[smp_processor_id() % 1319fc72d1d5SJason Wang numqueues]); 1320735fc405SJesper Dangaard Brouer 1321735fc405SJesper Dangaard Brouer spin_lock(&tfile->tx_ring.producer_lock); 1322735fc405SJesper Dangaard Brouer for (i = 0; i < n; i++) { 1323735fc405SJesper Dangaard Brouer struct xdp_frame *xdp = frames[i]; 1324fc72d1d5SJason Wang /* Encode the XDP flag into lowest bit for consumer to differ 1325fc72d1d5SJason Wang * XDP buffer from sk_buff. 
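		 * The consumer side undoes this tagging; tun_do_read()
		 * effectively does
		 *
		 *	if (tun_is_xdp_frame(ptr))
		 *		xdpf = tun_ptr_to_xdp(ptr);
		 *	else
		 *		skb = ptr;
		 *
		 * so plain sk_buffs and xdp_frames can share one tx_ring.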
1326fc72d1d5SJason Wang */ 1327735fc405SJesper Dangaard Brouer void *frame = tun_xdp_to_ptr(xdp); 1328fc72d1d5SJason Wang 1329735fc405SJesper Dangaard Brouer if (__ptr_ring_produce(&tfile->tx_ring, frame)) { 1330735fc405SJesper Dangaard Brouer this_cpu_inc(tun->pcpu_stats->tx_dropped); 1331735fc405SJesper Dangaard Brouer xdp_return_frame_rx_napi(xdp); 1332735fc405SJesper Dangaard Brouer drops++; 1333735fc405SJesper Dangaard Brouer } 1334735fc405SJesper Dangaard Brouer } 1335735fc405SJesper Dangaard Brouer spin_unlock(&tfile->tx_ring.producer_lock); 1336735fc405SJesper Dangaard Brouer 13370c9d917bSJesper Dangaard Brouer if (flags & XDP_XMIT_FLUSH) 13380c9d917bSJesper Dangaard Brouer __tun_xdp_flush_tfile(tfile); 13390c9d917bSJesper Dangaard Brouer 1340fc72d1d5SJason Wang rcu_read_unlock(); 1341735fc405SJesper Dangaard Brouer return cnt - drops; 1342fc72d1d5SJason Wang } 1343fc72d1d5SJason Wang 134444fa2dbdSJesper Dangaard Brouer static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) 134544fa2dbdSJesper Dangaard Brouer { 134644fa2dbdSJesper Dangaard Brouer struct xdp_frame *frame = convert_to_xdp_frame(xdp); 134744fa2dbdSJesper Dangaard Brouer 134844fa2dbdSJesper Dangaard Brouer if (unlikely(!frame)) 134944fa2dbdSJesper Dangaard Brouer return -EOVERFLOW; 135044fa2dbdSJesper Dangaard Brouer 135142421a56SJesper Dangaard Brouer return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH); 1352fc72d1d5SJason Wang } 1353fc72d1d5SJason Wang 1354758e43b7SStephen Hemminger static const struct net_device_ops tap_netdev_ops = { 1355c70f1829SEric W. Biederman .ndo_uninit = tun_net_uninit, 1356758e43b7SStephen Hemminger .ndo_open = tun_net_open, 1357758e43b7SStephen Hemminger .ndo_stop = tun_net_close, 135800829823SStephen Hemminger .ndo_start_xmit = tun_net_xmit, 135988255375SMichał Mirosław .ndo_fix_features = tun_net_fix_features, 1360afc4b13dSJiri Pirko .ndo_set_rx_mode = tun_net_mclist, 1361758e43b7SStephen Hemminger .ndo_set_mac_address = eth_mac_addr, 1362758e43b7SStephen Hemminger .ndo_validate_addr = eth_validate_addr, 1363c8d68e6bSJason Wang .ndo_select_queue = tun_select_queue, 13645e52796aSToshiaki Makita .ndo_features_check = passthru_features_check, 1365eaea34b2SPaolo Abeni .ndo_set_rx_headroom = tun_set_headroom, 1366608b9977SPaolo Abeni .ndo_get_stats64 = tun_net_get_stats64, 1367f4e63525SJakub Kicinski .ndo_bpf = tun_xdp, 1368fc72d1d5SJason Wang .ndo_xdp_xmit = tun_xdp_xmit, 136926d31925SNicolas Dichtel .ndo_change_carrier = tun_net_change_carrier, 1370758e43b7SStephen Hemminger }; 1371758e43b7SStephen Hemminger 1372944a1376SPavel Emelyanov static void tun_flow_init(struct tun_struct *tun) 137396442e42SJason Wang { 137496442e42SJason Wang int i; 137596442e42SJason Wang 137696442e42SJason Wang for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) 137796442e42SJason Wang INIT_HLIST_HEAD(&tun->flows[i]); 137896442e42SJason Wang 137996442e42SJason Wang tun->ageing_time = TUN_FLOW_EXPIRE; 1380e99e88a9SKees Cook timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0); 1381e99e88a9SKees Cook mod_timer(&tun->flow_gc_timer, 1382e99e88a9SKees Cook round_jiffies_up(jiffies + tun->ageing_time)); 138396442e42SJason Wang } 138496442e42SJason Wang 138596442e42SJason Wang static void tun_flow_uninit(struct tun_struct *tun) 138696442e42SJason Wang { 138796442e42SJason Wang del_timer_sync(&tun->flow_gc_timer); 138896442e42SJason Wang tun_flow_flush(tun); 138996442e42SJason Wang } 139096442e42SJason Wang 139191572088SJarod Wilson #define MIN_MTU 68 139291572088SJarod Wilson #define MAX_MTU 65535 139391572088SJarod 
Wilson 13941da177e4SLinus Torvalds /* Initialize net device. */ 13951da177e4SLinus Torvalds static void tun_net_init(struct net_device *dev) 13961da177e4SLinus Torvalds { 13971da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 13981da177e4SLinus Torvalds 13991da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 140040630b82SMichael S. Tsirkin case IFF_TUN: 1401758e43b7SStephen Hemminger dev->netdev_ops = &tun_netdev_ops; 1402758e43b7SStephen Hemminger 14031da177e4SLinus Torvalds /* Point-to-Point TUN Device */ 14041da177e4SLinus Torvalds dev->hard_header_len = 0; 14051da177e4SLinus Torvalds dev->addr_len = 0; 14061da177e4SLinus Torvalds dev->mtu = 1500; 14071da177e4SLinus Torvalds 14081da177e4SLinus Torvalds /* Zero header length */ 14091da177e4SLinus Torvalds dev->type = ARPHRD_NONE; 14101da177e4SLinus Torvalds dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 14111da177e4SLinus Torvalds break; 14121da177e4SLinus Torvalds 141340630b82SMichael S. Tsirkin case IFF_TAP: 14147a0a9608SKusanagi Kouichi dev->netdev_ops = &tap_netdev_ops; 14151da177e4SLinus Torvalds /* Ethernet TAP Device */ 14161da177e4SLinus Torvalds ether_setup(dev); 1417550fd08cSNeil Horman dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1418a676847bSstephen hemminger dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 141936226a8dSBrian Braunstein 1420f2cedb63SDanny Kukawka eth_hw_addr_random(dev); 142136226a8dSBrian Braunstein 14221da177e4SLinus Torvalds break; 14231da177e4SLinus Torvalds } 142491572088SJarod Wilson 142591572088SJarod Wilson dev->min_mtu = MIN_MTU; 142691572088SJarod Wilson dev->max_mtu = MAX_MTU - dev->hard_header_len; 14271da177e4SLinus Torvalds } 14281da177e4SLinus Torvalds 14292f3ab622SJason Wang static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile) 14302f3ab622SJason Wang { 14312f3ab622SJason Wang struct sock *sk = tfile->socket.sk; 14322f3ab622SJason Wang 14332f3ab622SJason Wang return (tun->dev->flags & IFF_UP) && sock_writeable(sk); 14342f3ab622SJason Wang } 14352f3ab622SJason Wang 14361da177e4SLinus Torvalds /* Character device part */ 14371da177e4SLinus Torvalds 14381da177e4SLinus Torvalds /* Poll */ 1439afc9a42bSAl Viro static __poll_t tun_chr_poll(struct file *file, poll_table *wait) 14401da177e4SLinus Torvalds { 1441b2430de3SEric W. Biederman struct tun_file *tfile = file->private_data; 14429484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 14433c8a9c63SMariusz Kozlowski struct sock *sk; 1444afc9a42bSAl Viro __poll_t mask = 0; 14451da177e4SLinus Torvalds 14461da177e4SLinus Torvalds if (!tun) 1447a9a08845SLinus Torvalds return EPOLLERR; 14481da177e4SLinus Torvalds 144954f968d6SJason Wang sk = tfile->socket.sk; 14503c8a9c63SMariusz Kozlowski 14516b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_chr_poll\n"); 14521da177e4SLinus Torvalds 14539e641bdcSXi Wang poll_wait(file, sk_sleep(sk), wait); 14541da177e4SLinus Torvalds 14555990a305SJason Wang if (!ptr_ring_empty(&tfile->tx_ring)) 1456a9a08845SLinus Torvalds mask |= EPOLLIN | EPOLLRDNORM; 14571da177e4SLinus Torvalds 14582f3ab622SJason Wang /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to 14592f3ab622SJason Wang * guarantee EPOLLOUT to be raised by either here or 14602f3ab622SJason Wang * tun_sock_write_space(). Then process could get notification 14612f3ab622SJason Wang * after it writes to a down device and meets -EIO. 
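	 * The flag is set with test_and_set_bit() and writeability is then
	 * re-checked, so a wakeup racing with this poll is not lost: either
	 * EPOLLOUT is reported here or tun_sock_write_space() clears the
	 * bit and wakes the waiter.  A userspace writer would typically
	 * pair this with something like (illustrative only, not part of
	 * the driver):
	 *
	 *	struct pollfd pfd = { .fd = tun_fd, .events = POLLOUT };
	 *	poll(&pfd, 1, -1);
	 *	write(tun_fd, pkt, pkt_len);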
14622f3ab622SJason Wang */ 14632f3ab622SJason Wang if (tun_sock_writeable(tun, tfile) || 14649cd3e072SEric Dumazet (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 14652f3ab622SJason Wang tun_sock_writeable(tun, tfile))) 1466a9a08845SLinus Torvalds mask |= EPOLLOUT | EPOLLWRNORM; 146733dccbb0SHerbert Xu 1468c70f1829SEric W. Biederman if (tun->dev->reg_state != NETREG_REGISTERED) 1469a9a08845SLinus Torvalds mask = EPOLLERR; 1470c70f1829SEric W. Biederman 1471631ab46bSEric W. Biederman tun_put(tun); 14721da177e4SLinus Torvalds return mask; 14731da177e4SLinus Torvalds } 14741da177e4SLinus Torvalds 147590e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, 147690e33d45SPetar Penkov size_t len, 147790e33d45SPetar Penkov const struct iov_iter *it) 147890e33d45SPetar Penkov { 147990e33d45SPetar Penkov struct sk_buff *skb; 148090e33d45SPetar Penkov size_t linear; 148190e33d45SPetar Penkov int err; 148290e33d45SPetar Penkov int i; 148390e33d45SPetar Penkov 148490e33d45SPetar Penkov if (it->nr_segs > MAX_SKB_FRAGS + 1) 148590e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 148690e33d45SPetar Penkov 148790e33d45SPetar Penkov local_bh_disable(); 148890e33d45SPetar Penkov skb = napi_get_frags(&tfile->napi); 148990e33d45SPetar Penkov local_bh_enable(); 149090e33d45SPetar Penkov if (!skb) 149190e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 149290e33d45SPetar Penkov 149390e33d45SPetar Penkov linear = iov_iter_single_seg_count(it); 149490e33d45SPetar Penkov err = __skb_grow(skb, linear); 149590e33d45SPetar Penkov if (err) 149690e33d45SPetar Penkov goto free; 149790e33d45SPetar Penkov 149890e33d45SPetar Penkov skb->len = len; 149990e33d45SPetar Penkov skb->data_len = len - linear; 150090e33d45SPetar Penkov skb->truesize += skb->data_len; 150190e33d45SPetar Penkov 150290e33d45SPetar Penkov for (i = 1; i < it->nr_segs; i++) { 150390e33d45SPetar Penkov size_t fragsz = it->iov[i].iov_len; 1504aa6daacaSEric Dumazet struct page *page; 1505aa6daacaSEric Dumazet void *frag; 150690e33d45SPetar Penkov 150790e33d45SPetar Penkov if (fragsz == 0 || fragsz > PAGE_SIZE) { 150890e33d45SPetar Penkov err = -EINVAL; 150990e33d45SPetar Penkov goto free; 151090e33d45SPetar Penkov } 1511aa6daacaSEric Dumazet frag = netdev_alloc_frag(fragsz); 1512aa6daacaSEric Dumazet if (!frag) { 151390e33d45SPetar Penkov err = -ENOMEM; 151490e33d45SPetar Penkov goto free; 151590e33d45SPetar Penkov } 1516aa6daacaSEric Dumazet page = virt_to_head_page(frag); 1517aa6daacaSEric Dumazet skb_fill_page_desc(skb, i - 1, page, 1518aa6daacaSEric Dumazet frag - page_address(page), fragsz); 151990e33d45SPetar Penkov } 152090e33d45SPetar Penkov 152190e33d45SPetar Penkov return skb; 152290e33d45SPetar Penkov free: 152390e33d45SPetar Penkov /* frees skb and all frags allocated with napi_alloc_frag() */ 152490e33d45SPetar Penkov napi_free_frags(&tfile->napi); 152590e33d45SPetar Penkov return ERR_PTR(err); 152690e33d45SPetar Penkov } 152790e33d45SPetar Penkov 1528f42157cbSRusty Russell /* prepad is the amount to reserve at front. len is length after that. 1529f42157cbSRusty Russell * linear is a hint as to how much to copy (usually headers). 
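 * Anything beyond 'linear' lands in paged data: sock_alloc_send_pskb()
 * allocates the head plus pages, charges the memory to the socket's
 * send buffer and honours the caller's non-blocking flag.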
*/ 153054f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, 153133dccbb0SHerbert Xu size_t prepad, size_t len, 153233dccbb0SHerbert Xu size_t linear, int noblock) 1533f42157cbSRusty Russell { 153454f968d6SJason Wang struct sock *sk = tfile->socket.sk; 1535f42157cbSRusty Russell struct sk_buff *skb; 153633dccbb0SHerbert Xu int err; 1537f42157cbSRusty Russell 1538f42157cbSRusty Russell /* Under a page? Don't bother with paged skb. */ 15390eca93bcSHerbert Xu if (prepad + len < PAGE_SIZE || !linear) 154033dccbb0SHerbert Xu linear = len; 1541f42157cbSRusty Russell 154233dccbb0SHerbert Xu skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 154328d64271SEric Dumazet &err, 0); 1544f42157cbSRusty Russell if (!skb) 154533dccbb0SHerbert Xu return ERR_PTR(err); 1546f42157cbSRusty Russell 1547f42157cbSRusty Russell skb_reserve(skb, prepad); 1548f42157cbSRusty Russell skb_put(skb, linear); 154933dccbb0SHerbert Xu skb->data_len = len - linear; 155033dccbb0SHerbert Xu skb->len += len - linear; 1551f42157cbSRusty Russell 1552f42157cbSRusty Russell return skb; 1553f42157cbSRusty Russell } 1554f42157cbSRusty Russell 15555503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, 15565503fcecSJason Wang struct sk_buff *skb, int more) 15575503fcecSJason Wang { 15585503fcecSJason Wang struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 15595503fcecSJason Wang struct sk_buff_head process_queue; 15605503fcecSJason Wang u32 rx_batched = tun->rx_batched; 15615503fcecSJason Wang bool rcv = false; 15625503fcecSJason Wang 15635503fcecSJason Wang if (!rx_batched || (!more && skb_queue_empty(queue))) { 15645503fcecSJason Wang local_bh_disable(); 15658ebebcbaSMatthew Cover skb_record_rx_queue(skb, tfile->queue_index); 15665503fcecSJason Wang netif_receive_skb(skb); 15675503fcecSJason Wang local_bh_enable(); 15685503fcecSJason Wang return; 15695503fcecSJason Wang } 15705503fcecSJason Wang 15715503fcecSJason Wang spin_lock(&queue->lock); 15725503fcecSJason Wang if (!more || skb_queue_len(queue) == rx_batched) { 15735503fcecSJason Wang __skb_queue_head_init(&process_queue); 15745503fcecSJason Wang skb_queue_splice_tail_init(queue, &process_queue); 15755503fcecSJason Wang rcv = true; 15765503fcecSJason Wang } else { 15775503fcecSJason Wang __skb_queue_tail(queue, skb); 15785503fcecSJason Wang } 15795503fcecSJason Wang spin_unlock(&queue->lock); 15805503fcecSJason Wang 15815503fcecSJason Wang if (rcv) { 15825503fcecSJason Wang struct sk_buff *nskb; 15835503fcecSJason Wang 15845503fcecSJason Wang local_bh_disable(); 15858ebebcbaSMatthew Cover while ((nskb = __skb_dequeue(&process_queue))) { 15868ebebcbaSMatthew Cover skb_record_rx_queue(nskb, tfile->queue_index); 15875503fcecSJason Wang netif_receive_skb(nskb); 15888ebebcbaSMatthew Cover } 15898ebebcbaSMatthew Cover skb_record_rx_queue(skb, tfile->queue_index); 15905503fcecSJason Wang netif_receive_skb(skb); 15915503fcecSJason Wang local_bh_enable(); 15925503fcecSJason Wang } 15935503fcecSJason Wang } 15945503fcecSJason Wang 159566ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, 159666ccbc9cSJason Wang int len, int noblock, bool zerocopy) 159766ccbc9cSJason Wang { 159866ccbc9cSJason Wang if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 159966ccbc9cSJason Wang return false; 160066ccbc9cSJason Wang 160166ccbc9cSJason Wang if (tfile->socket.sk->sk_sndbuf != INT_MAX) 160266ccbc9cSJason Wang return false; 160366ccbc9cSJason Wang 160466ccbc9cSJason Wang if 
(!noblock) 160566ccbc9cSJason Wang return false; 160666ccbc9cSJason Wang 160766ccbc9cSJason Wang if (zerocopy) 160866ccbc9cSJason Wang return false; 160966ccbc9cSJason Wang 161066ccbc9cSJason Wang if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + 161166ccbc9cSJason Wang SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) 161266ccbc9cSJason Wang return false; 161366ccbc9cSJason Wang 161466ccbc9cSJason Wang return true; 161566ccbc9cSJason Wang } 161666ccbc9cSJason Wang 1617ac1f1f6cSJason Wang static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf, 16188ae1aff0SJason Wang int buflen, int len, int pad) 1619ac1f1f6cSJason Wang { 1620ac1f1f6cSJason Wang struct sk_buff *skb = build_skb(buf, buflen); 1621ac1f1f6cSJason Wang 1622ac1f1f6cSJason Wang if (!skb) 1623ac1f1f6cSJason Wang return ERR_PTR(-ENOMEM); 1624ac1f1f6cSJason Wang 16258ae1aff0SJason Wang skb_reserve(skb, pad); 1626ac1f1f6cSJason Wang skb_put(skb, len); 1627ac1f1f6cSJason Wang 1628ac1f1f6cSJason Wang get_page(alloc_frag->page); 1629ac1f1f6cSJason Wang alloc_frag->offset += buflen; 1630ac1f1f6cSJason Wang 1631ac1f1f6cSJason Wang return skb; 1632ac1f1f6cSJason Wang } 1633ac1f1f6cSJason Wang 16348ae1aff0SJason Wang static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, 16358ae1aff0SJason Wang struct xdp_buff *xdp, u32 act) 16368ae1aff0SJason Wang { 16378ae1aff0SJason Wang int err; 16388ae1aff0SJason Wang 16398ae1aff0SJason Wang switch (act) { 16408ae1aff0SJason Wang case XDP_REDIRECT: 16418ae1aff0SJason Wang err = xdp_do_redirect(tun->dev, xdp, xdp_prog); 16428ae1aff0SJason Wang if (err) 16438ae1aff0SJason Wang return err; 16448ae1aff0SJason Wang break; 16458ae1aff0SJason Wang case XDP_TX: 16468ae1aff0SJason Wang err = tun_xdp_tx(tun->dev, xdp); 16478ae1aff0SJason Wang if (err < 0) 16488ae1aff0SJason Wang return err; 16498ae1aff0SJason Wang break; 16508ae1aff0SJason Wang case XDP_PASS: 16518ae1aff0SJason Wang break; 16528ae1aff0SJason Wang default: 16538ae1aff0SJason Wang bpf_warn_invalid_xdp_action(act); 16548ae1aff0SJason Wang /* fall through */ 16558ae1aff0SJason Wang case XDP_ABORTED: 16568ae1aff0SJason Wang trace_xdp_exception(tun->dev, xdp_prog, act); 16578ae1aff0SJason Wang /* fall through */ 16588ae1aff0SJason Wang case XDP_DROP: 16598ae1aff0SJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped); 16608ae1aff0SJason Wang break; 16618ae1aff0SJason Wang } 16628ae1aff0SJason Wang 16638ae1aff0SJason Wang return act; 16648ae1aff0SJason Wang } 16658ae1aff0SJason Wang 1666761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun, 1667761876c8SJason Wang struct tun_file *tfile, 166866ccbc9cSJason Wang struct iov_iter *from, 1669761876c8SJason Wang struct virtio_net_hdr *hdr, 16701cfe6e93SJason Wang int len, int *skb_xdp) 167166ccbc9cSJason Wang { 16720bbd7dadSEric Dumazet struct page_frag *alloc_frag = ¤t->task_frag; 1673761876c8SJason Wang struct bpf_prog *xdp_prog; 16747df13219SJason Wang int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 167566ccbc9cSJason Wang char *buf; 167666ccbc9cSJason Wang size_t copied; 16778ae1aff0SJason Wang int pad = TUN_RX_PAD; 16788ae1aff0SJason Wang int err = 0; 16797df13219SJason Wang 16807df13219SJason Wang rcu_read_lock(); 16817df13219SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 16827df13219SJason Wang if (xdp_prog) 16834f23aff8SJason Wang pad += XDP_PACKET_HEADROOM; 16847df13219SJason Wang buflen += SKB_DATA_ALIGN(len + pad); 16857df13219SJason Wang rcu_read_unlock(); 168666ccbc9cSJason Wang 168763b9ab65SJason Wang 
alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES); 168866ccbc9cSJason Wang if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) 168966ccbc9cSJason Wang return ERR_PTR(-ENOMEM); 169066ccbc9cSJason Wang 169166ccbc9cSJason Wang buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 169266ccbc9cSJason Wang copied = copy_page_from_iter(alloc_frag->page, 16937df13219SJason Wang alloc_frag->offset + pad, 169466ccbc9cSJason Wang len, from); 169566ccbc9cSJason Wang if (copied != len) 169666ccbc9cSJason Wang return ERR_PTR(-EFAULT); 169766ccbc9cSJason Wang 16987df13219SJason Wang /* There's a small window that XDP may be set after the check 16997df13219SJason Wang * of xdp_prog above, this should be rare and for simplicity 17007df13219SJason Wang * we do XDP on skb in case the headroom is not enough. 17017df13219SJason Wang */ 1702ac1f1f6cSJason Wang if (hdr->gso_type || !xdp_prog) { 17031cfe6e93SJason Wang *skb_xdp = 1; 17048ae1aff0SJason Wang return __tun_build_skb(alloc_frag, buf, buflen, len, pad); 1705ac1f1f6cSJason Wang } 1706ac1f1f6cSJason Wang 17071cfe6e93SJason Wang *skb_xdp = 0; 170866ccbc9cSJason Wang 17096547e387SToshiaki Makita local_bh_disable(); 1710761876c8SJason Wang rcu_read_lock(); 1711761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 17128ae1aff0SJason Wang if (xdp_prog) { 1713761876c8SJason Wang struct xdp_buff xdp; 1714761876c8SJason Wang u32 act; 1715761876c8SJason Wang 1716761876c8SJason Wang xdp.data_hard_start = buf; 17177df13219SJason Wang xdp.data = buf + pad; 1718de8f3a83SDaniel Borkmann xdp_set_data_meta_invalid(&xdp); 1719761876c8SJason Wang xdp.data_end = xdp.data + len; 17208bf5c4eeSJesper Dangaard Brouer xdp.rxq = &tfile->xdp_rxq; 1721761876c8SJason Wang 17228ae1aff0SJason Wang act = bpf_prog_run_xdp(xdp_prog, &xdp); 17238ae1aff0SJason Wang if (act == XDP_REDIRECT || act == XDP_TX) { 1724761876c8SJason Wang get_page(alloc_frag->page); 1725761876c8SJason Wang alloc_frag->offset += buflen; 1726761876c8SJason Wang } 17278ae1aff0SJason Wang err = tun_xdp_act(tun, xdp_prog, &xdp, act); 17288ae1aff0SJason Wang if (err < 0) 17298ae1aff0SJason Wang goto err_xdp; 17301a097910SJason Wang if (err == XDP_REDIRECT) 17311a097910SJason Wang xdp_do_flush_map(); 17328ae1aff0SJason Wang if (err != XDP_PASS) 17338ae1aff0SJason Wang goto out; 17348ae1aff0SJason Wang 17358ae1aff0SJason Wang pad = xdp.data - xdp.data_hard_start; 17368ae1aff0SJason Wang len = xdp.data_end - xdp.data; 1737761876c8SJason Wang } 1738761876c8SJason Wang rcu_read_unlock(); 17396547e387SToshiaki Makita local_bh_enable(); 1740291aeb2bSJason Wang 17418ae1aff0SJason Wang return __tun_build_skb(alloc_frag, buf, buflen, len, pad); 1742761876c8SJason Wang 17438ae1aff0SJason Wang err_xdp: 1744761876c8SJason Wang put_page(alloc_frag->page); 1745f7053b6cSJason Wang out: 1746761876c8SJason Wang rcu_read_unlock(); 17476547e387SToshiaki Makita local_bh_enable(); 1748761876c8SJason Wang return NULL; 174966ccbc9cSJason Wang } 175066ccbc9cSJason Wang 17511da177e4SLinus Torvalds /* Get packet from user space buffer */ 175254f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, 1753f5ff53b4SAl Viro void *msg_control, struct iov_iter *from, 17545503fcecSJason Wang int noblock, bool more) 17551da177e4SLinus Torvalds { 175609640e63SHarvey Harrison struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; 17571da177e4SLinus Torvalds struct sk_buff *skb; 1758f5ff53b4SAl Viro size_t total_len = iov_iter_count(from); 1759eaea34b2SPaolo Abeni 
size_t len = total_len, align = tun->align, linear; 1760f43798c2SRusty Russell struct virtio_net_hdr gso = { 0 }; 1761608b9977SPaolo Abeni struct tun_pcpu_stats *stats; 176296f8d9ecSJason Wang int good_linear; 17630690899bSMichael S. Tsirkin int copylen; 17640690899bSMichael S. Tsirkin bool zerocopy = false; 17650690899bSMichael S. Tsirkin int err; 176696f84061SJason Wang u32 rxhash = 0; 17671cfe6e93SJason Wang int skb_xdp = 1; 1768af3fb24eSEric Dumazet bool frags = tun_napi_frags_enabled(tfile); 17691da177e4SLinus Torvalds 177040630b82SMichael S. Tsirkin if (!(tun->flags & IFF_NO_PI)) { 177115718ea0SDan Carpenter if (len < sizeof(pi)) 17721da177e4SLinus Torvalds return -EINVAL; 177315718ea0SDan Carpenter len -= sizeof(pi); 17741da177e4SLinus Torvalds 1775cbbd26b8SAl Viro if (!copy_from_iter_full(&pi, sizeof(pi), from)) 17761da177e4SLinus Torvalds return -EFAULT; 17771da177e4SLinus Torvalds } 17781da177e4SLinus Torvalds 177940630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR) { 1780e1edab87SWillem de Bruijn int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 1781e1edab87SWillem de Bruijn 1782e1edab87SWillem de Bruijn if (len < vnet_hdr_sz) 1783f43798c2SRusty Russell return -EINVAL; 1784e1edab87SWillem de Bruijn len -= vnet_hdr_sz; 1785f43798c2SRusty Russell 1786cbbd26b8SAl Viro if (!copy_from_iter_full(&gso, sizeof(gso), from)) 1787f43798c2SRusty Russell return -EFAULT; 1788f43798c2SRusty Russell 17894909122fSHerbert Xu if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 179056f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len)) 179156f0dcc5SMichael S. Tsirkin gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2); 17924909122fSHerbert Xu 179356f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > len) 1794f43798c2SRusty Russell return -EINVAL; 1795e1edab87SWillem de Bruijn iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); 1796f43798c2SRusty Russell } 1797f43798c2SRusty Russell 179840630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { 1799a504b86eSstephen hemminger align += NET_IP_ALIGN; 18000eca93bcSHerbert Xu if (unlikely(len < ETH_HLEN || 180156f0dcc5SMichael S. Tsirkin (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN))) 1802e01bf1c8SRusty Russell return -EINVAL; 1803e01bf1c8SRusty Russell } 18041da177e4SLinus Torvalds 180596f8d9ecSJason Wang good_linear = SKB_MAX_HEAD(align); 180696f8d9ecSJason Wang 180788529176SJason Wang if (msg_control) { 1808f5ff53b4SAl Viro struct iov_iter i = *from; 1809f5ff53b4SAl Viro 181088529176SJason Wang /* There are 256 bytes to be copied in skb, so there is 181188529176SJason Wang * enough room for skb expand head in case it is used. 18120690899bSMichael S. Tsirkin * The rest of the buffer is mapped from userspace. 18130690899bSMichael S. Tsirkin */ 181456f0dcc5SMichael S. Tsirkin copylen = gso.hdr_len ? 
tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN; 181596f8d9ecSJason Wang if (copylen > good_linear) 181696f8d9ecSJason Wang copylen = good_linear; 18173dd5c330SJason Wang linear = copylen; 1818f5ff53b4SAl Viro iov_iter_advance(&i, copylen); 1819f5ff53b4SAl Viro if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) 182088529176SJason Wang zerocopy = true; 182188529176SJason Wang } 182288529176SJason Wang 182390e33d45SPetar Penkov if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { 18241cfe6e93SJason Wang /* For the packet that is not easy to be processed 18251cfe6e93SJason Wang * (e.g gso or jumbo packet), we will do it at after 18261cfe6e93SJason Wang * skb was created with generic XDP routine. 18271cfe6e93SJason Wang */ 18281cfe6e93SJason Wang skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); 182966ccbc9cSJason Wang if (IS_ERR(skb)) { 183066ccbc9cSJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped); 183166ccbc9cSJason Wang return PTR_ERR(skb); 183266ccbc9cSJason Wang } 1833761876c8SJason Wang if (!skb) 1834761876c8SJason Wang return total_len; 183566ccbc9cSJason Wang } else { 183688529176SJason Wang if (!zerocopy) { 18370690899bSMichael S. Tsirkin copylen = len; 183856f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) 183996f8d9ecSJason Wang linear = good_linear; 184096f8d9ecSJason Wang else 184156f0dcc5SMichael S. Tsirkin linear = tun16_to_cpu(tun, gso.hdr_len); 18423dd5c330SJason Wang } 18430690899bSMichael S. Tsirkin 184490e33d45SPetar Penkov if (frags) { 184590e33d45SPetar Penkov mutex_lock(&tfile->napi_mutex); 184690e33d45SPetar Penkov skb = tun_napi_alloc_frags(tfile, copylen, from); 184790e33d45SPetar Penkov /* tun_napi_alloc_frags() enforces a layout for the skb. 184890e33d45SPetar Penkov * If zerocopy is enabled, then this layout will be 184990e33d45SPetar Penkov * overwritten by zerocopy_sg_from_iter(). 185090e33d45SPetar Penkov */ 185190e33d45SPetar Penkov zerocopy = false; 185290e33d45SPetar Penkov } else { 185390e33d45SPetar Penkov skb = tun_alloc_skb(tfile, align, copylen, linear, 185490e33d45SPetar Penkov noblock); 185590e33d45SPetar Penkov } 185690e33d45SPetar Penkov 185733dccbb0SHerbert Xu if (IS_ERR(skb)) { 185833dccbb0SHerbert Xu if (PTR_ERR(skb) != -EAGAIN) 1859608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 186090e33d45SPetar Penkov if (frags) 186190e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 186233dccbb0SHerbert Xu return PTR_ERR(skb); 18631da177e4SLinus Torvalds } 18641da177e4SLinus Torvalds 18650690899bSMichael S. Tsirkin if (zerocopy) 1866f5ff53b4SAl Viro err = zerocopy_sg_from_iter(skb, from); 1867af1cc7a2SJason Wang else 1868f5ff53b4SAl Viro err = skb_copy_datagram_from_iter(skb, 0, from, len); 18690690899bSMichael S. Tsirkin 18700690899bSMichael S. 
Tsirkin if (err) { 18714477138fSEric Dumazet err = -EFAULT; 18724477138fSEric Dumazet drop: 1873608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 18748f22757eSDave Jones kfree_skb(skb); 187590e33d45SPetar Penkov if (frags) { 187690e33d45SPetar Penkov tfile->napi.skb = NULL; 187790e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 187890e33d45SPetar Penkov } 187990e33d45SPetar Penkov 18804477138fSEric Dumazet return err; 18818f22757eSDave Jones } 188266ccbc9cSJason Wang } 18831da177e4SLinus Torvalds 18843e9e40e7SJarno Rajahalme if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { 1885df10db98SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_frame_errors); 1886df10db98SPaolo Abeni kfree_skb(skb); 188790e33d45SPetar Penkov if (frags) { 188890e33d45SPetar Penkov tfile->napi.skb = NULL; 188990e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 189090e33d45SPetar Penkov } 189190e33d45SPetar Penkov 1892df10db98SPaolo Abeni return -EINVAL; 1893df10db98SPaolo Abeni } 1894df10db98SPaolo Abeni 18951da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 189640630b82SMichael S. Tsirkin case IFF_TUN: 189740630b82SMichael S. Tsirkin if (tun->flags & IFF_NO_PI) { 18982580c4c1SAlexander Potapenko u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; 18992580c4c1SAlexander Potapenko 19002580c4c1SAlexander Potapenko switch (ip_version) { 19012580c4c1SAlexander Potapenko case 4: 1902f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IP); 1903f09f7ee2SAng Way Chuang break; 19042580c4c1SAlexander Potapenko case 6: 1905f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IPV6); 1906f09f7ee2SAng Way Chuang break; 1907f09f7ee2SAng Way Chuang default: 1908608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 1909f09f7ee2SAng Way Chuang kfree_skb(skb); 1910f09f7ee2SAng Way Chuang return -EINVAL; 1911f09f7ee2SAng Way Chuang } 1912f09f7ee2SAng Way Chuang } 1913f09f7ee2SAng Way Chuang 1914459a98edSArnaldo Carvalho de Melo skb_reset_mac_header(skb); 19151da177e4SLinus Torvalds skb->protocol = pi.proto; 19164c13eb66SArnaldo Carvalho de Melo skb->dev = tun->dev; 19171da177e4SLinus Torvalds break; 191840630b82SMichael S. Tsirkin case IFF_TAP: 191990e33d45SPetar Penkov if (!frags) 19201da177e4SLinus Torvalds skb->protocol = eth_type_trans(skb, tun->dev); 19211da177e4SLinus Torvalds break; 19226403eab1SJoe Perches } 19231da177e4SLinus Torvalds 19240690899bSMichael S. Tsirkin /* copy skb_ubuf_info for callback when skb has no error */ 19250690899bSMichael S. Tsirkin if (zerocopy) { 19260690899bSMichael S. Tsirkin skb_shinfo(skb)->destructor_arg = msg_control; 19270690899bSMichael S. Tsirkin skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1928c9af6db4SPravin B Shelar skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1929af1cc7a2SJason Wang } else if (msg_control) { 1930af1cc7a2SJason Wang struct ubuf_info *uarg = msg_control; 1931af1cc7a2SJason Wang uarg->callback(uarg, false); 19320690899bSMichael S. Tsirkin } 19330690899bSMichael S. 
Tsirkin 193472f65107SVlad Yasevich skb_reset_network_header(skb); 1935d2aa125dSMaxim Mikityanskiy skb_probe_transport_header(skb); 193638502af7SJason Wang 19371cfe6e93SJason Wang if (skb_xdp) { 1938761876c8SJason Wang struct bpf_prog *xdp_prog; 1939761876c8SJason Wang int ret; 1940761876c8SJason Wang 19416547e387SToshiaki Makita local_bh_disable(); 1942761876c8SJason Wang rcu_read_lock(); 1943761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 1944761876c8SJason Wang if (xdp_prog) { 1945761876c8SJason Wang ret = do_xdp_generic(xdp_prog, skb); 1946761876c8SJason Wang if (ret != XDP_PASS) { 1947761876c8SJason Wang rcu_read_unlock(); 19486547e387SToshiaki Makita local_bh_enable(); 1949761876c8SJason Wang return total_len; 1950761876c8SJason Wang } 1951761876c8SJason Wang } 1952761876c8SJason Wang rcu_read_unlock(); 19536547e387SToshiaki Makita local_bh_enable(); 1954761876c8SJason Wang } 1955761876c8SJason Wang 1956cf1a1e07SPaolo Abeni /* Compute the costly rx hash only if needed for flow updates. 1957cf1a1e07SPaolo Abeni * We may get a very small possibility of OOO during switching, not 1958cf1a1e07SPaolo Abeni * worth to optimize. 1959cf1a1e07SPaolo Abeni */ 1960cf1a1e07SPaolo Abeni if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 && 1961cf1a1e07SPaolo Abeni !tfile->detached) 1962feec084aSJason Wang rxhash = __skb_get_hash_symmetric(skb); 196394317099SPetar Penkov 19644477138fSEric Dumazet rcu_read_lock(); 19654477138fSEric Dumazet if (unlikely(!(tun->dev->flags & IFF_UP))) { 19664477138fSEric Dumazet err = -EIO; 19679180bb4fSEric Dumazet rcu_read_unlock(); 19684477138fSEric Dumazet goto drop; 19694477138fSEric Dumazet } 19704477138fSEric Dumazet 197190e33d45SPetar Penkov if (frags) { 197290e33d45SPetar Penkov /* Exercise flow dissector code path. 
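		 * eth_get_headlen() runs the flow dissector over the linear
		 * part to size the protocol headers before the frags are
		 * handed to GRO; a result larger than the actual headlen
		 * indicates a malformed packet, which is counted as
		 * rx_dropped and freed below.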
*/ 1973c43f1255SStanislav Fomichev u32 headlen = eth_get_headlen(tun->dev, skb->data, 1974c43f1255SStanislav Fomichev skb_headlen(skb)); 197590e33d45SPetar Penkov 1976010f245bSEric Dumazet if (unlikely(headlen > skb_headlen(skb))) { 197790e33d45SPetar Penkov this_cpu_inc(tun->pcpu_stats->rx_dropped); 197890e33d45SPetar Penkov napi_free_frags(&tfile->napi); 19794477138fSEric Dumazet rcu_read_unlock(); 198090e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 198190e33d45SPetar Penkov WARN_ON(1); 198290e33d45SPetar Penkov return -ENOMEM; 198390e33d45SPetar Penkov } 198490e33d45SPetar Penkov 198590e33d45SPetar Penkov local_bh_disable(); 198690e33d45SPetar Penkov napi_gro_frags(&tfile->napi); 198790e33d45SPetar Penkov local_bh_enable(); 198890e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 1989aec72f33SEric Dumazet } else if (tfile->napi_enabled) { 199094317099SPetar Penkov struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 199194317099SPetar Penkov int queue_len; 199294317099SPetar Penkov 199394317099SPetar Penkov spin_lock_bh(&queue->lock); 199494317099SPetar Penkov __skb_queue_tail(queue, skb); 199594317099SPetar Penkov queue_len = skb_queue_len(queue); 199694317099SPetar Penkov spin_unlock(&queue->lock); 199794317099SPetar Penkov 199894317099SPetar Penkov if (!more || queue_len > NAPI_POLL_WEIGHT) 199994317099SPetar Penkov napi_schedule(&tfile->napi); 200094317099SPetar Penkov 200194317099SPetar Penkov local_bh_enable(); 200294317099SPetar Penkov } else if (!IS_ENABLED(CONFIG_4KSTACKS)) { 20035503fcecSJason Wang tun_rx_batched(tun, tfile, skb, more); 200494317099SPetar Penkov } else { 20051da177e4SLinus Torvalds netif_rx_ni(skb); 200694317099SPetar Penkov } 20074477138fSEric Dumazet rcu_read_unlock(); 20081da177e4SLinus Torvalds 2009608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats); 2010608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp); 2011608b9977SPaolo Abeni stats->rx_packets++; 2012608b9977SPaolo Abeni stats->rx_bytes += len; 2013608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp); 2014608b9977SPaolo Abeni put_cpu_ptr(stats); 20151da177e4SLinus Torvalds 201696f84061SJason Wang if (rxhash) 20179e85722dSJason Wang tun_flow_update(tun, rxhash, tfile); 201896f84061SJason Wang 20190690899bSMichael S. Tsirkin return total_len; 20201da177e4SLinus Torvalds } 20211da177e4SLinus Torvalds 2022f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) 20231da177e4SLinus Torvalds { 202433dccbb0SHerbert Xu struct file *file = iocb->ki_filp; 202554f968d6SJason Wang struct tun_file *tfile = file->private_data; 20269484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 2027631ab46bSEric W. Biederman ssize_t result; 20281da177e4SLinus Torvalds 20291da177e4SLinus Torvalds if (!tun) 20301da177e4SLinus Torvalds return -EBADFD; 20311da177e4SLinus Torvalds 20325503fcecSJason Wang result = tun_get_user(tun, tfile, NULL, from, 20335503fcecSJason Wang file->f_flags & O_NONBLOCK, false); 2034631ab46bSEric W. Biederman 2035631ab46bSEric W. Biederman tun_put(tun); 2036631ab46bSEric W. 
Biederman return result; 20371da177e4SLinus Torvalds } 20381da177e4SLinus Torvalds 2039fc72d1d5SJason Wang static ssize_t tun_put_user_xdp(struct tun_struct *tun, 2040fc72d1d5SJason Wang struct tun_file *tfile, 20411ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdp_frame, 2042fc72d1d5SJason Wang struct iov_iter *iter) 2043fc72d1d5SJason Wang { 2044fc72d1d5SJason Wang int vnet_hdr_sz = 0; 20451ffcbc85SJesper Dangaard Brouer size_t size = xdp_frame->len; 2046fc72d1d5SJason Wang struct tun_pcpu_stats *stats; 2047fc72d1d5SJason Wang size_t ret; 2048fc72d1d5SJason Wang 2049fc72d1d5SJason Wang if (tun->flags & IFF_VNET_HDR) { 2050fc72d1d5SJason Wang struct virtio_net_hdr gso = { 0 }; 2051fc72d1d5SJason Wang 2052fc72d1d5SJason Wang vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 2053fc72d1d5SJason Wang if (unlikely(iov_iter_count(iter) < vnet_hdr_sz)) 2054fc72d1d5SJason Wang return -EINVAL; 2055fc72d1d5SJason Wang if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) != 2056fc72d1d5SJason Wang sizeof(gso))) 2057fc72d1d5SJason Wang return -EFAULT; 2058fc72d1d5SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 2059fc72d1d5SJason Wang } 2060fc72d1d5SJason Wang 20611ffcbc85SJesper Dangaard Brouer ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz; 2062fc72d1d5SJason Wang 2063fc72d1d5SJason Wang stats = get_cpu_ptr(tun->pcpu_stats); 2064fc72d1d5SJason Wang u64_stats_update_begin(&stats->syncp); 2065fc72d1d5SJason Wang stats->tx_packets++; 2066fc72d1d5SJason Wang stats->tx_bytes += ret; 2067fc72d1d5SJason Wang u64_stats_update_end(&stats->syncp); 2068fc72d1d5SJason Wang put_cpu_ptr(tun->pcpu_stats); 2069fc72d1d5SJason Wang 2070fc72d1d5SJason Wang return ret; 2071fc72d1d5SJason Wang } 2072fc72d1d5SJason Wang 20731da177e4SLinus Torvalds /* Put packet to the user space buffer */ 20746f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun, 207554f968d6SJason Wang struct tun_file *tfile, 20761da177e4SLinus Torvalds struct sk_buff *skb, 2077e0b46d0eSHerbert Xu struct iov_iter *iter) 20781da177e4SLinus Torvalds { 20791da177e4SLinus Torvalds struct tun_pi pi = { 0, skb->protocol }; 2080608b9977SPaolo Abeni struct tun_pcpu_stats *stats; 2081e0b46d0eSHerbert Xu ssize_t total; 20828c847d25SJason Wang int vlan_offset = 0; 2083a8f9bfdfSHerbert Xu int vlan_hlen = 0; 20842eb783c4SHerbert Xu int vnet_hdr_sz = 0; 2085a8f9bfdfSHerbert Xu 2086df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) 2087a8f9bfdfSHerbert Xu vlan_hlen = VLAN_HLEN; 20881da177e4SLinus Torvalds 208940630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR) 2090e1edab87SWillem de Bruijn vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 20911da177e4SLinus Torvalds 2092e0b46d0eSHerbert Xu total = skb->len + vlan_hlen + vnet_hdr_sz; 2093e0b46d0eSHerbert Xu 209440630b82SMichael S. 
Tsirkin if (!(tun->flags & IFF_NO_PI)) { 2095e0b46d0eSHerbert Xu if (iov_iter_count(iter) < sizeof(pi)) 20961da177e4SLinus Torvalds return -EINVAL; 20971da177e4SLinus Torvalds 2098e0b46d0eSHerbert Xu total += sizeof(pi); 2099e0b46d0eSHerbert Xu if (iov_iter_count(iter) < total) { 21001da177e4SLinus Torvalds /* Packet will be striped */ 21011da177e4SLinus Torvalds pi.flags |= TUN_PKT_STRIP; 21021da177e4SLinus Torvalds } 21031da177e4SLinus Torvalds 2104e0b46d0eSHerbert Xu if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi)) 21051da177e4SLinus Torvalds return -EFAULT; 21061da177e4SLinus Torvalds } 21071da177e4SLinus Torvalds 21082eb783c4SHerbert Xu if (vnet_hdr_sz) { 21099403cd7cSJarno Rajahalme struct virtio_net_hdr gso; 211034166093SMike Rapoport 2111e0b46d0eSHerbert Xu if (iov_iter_count(iter) < vnet_hdr_sz) 2112f43798c2SRusty Russell return -EINVAL; 2113f43798c2SRusty Russell 21143e9e40e7SJarno Rajahalme if (virtio_net_hdr_from_skb(skb, &gso, 2115fd3a8862SWillem de Bruijn tun_is_little_endian(tun), true, 2116fd3a8862SWillem de Bruijn vlan_hlen)) { 2117f43798c2SRusty Russell struct skb_shared_info *sinfo = skb_shinfo(skb); 21186b8a66eeSJoe Perches pr_err("unexpected GSO type: " 2119ef3db4a5SMichael S. Tsirkin "0x%x, gso_size %d, hdr_len %d\n", 212056f0dcc5SMichael S. Tsirkin sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), 212156f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.hdr_len)); 2122ef3db4a5SMichael S. Tsirkin print_hex_dump(KERN_ERR, "tun: ", 2123ef3db4a5SMichael S. Tsirkin DUMP_PREFIX_NONE, 2124ef3db4a5SMichael S. Tsirkin 16, 1, skb->head, 212556f0dcc5SMichael S. Tsirkin min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); 2126ef3db4a5SMichael S. Tsirkin WARN_ON_ONCE(1); 2127ef3db4a5SMichael S. Tsirkin return -EINVAL; 2128ef3db4a5SMichael S. 
Tsirkin } 2129f43798c2SRusty Russell 2130e0b46d0eSHerbert Xu if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) 2131f43798c2SRusty Russell return -EFAULT; 21328c847d25SJason Wang 21338c847d25SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 2134f43798c2SRusty Russell } 2135f43798c2SRusty Russell 2136a8f9bfdfSHerbert Xu if (vlan_hlen) { 2137e0b46d0eSHerbert Xu int ret; 2138aff3d70aSJason Wang struct veth veth; 21391da177e4SLinus Torvalds 21406680ec68SJason Wang veth.h_vlan_proto = skb->vlan_proto; 2141df8a39deSJiri Pirko veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); 21421da177e4SLinus Torvalds 21436680ec68SJason Wang vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 21446680ec68SJason Wang 2145e0b46d0eSHerbert Xu ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); 2146e0b46d0eSHerbert Xu if (ret || !iov_iter_count(iter)) 21476680ec68SJason Wang goto done; 21486680ec68SJason Wang 2149e0b46d0eSHerbert Xu ret = copy_to_iter(&veth, sizeof(veth), iter); 2150e0b46d0eSHerbert Xu if (ret != sizeof(veth) || !iov_iter_count(iter)) 21516680ec68SJason Wang goto done; 21526680ec68SJason Wang } 21536680ec68SJason Wang 2154e0b46d0eSHerbert Xu skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); 21556680ec68SJason Wang 21566680ec68SJason Wang done: 2157608b9977SPaolo Abeni /* caller is in process context, */ 2158608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats); 2159608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp); 2160608b9977SPaolo Abeni stats->tx_packets++; 2161608b9977SPaolo Abeni stats->tx_bytes += skb->len + vlan_hlen; 2162608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp); 2163608b9977SPaolo Abeni put_cpu_ptr(tun->pcpu_stats); 21641da177e4SLinus Torvalds 21651da177e4SLinus Torvalds return total; 21661da177e4SLinus Torvalds } 21671da177e4SLinus Torvalds 2168fc72d1d5SJason Wang static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) 21691576d986SJason Wang { 21701576d986SJason Wang DECLARE_WAITQUEUE(wait, current); 2171fc72d1d5SJason Wang void *ptr = NULL; 2172f48cc6b2SJason Wang int error = 0; 21731576d986SJason Wang 2174fc72d1d5SJason Wang ptr = ptr_ring_consume(&tfile->tx_ring); 2175fc72d1d5SJason Wang if (ptr) 21761576d986SJason Wang goto out; 21771576d986SJason Wang if (noblock) { 2178f48cc6b2SJason Wang error = -EAGAIN; 21791576d986SJason Wang goto out; 21801576d986SJason Wang } 21811576d986SJason Wang 21821576d986SJason Wang add_wait_queue(&tfile->wq.wait, &wait); 21831576d986SJason Wang 21841576d986SJason Wang while (1) { 218571828b22STimur Celik set_current_state(TASK_INTERRUPTIBLE); 2186fc72d1d5SJason Wang ptr = ptr_ring_consume(&tfile->tx_ring); 2187fc72d1d5SJason Wang if (ptr) 21881576d986SJason Wang break; 21891576d986SJason Wang if (signal_pending(current)) { 2190f48cc6b2SJason Wang error = -ERESTARTSYS; 21911576d986SJason Wang break; 21921576d986SJason Wang } 21931576d986SJason Wang if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { 2194f48cc6b2SJason Wang error = -EFAULT; 21951576d986SJason Wang break; 21961576d986SJason Wang } 21971576d986SJason Wang 21981576d986SJason Wang schedule(); 21991576d986SJason Wang } 22001576d986SJason Wang 2201ecef67cbSTimur Celik __set_current_state(TASK_RUNNING); 22021576d986SJason Wang remove_wait_queue(&tfile->wq.wait, &wait); 22031576d986SJason Wang 22041576d986SJason Wang out: 2205f48cc6b2SJason Wang *err = error; 2206fc72d1d5SJason Wang return ptr; 22071576d986SJason Wang } 22081576d986SJason Wang 220954f968d6SJason Wang static ssize_t tun_do_read(struct 
tun_struct *tun, struct tun_file *tfile, 22109b067034SAl Viro struct iov_iter *to, 2211fc72d1d5SJason Wang int noblock, void *ptr) 22121da177e4SLinus Torvalds { 22139b067034SAl Viro ssize_t ret; 22141576d986SJason Wang int err; 22151da177e4SLinus Torvalds 22163872baf6SRami Rosen tun_debug(KERN_INFO, tun, "tun_do_read\n"); 22171da177e4SLinus Torvalds 2218c33ee15bSWei Xu if (!iov_iter_count(to)) { 2219fc72d1d5SJason Wang tun_ptr_free(ptr); 22209b067034SAl Viro return 0; 2221c33ee15bSWei Xu } 22221da177e4SLinus Torvalds 2223fc72d1d5SJason Wang if (!ptr) { 22241576d986SJason Wang /* Read frames from ring */ 2225fc72d1d5SJason Wang ptr = tun_ring_recv(tfile, noblock, &err); 2226fc72d1d5SJason Wang if (!ptr) 2227957f094fSAlex Gartrell return err; 2228ac77cfd4SJason Wang } 2229e0b46d0eSHerbert Xu 22301ffcbc85SJesper Dangaard Brouer if (tun_is_xdp_frame(ptr)) { 22311ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); 2232fc72d1d5SJason Wang 22331ffcbc85SJesper Dangaard Brouer ret = tun_put_user_xdp(tun, tfile, xdpf, to); 223403993094SJesper Dangaard Brouer xdp_return_frame(xdpf); 2235fc72d1d5SJason Wang } else { 2236fc72d1d5SJason Wang struct sk_buff *skb = ptr; 2237fc72d1d5SJason Wang 22389b067034SAl Viro ret = tun_put_user(tun, tfile, skb, to); 2239f51a5e82SJason Wang if (unlikely(ret < 0)) 22401da177e4SLinus Torvalds kfree_skb(skb); 2241f51a5e82SJason Wang else 2242f51a5e82SJason Wang consume_skb(skb); 2243fc72d1d5SJason Wang } 22441da177e4SLinus Torvalds 224505c2828cSMichael S. Tsirkin return ret; 224605c2828cSMichael S. Tsirkin } 224705c2828cSMichael S. Tsirkin 22489b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) 224905c2828cSMichael S. Tsirkin { 225005c2828cSMichael S. Tsirkin struct file *file = iocb->ki_filp; 225105c2828cSMichael S. Tsirkin struct tun_file *tfile = file->private_data; 22529484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 22539b067034SAl Viro ssize_t len = iov_iter_count(to), ret; 225405c2828cSMichael S. Tsirkin 225505c2828cSMichael S. Tsirkin if (!tun) 225605c2828cSMichael S. Tsirkin return -EBADFD; 2257ac77cfd4SJason Wang ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL); 225842404c09SDavid S. Miller ret = min_t(ssize_t, ret, len); 2259d0b7da8aSZhi Yong Wu if (ret > 0) 2260d0b7da8aSZhi Yong Wu iocb->ki_pos = ret; 2261631ab46bSEric W. 
Biederman tun_put(tun); 22621da177e4SLinus Torvalds return ret; 22631da177e4SLinus Torvalds } 22641da177e4SLinus Torvalds 2265cd5681d7SJason Wang static void tun_prog_free(struct rcu_head *rcu) 226696f84061SJason Wang { 2267cd5681d7SJason Wang struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu); 226896f84061SJason Wang 226996f84061SJason Wang bpf_prog_destroy(prog->prog); 227096f84061SJason Wang kfree(prog); 227196f84061SJason Wang } 227296f84061SJason Wang 22739d6474e4SJason Wang static int __tun_set_ebpf(struct tun_struct *tun, 22749d6474e4SJason Wang struct tun_prog __rcu **prog_p, 227596f84061SJason Wang struct bpf_prog *prog) 227696f84061SJason Wang { 2277cd5681d7SJason Wang struct tun_prog *old, *new = NULL; 227896f84061SJason Wang 227996f84061SJason Wang if (prog) { 228096f84061SJason Wang new = kmalloc(sizeof(*new), GFP_KERNEL); 228196f84061SJason Wang if (!new) 228296f84061SJason Wang return -ENOMEM; 228396f84061SJason Wang new->prog = prog; 228496f84061SJason Wang } 228596f84061SJason Wang 2286124da8f6SJason Wang spin_lock_bh(&tun->lock); 2287cd5681d7SJason Wang old = rcu_dereference_protected(*prog_p, 2288124da8f6SJason Wang lockdep_is_held(&tun->lock)); 2289cd5681d7SJason Wang rcu_assign_pointer(*prog_p, new); 2290124da8f6SJason Wang spin_unlock_bh(&tun->lock); 229196f84061SJason Wang 229296f84061SJason Wang if (old) 2293cd5681d7SJason Wang call_rcu(&old->rcu, tun_prog_free); 229496f84061SJason Wang 229596f84061SJason Wang return 0; 229696f84061SJason Wang } 229796f84061SJason Wang 229896442e42SJason Wang static void tun_free_netdev(struct net_device *dev) 229996442e42SJason Wang { 230096442e42SJason Wang struct tun_struct *tun = netdev_priv(dev); 230196442e42SJason Wang 23024008e97fSJason Wang BUG_ON(!(list_empty(&tun->disabled))); 2303608b9977SPaolo Abeni free_percpu(tun->pcpu_stats); 230496442e42SJason Wang tun_flow_uninit(tun); 23055dbbaf2dSPaul Moore security_tun_dev_free_security(tun->security); 2306cd5681d7SJason Wang __tun_set_ebpf(tun, &tun->steering_prog, NULL); 2307aff3d70aSJason Wang __tun_set_ebpf(tun, &tun->filter_prog, NULL); 230896442e42SJason Wang } 230996442e42SJason Wang 23101da177e4SLinus Torvalds static void tun_setup(struct net_device *dev) 23111da177e4SLinus Torvalds { 23121da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 23131da177e4SLinus Torvalds 23140625c883SEric W. Biederman tun->owner = INVALID_UID; 23150625c883SEric W. Biederman tun->group = INVALID_GID; 23164e24f2ddSChas Williams tun_default_link_ksettings(dev, &tun->link_ksettings); 23171da177e4SLinus Torvalds 23181da177e4SLinus Torvalds dev->ethtool_ops = &tun_ethtool_ops; 2319cf124db5SDavid S. Miller dev->needs_free_netdev = true; 2320cf124db5SDavid S. Miller dev->priv_destructor = tun_free_netdev; 2321016adb72SJason Wang /* We prefer our own queue length */ 2322016adb72SJason Wang dev->tx_queue_len = TUN_READQ_SIZE; 23231da177e4SLinus Torvalds } 23241da177e4SLinus Torvalds 2325f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting tun or tap 2326f019a7a5SEric W. Biederman * device with netlink. 2327f019a7a5SEric W. Biederman */ 2328a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[], 2329a8b8a889SMatthias Schiffer struct netlink_ext_ack *extack) 2330f019a7a5SEric W. Biederman { 233135b827b6SNicolas Dichtel NL_SET_ERR_MSG(extack, 233235b827b6SNicolas Dichtel "tun/tap creation via rtnetlink is not supported."); 233335b827b6SNicolas Dichtel return -EOPNOTSUPP; 2334f019a7a5SEric W. 
Biederman } 2335f019a7a5SEric W. Biederman 23361ec010e7SSabrina Dubroca static size_t tun_get_size(const struct net_device *dev) 23371ec010e7SSabrina Dubroca { 23381ec010e7SSabrina Dubroca BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t)); 23391ec010e7SSabrina Dubroca BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t)); 23401ec010e7SSabrina Dubroca 23411ec010e7SSabrina Dubroca return nla_total_size(sizeof(uid_t)) + /* OWNER */ 23421ec010e7SSabrina Dubroca nla_total_size(sizeof(gid_t)) + /* GROUP */ 23431ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* TYPE */ 23441ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* PI */ 23451ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* VNET_HDR */ 23461ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* PERSIST */ 23471ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */ 23481ec010e7SSabrina Dubroca nla_total_size(sizeof(u32)) + /* NUM_QUEUES */ 23491ec010e7SSabrina Dubroca nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */ 23501ec010e7SSabrina Dubroca 0; 23511ec010e7SSabrina Dubroca } 23521ec010e7SSabrina Dubroca 23531ec010e7SSabrina Dubroca static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev) 23541ec010e7SSabrina Dubroca { 23551ec010e7SSabrina Dubroca struct tun_struct *tun = netdev_priv(dev); 23561ec010e7SSabrina Dubroca 23571ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK)) 23581ec010e7SSabrina Dubroca goto nla_put_failure; 23591ec010e7SSabrina Dubroca if (uid_valid(tun->owner) && 23601ec010e7SSabrina Dubroca nla_put_u32(skb, IFLA_TUN_OWNER, 23611ec010e7SSabrina Dubroca from_kuid_munged(current_user_ns(), tun->owner))) 23621ec010e7SSabrina Dubroca goto nla_put_failure; 23631ec010e7SSabrina Dubroca if (gid_valid(tun->group) && 23641ec010e7SSabrina Dubroca nla_put_u32(skb, IFLA_TUN_GROUP, 23651ec010e7SSabrina Dubroca from_kgid_munged(current_user_ns(), tun->group))) 23661ec010e7SSabrina Dubroca goto nla_put_failure; 23671ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI))) 23681ec010e7SSabrina Dubroca goto nla_put_failure; 23691ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR))) 23701ec010e7SSabrina Dubroca goto nla_put_failure; 23711ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST))) 23721ec010e7SSabrina Dubroca goto nla_put_failure; 23731ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE, 23741ec010e7SSabrina Dubroca !!(tun->flags & IFF_MULTI_QUEUE))) 23751ec010e7SSabrina Dubroca goto nla_put_failure; 23761ec010e7SSabrina Dubroca if (tun->flags & IFF_MULTI_QUEUE) { 23771ec010e7SSabrina Dubroca if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues)) 23781ec010e7SSabrina Dubroca goto nla_put_failure; 23791ec010e7SSabrina Dubroca if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES, 23801ec010e7SSabrina Dubroca tun->numdisabled)) 23811ec010e7SSabrina Dubroca goto nla_put_failure; 23821ec010e7SSabrina Dubroca } 23831ec010e7SSabrina Dubroca 23841ec010e7SSabrina Dubroca return 0; 23851ec010e7SSabrina Dubroca 23861ec010e7SSabrina Dubroca nla_put_failure: 23871ec010e7SSabrina Dubroca return -EMSGSIZE; 23881ec010e7SSabrina Dubroca } 23891ec010e7SSabrina Dubroca 2390f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = { 2391f019a7a5SEric W. Biederman .kind = DRV_NAME, 2392f019a7a5SEric W. Biederman .priv_size = sizeof(struct tun_struct), 2393f019a7a5SEric W. Biederman .setup = tun_setup, 2394f019a7a5SEric W. 
Biederman .validate = tun_validate, 23951ec010e7SSabrina Dubroca .get_size = tun_get_size, 23961ec010e7SSabrina Dubroca .fill_info = tun_fill_info, 2397f019a7a5SEric W. Biederman }; 2398f019a7a5SEric W. Biederman 239933dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk) 240033dccbb0SHerbert Xu { 240154f968d6SJason Wang struct tun_file *tfile; 240243815482SEric Dumazet wait_queue_head_t *wqueue; 240333dccbb0SHerbert Xu 240433dccbb0SHerbert Xu if (!sock_writeable(sk)) 240533dccbb0SHerbert Xu return; 240633dccbb0SHerbert Xu 24079cd3e072SEric Dumazet if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) 240833dccbb0SHerbert Xu return; 240933dccbb0SHerbert Xu 241043815482SEric Dumazet wqueue = sk_sleep(sk); 241143815482SEric Dumazet if (wqueue && waitqueue_active(wqueue)) 2412a9a08845SLinus Torvalds wake_up_interruptible_sync_poll(wqueue, EPOLLOUT | 2413a9a08845SLinus Torvalds EPOLLWRNORM | EPOLLWRBAND); 2414c722c625SHerbert Xu 241554f968d6SJason Wang tfile = container_of(sk, struct tun_file, sk); 241654f968d6SJason Wang kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); 241733dccbb0SHerbert Xu } 241833dccbb0SHerbert Xu 2419f9e06c45SJason Wang static void tun_put_page(struct tun_page *tpage) 2420f9e06c45SJason Wang { 2421f9e06c45SJason Wang if (tpage->page) 2422f9e06c45SJason Wang __page_frag_cache_drain(tpage->page, tpage->count); 2423f9e06c45SJason Wang } 2424f9e06c45SJason Wang 2425043d222fSJason Wang static int tun_xdp_one(struct tun_struct *tun, 2426043d222fSJason Wang struct tun_file *tfile, 2427f9e06c45SJason Wang struct xdp_buff *xdp, int *flush, 2428f9e06c45SJason Wang struct tun_page *tpage) 2429043d222fSJason Wang { 24304e4b08e5SPrashant Bhole unsigned int datasize = xdp->data_end - xdp->data; 2431043d222fSJason Wang struct tun_xdp_hdr *hdr = xdp->data_hard_start; 2432043d222fSJason Wang struct virtio_net_hdr *gso = &hdr->gso; 2433043d222fSJason Wang struct tun_pcpu_stats *stats; 2434043d222fSJason Wang struct bpf_prog *xdp_prog; 2435043d222fSJason Wang struct sk_buff *skb = NULL; 2436043d222fSJason Wang u32 rxhash = 0, act; 2437043d222fSJason Wang int buflen = hdr->buflen; 2438043d222fSJason Wang int err = 0; 2439043d222fSJason Wang bool skb_xdp = false; 2440f9e06c45SJason Wang struct page *page; 2441043d222fSJason Wang 2442043d222fSJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 2443043d222fSJason Wang if (xdp_prog) { 2444043d222fSJason Wang if (gso->gso_type) { 2445043d222fSJason Wang skb_xdp = true; 2446043d222fSJason Wang goto build; 2447043d222fSJason Wang } 2448043d222fSJason Wang xdp_set_data_meta_invalid(xdp); 2449043d222fSJason Wang xdp->rxq = &tfile->xdp_rxq; 2450043d222fSJason Wang 2451043d222fSJason Wang act = bpf_prog_run_xdp(xdp_prog, xdp); 2452043d222fSJason Wang err = tun_xdp_act(tun, xdp_prog, xdp, act); 2453043d222fSJason Wang if (err < 0) { 2454043d222fSJason Wang put_page(virt_to_head_page(xdp->data)); 2455043d222fSJason Wang return err; 2456043d222fSJason Wang } 2457043d222fSJason Wang 2458043d222fSJason Wang switch (err) { 2459043d222fSJason Wang case XDP_REDIRECT: 2460043d222fSJason Wang *flush = true; 2461043d222fSJason Wang /* fall through */ 2462043d222fSJason Wang case XDP_TX: 2463043d222fSJason Wang return 0; 2464043d222fSJason Wang case XDP_PASS: 2465043d222fSJason Wang break; 2466043d222fSJason Wang default: 2467f9e06c45SJason Wang page = virt_to_head_page(xdp->data); 2468f9e06c45SJason Wang if (tpage->page == page) { 2469f9e06c45SJason Wang ++tpage->count; 2470f9e06c45SJason Wang } else { 2471f9e06c45SJason Wang 
tun_put_page(tpage); 2472f9e06c45SJason Wang tpage->page = page; 2473f9e06c45SJason Wang tpage->count = 1; 2474f9e06c45SJason Wang } 2475043d222fSJason Wang return 0; 2476043d222fSJason Wang } 2477043d222fSJason Wang } 2478043d222fSJason Wang 2479043d222fSJason Wang build: 2480043d222fSJason Wang skb = build_skb(xdp->data_hard_start, buflen); 2481043d222fSJason Wang if (!skb) { 2482043d222fSJason Wang err = -ENOMEM; 2483043d222fSJason Wang goto out; 2484043d222fSJason Wang } 2485043d222fSJason Wang 2486043d222fSJason Wang skb_reserve(skb, xdp->data - xdp->data_hard_start); 2487043d222fSJason Wang skb_put(skb, xdp->data_end - xdp->data); 2488043d222fSJason Wang 2489043d222fSJason Wang if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) { 2490043d222fSJason Wang this_cpu_inc(tun->pcpu_stats->rx_frame_errors); 2491043d222fSJason Wang kfree_skb(skb); 2492043d222fSJason Wang err = -EINVAL; 2493043d222fSJason Wang goto out; 2494043d222fSJason Wang } 2495043d222fSJason Wang 2496043d222fSJason Wang skb->protocol = eth_type_trans(skb, tun->dev); 2497043d222fSJason Wang skb_reset_network_header(skb); 2498d2aa125dSMaxim Mikityanskiy skb_probe_transport_header(skb); 2499043d222fSJason Wang 2500043d222fSJason Wang if (skb_xdp) { 2501043d222fSJason Wang err = do_xdp_generic(xdp_prog, skb); 2502043d222fSJason Wang if (err != XDP_PASS) 2503043d222fSJason Wang goto out; 2504043d222fSJason Wang } 2505043d222fSJason Wang 2506f29eb2a9SPaolo Abeni if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 && 2507f29eb2a9SPaolo Abeni !tfile->detached) 2508043d222fSJason Wang rxhash = __skb_get_hash_symmetric(skb); 2509043d222fSJason Wang 25108ebebcbaSMatthew Cover skb_record_rx_queue(skb, tfile->queue_index); 2511043d222fSJason Wang netif_receive_skb(skb); 2512043d222fSJason Wang 25136342ca64SPrashant Bhole /* No need for get_cpu_ptr() here since this function is 25146342ca64SPrashant Bhole * always called with bh disabled 25156342ca64SPrashant Bhole */ 25166342ca64SPrashant Bhole stats = this_cpu_ptr(tun->pcpu_stats); 2517043d222fSJason Wang u64_stats_update_begin(&stats->syncp); 2518043d222fSJason Wang stats->rx_packets++; 25194e4b08e5SPrashant Bhole stats->rx_bytes += datasize; 2520043d222fSJason Wang u64_stats_update_end(&stats->syncp); 2521043d222fSJason Wang 2522043d222fSJason Wang if (rxhash) 2523043d222fSJason Wang tun_flow_update(tun, rxhash, tfile); 2524043d222fSJason Wang 2525043d222fSJason Wang out: 2526043d222fSJason Wang return err; 2527043d222fSJason Wang } 2528043d222fSJason Wang 25291b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) 253005c2828cSMichael S. Tsirkin { 2531043d222fSJason Wang int ret, i; 253254f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 25339484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 2534fe8dd45bSJason Wang struct tun_msg_ctl *ctl = m->msg_control; 2535043d222fSJason Wang struct xdp_buff *xdp; 253654f968d6SJason Wang 253754f968d6SJason Wang if (!tun) 253854f968d6SJason Wang return -EBADFD; 2539f5ff53b4SAl Viro 2540043d222fSJason Wang if (ctl && (ctl->type == TUN_MSG_PTR)) { 25416f0271d9SDavid S. Miller struct tun_page tpage; 2542043d222fSJason Wang int n = ctl->num; 2543043d222fSJason Wang int flush = 0; 2544043d222fSJason Wang 25456f0271d9SDavid S. Miller memset(&tpage, 0, sizeof(tpage)); 25466f0271d9SDavid S. 
Miller 2547043d222fSJason Wang local_bh_disable(); 2548043d222fSJason Wang rcu_read_lock(); 2549043d222fSJason Wang 2550043d222fSJason Wang for (i = 0; i < n; i++) { 2551043d222fSJason Wang xdp = &((struct xdp_buff *)ctl->ptr)[i]; 2552f9e06c45SJason Wang tun_xdp_one(tun, tfile, xdp, &flush, &tpage); 2553043d222fSJason Wang } 2554043d222fSJason Wang 2555043d222fSJason Wang if (flush) 2556043d222fSJason Wang xdp_do_flush_map(); 2557043d222fSJason Wang 2558043d222fSJason Wang rcu_read_unlock(); 2559043d222fSJason Wang local_bh_enable(); 2560043d222fSJason Wang 2561f9e06c45SJason Wang tun_put_page(&tpage); 2562f9e06c45SJason Wang 2563043d222fSJason Wang ret = total_len; 2564043d222fSJason Wang goto out; 2565043d222fSJason Wang } 2566fe8dd45bSJason Wang 2567fe8dd45bSJason Wang ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter, 25685503fcecSJason Wang m->msg_flags & MSG_DONTWAIT, 25695503fcecSJason Wang m->msg_flags & MSG_MORE); 2570043d222fSJason Wang out: 257154f968d6SJason Wang tun_put(tun); 257254f968d6SJason Wang return ret; 257305c2828cSMichael S. Tsirkin } 257405c2828cSMichael S. Tsirkin 25751b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, 257605c2828cSMichael S. Tsirkin int flags) 257705c2828cSMichael S. Tsirkin { 257854f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 25799484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 2580fc72d1d5SJason Wang void *ptr = m->msg_control; 258105c2828cSMichael S. Tsirkin int ret; 258254f968d6SJason Wang 2583c33ee15bSWei Xu if (!tun) { 2584c33ee15bSWei Xu ret = -EBADFD; 2585fc72d1d5SJason Wang goto out_free; 2586c33ee15bSWei Xu } 258754f968d6SJason Wang 2588eda29772SRichard Cochran if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { 25893811ae76SGao feng ret = -EINVAL; 2590c33ee15bSWei Xu goto out_put_tun; 25913811ae76SGao feng } 2592eda29772SRichard Cochran if (flags & MSG_ERRQUEUE) { 2593eda29772SRichard Cochran ret = sock_recv_errqueue(sock->sk, m, total_len, 2594eda29772SRichard Cochran SOL_PACKET, TUN_TX_TIMESTAMP); 2595eda29772SRichard Cochran goto out; 2596eda29772SRichard Cochran } 2597fc72d1d5SJason Wang ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr); 259887897931SAlex Gartrell if (ret > (ssize_t)total_len) { 259942404c09SDavid S. Miller m->msg_flags |= MSG_TRUNC; 260042404c09SDavid S. Miller ret = flags & MSG_TRUNC ? ret : total_len; 260142404c09SDavid S. Miller } 26023811ae76SGao feng out: 260354f968d6SJason Wang tun_put(tun); 260405c2828cSMichael S. Tsirkin return ret; 2605c33ee15bSWei Xu 2606c33ee15bSWei Xu out_put_tun: 2607c33ee15bSWei Xu tun_put(tun); 2608fc72d1d5SJason Wang out_free: 2609fc72d1d5SJason Wang tun_ptr_free(ptr); 2610c33ee15bSWei Xu return ret; 261105c2828cSMichael S. Tsirkin } 261205c2828cSMichael S. 
Tsirkin 2613fc72d1d5SJason Wang static int tun_ptr_peek_len(void *ptr) 2614fc72d1d5SJason Wang { 2615fc72d1d5SJason Wang if (likely(ptr)) { 26161ffcbc85SJesper Dangaard Brouer if (tun_is_xdp_frame(ptr)) { 26171ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); 2618fc72d1d5SJason Wang 26191ffcbc85SJesper Dangaard Brouer return xdpf->len; 2620fc72d1d5SJason Wang } 2621fc72d1d5SJason Wang return __skb_array_len_with_tag(ptr); 2622fc72d1d5SJason Wang } else { 2623fc72d1d5SJason Wang return 0; 2624fc72d1d5SJason Wang } 2625fc72d1d5SJason Wang } 2626fc72d1d5SJason Wang 26271576d986SJason Wang static int tun_peek_len(struct socket *sock) 26281576d986SJason Wang { 26291576d986SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 26301576d986SJason Wang struct tun_struct *tun; 26311576d986SJason Wang int ret = 0; 26321576d986SJason Wang 26339484dc74Syuan linyu tun = tun_get(tfile); 26341576d986SJason Wang if (!tun) 26351576d986SJason Wang return 0; 26361576d986SJason Wang 2637fc72d1d5SJason Wang ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len); 26381576d986SJason Wang tun_put(tun); 26391576d986SJason Wang 26401576d986SJason Wang return ret; 26411576d986SJason Wang } 26421576d986SJason Wang 264305c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */ 264405c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = { 26451576d986SJason Wang .peek_len = tun_peek_len, 264605c2828cSMichael S. Tsirkin .sendmsg = tun_sendmsg, 264705c2828cSMichael S. Tsirkin .recvmsg = tun_recvmsg, 264805c2828cSMichael S. Tsirkin }; 264905c2828cSMichael S. Tsirkin 265033dccbb0SHerbert Xu static struct proto tun_proto = { 265133dccbb0SHerbert Xu .name = "tun", 265233dccbb0SHerbert Xu .owner = THIS_MODULE, 265354f968d6SJason Wang .obj_size = sizeof(struct tun_file), 265433dccbb0SHerbert Xu }; 2655f019a7a5SEric W. Biederman 2656980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun) 2657980c9e8cSDavid Woodhouse { 2658031f5e03SMichael S. Tsirkin return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); 2659980c9e8cSDavid Woodhouse } 2660980c9e8cSDavid Woodhouse 2661980c9e8cSDavid Woodhouse static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr, 2662980c9e8cSDavid Woodhouse char *buf) 2663980c9e8cSDavid Woodhouse { 2664980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2665980c9e8cSDavid Woodhouse return sprintf(buf, "0x%x\n", tun_flags(tun)); 2666980c9e8cSDavid Woodhouse } 2667980c9e8cSDavid Woodhouse 2668980c9e8cSDavid Woodhouse static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, 2669980c9e8cSDavid Woodhouse char *buf) 2670980c9e8cSDavid Woodhouse { 2671980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 26720625c883SEric W. Biederman return uid_valid(tun->owner)? 26730625c883SEric W. Biederman sprintf(buf, "%u\n", 26740625c883SEric W. Biederman from_kuid_munged(current_user_ns(), tun->owner)): 26750625c883SEric W. Biederman sprintf(buf, "-1\n"); 2676980c9e8cSDavid Woodhouse } 2677980c9e8cSDavid Woodhouse 2678980c9e8cSDavid Woodhouse static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, 2679980c9e8cSDavid Woodhouse char *buf) 2680980c9e8cSDavid Woodhouse { 2681980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 26820625c883SEric W. Biederman return gid_valid(tun->group) ? 26830625c883SEric W. 
Biederman sprintf(buf, "%u\n", 26840625c883SEric W. Biederman from_kgid_munged(current_user_ns(), tun->group)): 26850625c883SEric W. Biederman sprintf(buf, "-1\n"); 2686980c9e8cSDavid Woodhouse } 2687980c9e8cSDavid Woodhouse 2688980c9e8cSDavid Woodhouse static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); 2689980c9e8cSDavid Woodhouse static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL); 2690980c9e8cSDavid Woodhouse static DEVICE_ATTR(group, 0444, tun_show_group, NULL); 2691980c9e8cSDavid Woodhouse 2692c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = { 2693c4d33e24STakashi Iwai &dev_attr_tun_flags.attr, 2694c4d33e24STakashi Iwai &dev_attr_owner.attr, 2695c4d33e24STakashi Iwai &dev_attr_group.attr, 2696c4d33e24STakashi Iwai NULL 2697c4d33e24STakashi Iwai }; 2698c4d33e24STakashi Iwai 2699c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = { 2700c4d33e24STakashi Iwai .attrs = tun_dev_attrs 2701c4d33e24STakashi Iwai }; 2702c4d33e24STakashi Iwai 2703d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 27041da177e4SLinus Torvalds { 27051da177e4SLinus Torvalds struct tun_struct *tun; 270654f968d6SJason Wang struct tun_file *tfile = file->private_data; 27071da177e4SLinus Torvalds struct net_device *dev; 27081da177e4SLinus Torvalds int err; 27091da177e4SLinus Torvalds 27107c0c3b1aSJason Wang if (tfile->detached) 27117c0c3b1aSJason Wang return -EINVAL; 27127c0c3b1aSJason Wang 271390e33d45SPetar Penkov if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { 271490e33d45SPetar Penkov if (!capable(CAP_NET_ADMIN)) 271590e33d45SPetar Penkov return -EPERM; 271690e33d45SPetar Penkov 271790e33d45SPetar Penkov if (!(ifr->ifr_flags & IFF_NAPI) || 271890e33d45SPetar Penkov (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) 271990e33d45SPetar Penkov return -EINVAL; 272090e33d45SPetar Penkov } 272190e33d45SPetar Penkov 272274a3e5a7SEric W. Biederman dev = __dev_get_by_name(net, ifr->ifr_name); 272374a3e5a7SEric W. Biederman if (dev) { 2724f85ba780SDavid Woodhouse if (ifr->ifr_flags & IFF_TUN_EXCL) 2725f85ba780SDavid Woodhouse return -EBUSY; 272674a3e5a7SEric W. Biederman if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) 272774a3e5a7SEric W. Biederman tun = netdev_priv(dev); 272874a3e5a7SEric W. Biederman else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) 272974a3e5a7SEric W. Biederman tun = netdev_priv(dev); 273074a3e5a7SEric W. Biederman else 273174a3e5a7SEric W. Biederman return -EINVAL; 273274a3e5a7SEric W. Biederman 27338e6d91aeSJason Wang if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != 273440630b82SMichael S. Tsirkin !!(tun->flags & IFF_MULTI_QUEUE)) 27358e6d91aeSJason Wang return -EINVAL; 27368e6d91aeSJason Wang 2737cde8b15fSJason Wang if (tun_not_capable(tun)) 27382b980dbdSPaul Moore return -EPERM; 27395dbbaf2dSPaul Moore err = security_tun_dev_open(tun->security); 27402b980dbdSPaul Moore if (err < 0) 27412b980dbdSPaul Moore return err; 27422b980dbdSPaul Moore 274394317099SPetar Penkov err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, 2744af3fb24eSEric Dumazet ifr->ifr_flags & IFF_NAPI, 2745af3fb24eSEric Dumazet ifr->ifr_flags & IFF_NAPI_FRAGS); 2746a7385ba2SEric W. Biederman if (err < 0) 2747a7385ba2SEric W. Biederman return err; 27484008e97fSJason Wang 274940630b82SMichael S. 
Tsirkin if (tun->flags & IFF_MULTI_QUEUE && 2750e8dbad66SJason Wang (tun->numqueues + tun->numdisabled > 1)) { 2751e8dbad66SJason Wang /* One or more queue has already been attached, no need 2752e8dbad66SJason Wang * to initialize the device again. 2753e8dbad66SJason Wang */ 275483c1f36fSSabrina Dubroca netdev_state_change(dev); 2755e8dbad66SJason Wang return 0; 2756e8dbad66SJason Wang } 27579fffc5c6SSabrina Dubroca 27589fffc5c6SSabrina Dubroca tun->flags = (tun->flags & ~TUN_FEATURES) | 27599fffc5c6SSabrina Dubroca (ifr->ifr_flags & TUN_FEATURES); 276083c1f36fSSabrina Dubroca 276183c1f36fSSabrina Dubroca netdev_state_change(dev); 276283c1f36fSSabrina Dubroca } else { 27631da177e4SLinus Torvalds char *name; 27641da177e4SLinus Torvalds unsigned long flags = 0; 2765edfb6a14SJason Wang int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? 2766edfb6a14SJason Wang MAX_TAP_QUEUES : 1; 27671da177e4SLinus Torvalds 2768c260b772SEric W. Biederman if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2769ca6bb5d7SDavid Woodhouse return -EPERM; 27702b980dbdSPaul Moore err = security_tun_dev_create(); 27712b980dbdSPaul Moore if (err < 0) 27722b980dbdSPaul Moore return err; 2773ca6bb5d7SDavid Woodhouse 27741da177e4SLinus Torvalds /* Set dev type */ 27751da177e4SLinus Torvalds if (ifr->ifr_flags & IFF_TUN) { 27761da177e4SLinus Torvalds /* TUN device */ 277740630b82SMichael S. Tsirkin flags |= IFF_TUN; 27781da177e4SLinus Torvalds name = "tun%d"; 27791da177e4SLinus Torvalds } else if (ifr->ifr_flags & IFF_TAP) { 27801da177e4SLinus Torvalds /* TAP device */ 278140630b82SMichael S. Tsirkin flags |= IFF_TAP; 27821da177e4SLinus Torvalds name = "tap%d"; 27831da177e4SLinus Torvalds } else 278436989b90SKusanagi Kouichi return -EINVAL; 27851da177e4SLinus Torvalds 27861da177e4SLinus Torvalds if (*ifr->ifr_name) 27871da177e4SLinus Torvalds name = ifr->ifr_name; 27881da177e4SLinus Torvalds 2789c8d68e6bSJason Wang dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, 2790c835a677STom Gundersen NET_NAME_UNKNOWN, tun_setup, queues, 2791c835a677STom Gundersen queues); 2792edfb6a14SJason Wang 27931da177e4SLinus Torvalds if (!dev) 27941da177e4SLinus Torvalds return -ENOMEM; 27950ad646c8SCong Wang err = dev_get_valid_name(net, dev, name); 27965c25f65fSJulien Gomes if (err < 0) 27970ad646c8SCong Wang goto err_free_dev; 27981da177e4SLinus Torvalds 2799fc54c658SPavel Emelyanov dev_net_set(dev, net); 2800f019a7a5SEric W. Biederman dev->rtnl_link_ops = &tun_link_ops; 2801fb7589a1SPavel Emelyanov dev->ifindex = tfile->ifindex; 2802c4d33e24STakashi Iwai dev->sysfs_groups[0] = &tun_attr_group; 2803758e43b7SStephen Hemminger 28041da177e4SLinus Torvalds tun = netdev_priv(dev); 28051da177e4SLinus Torvalds tun->dev = dev; 28061da177e4SLinus Torvalds tun->flags = flags; 2807f271b2ccSMax Krasnyansky tun->txflt.count = 0; 2808d9d52b51SMichael S. 
Tsirkin tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); 28091da177e4SLinus Torvalds 2810eaea34b2SPaolo Abeni tun->align = NET_SKB_PAD; 281154f968d6SJason Wang tun->filter_attached = false; 281254f968d6SJason Wang tun->sndbuf = tfile->socket.sk->sk_sndbuf; 28135503fcecSJason Wang tun->rx_batched = 0; 281496f84061SJason Wang RCU_INIT_POINTER(tun->steering_prog, NULL); 281533dccbb0SHerbert Xu 2816608b9977SPaolo Abeni tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); 2817608b9977SPaolo Abeni if (!tun->pcpu_stats) { 2818608b9977SPaolo Abeni err = -ENOMEM; 2819608b9977SPaolo Abeni goto err_free_dev; 2820608b9977SPaolo Abeni } 2821608b9977SPaolo Abeni 282296442e42SJason Wang spin_lock_init(&tun->lock); 282396442e42SJason Wang 28245dbbaf2dSPaul Moore err = security_tun_dev_alloc_security(&tun->security); 28255dbbaf2dSPaul Moore if (err < 0) 2826608b9977SPaolo Abeni goto err_free_stat; 28272b980dbdSPaul Moore 28281da177e4SLinus Torvalds tun_net_init(dev); 2829944a1376SPavel Emelyanov tun_flow_init(tun); 283096442e42SJason Wang 283188255375SMichał Mirosław dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | 28326680ec68SJason Wang TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | 28336680ec68SJason Wang NETIF_F_HW_VLAN_STAG_TX; 28342a2bbf17SPaolo Abeni dev->features = dev->hw_features | NETIF_F_LLTX; 28356671b224SFernando Luis Vazquez Cao dev->vlan_features = dev->features & 28366671b224SFernando Luis Vazquez Cao ~(NETIF_F_HW_VLAN_CTAG_TX | 28376671b224SFernando Luis Vazquez Cao NETIF_F_HW_VLAN_STAG_TX); 283888255375SMichał Mirosław 28399fffc5c6SSabrina Dubroca tun->flags = (tun->flags & ~TUN_FEATURES) | 28409fffc5c6SSabrina Dubroca (ifr->ifr_flags & TUN_FEATURES); 28419fffc5c6SSabrina Dubroca 28424008e97fSJason Wang INIT_LIST_HEAD(&tun->disabled); 2843af3fb24eSEric Dumazet err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI, 2844af3fb24eSEric Dumazet ifr->ifr_flags & IFF_NAPI_FRAGS); 2845eb0fb363SJason Wang if (err < 0) 2846662ca437SJason Wang goto err_free_flow; 2847eb0fb363SJason Wang 28481da177e4SLinus Torvalds err = register_netdevice(tun->dev); 28491da177e4SLinus Torvalds if (err < 0) 2850662ca437SJason Wang goto err_detach; 2851af668b3cSMichael S. Tsirkin } 2852980c9e8cSDavid Woodhouse 2853eb0fb363SJason Wang netif_carrier_on(tun->dev); 28541da177e4SLinus Torvalds 28556b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_set_iff\n"); 28561da177e4SLinus Torvalds 2857e35259a9SMax Krasnyansky /* Make sure persistent devices do not get stuck in 2858e35259a9SMax Krasnyansky * xoff state. 
2859e35259a9SMax Krasnyansky */ 2860e35259a9SMax Krasnyansky if (netif_running(tun->dev)) 2861c8d68e6bSJason Wang netif_tx_wake_all_queues(tun->dev); 2862e35259a9SMax Krasnyansky 28631da177e4SLinus Torvalds strcpy(ifr->ifr_name, tun->dev->name); 28641da177e4SLinus Torvalds return 0; 28651da177e4SLinus Torvalds 2866662ca437SJason Wang err_detach: 2867662ca437SJason Wang tun_detach_all(dev); 2868ff244c6bSEric Dumazet /* register_netdevice() already called tun_free_netdev() */ 2869ff244c6bSEric Dumazet goto err_free_dev; 2870ff244c6bSEric Dumazet 2871662ca437SJason Wang err_free_flow: 2872662ca437SJason Wang tun_flow_uninit(tun); 2873662ca437SJason Wang security_tun_dev_free_security(tun->security); 2874608b9977SPaolo Abeni err_free_stat: 2875608b9977SPaolo Abeni free_percpu(tun->pcpu_stats); 28761da177e4SLinus Torvalds err_free_dev: 28771da177e4SLinus Torvalds free_netdev(dev); 28781da177e4SLinus Torvalds return err; 28791da177e4SLinus Torvalds } 28801da177e4SLinus Torvalds 288112132768SKirill Tkhai static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr) 2882e3b99556SMark McLoughlin { 28836b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_get_iff\n"); 2884e3b99556SMark McLoughlin 2885e3b99556SMark McLoughlin strcpy(ifr->ifr_name, tun->dev->name); 2886e3b99556SMark McLoughlin 2887980c9e8cSDavid Woodhouse ifr->ifr_flags = tun_flags(tun); 2888e3b99556SMark McLoughlin 2889e3b99556SMark McLoughlin } 2890e3b99556SMark McLoughlin 28915228ddc9SRusty Russell /* This is like a cut-down ethtool ops, except done via tun fd so no 28925228ddc9SRusty Russell * privs required. */ 289388255375SMichał Mirosław static int set_offload(struct tun_struct *tun, unsigned long arg) 28945228ddc9SRusty Russell { 2895c8f44affSMichał Mirosław netdev_features_t features = 0; 28965228ddc9SRusty Russell 28975228ddc9SRusty Russell if (arg & TUN_F_CSUM) { 289888255375SMichał Mirosław features |= NETIF_F_HW_CSUM; 28995228ddc9SRusty Russell arg &= ~TUN_F_CSUM; 29005228ddc9SRusty Russell 29015228ddc9SRusty Russell if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { 29025228ddc9SRusty Russell if (arg & TUN_F_TSO_ECN) { 29035228ddc9SRusty Russell features |= NETIF_F_TSO_ECN; 29045228ddc9SRusty Russell arg &= ~TUN_F_TSO_ECN; 29055228ddc9SRusty Russell } 29065228ddc9SRusty Russell if (arg & TUN_F_TSO4) 29075228ddc9SRusty Russell features |= NETIF_F_TSO; 29085228ddc9SRusty Russell if (arg & TUN_F_TSO6) 29095228ddc9SRusty Russell features |= NETIF_F_TSO6; 29105228ddc9SRusty Russell arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 29115228ddc9SRusty Russell } 29120c19f846SWillem de Bruijn 29130c19f846SWillem de Bruijn arg &= ~TUN_F_UFO; 29145228ddc9SRusty Russell } 29155228ddc9SRusty Russell 29165228ddc9SRusty Russell /* This gives the user a way to test for new features in future by 29175228ddc9SRusty Russell * trying to set them. 
*/ 29185228ddc9SRusty Russell if (arg) 29195228ddc9SRusty Russell return -EINVAL; 29205228ddc9SRusty Russell 292188255375SMichał Mirosław tun->set_features = features; 292209050957SYaroslav Isakov tun->dev->wanted_features &= ~TUN_USER_FEATURES; 292309050957SYaroslav Isakov tun->dev->wanted_features |= features; 292488255375SMichał Mirosław netdev_update_features(tun->dev); 29255228ddc9SRusty Russell 29265228ddc9SRusty Russell return 0; 29275228ddc9SRusty Russell } 29285228ddc9SRusty Russell 2929c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n) 2930c8d68e6bSJason Wang { 2931c8d68e6bSJason Wang int i; 2932c8d68e6bSJason Wang struct tun_file *tfile; 2933c8d68e6bSJason Wang 2934c8d68e6bSJason Wang for (i = 0; i < n; i++) { 2935b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 29368ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 29378ced425eSHannes Frederic Sowa sk_detach_filter(tfile->socket.sk); 29388ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2939c8d68e6bSJason Wang } 2940c8d68e6bSJason Wang 2941c8d68e6bSJason Wang tun->filter_attached = false; 2942c8d68e6bSJason Wang } 2943c8d68e6bSJason Wang 2944c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun) 2945c8d68e6bSJason Wang { 2946c8d68e6bSJason Wang int i, ret = 0; 2947c8d68e6bSJason Wang struct tun_file *tfile; 2948c8d68e6bSJason Wang 2949c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2950b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 29518ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 29528ced425eSHannes Frederic Sowa ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); 29538ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2954c8d68e6bSJason Wang if (ret) { 2955c8d68e6bSJason Wang tun_detach_filter(tun, i); 2956c8d68e6bSJason Wang return ret; 2957c8d68e6bSJason Wang } 2958c8d68e6bSJason Wang } 2959c8d68e6bSJason Wang 2960c8d68e6bSJason Wang tun->filter_attached = true; 2961c8d68e6bSJason Wang return ret; 2962c8d68e6bSJason Wang } 2963c8d68e6bSJason Wang 2964c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun) 2965c8d68e6bSJason Wang { 2966c8d68e6bSJason Wang struct tun_file *tfile; 2967c8d68e6bSJason Wang int i; 2968c8d68e6bSJason Wang 2969c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2970b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 2971c8d68e6bSJason Wang tfile->socket.sk->sk_sndbuf = tun->sndbuf; 2972c8d68e6bSJason Wang } 2973c8d68e6bSJason Wang } 2974c8d68e6bSJason Wang 2975cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr) 2976cde8b15fSJason Wang { 2977cde8b15fSJason Wang struct tun_file *tfile = file->private_data; 2978cde8b15fSJason Wang struct tun_struct *tun; 2979cde8b15fSJason Wang int ret = 0; 2980cde8b15fSJason Wang 2981cde8b15fSJason Wang rtnl_lock(); 2982cde8b15fSJason Wang 2983cde8b15fSJason Wang if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { 29844008e97fSJason Wang tun = tfile->detached; 29855dbbaf2dSPaul Moore if (!tun) { 2986cde8b15fSJason Wang ret = -EINVAL; 29875dbbaf2dSPaul Moore goto unlock; 29885dbbaf2dSPaul Moore } 29895dbbaf2dSPaul Moore ret = security_tun_dev_attach_queue(tun->security); 29905dbbaf2dSPaul Moore if (ret < 0) 29915dbbaf2dSPaul Moore goto unlock; 2992af3fb24eSEric Dumazet ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, 2993af3fb24eSEric Dumazet tun->flags & IFF_NAPI_FRAGS); 29944008e97fSJason Wang } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 2995b8deabd3SJason Wang tun = 
rtnl_dereference(tfile->tun); 299640630b82SMichael S. Tsirkin if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) 29974008e97fSJason Wang ret = -EINVAL; 2998cde8b15fSJason Wang else 29994008e97fSJason Wang __tun_detach(tfile, false); 30004008e97fSJason Wang } else 3001cde8b15fSJason Wang ret = -EINVAL; 3002cde8b15fSJason Wang 300383c1f36fSSabrina Dubroca if (ret >= 0) 300483c1f36fSSabrina Dubroca netdev_state_change(tun->dev); 300583c1f36fSSabrina Dubroca 30065dbbaf2dSPaul Moore unlock: 3007cde8b15fSJason Wang rtnl_unlock(); 3008cde8b15fSJason Wang return ret; 3009cde8b15fSJason Wang } 3010cde8b15fSJason Wang 3011cd5681d7SJason Wang static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p, 3012cd5681d7SJason Wang void __user *data) 301396f84061SJason Wang { 301496f84061SJason Wang struct bpf_prog *prog; 301596f84061SJason Wang int fd; 301696f84061SJason Wang 301796f84061SJason Wang if (copy_from_user(&fd, data, sizeof(fd))) 301896f84061SJason Wang return -EFAULT; 301996f84061SJason Wang 302096f84061SJason Wang if (fd == -1) { 302196f84061SJason Wang prog = NULL; 302296f84061SJason Wang } else { 302396f84061SJason Wang prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); 302496f84061SJason Wang if (IS_ERR(prog)) 302596f84061SJason Wang return PTR_ERR(prog); 302696f84061SJason Wang } 302796f84061SJason Wang 3028cd5681d7SJason Wang return __tun_set_ebpf(tun, prog_p, prog); 302996f84061SJason Wang } 303096f84061SJason Wang 303150857e2aSArnd Bergmann static long __tun_chr_ioctl(struct file *file, unsigned int cmd, 303250857e2aSArnd Bergmann unsigned long arg, int ifreq_len) 30331da177e4SLinus Torvalds { 303436b50babSEric W. Biederman struct tun_file *tfile = file->private_data; 3035f663706aSKirill Tkhai struct net *net = sock_net(&tfile->sk); 3036631ab46bSEric W. Biederman struct tun_struct *tun; 30371da177e4SLinus Torvalds void __user* argp = (void __user*)arg; 303826d31925SNicolas Dichtel unsigned int ifindex, carrier; 30391da177e4SLinus Torvalds struct ifreq ifr; 30400625c883SEric W. Biederman kuid_t owner; 30410625c883SEric W. Biederman kgid_t group; 304233dccbb0SHerbert Xu int sndbuf; 3043d9d52b51SMichael S. Tsirkin int vnet_hdr_sz; 30441cf8e410SMichael S. Tsirkin int le; 3045f271b2ccSMax Krasnyansky int ret; 304683c1f36fSSabrina Dubroca bool do_notify = false; 30471da177e4SLinus Torvalds 3048f2780d6dSKirill Tkhai if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || 3049f2780d6dSKirill Tkhai (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) { 305050857e2aSArnd Bergmann if (copy_from_user(&ifr, argp, ifreq_len)) 30511da177e4SLinus Torvalds return -EFAULT; 30528bbb1813SDavid S. Miller } else { 3053a117dacdSMathias Krause memset(&ifr, 0, sizeof(ifr)); 30548bbb1813SDavid S. Miller } 3055631ab46bSEric W. Biederman if (cmd == TUNGETFEATURES) { 3056631ab46bSEric W. Biederman /* Currently this just means: "what IFF flags are valid?". 3057631ab46bSEric W. Biederman * This is needed because we never checked for invalid flags on 3058031f5e03SMichael S. Tsirkin * TUNSETIFF. 3059031f5e03SMichael S. Tsirkin */ 3060031f5e03SMichael S. Tsirkin return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES, 3061631ab46bSEric W. 
Biederman (unsigned int __user*)argp); 3062f663706aSKirill Tkhai } else if (cmd == TUNSETQUEUE) { 3063cde8b15fSJason Wang return tun_set_queue(file, &ifr); 3064f663706aSKirill Tkhai } else if (cmd == SIOCGSKNS) { 3065f663706aSKirill Tkhai if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 3066f663706aSKirill Tkhai return -EPERM; 3067f663706aSKirill Tkhai return open_related_ns(&net->ns, get_net_ns); 3068f663706aSKirill Tkhai } 3069631ab46bSEric W. Biederman 3070c8d68e6bSJason Wang ret = 0; 3071876bfd4dSHerbert Xu rtnl_lock(); 3072876bfd4dSHerbert Xu 30739484dc74Syuan linyu tun = tun_get(tfile); 30740f16bc13SGao Feng if (cmd == TUNSETIFF) { 30750f16bc13SGao Feng ret = -EEXIST; 30760f16bc13SGao Feng if (tun) 30770f16bc13SGao Feng goto unlock; 30780f16bc13SGao Feng 30791da177e4SLinus Torvalds ifr.ifr_name[IFNAMSIZ-1] = '\0'; 30801da177e4SLinus Torvalds 3081f2780d6dSKirill Tkhai ret = tun_set_iff(net, file, &ifr); 30821da177e4SLinus Torvalds 3083876bfd4dSHerbert Xu if (ret) 3084876bfd4dSHerbert Xu goto unlock; 30851da177e4SLinus Torvalds 308650857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 3087876bfd4dSHerbert Xu ret = -EFAULT; 3088876bfd4dSHerbert Xu goto unlock; 30891da177e4SLinus Torvalds } 3090fb7589a1SPavel Emelyanov if (cmd == TUNSETIFINDEX) { 3091fb7589a1SPavel Emelyanov ret = -EPERM; 3092fb7589a1SPavel Emelyanov if (tun) 3093fb7589a1SPavel Emelyanov goto unlock; 3094fb7589a1SPavel Emelyanov 3095fb7589a1SPavel Emelyanov ret = -EFAULT; 3096fb7589a1SPavel Emelyanov if (copy_from_user(&ifindex, argp, sizeof(ifindex))) 3097fb7589a1SPavel Emelyanov goto unlock; 3098fb7589a1SPavel Emelyanov 3099fb7589a1SPavel Emelyanov ret = 0; 3100fb7589a1SPavel Emelyanov tfile->ifindex = ifindex; 3101fb7589a1SPavel Emelyanov goto unlock; 3102fb7589a1SPavel Emelyanov } 31031da177e4SLinus Torvalds 3104876bfd4dSHerbert Xu ret = -EBADFD; 31051da177e4SLinus Torvalds if (!tun) 3106876bfd4dSHerbert Xu goto unlock; 31071da177e4SLinus Torvalds 31081e588338SJason Wang tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd); 31091da177e4SLinus Torvalds 31100c3e0e3bSKirill Tkhai net = dev_net(tun->dev); 3111631ab46bSEric W. Biederman ret = 0; 31121da177e4SLinus Torvalds switch (cmd) { 3113e3b99556SMark McLoughlin case TUNGETIFF: 311412132768SKirill Tkhai tun_get_iff(tun, &ifr); 3115e3b99556SMark McLoughlin 31163d407a80SPavel Emelyanov if (tfile->detached) 31173d407a80SPavel Emelyanov ifr.ifr_flags |= IFF_DETACH_QUEUE; 3118849c9b6fSPavel Emelyanov if (!tfile->socket.sk->sk_filter) 3119849c9b6fSPavel Emelyanov ifr.ifr_flags |= IFF_NOFILTER; 31203d407a80SPavel Emelyanov 312150857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 3122631ab46bSEric W. Biederman ret = -EFAULT; 3123e3b99556SMark McLoughlin break; 3124e3b99556SMark McLoughlin 31251da177e4SLinus Torvalds case TUNSETNOCSUM: 31261da177e4SLinus Torvalds /* Disable/Enable checksum */ 31271da177e4SLinus Torvalds 312888255375SMichał Mirosław /* [unimplemented] */ 312988255375SMichał Mirosław tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n", 31306b8a66eeSJoe Perches arg ? "disabled" : "enabled"); 31311da177e4SLinus Torvalds break; 31321da177e4SLinus Torvalds 31331da177e4SLinus Torvalds case TUNSETPERSIST: 313454f968d6SJason Wang /* Disable/Enable persist mode. Keep an extra reference to the 313554f968d6SJason Wang * module to prevent the module being unprobed. 313654f968d6SJason Wang */ 313740630b82SMichael S. Tsirkin if (arg && !(tun->flags & IFF_PERSIST)) { 313840630b82SMichael S. 
Tsirkin tun->flags |= IFF_PERSIST; 313954f968d6SJason Wang __module_get(THIS_MODULE); 314083c1f36fSSabrina Dubroca do_notify = true; 3141dd38bd85SJason Wang } 314240630b82SMichael S. Tsirkin if (!arg && (tun->flags & IFF_PERSIST)) { 314340630b82SMichael S. Tsirkin tun->flags &= ~IFF_PERSIST; 314454f968d6SJason Wang module_put(THIS_MODULE); 314583c1f36fSSabrina Dubroca do_notify = true; 314654f968d6SJason Wang } 31471da177e4SLinus Torvalds 31486b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "persist %s\n", 31496b8a66eeSJoe Perches arg ? "enabled" : "disabled"); 31501da177e4SLinus Torvalds break; 31511da177e4SLinus Torvalds 31521da177e4SLinus Torvalds case TUNSETOWNER: 31531da177e4SLinus Torvalds /* Set owner of the device */ 31540625c883SEric W. Biederman owner = make_kuid(current_user_ns(), arg); 31550625c883SEric W. Biederman if (!uid_valid(owner)) { 31560625c883SEric W. Biederman ret = -EINVAL; 31570625c883SEric W. Biederman break; 31580625c883SEric W. Biederman } 31590625c883SEric W. Biederman tun->owner = owner; 316083c1f36fSSabrina Dubroca do_notify = true; 31611e588338SJason Wang tun_debug(KERN_INFO, tun, "owner set to %u\n", 31620625c883SEric W. Biederman from_kuid(&init_user_ns, tun->owner)); 31631da177e4SLinus Torvalds break; 31641da177e4SLinus Torvalds 31658c644623SGuido Guenther case TUNSETGROUP: 31668c644623SGuido Guenther /* Set group of the device */ 31670625c883SEric W. Biederman group = make_kgid(current_user_ns(), arg); 31680625c883SEric W. Biederman if (!gid_valid(group)) { 31690625c883SEric W. Biederman ret = -EINVAL; 31700625c883SEric W. Biederman break; 31710625c883SEric W. Biederman } 31720625c883SEric W. Biederman tun->group = group; 317383c1f36fSSabrina Dubroca do_notify = true; 31741e588338SJason Wang tun_debug(KERN_INFO, tun, "group set to %u\n", 31750625c883SEric W. Biederman from_kgid(&init_user_ns, tun->group)); 31768c644623SGuido Guenther break; 31778c644623SGuido Guenther 3178ff4cc3acSMike Kershaw case TUNSETLINK: 3179ff4cc3acSMike Kershaw /* Only allow setting the type when the interface is down */ 3180ff4cc3acSMike Kershaw if (tun->dev->flags & IFF_UP) { 31816b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, 31826b8a66eeSJoe Perches "Linktype set failed because interface is up\n"); 318348abfe05SDavid S. Miller ret = -EBUSY; 3184ff4cc3acSMike Kershaw } else { 3185ff4cc3acSMike Kershaw tun->dev->type = (int) arg; 31866b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "linktype set to %d\n", 31876b8a66eeSJoe Perches tun->dev->type); 318848abfe05SDavid S. Miller ret = 0; 3189ff4cc3acSMike Kershaw } 3190631ab46bSEric W. Biederman break; 3191ff4cc3acSMike Kershaw 31921da177e4SLinus Torvalds #ifdef TUN_DEBUG 31931da177e4SLinus Torvalds case TUNSETDEBUG: 31941da177e4SLinus Torvalds tun->debug = arg; 31951da177e4SLinus Torvalds break; 31961da177e4SLinus Torvalds #endif 31975228ddc9SRusty Russell case TUNSETOFFLOAD: 319888255375SMichał Mirosław ret = set_offload(tun, arg); 3199631ab46bSEric W. Biederman break; 32005228ddc9SRusty Russell 3201f271b2ccSMax Krasnyansky case TUNSETTXFILTER: 3202f271b2ccSMax Krasnyansky /* Can be set only for TAPs */ 3203631ab46bSEric W. Biederman ret = -EINVAL; 320440630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 3205631ab46bSEric W. Biederman break; 3206c0e5a8c2SHarvey Harrison ret = update_filter(&tun->txflt, (void __user *)arg); 3207631ab46bSEric W. 
Biederman break; 32081da177e4SLinus Torvalds 32091da177e4SLinus Torvalds case SIOCGIFHWADDR: 3210b595076aSUwe Kleine-König /* Get hw address */ 3211f271b2ccSMax Krasnyansky memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); 3212f271b2ccSMax Krasnyansky ifr.ifr_hwaddr.sa_family = tun->dev->type; 321350857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 3214631ab46bSEric W. Biederman ret = -EFAULT; 3215631ab46bSEric W. Biederman break; 32161da177e4SLinus Torvalds 32171da177e4SLinus Torvalds case SIOCSIFHWADDR: 3218f271b2ccSMax Krasnyansky /* Set hw address */ 32196b8a66eeSJoe Perches tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n", 32206b8a66eeSJoe Perches ifr.ifr_hwaddr.sa_data); 322140102371SKim B. Heino 32223a37a963SPetr Machata ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL); 3223631ab46bSEric W. Biederman break; 322433dccbb0SHerbert Xu 322533dccbb0SHerbert Xu case TUNGETSNDBUF: 322654f968d6SJason Wang sndbuf = tfile->socket.sk->sk_sndbuf; 322733dccbb0SHerbert Xu if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) 322833dccbb0SHerbert Xu ret = -EFAULT; 322933dccbb0SHerbert Xu break; 323033dccbb0SHerbert Xu 323133dccbb0SHerbert Xu case TUNSETSNDBUF: 323233dccbb0SHerbert Xu if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { 323333dccbb0SHerbert Xu ret = -EFAULT; 323433dccbb0SHerbert Xu break; 323533dccbb0SHerbert Xu } 323693161922SCraig Gallek if (sndbuf <= 0) { 323793161922SCraig Gallek ret = -EINVAL; 323893161922SCraig Gallek break; 323993161922SCraig Gallek } 324033dccbb0SHerbert Xu 3241c8d68e6bSJason Wang tun->sndbuf = sndbuf; 3242c8d68e6bSJason Wang tun_set_sndbuf(tun); 324333dccbb0SHerbert Xu break; 324433dccbb0SHerbert Xu 3245d9d52b51SMichael S. Tsirkin case TUNGETVNETHDRSZ: 3246d9d52b51SMichael S. Tsirkin vnet_hdr_sz = tun->vnet_hdr_sz; 3247d9d52b51SMichael S. Tsirkin if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) 3248d9d52b51SMichael S. Tsirkin ret = -EFAULT; 3249d9d52b51SMichael S. Tsirkin break; 3250d9d52b51SMichael S. Tsirkin 3251d9d52b51SMichael S. Tsirkin case TUNSETVNETHDRSZ: 3252d9d52b51SMichael S. Tsirkin if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { 3253d9d52b51SMichael S. Tsirkin ret = -EFAULT; 3254d9d52b51SMichael S. Tsirkin break; 3255d9d52b51SMichael S. Tsirkin } 3256d9d52b51SMichael S. Tsirkin if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { 3257d9d52b51SMichael S. Tsirkin ret = -EINVAL; 3258d9d52b51SMichael S. Tsirkin break; 3259d9d52b51SMichael S. Tsirkin } 3260d9d52b51SMichael S. Tsirkin 3261d9d52b51SMichael S. Tsirkin tun->vnet_hdr_sz = vnet_hdr_sz; 3262d9d52b51SMichael S. Tsirkin break; 3263d9d52b51SMichael S. Tsirkin 32641cf8e410SMichael S. Tsirkin case TUNGETVNETLE: 32651cf8e410SMichael S. Tsirkin le = !!(tun->flags & TUN_VNET_LE); 32661cf8e410SMichael S. Tsirkin if (put_user(le, (int __user *)argp)) 32671cf8e410SMichael S. Tsirkin ret = -EFAULT; 32681cf8e410SMichael S. Tsirkin break; 32691cf8e410SMichael S. Tsirkin 32701cf8e410SMichael S. Tsirkin case TUNSETVNETLE: 32711cf8e410SMichael S. Tsirkin if (get_user(le, (int __user *)argp)) { 32721cf8e410SMichael S. Tsirkin ret = -EFAULT; 32731cf8e410SMichael S. Tsirkin break; 32741cf8e410SMichael S. Tsirkin } 32751cf8e410SMichael S. Tsirkin if (le) 32761cf8e410SMichael S. Tsirkin tun->flags |= TUN_VNET_LE; 32771cf8e410SMichael S. Tsirkin else 32781cf8e410SMichael S. Tsirkin tun->flags &= ~TUN_VNET_LE; 32791cf8e410SMichael S. Tsirkin break; 32801cf8e410SMichael S. 
Tsirkin 32818b8e658bSGreg Kurz case TUNGETVNETBE: 32828b8e658bSGreg Kurz ret = tun_get_vnet_be(tun, argp); 32838b8e658bSGreg Kurz break; 32848b8e658bSGreg Kurz 32858b8e658bSGreg Kurz case TUNSETVNETBE: 32868b8e658bSGreg Kurz ret = tun_set_vnet_be(tun, argp); 32878b8e658bSGreg Kurz break; 32888b8e658bSGreg Kurz 328999405162SMichael S. Tsirkin case TUNATTACHFILTER: 329099405162SMichael S. Tsirkin /* Can be set only for TAPs */ 329199405162SMichael S. Tsirkin ret = -EINVAL; 329240630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 329399405162SMichael S. Tsirkin break; 329499405162SMichael S. Tsirkin ret = -EFAULT; 329554f968d6SJason Wang if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) 329699405162SMichael S. Tsirkin break; 329799405162SMichael S. Tsirkin 3298c8d68e6bSJason Wang ret = tun_attach_filter(tun); 329999405162SMichael S. Tsirkin break; 330099405162SMichael S. Tsirkin 330199405162SMichael S. Tsirkin case TUNDETACHFILTER: 330299405162SMichael S. Tsirkin /* Can be set only for TAPs */ 330399405162SMichael S. Tsirkin ret = -EINVAL; 330440630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 330599405162SMichael S. Tsirkin break; 3306c8d68e6bSJason Wang ret = 0; 3307c8d68e6bSJason Wang tun_detach_filter(tun, tun->numqueues); 330899405162SMichael S. Tsirkin break; 330999405162SMichael S. Tsirkin 331076975e9cSPavel Emelyanov case TUNGETFILTER: 331176975e9cSPavel Emelyanov ret = -EINVAL; 331240630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 331376975e9cSPavel Emelyanov break; 331476975e9cSPavel Emelyanov ret = -EFAULT; 331576975e9cSPavel Emelyanov if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) 331676975e9cSPavel Emelyanov break; 331776975e9cSPavel Emelyanov ret = 0; 331876975e9cSPavel Emelyanov break; 331976975e9cSPavel Emelyanov 332096f84061SJason Wang case TUNSETSTEERINGEBPF: 3321cd5681d7SJason Wang ret = tun_set_ebpf(tun, &tun->steering_prog, argp); 332296f84061SJason Wang break; 332396f84061SJason Wang 3324aff3d70aSJason Wang case TUNSETFILTEREBPF: 3325aff3d70aSJason Wang ret = tun_set_ebpf(tun, &tun->filter_prog, argp); 3326aff3d70aSJason Wang break; 3327aff3d70aSJason Wang 332826d31925SNicolas Dichtel case TUNSETCARRIER: 332926d31925SNicolas Dichtel ret = -EFAULT; 333026d31925SNicolas Dichtel if (copy_from_user(&carrier, argp, sizeof(carrier))) 333126d31925SNicolas Dichtel goto unlock; 333226d31925SNicolas Dichtel 333326d31925SNicolas Dichtel ret = tun_net_change_carrier(tun->dev, (bool)carrier); 333426d31925SNicolas Dichtel break; 333526d31925SNicolas Dichtel 33360c3e0e3bSKirill Tkhai case TUNGETDEVNETNS: 33370c3e0e3bSKirill Tkhai ret = -EPERM; 33380c3e0e3bSKirill Tkhai if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 33390c3e0e3bSKirill Tkhai goto unlock; 33400c3e0e3bSKirill Tkhai ret = open_related_ns(&net->ns, get_net_ns); 33410c3e0e3bSKirill Tkhai break; 33420c3e0e3bSKirill Tkhai 33431da177e4SLinus Torvalds default: 3344631ab46bSEric W. Biederman ret = -EINVAL; 3345631ab46bSEric W. Biederman break; 3346ee289b64SJoe Perches } 33471da177e4SLinus Torvalds 334883c1f36fSSabrina Dubroca if (do_notify) 334983c1f36fSSabrina Dubroca netdev_state_change(tun->dev); 335083c1f36fSSabrina Dubroca 3351876bfd4dSHerbert Xu unlock: 3352876bfd4dSHerbert Xu rtnl_unlock(); 3353876bfd4dSHerbert Xu if (tun) 3354631ab46bSEric W. Biederman tun_put(tun); 3355631ab46bSEric W. 
Biederman return ret; 33561da177e4SLinus Torvalds } 33571da177e4SLinus Torvalds 335850857e2aSArnd Bergmann static long tun_chr_ioctl(struct file *file, 335950857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 336050857e2aSArnd Bergmann { 336150857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); 336250857e2aSArnd Bergmann } 336350857e2aSArnd Bergmann 336450857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 336550857e2aSArnd Bergmann static long tun_chr_compat_ioctl(struct file *file, 336650857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 336750857e2aSArnd Bergmann { 336850857e2aSArnd Bergmann switch (cmd) { 336950857e2aSArnd Bergmann case TUNSETIFF: 337050857e2aSArnd Bergmann case TUNGETIFF: 337150857e2aSArnd Bergmann case TUNSETTXFILTER: 337250857e2aSArnd Bergmann case TUNGETSNDBUF: 337350857e2aSArnd Bergmann case TUNSETSNDBUF: 337450857e2aSArnd Bergmann case SIOCGIFHWADDR: 337550857e2aSArnd Bergmann case SIOCSIFHWADDR: 337650857e2aSArnd Bergmann arg = (unsigned long)compat_ptr(arg); 337750857e2aSArnd Bergmann break; 337850857e2aSArnd Bergmann default: 337950857e2aSArnd Bergmann arg = (compat_ulong_t)arg; 338050857e2aSArnd Bergmann break; 338150857e2aSArnd Bergmann } 338250857e2aSArnd Bergmann 338350857e2aSArnd Bergmann /* 338450857e2aSArnd Bergmann * compat_ifreq is shorter than ifreq, so we must not access beyond 338550857e2aSArnd Bergmann * the end of that structure. All fields that are used in this 338650857e2aSArnd Bergmann * driver are compatible though, we don't need to convert the 338750857e2aSArnd Bergmann * contents. 338850857e2aSArnd Bergmann */ 338950857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); 339050857e2aSArnd Bergmann } 339150857e2aSArnd Bergmann #endif /* CONFIG_COMPAT */ 339250857e2aSArnd Bergmann 33931da177e4SLinus Torvalds static int tun_chr_fasync(int fd, struct file *file, int on) 33941da177e4SLinus Torvalds { 339554f968d6SJason Wang struct tun_file *tfile = file->private_data; 33961da177e4SLinus Torvalds int ret; 33971da177e4SLinus Torvalds 339854f968d6SJason Wang if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) 33999d319522SJonathan Corbet goto out; 34001da177e4SLinus Torvalds 34011da177e4SLinus Torvalds if (on) { 340201919134SEric W. Biederman __f_setown(file, task_pid(current), PIDTYPE_TGID, 0); 340354f968d6SJason Wang tfile->flags |= TUN_FASYNC; 34041da177e4SLinus Torvalds } else 340554f968d6SJason Wang tfile->flags &= ~TUN_FASYNC; 34069d319522SJonathan Corbet ret = 0; 34079d319522SJonathan Corbet out: 34089d319522SJonathan Corbet return ret; 34091da177e4SLinus Torvalds } 34101da177e4SLinus Torvalds 34111da177e4SLinus Torvalds static int tun_chr_open(struct inode *inode, struct file * file) 34121da177e4SLinus Torvalds { 3413140e807dSEric W. Biederman struct net *net = current->nsproxy->net_ns; 3414631ab46bSEric W. Biederman struct tun_file *tfile; 3415deed49fbSThomas Gleixner 34166b8a66eeSJoe Perches DBG1(KERN_INFO, "tunX: tun_chr_open\n"); 3417631ab46bSEric W. Biederman 3418140e807dSEric W. Biederman tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, 341911aa9c28SEric W. Biederman &tun_proto, 0); 3420631ab46bSEric W. Biederman if (!tfile) 3421631ab46bSEric W. 
Biederman return -ENOMEM; 3422b196d88aSJason Wang if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) { 3423b196d88aSJason Wang sk_free(&tfile->sk); 3424b196d88aSJason Wang return -ENOMEM; 3425b196d88aSJason Wang } 3426b196d88aSJason Wang 3427c7256f57SEric Dumazet mutex_init(&tfile->napi_mutex); 3428c956674bSMonam Agarwal RCU_INIT_POINTER(tfile->tun, NULL); 342954f968d6SJason Wang tfile->flags = 0; 3430fb7589a1SPavel Emelyanov tfile->ifindex = 0; 343154f968d6SJason Wang 343254f968d6SJason Wang init_waitqueue_head(&tfile->wq.wait); 34339e641bdcSXi Wang RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq); 343454f968d6SJason Wang 343554f968d6SJason Wang tfile->socket.file = file; 343654f968d6SJason Wang tfile->socket.ops = &tun_socket_ops; 343754f968d6SJason Wang 343854f968d6SJason Wang sock_init_data(&tfile->socket, &tfile->sk); 343954f968d6SJason Wang 344054f968d6SJason Wang tfile->sk.sk_write_space = tun_sock_write_space; 344154f968d6SJason Wang tfile->sk.sk_sndbuf = INT_MAX; 344254f968d6SJason Wang 3443631ab46bSEric W. Biederman file->private_data = tfile; 34444008e97fSJason Wang INIT_LIST_HEAD(&tfile->next); 344554f968d6SJason Wang 344619a6afb2SJason Wang sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); 344719a6afb2SJason Wang 34481da177e4SLinus Torvalds return 0; 34491da177e4SLinus Torvalds } 34501da177e4SLinus Torvalds 34511da177e4SLinus Torvalds static int tun_chr_close(struct inode *inode, struct file *file) 34521da177e4SLinus Torvalds { 3453631ab46bSEric W. Biederman struct tun_file *tfile = file->private_data; 34541da177e4SLinus Torvalds 3455c8d68e6bSJason Wang tun_detach(tfile, true); 34561da177e4SLinus Torvalds 34571da177e4SLinus Torvalds return 0; 34581da177e4SLinus Torvalds } 34591da177e4SLinus Torvalds 346093e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 34619484dc74Syuan linyu static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) 346293e14b6dSMasatake YAMATO { 34639484dc74Syuan linyu struct tun_file *tfile = file->private_data; 346493e14b6dSMasatake YAMATO struct tun_struct *tun; 346593e14b6dSMasatake YAMATO struct ifreq ifr; 346693e14b6dSMasatake YAMATO 346793e14b6dSMasatake YAMATO memset(&ifr, 0, sizeof(ifr)); 346893e14b6dSMasatake YAMATO 346993e14b6dSMasatake YAMATO rtnl_lock(); 34709484dc74Syuan linyu tun = tun_get(tfile); 347193e14b6dSMasatake YAMATO if (tun) 347212132768SKirill Tkhai tun_get_iff(tun, &ifr); 347393e14b6dSMasatake YAMATO rtnl_unlock(); 347493e14b6dSMasatake YAMATO 347593e14b6dSMasatake YAMATO if (tun) 347693e14b6dSMasatake YAMATO tun_put(tun); 347793e14b6dSMasatake YAMATO 3478a3816ab0SJoe Perches seq_printf(m, "iff:\t%s\n", ifr.ifr_name); 347993e14b6dSMasatake YAMATO } 348093e14b6dSMasatake YAMATO #endif 348193e14b6dSMasatake YAMATO 3482d54b1fdbSArjan van de Ven static const struct file_operations tun_fops = { 34831da177e4SLinus Torvalds .owner = THIS_MODULE, 34841da177e4SLinus Torvalds .llseek = no_llseek, 34859b067034SAl Viro .read_iter = tun_chr_read_iter, 3486f5ff53b4SAl Viro .write_iter = tun_chr_write_iter, 34871da177e4SLinus Torvalds .poll = tun_chr_poll, 3488876bfd4dSHerbert Xu .unlocked_ioctl = tun_chr_ioctl, 348950857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 349050857e2aSArnd Bergmann .compat_ioctl = tun_chr_compat_ioctl, 349150857e2aSArnd Bergmann #endif 34921da177e4SLinus Torvalds .open = tun_chr_open, 34931da177e4SLinus Torvalds .release = tun_chr_close, 349493e14b6dSMasatake YAMATO .fasync = tun_chr_fasync, 349593e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 349693e14b6dSMasatake YAMATO .show_fdinfo = tun_chr_show_fdinfo, 349793e14b6dSMasatake 
YAMATO #endif 34981da177e4SLinus Torvalds }; 34991da177e4SLinus Torvalds 35001da177e4SLinus Torvalds static struct miscdevice tun_miscdev = { 35011da177e4SLinus Torvalds .minor = TUN_MINOR, 35021da177e4SLinus Torvalds .name = "tun", 3503e454cea2SKay Sievers .nodename = "net/tun", 35041da177e4SLinus Torvalds .fops = &tun_fops, 35051da177e4SLinus Torvalds }; 35061da177e4SLinus Torvalds 35071da177e4SLinus Torvalds /* ethtool interface */ 35081da177e4SLinus Torvalds 35094e24f2ddSChas Williams static void tun_default_link_ksettings(struct net_device *dev, 351029ccc49dSPhilippe Reynes struct ethtool_link_ksettings *cmd) 35111da177e4SLinus Torvalds { 351229ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, supported); 351329ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, advertising); 351429ccc49dSPhilippe Reynes cmd->base.speed = SPEED_10; 351529ccc49dSPhilippe Reynes cmd->base.duplex = DUPLEX_FULL; 351629ccc49dSPhilippe Reynes cmd->base.port = PORT_TP; 351729ccc49dSPhilippe Reynes cmd->base.phy_address = 0; 351829ccc49dSPhilippe Reynes cmd->base.autoneg = AUTONEG_DISABLE; 35194e24f2ddSChas Williams } 35204e24f2ddSChas Williams 35214e24f2ddSChas Williams static int tun_get_link_ksettings(struct net_device *dev, 35224e24f2ddSChas Williams struct ethtool_link_ksettings *cmd) 35234e24f2ddSChas Williams { 35244e24f2ddSChas Williams struct tun_struct *tun = netdev_priv(dev); 35254e24f2ddSChas Williams 35264e24f2ddSChas Williams memcpy(cmd, &tun->link_ksettings, sizeof(*cmd)); 35274e24f2ddSChas Williams return 0; 35284e24f2ddSChas Williams } 35294e24f2ddSChas Williams 35304e24f2ddSChas Williams static int tun_set_link_ksettings(struct net_device *dev, 35314e24f2ddSChas Williams const struct ethtool_link_ksettings *cmd) 35324e24f2ddSChas Williams { 35334e24f2ddSChas Williams struct tun_struct *tun = netdev_priv(dev); 35344e24f2ddSChas Williams 35354e24f2ddSChas Williams memcpy(&tun->link_ksettings, cmd, sizeof(*cmd)); 35361da177e4SLinus Torvalds return 0; 35371da177e4SLinus Torvalds } 35381da177e4SLinus Torvalds 35391da177e4SLinus Torvalds static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 35401da177e4SLinus Torvalds { 35411da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 35421da177e4SLinus Torvalds 354333a5ba14SRick Jones strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 354433a5ba14SRick Jones strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 35451da177e4SLinus Torvalds 35461da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 354740630b82SMichael S. Tsirkin case IFF_TUN: 354833a5ba14SRick Jones strlcpy(info->bus_info, "tun", sizeof(info->bus_info)); 35491da177e4SLinus Torvalds break; 355040630b82SMichael S. 
Tsirkin case IFF_TAP: 355133a5ba14SRick Jones strlcpy(info->bus_info, "tap", sizeof(info->bus_info)); 35521da177e4SLinus Torvalds break; 35531da177e4SLinus Torvalds } 35541da177e4SLinus Torvalds } 35551da177e4SLinus Torvalds 35561da177e4SLinus Torvalds static u32 tun_get_msglevel(struct net_device *dev) 35571da177e4SLinus Torvalds { 35581da177e4SLinus Torvalds #ifdef TUN_DEBUG 35591da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 35601da177e4SLinus Torvalds return tun->debug; 35611da177e4SLinus Torvalds #else 35621da177e4SLinus Torvalds return -EOPNOTSUPP; 35631da177e4SLinus Torvalds #endif 35641da177e4SLinus Torvalds } 35651da177e4SLinus Torvalds 35661da177e4SLinus Torvalds static void tun_set_msglevel(struct net_device *dev, u32 value) 35671da177e4SLinus Torvalds { 35681da177e4SLinus Torvalds #ifdef TUN_DEBUG 35691da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 35701da177e4SLinus Torvalds tun->debug = value; 35711da177e4SLinus Torvalds #endif 35721da177e4SLinus Torvalds } 35731da177e4SLinus Torvalds 35745503fcecSJason Wang static int tun_get_coalesce(struct net_device *dev, 35755503fcecSJason Wang struct ethtool_coalesce *ec) 35765503fcecSJason Wang { 35775503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 35785503fcecSJason Wang 35795503fcecSJason Wang ec->rx_max_coalesced_frames = tun->rx_batched; 35805503fcecSJason Wang 35815503fcecSJason Wang return 0; 35825503fcecSJason Wang } 35835503fcecSJason Wang 35845503fcecSJason Wang static int tun_set_coalesce(struct net_device *dev, 35855503fcecSJason Wang struct ethtool_coalesce *ec) 35865503fcecSJason Wang { 35875503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 35885503fcecSJason Wang 35895503fcecSJason Wang if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) 35905503fcecSJason Wang tun->rx_batched = NAPI_POLL_WEIGHT; 35915503fcecSJason Wang else 35925503fcecSJason Wang tun->rx_batched = ec->rx_max_coalesced_frames; 35935503fcecSJason Wang 35945503fcecSJason Wang return 0; 35955503fcecSJason Wang } 35965503fcecSJason Wang 35977282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops = { 35981da177e4SLinus Torvalds .get_drvinfo = tun_get_drvinfo, 35991da177e4SLinus Torvalds .get_msglevel = tun_get_msglevel, 36001da177e4SLinus Torvalds .set_msglevel = tun_set_msglevel, 3601bee31369SNolan Leake .get_link = ethtool_op_get_link, 3602eda29772SRichard Cochran .get_ts_info = ethtool_op_get_ts_info, 36035503fcecSJason Wang .get_coalesce = tun_get_coalesce, 36045503fcecSJason Wang .set_coalesce = tun_set_coalesce, 360529ccc49dSPhilippe Reynes .get_link_ksettings = tun_get_link_ksettings, 36064e24f2ddSChas Williams .set_link_ksettings = tun_set_link_ksettings, 36071da177e4SLinus Torvalds }; 36081da177e4SLinus Torvalds 36091576d986SJason Wang static int tun_queue_resize(struct tun_struct *tun) 36101576d986SJason Wang { 36111576d986SJason Wang struct net_device *dev = tun->dev; 36121576d986SJason Wang struct tun_file *tfile; 36135990a305SJason Wang struct ptr_ring **rings; 36141576d986SJason Wang int n = tun->numqueues + tun->numdisabled; 36151576d986SJason Wang int ret, i; 36161576d986SJason Wang 36175990a305SJason Wang rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); 36185990a305SJason Wang if (!rings) 36191576d986SJason Wang return -ENOMEM; 36201576d986SJason Wang 36211576d986SJason Wang for (i = 0; i < tun->numqueues; i++) { 36221576d986SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 36235990a305SJason Wang rings[i] = &tfile->tx_ring; 36241576d986SJason Wang } 
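	/* Descriptive note (added): queues detached with IFF_DETACH_QUEUE but
	 * kept alive by the persistent multi-queue device sit on
	 * tun->disabled and still own a tx_ring.  Their rings are collected
	 * here as well, so the single ptr_ring_resize_multiple() call below
	 * resizes every ring - attached and disabled - to the new
	 * tx_queue_len.
	 */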
static int tun_queue_resize(struct tun_struct *tun)
{
	struct net_device *dev = tun->dev;
	struct tun_file *tfile;
	struct ptr_ring **rings;
	int n = tun->numqueues + tun->numdisabled;
	int ret, i;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		rings[i] = &tfile->tx_ring;
	}
	list_for_each_entry(tfile, &tun->disabled, next)
		rings[i++] = &tfile->tx_ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       tun_ptr_free);

	kfree(rings);
	return ret;
}

static int tun_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tun_struct *tun = netdev_priv(dev);

	if (dev->rtnl_link_ops != &tun_link_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		if (tun_queue_resize(tun))
			return NOTIFY_BAD;
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block tun_notifier_block __read_mostly = {
	.notifier_call	= tun_device_event,
};

static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}

	ret = register_netdevice_notifier(&tun_notifier_block);
	if (ret) {
		pr_err("Can't register netdevice notifier\n");
		goto err_notifier;
	}

	return 0;

err_notifier:
	misc_deregister(&tun_miscdev);
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}

static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
	unregister_netdevice_notifier(&tun_notifier_block);
}
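/*
 * Note on tun_device_event()/tun_queue_resize() above: the
 * NETDEV_CHANGE_TX_QUEUE_LEN notification fires when the queue length of a
 * tun/tap device is changed (for example via
 *
 *	ip link set dev tun0 txqueuelen 5000
 *
 * or the sysfs tx_queue_len attribute), and tun_queue_resize() then resizes
 * every attached queue's ptr_ring to the new length, including the rings of
 * currently disabled queues.  The device name is only an example.
 */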
/* Get an underlying socket object from tun file.  Returns error unless file is
 * attached to a device.  The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 * holding a reference to the file for as long as the socket is in use.
 */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);

struct ptr_ring *tun_get_tx_ring(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->tx_ring;
}
EXPORT_SYMBOL_GPL(tun_get_tx_ring);

module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");
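/*
 * Illustrative sketch of how an in-kernel consumer (vhost-net is the
 * typical user) is expected to use the tun_get_socket()/tun_get_tx_ring()
 * exports above.  The helper name example_send_on_tun and its surrounding
 * context are made up for the example; the real vhost code differs.  The
 * key points are that the file reference must be held for as long as the
 * socket is in use, and that both helpers return ERR_PTR() values that
 * must be checked.
 *
 *	static int example_send_on_tun(int tun_fd, struct msghdr *msg)
 *	{
 *		struct file *file = fget(tun_fd);
 *		struct socket *sock;
 *		int ret;
 *
 *		if (!file)
 *			return -EBADF;
 *		sock = tun_get_socket(file);
 *		if (IS_ERR(sock)) {
 *			fput(file);
 *			return PTR_ERR(sock);
 *		}
 *		ret = sock_sendmsg(sock, msg);
 *		fput(file);
 *		return ret;
 *	}
 */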