/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (tun->debug)						\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (debug == 2)						\
		printk(level fmt, ##args);			\
} while (0)
#else
#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (0)							\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (0)							\
		printk(level fmt, ##args);			\
} while (0)
#endif

#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE     0x80000000
#define TUN_VNET_BE     0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};

/* A tun_file connects an open character device to a tuntap netdevice.  It
 * also contains all socket related structures (except sock_fprog and tap_filter)
 * to serve as one transmit queue for the tuntap device.  The sock_fprog and
 * tap_filter are kept in tun_struct since they are used for filtering for the
 * netdevice, not for a specific queue (at least I didn't see the requirement
 * for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when a file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int            numqueues;
	unsigned int 		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};
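/* Entries in a tun_file's tx_ring are tagged pointers: an sk_buff pointer is
 * stored as-is, while an xdp_frame has TUN_XDP_FLAG set in its low bit.  The
 * helpers below test, set and clear that tag so consumers can tell the two
 * apart (see also the comment in tun_xdp_xmit()).
 */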
bool tun_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}
EXPORT_SYMBOL(tun_is_xdp_frame);

void *tun_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_xdp_to_ptr);

void *tun_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_ptr_to_xdp);

static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}
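/* Virtio-net headers exchanged with userspace are little-endian when
 * TUN_VNET_LE is set, or when the legacy virtio default for this build is
 * already little-endian.  TUN_VNET_BE exists only under
 * CONFIG_TUN_VNET_CROSS_LE and marks the legacy default as big-endian, e.g.
 * to serve a big-endian legacy guest from a little-endian host.
 */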
#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}
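/* The flow table maps a packet's rxhash to the queue it last used, so that
 * transmit traffic for a flow can be steered back to the queue (and hence the
 * userspace/vhost thread) that handled it.  Entries are created on the
 * receive path in tun_flow_update() and consumed on transmit in
 * tun_automq_select_queue().
 */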
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}
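/* Ageing timer callback: drop flow entries that have not been updated within
 * tun->ageing_time and, if any live entries remain, re-arm the timer for the
 * soonest upcoming expiry.
 */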
static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	if (!rxhash)
		return;
	else
		head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

/**
 * Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash.  The reason that
 * we do not check rxq no. is because some cards (e.g. 82599) choose
 * the rxq based on the txq where the last packet of the flow comes.  As
 * the userspace application moves between processors, we may get a
 * different rxq no. here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
	if (e) {
		tun_flow_save_rps_rxhash(e, txq);
		txq = e->queue_index;
	} else {
		/* use multiply and shift instead of expensive divide */
		txq = ((u64)txq * numqueues) >> 32;
	}

	return txq;
}

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u16 ret = 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % tun->numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}
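/* A caller may manage a persistent device only if it matches the configured
 * owner uid / group gid, or if it has CAP_NET_ADMIN in the device's network
 * namespace; tun_not_capable() returns true when neither holds.
 */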
static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to persistent device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (!!err)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	if (rtnl_dereference(tun->xdp_prog))
		sock_set_flag(&tfile->sk, SOCK_XDP);

	tun_set_real_num_queues(tun);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
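/* Userspace supplies up to FLT_EXACT_COUNT unicast addresses that are matched
 * exactly; any additional multicast addresses are folded into the 64-bit
 * mask[] via ether_crc().  A count of zero disables filtering altogether.
 */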
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks. Which is
	 * perfectly safe. We disable it first and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;

free_addr:
	kfree(addr);
	return err;
}

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	netif_tx_start_all_queues(dev);

	for (i = 0; i < tun->numqueues; i++) {
		struct tun_file *tfile;

		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_write_space(tfile->socket.sk);
	}

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		struct tun_flow_entry *e;
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
		if (e)
			tun_flow_save_rps_rxhash(e, rxhash);
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (txq >= tun->numqueues)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0 || pskb_trim(skb, len))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time.
	 */
	skb_orphan(skb);

	nf_reset(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
1140f271b2ccSMax Krasnyansky */ 11411da177e4SLinus Torvalds } 11421da177e4SLinus Torvalds 1143c8f44affSMichał Mirosław static netdev_features_t tun_net_fix_features(struct net_device *dev, 1144c8f44affSMichał Mirosław netdev_features_t features) 114588255375SMichał Mirosław { 114688255375SMichał Mirosław struct tun_struct *tun = netdev_priv(dev); 114788255375SMichał Mirosław 114888255375SMichał Mirosław return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); 114988255375SMichał Mirosław } 1150eaea34b2SPaolo Abeni 1151eaea34b2SPaolo Abeni static void tun_set_headroom(struct net_device *dev, int new_hr) 1152eaea34b2SPaolo Abeni { 1153eaea34b2SPaolo Abeni struct tun_struct *tun = netdev_priv(dev); 1154eaea34b2SPaolo Abeni 1155eaea34b2SPaolo Abeni if (new_hr < NET_SKB_PAD) 1156eaea34b2SPaolo Abeni new_hr = NET_SKB_PAD; 1157eaea34b2SPaolo Abeni 1158eaea34b2SPaolo Abeni tun->align = new_hr; 1159eaea34b2SPaolo Abeni } 1160eaea34b2SPaolo Abeni 1161bc1f4470Sstephen hemminger static void 1162608b9977SPaolo Abeni tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 1163608b9977SPaolo Abeni { 1164608b9977SPaolo Abeni u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0; 1165608b9977SPaolo Abeni struct tun_struct *tun = netdev_priv(dev); 1166608b9977SPaolo Abeni struct tun_pcpu_stats *p; 1167608b9977SPaolo Abeni int i; 1168608b9977SPaolo Abeni 1169608b9977SPaolo Abeni for_each_possible_cpu(i) { 1170608b9977SPaolo Abeni u64 rxpackets, rxbytes, txpackets, txbytes; 1171608b9977SPaolo Abeni unsigned int start; 1172608b9977SPaolo Abeni 1173608b9977SPaolo Abeni p = per_cpu_ptr(tun->pcpu_stats, i); 1174608b9977SPaolo Abeni do { 1175608b9977SPaolo Abeni start = u64_stats_fetch_begin(&p->syncp); 1176608b9977SPaolo Abeni rxpackets = p->rx_packets; 1177608b9977SPaolo Abeni rxbytes = p->rx_bytes; 1178608b9977SPaolo Abeni txpackets = p->tx_packets; 1179608b9977SPaolo Abeni txbytes = p->tx_bytes; 1180608b9977SPaolo Abeni } while (u64_stats_fetch_retry(&p->syncp, start)); 1181608b9977SPaolo Abeni 1182608b9977SPaolo Abeni stats->rx_packets += rxpackets; 1183608b9977SPaolo Abeni stats->rx_bytes += rxbytes; 1184608b9977SPaolo Abeni stats->tx_packets += txpackets; 1185608b9977SPaolo Abeni stats->tx_bytes += txbytes; 1186608b9977SPaolo Abeni 1187608b9977SPaolo Abeni /* u32 counters */ 1188608b9977SPaolo Abeni rx_dropped += p->rx_dropped; 1189608b9977SPaolo Abeni rx_frame_errors += p->rx_frame_errors; 1190608b9977SPaolo Abeni tx_dropped += p->tx_dropped; 1191608b9977SPaolo Abeni } 1192608b9977SPaolo Abeni stats->rx_dropped = rx_dropped; 1193608b9977SPaolo Abeni stats->rx_frame_errors = rx_frame_errors; 1194608b9977SPaolo Abeni stats->tx_dropped = tx_dropped; 1195608b9977SPaolo Abeni } 1196608b9977SPaolo Abeni 1197761876c8SJason Wang static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, 1198761876c8SJason Wang struct netlink_ext_ack *extack) 1199761876c8SJason Wang { 1200761876c8SJason Wang struct tun_struct *tun = netdev_priv(dev); 1201e4a2a304SJason Wang struct tun_file *tfile; 1202761876c8SJason Wang struct bpf_prog *old_prog; 1203e4a2a304SJason Wang int i; 1204761876c8SJason Wang 1205761876c8SJason Wang old_prog = rtnl_dereference(tun->xdp_prog); 1206761876c8SJason Wang rcu_assign_pointer(tun->xdp_prog, prog); 1207761876c8SJason Wang if (old_prog) 1208761876c8SJason Wang bpf_prog_put(old_prog); 1209761876c8SJason Wang 1210e4a2a304SJason Wang for (i = 0; i < tun->numqueues; i++) { 1211e4a2a304SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 
1212e4a2a304SJason Wang if (prog) 1213e4a2a304SJason Wang sock_set_flag(&tfile->sk, SOCK_XDP); 1214e4a2a304SJason Wang else 1215e4a2a304SJason Wang sock_reset_flag(&tfile->sk, SOCK_XDP); 1216e4a2a304SJason Wang } 1217e4a2a304SJason Wang list_for_each_entry(tfile, &tun->disabled, next) { 1218e4a2a304SJason Wang if (prog) 1219e4a2a304SJason Wang sock_set_flag(&tfile->sk, SOCK_XDP); 1220e4a2a304SJason Wang else 1221e4a2a304SJason Wang sock_reset_flag(&tfile->sk, SOCK_XDP); 1222e4a2a304SJason Wang } 1223e4a2a304SJason Wang 1224761876c8SJason Wang return 0; 1225761876c8SJason Wang } 1226761876c8SJason Wang 1227761876c8SJason Wang static u32 tun_xdp_query(struct net_device *dev) 1228761876c8SJason Wang { 1229761876c8SJason Wang struct tun_struct *tun = netdev_priv(dev); 1230761876c8SJason Wang const struct bpf_prog *xdp_prog; 1231761876c8SJason Wang 1232761876c8SJason Wang xdp_prog = rtnl_dereference(tun->xdp_prog); 1233761876c8SJason Wang if (xdp_prog) 1234761876c8SJason Wang return xdp_prog->aux->id; 1235761876c8SJason Wang 1236761876c8SJason Wang return 0; 1237761876c8SJason Wang } 1238761876c8SJason Wang 1239f4e63525SJakub Kicinski static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) 1240761876c8SJason Wang { 1241761876c8SJason Wang switch (xdp->command) { 1242761876c8SJason Wang case XDP_SETUP_PROG: 1243761876c8SJason Wang return tun_xdp_set(dev, xdp->prog, xdp->extack); 1244761876c8SJason Wang case XDP_QUERY_PROG: 1245761876c8SJason Wang xdp->prog_id = tun_xdp_query(dev); 1246761876c8SJason Wang return 0; 1247761876c8SJason Wang default: 1248761876c8SJason Wang return -EINVAL; 1249761876c8SJason Wang } 1250761876c8SJason Wang } 1251761876c8SJason Wang 1252758e43b7SStephen Hemminger static const struct net_device_ops tun_netdev_ops = { 1253c70f1829SEric W. 
Biederman .ndo_uninit = tun_net_uninit, 1254758e43b7SStephen Hemminger .ndo_open = tun_net_open, 1255758e43b7SStephen Hemminger .ndo_stop = tun_net_close, 125600829823SStephen Hemminger .ndo_start_xmit = tun_net_xmit, 125788255375SMichał Mirosław .ndo_fix_features = tun_net_fix_features, 1258c8d68e6bSJason Wang .ndo_select_queue = tun_select_queue, 1259eaea34b2SPaolo Abeni .ndo_set_rx_headroom = tun_set_headroom, 1260608b9977SPaolo Abeni .ndo_get_stats64 = tun_net_get_stats64, 1261758e43b7SStephen Hemminger }; 1262758e43b7SStephen Hemminger 12630c9d917bSJesper Dangaard Brouer static void __tun_xdp_flush_tfile(struct tun_file *tfile) 12640c9d917bSJesper Dangaard Brouer { 12650c9d917bSJesper Dangaard Brouer /* Notify and wake up reader process */ 12660c9d917bSJesper Dangaard Brouer if (tfile->flags & TUN_FASYNC) 12670c9d917bSJesper Dangaard Brouer kill_fasync(&tfile->fasync, SIGIO, POLL_IN); 12680c9d917bSJesper Dangaard Brouer tfile->socket.sk->sk_data_ready(tfile->socket.sk); 12690c9d917bSJesper Dangaard Brouer } 12700c9d917bSJesper Dangaard Brouer 127142b33468SJesper Dangaard Brouer static int tun_xdp_xmit(struct net_device *dev, int n, 127242b33468SJesper Dangaard Brouer struct xdp_frame **frames, u32 flags) 1273fc72d1d5SJason Wang { 1274fc72d1d5SJason Wang struct tun_struct *tun = netdev_priv(dev); 1275fc72d1d5SJason Wang struct tun_file *tfile; 1276fc72d1d5SJason Wang u32 numqueues; 1277735fc405SJesper Dangaard Brouer int drops = 0; 1278735fc405SJesper Dangaard Brouer int cnt = n; 1279735fc405SJesper Dangaard Brouer int i; 1280fc72d1d5SJason Wang 12810c9d917bSJesper Dangaard Brouer if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 128242b33468SJesper Dangaard Brouer return -EINVAL; 128342b33468SJesper Dangaard Brouer 1284fc72d1d5SJason Wang rcu_read_lock(); 1285fc72d1d5SJason Wang 1286fc72d1d5SJason Wang numqueues = READ_ONCE(tun->numqueues); 1287fc72d1d5SJason Wang if (!numqueues) { 1288735fc405SJesper Dangaard Brouer rcu_read_unlock(); 1289735fc405SJesper Dangaard Brouer return -ENXIO; /* Caller will free/return all frames */ 1290fc72d1d5SJason Wang } 1291fc72d1d5SJason Wang 1292fc72d1d5SJason Wang tfile = rcu_dereference(tun->tfiles[smp_processor_id() % 1293fc72d1d5SJason Wang numqueues]); 1294735fc405SJesper Dangaard Brouer 1295735fc405SJesper Dangaard Brouer spin_lock(&tfile->tx_ring.producer_lock); 1296735fc405SJesper Dangaard Brouer for (i = 0; i < n; i++) { 1297735fc405SJesper Dangaard Brouer struct xdp_frame *xdp = frames[i]; 1298fc72d1d5SJason Wang /* Encode the XDP flag into lowest bit for consumer to differ 1299fc72d1d5SJason Wang * XDP buffer from sk_buff. 
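		 * (tun_xdp_to_ptr() applies the tag here; tun_is_xdp_frame()
		 * and tun_ptr_to_xdp() undo it on the consumer side, see
		 * tun_do_read() and tun_ptr_peek_len().)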
1300fc72d1d5SJason Wang */ 1301735fc405SJesper Dangaard Brouer void *frame = tun_xdp_to_ptr(xdp); 1302fc72d1d5SJason Wang 1303735fc405SJesper Dangaard Brouer if (__ptr_ring_produce(&tfile->tx_ring, frame)) { 1304735fc405SJesper Dangaard Brouer this_cpu_inc(tun->pcpu_stats->tx_dropped); 1305735fc405SJesper Dangaard Brouer xdp_return_frame_rx_napi(xdp); 1306735fc405SJesper Dangaard Brouer drops++; 1307735fc405SJesper Dangaard Brouer } 1308735fc405SJesper Dangaard Brouer } 1309735fc405SJesper Dangaard Brouer spin_unlock(&tfile->tx_ring.producer_lock); 1310735fc405SJesper Dangaard Brouer 13110c9d917bSJesper Dangaard Brouer if (flags & XDP_XMIT_FLUSH) 13120c9d917bSJesper Dangaard Brouer __tun_xdp_flush_tfile(tfile); 13130c9d917bSJesper Dangaard Brouer 1314fc72d1d5SJason Wang rcu_read_unlock(); 1315735fc405SJesper Dangaard Brouer return cnt - drops; 1316fc72d1d5SJason Wang } 1317fc72d1d5SJason Wang 131844fa2dbdSJesper Dangaard Brouer static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) 131944fa2dbdSJesper Dangaard Brouer { 132044fa2dbdSJesper Dangaard Brouer struct xdp_frame *frame = convert_to_xdp_frame(xdp); 132144fa2dbdSJesper Dangaard Brouer 132244fa2dbdSJesper Dangaard Brouer if (unlikely(!frame)) 132344fa2dbdSJesper Dangaard Brouer return -EOVERFLOW; 132444fa2dbdSJesper Dangaard Brouer 132542421a56SJesper Dangaard Brouer return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH); 1326fc72d1d5SJason Wang } 1327fc72d1d5SJason Wang 1328758e43b7SStephen Hemminger static const struct net_device_ops tap_netdev_ops = { 1329c70f1829SEric W. Biederman .ndo_uninit = tun_net_uninit, 1330758e43b7SStephen Hemminger .ndo_open = tun_net_open, 1331758e43b7SStephen Hemminger .ndo_stop = tun_net_close, 133200829823SStephen Hemminger .ndo_start_xmit = tun_net_xmit, 133388255375SMichał Mirosław .ndo_fix_features = tun_net_fix_features, 1334afc4b13dSJiri Pirko .ndo_set_rx_mode = tun_net_mclist, 1335758e43b7SStephen Hemminger .ndo_set_mac_address = eth_mac_addr, 1336758e43b7SStephen Hemminger .ndo_validate_addr = eth_validate_addr, 1337c8d68e6bSJason Wang .ndo_select_queue = tun_select_queue, 13385e52796aSToshiaki Makita .ndo_features_check = passthru_features_check, 1339eaea34b2SPaolo Abeni .ndo_set_rx_headroom = tun_set_headroom, 1340608b9977SPaolo Abeni .ndo_get_stats64 = tun_net_get_stats64, 1341f4e63525SJakub Kicinski .ndo_bpf = tun_xdp, 1342fc72d1d5SJason Wang .ndo_xdp_xmit = tun_xdp_xmit, 1343758e43b7SStephen Hemminger }; 1344758e43b7SStephen Hemminger 1345944a1376SPavel Emelyanov static void tun_flow_init(struct tun_struct *tun) 134696442e42SJason Wang { 134796442e42SJason Wang int i; 134896442e42SJason Wang 134996442e42SJason Wang for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) 135096442e42SJason Wang INIT_HLIST_HEAD(&tun->flows[i]); 135196442e42SJason Wang 135296442e42SJason Wang tun->ageing_time = TUN_FLOW_EXPIRE; 1353e99e88a9SKees Cook timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0); 1354e99e88a9SKees Cook mod_timer(&tun->flow_gc_timer, 1355e99e88a9SKees Cook round_jiffies_up(jiffies + tun->ageing_time)); 135696442e42SJason Wang } 135796442e42SJason Wang 135896442e42SJason Wang static void tun_flow_uninit(struct tun_struct *tun) 135996442e42SJason Wang { 136096442e42SJason Wang del_timer_sync(&tun->flow_gc_timer); 136196442e42SJason Wang tun_flow_flush(tun); 136296442e42SJason Wang } 136396442e42SJason Wang 136491572088SJarod Wilson #define MIN_MTU 68 136591572088SJarod Wilson #define MAX_MTU 65535 136691572088SJarod Wilson 13671da177e4SLinus Torvalds /* Initialize net device. 
*/ 13681da177e4SLinus Torvalds static void tun_net_init(struct net_device *dev) 13691da177e4SLinus Torvalds { 13701da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 13711da177e4SLinus Torvalds 13721da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 137340630b82SMichael S. Tsirkin case IFF_TUN: 1374758e43b7SStephen Hemminger dev->netdev_ops = &tun_netdev_ops; 1375758e43b7SStephen Hemminger 13761da177e4SLinus Torvalds /* Point-to-Point TUN Device */ 13771da177e4SLinus Torvalds dev->hard_header_len = 0; 13781da177e4SLinus Torvalds dev->addr_len = 0; 13791da177e4SLinus Torvalds dev->mtu = 1500; 13801da177e4SLinus Torvalds 13811da177e4SLinus Torvalds /* Zero header length */ 13821da177e4SLinus Torvalds dev->type = ARPHRD_NONE; 13831da177e4SLinus Torvalds dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 13841da177e4SLinus Torvalds break; 13851da177e4SLinus Torvalds 138640630b82SMichael S. Tsirkin case IFF_TAP: 13877a0a9608SKusanagi Kouichi dev->netdev_ops = &tap_netdev_ops; 13881da177e4SLinus Torvalds /* Ethernet TAP Device */ 13891da177e4SLinus Torvalds ether_setup(dev); 1390550fd08cSNeil Horman dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1391a676847bSstephen hemminger dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 139236226a8dSBrian Braunstein 1393f2cedb63SDanny Kukawka eth_hw_addr_random(dev); 139436226a8dSBrian Braunstein 13951da177e4SLinus Torvalds break; 13961da177e4SLinus Torvalds } 139791572088SJarod Wilson 139891572088SJarod Wilson dev->min_mtu = MIN_MTU; 139991572088SJarod Wilson dev->max_mtu = MAX_MTU - dev->hard_header_len; 14001da177e4SLinus Torvalds } 14011da177e4SLinus Torvalds 14022f3ab622SJason Wang static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile) 14032f3ab622SJason Wang { 14042f3ab622SJason Wang struct sock *sk = tfile->socket.sk; 14052f3ab622SJason Wang 14062f3ab622SJason Wang return (tun->dev->flags & IFF_UP) && sock_writeable(sk); 14072f3ab622SJason Wang } 14082f3ab622SJason Wang 14091da177e4SLinus Torvalds /* Character device part */ 14101da177e4SLinus Torvalds 14111da177e4SLinus Torvalds /* Poll */ 1412afc9a42bSAl Viro static __poll_t tun_chr_poll(struct file *file, poll_table *wait) 14131da177e4SLinus Torvalds { 1414b2430de3SEric W. Biederman struct tun_file *tfile = file->private_data; 14159484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 14163c8a9c63SMariusz Kozlowski struct sock *sk; 1417afc9a42bSAl Viro __poll_t mask = 0; 14181da177e4SLinus Torvalds 14191da177e4SLinus Torvalds if (!tun) 1420a9a08845SLinus Torvalds return EPOLLERR; 14211da177e4SLinus Torvalds 142254f968d6SJason Wang sk = tfile->socket.sk; 14233c8a9c63SMariusz Kozlowski 14246b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_chr_poll\n"); 14251da177e4SLinus Torvalds 14269e641bdcSXi Wang poll_wait(file, sk_sleep(sk), wait); 14271da177e4SLinus Torvalds 14285990a305SJason Wang if (!ptr_ring_empty(&tfile->tx_ring)) 1429a9a08845SLinus Torvalds mask |= EPOLLIN | EPOLLRDNORM; 14301da177e4SLinus Torvalds 14312f3ab622SJason Wang /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to 14322f3ab622SJason Wang * guarantee EPOLLOUT to be raised by either here or 14332f3ab622SJason Wang * tun_sock_write_space(). Then process could get notification 14342f3ab622SJason Wang * after it writes to a down device and meets -EIO. 
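	 * (tun_sock_write_space() is the counterpart: it clears the bit and
	 * raises EPOLLOUT/SIGIO once the socket becomes writeable again.)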
14352f3ab622SJason Wang */ 14362f3ab622SJason Wang if (tun_sock_writeable(tun, tfile) || 14379cd3e072SEric Dumazet (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 14382f3ab622SJason Wang tun_sock_writeable(tun, tfile))) 1439a9a08845SLinus Torvalds mask |= EPOLLOUT | EPOLLWRNORM; 144033dccbb0SHerbert Xu 1441c70f1829SEric W. Biederman if (tun->dev->reg_state != NETREG_REGISTERED) 1442a9a08845SLinus Torvalds mask = EPOLLERR; 1443c70f1829SEric W. Biederman 1444631ab46bSEric W. Biederman tun_put(tun); 14451da177e4SLinus Torvalds return mask; 14461da177e4SLinus Torvalds } 14471da177e4SLinus Torvalds 144890e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, 144990e33d45SPetar Penkov size_t len, 145090e33d45SPetar Penkov const struct iov_iter *it) 145190e33d45SPetar Penkov { 145290e33d45SPetar Penkov struct sk_buff *skb; 145390e33d45SPetar Penkov size_t linear; 145490e33d45SPetar Penkov int err; 145590e33d45SPetar Penkov int i; 145690e33d45SPetar Penkov 145790e33d45SPetar Penkov if (it->nr_segs > MAX_SKB_FRAGS + 1) 145890e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 145990e33d45SPetar Penkov 146090e33d45SPetar Penkov local_bh_disable(); 146190e33d45SPetar Penkov skb = napi_get_frags(&tfile->napi); 146290e33d45SPetar Penkov local_bh_enable(); 146390e33d45SPetar Penkov if (!skb) 146490e33d45SPetar Penkov return ERR_PTR(-ENOMEM); 146590e33d45SPetar Penkov 146690e33d45SPetar Penkov linear = iov_iter_single_seg_count(it); 146790e33d45SPetar Penkov err = __skb_grow(skb, linear); 146890e33d45SPetar Penkov if (err) 146990e33d45SPetar Penkov goto free; 147090e33d45SPetar Penkov 147190e33d45SPetar Penkov skb->len = len; 147290e33d45SPetar Penkov skb->data_len = len - linear; 147390e33d45SPetar Penkov skb->truesize += skb->data_len; 147490e33d45SPetar Penkov 147590e33d45SPetar Penkov for (i = 1; i < it->nr_segs; i++) { 147643a08e0fSEric Dumazet struct page_frag *pfrag = ¤t->task_frag; 147790e33d45SPetar Penkov size_t fragsz = it->iov[i].iov_len; 147890e33d45SPetar Penkov 147990e33d45SPetar Penkov if (fragsz == 0 || fragsz > PAGE_SIZE) { 148090e33d45SPetar Penkov err = -EINVAL; 148190e33d45SPetar Penkov goto free; 148290e33d45SPetar Penkov } 148390e33d45SPetar Penkov 148443a08e0fSEric Dumazet if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) { 148590e33d45SPetar Penkov err = -ENOMEM; 148690e33d45SPetar Penkov goto free; 148790e33d45SPetar Penkov } 148890e33d45SPetar Penkov 148943a08e0fSEric Dumazet skb_fill_page_desc(skb, i - 1, pfrag->page, 149043a08e0fSEric Dumazet pfrag->offset, fragsz); 149143a08e0fSEric Dumazet page_ref_inc(pfrag->page); 149243a08e0fSEric Dumazet pfrag->offset += fragsz; 149390e33d45SPetar Penkov } 149490e33d45SPetar Penkov 149590e33d45SPetar Penkov return skb; 149690e33d45SPetar Penkov free: 149790e33d45SPetar Penkov /* frees skb and all frags allocated with napi_alloc_frag() */ 149890e33d45SPetar Penkov napi_free_frags(&tfile->napi); 149990e33d45SPetar Penkov return ERR_PTR(err); 150090e33d45SPetar Penkov } 150190e33d45SPetar Penkov 1502f42157cbSRusty Russell /* prepad is the amount to reserve at front. len is length after that. 1503f42157cbSRusty Russell * linear is a hint as to how much to copy (usually headers). 
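 * Bytes beyond linear end up as paged data: sock_alloc_send_pskb() is
 * asked for prepad + linear of linear room and len - linear of frags.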
*/ 150454f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, 150533dccbb0SHerbert Xu size_t prepad, size_t len, 150633dccbb0SHerbert Xu size_t linear, int noblock) 1507f42157cbSRusty Russell { 150854f968d6SJason Wang struct sock *sk = tfile->socket.sk; 1509f42157cbSRusty Russell struct sk_buff *skb; 151033dccbb0SHerbert Xu int err; 1511f42157cbSRusty Russell 1512f42157cbSRusty Russell /* Under a page? Don't bother with paged skb. */ 15130eca93bcSHerbert Xu if (prepad + len < PAGE_SIZE || !linear) 151433dccbb0SHerbert Xu linear = len; 1515f42157cbSRusty Russell 151633dccbb0SHerbert Xu skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 151728d64271SEric Dumazet &err, 0); 1518f42157cbSRusty Russell if (!skb) 151933dccbb0SHerbert Xu return ERR_PTR(err); 1520f42157cbSRusty Russell 1521f42157cbSRusty Russell skb_reserve(skb, prepad); 1522f42157cbSRusty Russell skb_put(skb, linear); 152333dccbb0SHerbert Xu skb->data_len = len - linear; 152433dccbb0SHerbert Xu skb->len += len - linear; 1525f42157cbSRusty Russell 1526f42157cbSRusty Russell return skb; 1527f42157cbSRusty Russell } 1528f42157cbSRusty Russell 15295503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, 15305503fcecSJason Wang struct sk_buff *skb, int more) 15315503fcecSJason Wang { 15325503fcecSJason Wang struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 15335503fcecSJason Wang struct sk_buff_head process_queue; 15345503fcecSJason Wang u32 rx_batched = tun->rx_batched; 15355503fcecSJason Wang bool rcv = false; 15365503fcecSJason Wang 15375503fcecSJason Wang if (!rx_batched || (!more && skb_queue_empty(queue))) { 15385503fcecSJason Wang local_bh_disable(); 15395503fcecSJason Wang netif_receive_skb(skb); 15405503fcecSJason Wang local_bh_enable(); 15415503fcecSJason Wang return; 15425503fcecSJason Wang } 15435503fcecSJason Wang 15445503fcecSJason Wang spin_lock(&queue->lock); 15455503fcecSJason Wang if (!more || skb_queue_len(queue) == rx_batched) { 15465503fcecSJason Wang __skb_queue_head_init(&process_queue); 15475503fcecSJason Wang skb_queue_splice_tail_init(queue, &process_queue); 15485503fcecSJason Wang rcv = true; 15495503fcecSJason Wang } else { 15505503fcecSJason Wang __skb_queue_tail(queue, skb); 15515503fcecSJason Wang } 15525503fcecSJason Wang spin_unlock(&queue->lock); 15535503fcecSJason Wang 15545503fcecSJason Wang if (rcv) { 15555503fcecSJason Wang struct sk_buff *nskb; 15565503fcecSJason Wang 15575503fcecSJason Wang local_bh_disable(); 15585503fcecSJason Wang while ((nskb = __skb_dequeue(&process_queue))) 15595503fcecSJason Wang netif_receive_skb(nskb); 15605503fcecSJason Wang netif_receive_skb(skb); 15615503fcecSJason Wang local_bh_enable(); 15625503fcecSJason Wang } 15635503fcecSJason Wang } 15645503fcecSJason Wang 156566ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, 156666ccbc9cSJason Wang int len, int noblock, bool zerocopy) 156766ccbc9cSJason Wang { 156866ccbc9cSJason Wang if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 156966ccbc9cSJason Wang return false; 157066ccbc9cSJason Wang 157166ccbc9cSJason Wang if (tfile->socket.sk->sk_sndbuf != INT_MAX) 157266ccbc9cSJason Wang return false; 157366ccbc9cSJason Wang 157466ccbc9cSJason Wang if (!noblock) 157566ccbc9cSJason Wang return false; 157666ccbc9cSJason Wang 157766ccbc9cSJason Wang if (zerocopy) 157866ccbc9cSJason Wang return false; 157966ccbc9cSJason Wang 158066ccbc9cSJason Wang if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + 
158166ccbc9cSJason Wang SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) 158266ccbc9cSJason Wang return false; 158366ccbc9cSJason Wang 158466ccbc9cSJason Wang return true; 158566ccbc9cSJason Wang } 158666ccbc9cSJason Wang 1587ac1f1f6cSJason Wang static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf, 15888ae1aff0SJason Wang int buflen, int len, int pad) 1589ac1f1f6cSJason Wang { 1590ac1f1f6cSJason Wang struct sk_buff *skb = build_skb(buf, buflen); 1591ac1f1f6cSJason Wang 1592ac1f1f6cSJason Wang if (!skb) 1593ac1f1f6cSJason Wang return ERR_PTR(-ENOMEM); 1594ac1f1f6cSJason Wang 15958ae1aff0SJason Wang skb_reserve(skb, pad); 1596ac1f1f6cSJason Wang skb_put(skb, len); 1597ac1f1f6cSJason Wang 1598ac1f1f6cSJason Wang get_page(alloc_frag->page); 1599ac1f1f6cSJason Wang alloc_frag->offset += buflen; 1600ac1f1f6cSJason Wang 1601ac1f1f6cSJason Wang return skb; 1602ac1f1f6cSJason Wang } 1603ac1f1f6cSJason Wang 16048ae1aff0SJason Wang static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, 16058ae1aff0SJason Wang struct xdp_buff *xdp, u32 act) 16068ae1aff0SJason Wang { 16078ae1aff0SJason Wang int err; 16088ae1aff0SJason Wang 16098ae1aff0SJason Wang switch (act) { 16108ae1aff0SJason Wang case XDP_REDIRECT: 16118ae1aff0SJason Wang err = xdp_do_redirect(tun->dev, xdp, xdp_prog); 16128ae1aff0SJason Wang if (err) 16138ae1aff0SJason Wang return err; 16148ae1aff0SJason Wang break; 16158ae1aff0SJason Wang case XDP_TX: 16168ae1aff0SJason Wang err = tun_xdp_tx(tun->dev, xdp); 16178ae1aff0SJason Wang if (err < 0) 16188ae1aff0SJason Wang return err; 16198ae1aff0SJason Wang break; 16208ae1aff0SJason Wang case XDP_PASS: 16218ae1aff0SJason Wang break; 16228ae1aff0SJason Wang default: 16238ae1aff0SJason Wang bpf_warn_invalid_xdp_action(act); 16248ae1aff0SJason Wang /* fall through */ 16258ae1aff0SJason Wang case XDP_ABORTED: 16268ae1aff0SJason Wang trace_xdp_exception(tun->dev, xdp_prog, act); 16278ae1aff0SJason Wang /* fall through */ 16288ae1aff0SJason Wang case XDP_DROP: 16298ae1aff0SJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped); 16308ae1aff0SJason Wang break; 16318ae1aff0SJason Wang } 16328ae1aff0SJason Wang 16338ae1aff0SJason Wang return act; 16348ae1aff0SJason Wang } 16358ae1aff0SJason Wang 1636761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun, 1637761876c8SJason Wang struct tun_file *tfile, 163866ccbc9cSJason Wang struct iov_iter *from, 1639761876c8SJason Wang struct virtio_net_hdr *hdr, 16401cfe6e93SJason Wang int len, int *skb_xdp) 164166ccbc9cSJason Wang { 16420bbd7dadSEric Dumazet struct page_frag *alloc_frag = ¤t->task_frag; 1643761876c8SJason Wang struct bpf_prog *xdp_prog; 16447df13219SJason Wang int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 164566ccbc9cSJason Wang char *buf; 164666ccbc9cSJason Wang size_t copied; 16478ae1aff0SJason Wang int pad = TUN_RX_PAD; 16488ae1aff0SJason Wang int err = 0; 16497df13219SJason Wang 16507df13219SJason Wang rcu_read_lock(); 16517df13219SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 16527df13219SJason Wang if (xdp_prog) 16534f23aff8SJason Wang pad += XDP_PACKET_HEADROOM; 16547df13219SJason Wang buflen += SKB_DATA_ALIGN(len + pad); 16557df13219SJason Wang rcu_read_unlock(); 165666ccbc9cSJason Wang 165763b9ab65SJason Wang alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES); 165866ccbc9cSJason Wang if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) 165966ccbc9cSJason Wang return ERR_PTR(-ENOMEM); 166066ccbc9cSJason Wang 
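	/*
	 * The fragment being filled is laid out as
	 *   [ pad | packet data (len) | struct skb_shared_info ]
	 * where pad is TUN_RX_PAD, plus XDP_PACKET_HEADROOM when an XDP
	 * program is attached, and buflen was sized above so that
	 * __tun_build_skb() can hand the whole fragment to build_skb().
	 * The payload is copied in at offset pad to keep headroom for XDP.
	 */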
166166ccbc9cSJason Wang buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 166266ccbc9cSJason Wang copied = copy_page_from_iter(alloc_frag->page, 16637df13219SJason Wang alloc_frag->offset + pad, 166466ccbc9cSJason Wang len, from); 166566ccbc9cSJason Wang if (copied != len) 166666ccbc9cSJason Wang return ERR_PTR(-EFAULT); 166766ccbc9cSJason Wang 16687df13219SJason Wang /* There's a small window that XDP may be set after the check 16697df13219SJason Wang * of xdp_prog above, this should be rare and for simplicity 16707df13219SJason Wang * we do XDP on skb in case the headroom is not enough. 16717df13219SJason Wang */ 1672ac1f1f6cSJason Wang if (hdr->gso_type || !xdp_prog) { 16731cfe6e93SJason Wang *skb_xdp = 1; 16748ae1aff0SJason Wang return __tun_build_skb(alloc_frag, buf, buflen, len, pad); 1675ac1f1f6cSJason Wang } 1676ac1f1f6cSJason Wang 16771cfe6e93SJason Wang *skb_xdp = 0; 167866ccbc9cSJason Wang 16796547e387SToshiaki Makita local_bh_disable(); 1680761876c8SJason Wang rcu_read_lock(); 1681761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 16828ae1aff0SJason Wang if (xdp_prog) { 1683761876c8SJason Wang struct xdp_buff xdp; 1684761876c8SJason Wang u32 act; 1685761876c8SJason Wang 1686761876c8SJason Wang xdp.data_hard_start = buf; 16877df13219SJason Wang xdp.data = buf + pad; 1688de8f3a83SDaniel Borkmann xdp_set_data_meta_invalid(&xdp); 1689761876c8SJason Wang xdp.data_end = xdp.data + len; 16908bf5c4eeSJesper Dangaard Brouer xdp.rxq = &tfile->xdp_rxq; 1691761876c8SJason Wang 16928ae1aff0SJason Wang act = bpf_prog_run_xdp(xdp_prog, &xdp); 16938ae1aff0SJason Wang if (act == XDP_REDIRECT || act == XDP_TX) { 1694761876c8SJason Wang get_page(alloc_frag->page); 1695761876c8SJason Wang alloc_frag->offset += buflen; 1696761876c8SJason Wang } 16978ae1aff0SJason Wang err = tun_xdp_act(tun, xdp_prog, &xdp, act); 16988ae1aff0SJason Wang if (err < 0) 16998ae1aff0SJason Wang goto err_xdp; 17001a097910SJason Wang if (err == XDP_REDIRECT) 17011a097910SJason Wang xdp_do_flush_map(); 17028ae1aff0SJason Wang if (err != XDP_PASS) 17038ae1aff0SJason Wang goto out; 17048ae1aff0SJason Wang 17058ae1aff0SJason Wang pad = xdp.data - xdp.data_hard_start; 17068ae1aff0SJason Wang len = xdp.data_end - xdp.data; 1707761876c8SJason Wang } 1708761876c8SJason Wang rcu_read_unlock(); 17096547e387SToshiaki Makita local_bh_enable(); 1710291aeb2bSJason Wang 17118ae1aff0SJason Wang return __tun_build_skb(alloc_frag, buf, buflen, len, pad); 1712761876c8SJason Wang 17138ae1aff0SJason Wang err_xdp: 1714761876c8SJason Wang put_page(alloc_frag->page); 1715f7053b6cSJason Wang out: 1716761876c8SJason Wang rcu_read_unlock(); 17176547e387SToshiaki Makita local_bh_enable(); 1718761876c8SJason Wang return NULL; 171966ccbc9cSJason Wang } 172066ccbc9cSJason Wang 17211da177e4SLinus Torvalds /* Get packet from user space buffer */ 172254f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, 1723f5ff53b4SAl Viro void *msg_control, struct iov_iter *from, 17245503fcecSJason Wang int noblock, bool more) 17251da177e4SLinus Torvalds { 172609640e63SHarvey Harrison struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; 17271da177e4SLinus Torvalds struct sk_buff *skb; 1728f5ff53b4SAl Viro size_t total_len = iov_iter_count(from); 1729eaea34b2SPaolo Abeni size_t len = total_len, align = tun->align, linear; 1730f43798c2SRusty Russell struct virtio_net_hdr gso = { 0 }; 1731608b9977SPaolo Abeni struct tun_pcpu_stats *stats; 173296f8d9ecSJason Wang int good_linear; 17330690899bSMichael S. 
Tsirkin int copylen; 17340690899bSMichael S. Tsirkin bool zerocopy = false; 17350690899bSMichael S. Tsirkin int err; 173696f84061SJason Wang u32 rxhash = 0; 17371cfe6e93SJason Wang int skb_xdp = 1; 1738af3fb24eSEric Dumazet bool frags = tun_napi_frags_enabled(tfile); 17391da177e4SLinus Torvalds 17401bd4978aSEric Dumazet if (!(tun->dev->flags & IFF_UP)) 17411bd4978aSEric Dumazet return -EIO; 17421bd4978aSEric Dumazet 174340630b82SMichael S. Tsirkin if (!(tun->flags & IFF_NO_PI)) { 174415718ea0SDan Carpenter if (len < sizeof(pi)) 17451da177e4SLinus Torvalds return -EINVAL; 174615718ea0SDan Carpenter len -= sizeof(pi); 17471da177e4SLinus Torvalds 1748cbbd26b8SAl Viro if (!copy_from_iter_full(&pi, sizeof(pi), from)) 17491da177e4SLinus Torvalds return -EFAULT; 17501da177e4SLinus Torvalds } 17511da177e4SLinus Torvalds 175240630b82SMichael S. Tsirkin if (tun->flags & IFF_VNET_HDR) { 1753e1edab87SWillem de Bruijn int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 1754e1edab87SWillem de Bruijn 1755e1edab87SWillem de Bruijn if (len < vnet_hdr_sz) 1756f43798c2SRusty Russell return -EINVAL; 1757e1edab87SWillem de Bruijn len -= vnet_hdr_sz; 1758f43798c2SRusty Russell 1759cbbd26b8SAl Viro if (!copy_from_iter_full(&gso, sizeof(gso), from)) 1760f43798c2SRusty Russell return -EFAULT; 1761f43798c2SRusty Russell 17624909122fSHerbert Xu if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 176356f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len)) 176456f0dcc5SMichael S. Tsirkin gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2); 17654909122fSHerbert Xu 176656f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > len) 1767f43798c2SRusty Russell return -EINVAL; 1768e1edab87SWillem de Bruijn iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); 1769f43798c2SRusty Russell } 1770f43798c2SRusty Russell 177140630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { 1772a504b86eSstephen hemminger align += NET_IP_ALIGN; 17730eca93bcSHerbert Xu if (unlikely(len < ETH_HLEN || 177456f0dcc5SMichael S. Tsirkin (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN))) 1775e01bf1c8SRusty Russell return -EINVAL; 1776e01bf1c8SRusty Russell } 17771da177e4SLinus Torvalds 177896f8d9ecSJason Wang good_linear = SKB_MAX_HEAD(align); 177996f8d9ecSJason Wang 178088529176SJason Wang if (msg_control) { 1781f5ff53b4SAl Viro struct iov_iter i = *from; 1782f5ff53b4SAl Viro 178388529176SJason Wang /* There are 256 bytes to be copied in skb, so there is 178488529176SJason Wang * enough room for skb expand head in case it is used. 17850690899bSMichael S. Tsirkin * The rest of the buffer is mapped from userspace. 17860690899bSMichael S. Tsirkin */ 178756f0dcc5SMichael S. Tsirkin copylen = gso.hdr_len ? 
tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN; 178896f8d9ecSJason Wang if (copylen > good_linear) 178996f8d9ecSJason Wang copylen = good_linear; 17903dd5c330SJason Wang linear = copylen; 1791f5ff53b4SAl Viro iov_iter_advance(&i, copylen); 1792f5ff53b4SAl Viro if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) 179388529176SJason Wang zerocopy = true; 179488529176SJason Wang } 179588529176SJason Wang 179690e33d45SPetar Penkov if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { 17971cfe6e93SJason Wang /* For the packet that is not easy to be processed 17981cfe6e93SJason Wang * (e.g gso or jumbo packet), we will do it at after 17991cfe6e93SJason Wang * skb was created with generic XDP routine. 18001cfe6e93SJason Wang */ 18011cfe6e93SJason Wang skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); 180266ccbc9cSJason Wang if (IS_ERR(skb)) { 180366ccbc9cSJason Wang this_cpu_inc(tun->pcpu_stats->rx_dropped); 180466ccbc9cSJason Wang return PTR_ERR(skb); 180566ccbc9cSJason Wang } 1806761876c8SJason Wang if (!skb) 1807761876c8SJason Wang return total_len; 180866ccbc9cSJason Wang } else { 180988529176SJason Wang if (!zerocopy) { 18100690899bSMichael S. Tsirkin copylen = len; 181156f0dcc5SMichael S. Tsirkin if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) 181296f8d9ecSJason Wang linear = good_linear; 181396f8d9ecSJason Wang else 181456f0dcc5SMichael S. Tsirkin linear = tun16_to_cpu(tun, gso.hdr_len); 18153dd5c330SJason Wang } 18160690899bSMichael S. Tsirkin 181790e33d45SPetar Penkov if (frags) { 181890e33d45SPetar Penkov mutex_lock(&tfile->napi_mutex); 181990e33d45SPetar Penkov skb = tun_napi_alloc_frags(tfile, copylen, from); 182090e33d45SPetar Penkov /* tun_napi_alloc_frags() enforces a layout for the skb. 182190e33d45SPetar Penkov * If zerocopy is enabled, then this layout will be 182290e33d45SPetar Penkov * overwritten by zerocopy_sg_from_iter(). 182390e33d45SPetar Penkov */ 182490e33d45SPetar Penkov zerocopy = false; 182590e33d45SPetar Penkov } else { 182690e33d45SPetar Penkov skb = tun_alloc_skb(tfile, align, copylen, linear, 182790e33d45SPetar Penkov noblock); 182890e33d45SPetar Penkov } 182990e33d45SPetar Penkov 183033dccbb0SHerbert Xu if (IS_ERR(skb)) { 183133dccbb0SHerbert Xu if (PTR_ERR(skb) != -EAGAIN) 1832608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 183390e33d45SPetar Penkov if (frags) 183490e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 183533dccbb0SHerbert Xu return PTR_ERR(skb); 18361da177e4SLinus Torvalds } 18371da177e4SLinus Torvalds 18380690899bSMichael S. Tsirkin if (zerocopy) 1839f5ff53b4SAl Viro err = zerocopy_sg_from_iter(skb, from); 1840af1cc7a2SJason Wang else 1841f5ff53b4SAl Viro err = skb_copy_datagram_from_iter(skb, 0, from, len); 18420690899bSMichael S. Tsirkin 18430690899bSMichael S. 
Tsirkin if (err) { 1844608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 18458f22757eSDave Jones kfree_skb(skb); 184690e33d45SPetar Penkov if (frags) { 184790e33d45SPetar Penkov tfile->napi.skb = NULL; 184890e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 184990e33d45SPetar Penkov } 185090e33d45SPetar Penkov 18511da177e4SLinus Torvalds return -EFAULT; 18528f22757eSDave Jones } 185366ccbc9cSJason Wang } 18541da177e4SLinus Torvalds 18553e9e40e7SJarno Rajahalme if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { 1856df10db98SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_frame_errors); 1857df10db98SPaolo Abeni kfree_skb(skb); 185890e33d45SPetar Penkov if (frags) { 185990e33d45SPetar Penkov tfile->napi.skb = NULL; 186090e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 186190e33d45SPetar Penkov } 186290e33d45SPetar Penkov 1863df10db98SPaolo Abeni return -EINVAL; 1864df10db98SPaolo Abeni } 1865df10db98SPaolo Abeni 18661da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 186740630b82SMichael S. Tsirkin case IFF_TUN: 186840630b82SMichael S. Tsirkin if (tun->flags & IFF_NO_PI) { 18692580c4c1SAlexander Potapenko u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; 18702580c4c1SAlexander Potapenko 18712580c4c1SAlexander Potapenko switch (ip_version) { 18722580c4c1SAlexander Potapenko case 4: 1873f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IP); 1874f09f7ee2SAng Way Chuang break; 18752580c4c1SAlexander Potapenko case 6: 1876f09f7ee2SAng Way Chuang pi.proto = htons(ETH_P_IPV6); 1877f09f7ee2SAng Way Chuang break; 1878f09f7ee2SAng Way Chuang default: 1879608b9977SPaolo Abeni this_cpu_inc(tun->pcpu_stats->rx_dropped); 1880f09f7ee2SAng Way Chuang kfree_skb(skb); 1881f09f7ee2SAng Way Chuang return -EINVAL; 1882f09f7ee2SAng Way Chuang } 1883f09f7ee2SAng Way Chuang } 1884f09f7ee2SAng Way Chuang 1885459a98edSArnaldo Carvalho de Melo skb_reset_mac_header(skb); 18861da177e4SLinus Torvalds skb->protocol = pi.proto; 18874c13eb66SArnaldo Carvalho de Melo skb->dev = tun->dev; 18881da177e4SLinus Torvalds break; 188940630b82SMichael S. Tsirkin case IFF_TAP: 189090e33d45SPetar Penkov if (!frags) 18911da177e4SLinus Torvalds skb->protocol = eth_type_trans(skb, tun->dev); 18921da177e4SLinus Torvalds break; 18936403eab1SJoe Perches } 18941da177e4SLinus Torvalds 18950690899bSMichael S. Tsirkin /* copy skb_ubuf_info for callback when skb has no error */ 18960690899bSMichael S. Tsirkin if (zerocopy) { 18970690899bSMichael S. Tsirkin skb_shinfo(skb)->destructor_arg = msg_control; 18980690899bSMichael S. Tsirkin skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1899c9af6db4SPravin B Shelar skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1900af1cc7a2SJason Wang } else if (msg_control) { 1901af1cc7a2SJason Wang struct ubuf_info *uarg = msg_control; 1902af1cc7a2SJason Wang uarg->callback(uarg, false); 19030690899bSMichael S. Tsirkin } 19040690899bSMichael S. 
Tsirkin 190572f65107SVlad Yasevich skb_reset_network_header(skb); 190640893fd0SJason Wang skb_probe_transport_header(skb, 0); 190738502af7SJason Wang 19081cfe6e93SJason Wang if (skb_xdp) { 1909761876c8SJason Wang struct bpf_prog *xdp_prog; 1910761876c8SJason Wang int ret; 1911761876c8SJason Wang 19126547e387SToshiaki Makita local_bh_disable(); 1913761876c8SJason Wang rcu_read_lock(); 1914761876c8SJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 1915761876c8SJason Wang if (xdp_prog) { 1916761876c8SJason Wang ret = do_xdp_generic(xdp_prog, skb); 1917761876c8SJason Wang if (ret != XDP_PASS) { 1918761876c8SJason Wang rcu_read_unlock(); 19196547e387SToshiaki Makita local_bh_enable(); 1920761876c8SJason Wang return total_len; 1921761876c8SJason Wang } 1922761876c8SJason Wang } 1923761876c8SJason Wang rcu_read_unlock(); 19246547e387SToshiaki Makita local_bh_enable(); 1925761876c8SJason Wang } 1926761876c8SJason Wang 1927cf1a1e07SPaolo Abeni /* Compute the costly rx hash only if needed for flow updates. 1928cf1a1e07SPaolo Abeni * We may get a very small possibility of OOO during switching, not 1929cf1a1e07SPaolo Abeni * worth to optimize. 1930cf1a1e07SPaolo Abeni */ 1931cf1a1e07SPaolo Abeni if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 && 1932cf1a1e07SPaolo Abeni !tfile->detached) 1933feec084aSJason Wang rxhash = __skb_get_hash_symmetric(skb); 193494317099SPetar Penkov 193590e33d45SPetar Penkov if (frags) { 193690e33d45SPetar Penkov /* Exercise flow dissector code path. */ 193790e33d45SPetar Penkov u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb)); 193890e33d45SPetar Penkov 1939010f245bSEric Dumazet if (unlikely(headlen > skb_headlen(skb))) { 194090e33d45SPetar Penkov this_cpu_inc(tun->pcpu_stats->rx_dropped); 194190e33d45SPetar Penkov napi_free_frags(&tfile->napi); 194290e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 194390e33d45SPetar Penkov WARN_ON(1); 194490e33d45SPetar Penkov return -ENOMEM; 194590e33d45SPetar Penkov } 194690e33d45SPetar Penkov 194790e33d45SPetar Penkov local_bh_disable(); 194890e33d45SPetar Penkov napi_gro_frags(&tfile->napi); 194990e33d45SPetar Penkov local_bh_enable(); 195090e33d45SPetar Penkov mutex_unlock(&tfile->napi_mutex); 1951aec72f33SEric Dumazet } else if (tfile->napi_enabled) { 195294317099SPetar Penkov struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 195394317099SPetar Penkov int queue_len; 195494317099SPetar Penkov 195594317099SPetar Penkov spin_lock_bh(&queue->lock); 195694317099SPetar Penkov __skb_queue_tail(queue, skb); 195794317099SPetar Penkov queue_len = skb_queue_len(queue); 195894317099SPetar Penkov spin_unlock(&queue->lock); 195994317099SPetar Penkov 196094317099SPetar Penkov if (!more || queue_len > NAPI_POLL_WEIGHT) 196194317099SPetar Penkov napi_schedule(&tfile->napi); 196294317099SPetar Penkov 196394317099SPetar Penkov local_bh_enable(); 196494317099SPetar Penkov } else if (!IS_ENABLED(CONFIG_4KSTACKS)) { 19655503fcecSJason Wang tun_rx_batched(tun, tfile, skb, more); 196694317099SPetar Penkov } else { 19671da177e4SLinus Torvalds netif_rx_ni(skb); 196894317099SPetar Penkov } 19691da177e4SLinus Torvalds 1970608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats); 1971608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp); 1972608b9977SPaolo Abeni stats->rx_packets++; 1973608b9977SPaolo Abeni stats->rx_bytes += len; 1974608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp); 1975608b9977SPaolo Abeni put_cpu_ptr(stats); 19761da177e4SLinus Torvalds 197796f84061SJason Wang if (rxhash) 
19789e85722dSJason Wang tun_flow_update(tun, rxhash, tfile); 197996f84061SJason Wang 19800690899bSMichael S. Tsirkin return total_len; 19811da177e4SLinus Torvalds } 19821da177e4SLinus Torvalds 1983f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) 19841da177e4SLinus Torvalds { 198533dccbb0SHerbert Xu struct file *file = iocb->ki_filp; 198654f968d6SJason Wang struct tun_file *tfile = file->private_data; 19879484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 1988631ab46bSEric W. Biederman ssize_t result; 19891da177e4SLinus Torvalds 19901da177e4SLinus Torvalds if (!tun) 19911da177e4SLinus Torvalds return -EBADFD; 19921da177e4SLinus Torvalds 19935503fcecSJason Wang result = tun_get_user(tun, tfile, NULL, from, 19945503fcecSJason Wang file->f_flags & O_NONBLOCK, false); 1995631ab46bSEric W. Biederman 1996631ab46bSEric W. Biederman tun_put(tun); 1997631ab46bSEric W. Biederman return result; 19981da177e4SLinus Torvalds } 19991da177e4SLinus Torvalds 2000fc72d1d5SJason Wang static ssize_t tun_put_user_xdp(struct tun_struct *tun, 2001fc72d1d5SJason Wang struct tun_file *tfile, 20021ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdp_frame, 2003fc72d1d5SJason Wang struct iov_iter *iter) 2004fc72d1d5SJason Wang { 2005fc72d1d5SJason Wang int vnet_hdr_sz = 0; 20061ffcbc85SJesper Dangaard Brouer size_t size = xdp_frame->len; 2007fc72d1d5SJason Wang struct tun_pcpu_stats *stats; 2008fc72d1d5SJason Wang size_t ret; 2009fc72d1d5SJason Wang 2010fc72d1d5SJason Wang if (tun->flags & IFF_VNET_HDR) { 2011fc72d1d5SJason Wang struct virtio_net_hdr gso = { 0 }; 2012fc72d1d5SJason Wang 2013fc72d1d5SJason Wang vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 2014fc72d1d5SJason Wang if (unlikely(iov_iter_count(iter) < vnet_hdr_sz)) 2015fc72d1d5SJason Wang return -EINVAL; 2016fc72d1d5SJason Wang if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) != 2017fc72d1d5SJason Wang sizeof(gso))) 2018fc72d1d5SJason Wang return -EFAULT; 2019fc72d1d5SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 2020fc72d1d5SJason Wang } 2021fc72d1d5SJason Wang 20221ffcbc85SJesper Dangaard Brouer ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz; 2023fc72d1d5SJason Wang 2024fc72d1d5SJason Wang stats = get_cpu_ptr(tun->pcpu_stats); 2025fc72d1d5SJason Wang u64_stats_update_begin(&stats->syncp); 2026fc72d1d5SJason Wang stats->tx_packets++; 2027fc72d1d5SJason Wang stats->tx_bytes += ret; 2028fc72d1d5SJason Wang u64_stats_update_end(&stats->syncp); 2029fc72d1d5SJason Wang put_cpu_ptr(tun->pcpu_stats); 2030fc72d1d5SJason Wang 2031fc72d1d5SJason Wang return ret; 2032fc72d1d5SJason Wang } 2033fc72d1d5SJason Wang 20341da177e4SLinus Torvalds /* Put packet to the user space buffer */ 20356f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun, 203654f968d6SJason Wang struct tun_file *tfile, 20371da177e4SLinus Torvalds struct sk_buff *skb, 2038e0b46d0eSHerbert Xu struct iov_iter *iter) 20391da177e4SLinus Torvalds { 20401da177e4SLinus Torvalds struct tun_pi pi = { 0, skb->protocol }; 2041608b9977SPaolo Abeni struct tun_pcpu_stats *stats; 2042e0b46d0eSHerbert Xu ssize_t total; 20438c847d25SJason Wang int vlan_offset = 0; 2044a8f9bfdfSHerbert Xu int vlan_hlen = 0; 20452eb783c4SHerbert Xu int vnet_hdr_sz = 0; 2046a8f9bfdfSHerbert Xu 2047df8a39deSJiri Pirko if (skb_vlan_tag_present(skb)) 2048a8f9bfdfSHerbert Xu vlan_hlen = VLAN_HLEN; 20491da177e4SLinus Torvalds 205040630b82SMichael S. 
Tsirkin if (tun->flags & IFF_VNET_HDR) 2051e1edab87SWillem de Bruijn vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 20521da177e4SLinus Torvalds 2053e0b46d0eSHerbert Xu total = skb->len + vlan_hlen + vnet_hdr_sz; 2054e0b46d0eSHerbert Xu 205540630b82SMichael S. Tsirkin if (!(tun->flags & IFF_NO_PI)) { 2056e0b46d0eSHerbert Xu if (iov_iter_count(iter) < sizeof(pi)) 20571da177e4SLinus Torvalds return -EINVAL; 20581da177e4SLinus Torvalds 2059e0b46d0eSHerbert Xu total += sizeof(pi); 2060e0b46d0eSHerbert Xu if (iov_iter_count(iter) < total) { 20611da177e4SLinus Torvalds /* Packet will be striped */ 20621da177e4SLinus Torvalds pi.flags |= TUN_PKT_STRIP; 20631da177e4SLinus Torvalds } 20641da177e4SLinus Torvalds 2065e0b46d0eSHerbert Xu if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi)) 20661da177e4SLinus Torvalds return -EFAULT; 20671da177e4SLinus Torvalds } 20681da177e4SLinus Torvalds 20692eb783c4SHerbert Xu if (vnet_hdr_sz) { 20709403cd7cSJarno Rajahalme struct virtio_net_hdr gso; 207134166093SMike Rapoport 2072e0b46d0eSHerbert Xu if (iov_iter_count(iter) < vnet_hdr_sz) 2073f43798c2SRusty Russell return -EINVAL; 2074f43798c2SRusty Russell 20753e9e40e7SJarno Rajahalme if (virtio_net_hdr_from_skb(skb, &gso, 2076fd3a8862SWillem de Bruijn tun_is_little_endian(tun), true, 2077fd3a8862SWillem de Bruijn vlan_hlen)) { 2078f43798c2SRusty Russell struct skb_shared_info *sinfo = skb_shinfo(skb); 20796b8a66eeSJoe Perches pr_err("unexpected GSO type: " 2080ef3db4a5SMichael S. Tsirkin "0x%x, gso_size %d, hdr_len %d\n", 208156f0dcc5SMichael S. Tsirkin sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), 208256f0dcc5SMichael S. Tsirkin tun16_to_cpu(tun, gso.hdr_len)); 2083ef3db4a5SMichael S. Tsirkin print_hex_dump(KERN_ERR, "tun: ", 2084ef3db4a5SMichael S. Tsirkin DUMP_PREFIX_NONE, 2085ef3db4a5SMichael S. Tsirkin 16, 1, skb->head, 208656f0dcc5SMichael S. Tsirkin min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); 2087ef3db4a5SMichael S. Tsirkin WARN_ON_ONCE(1); 2088ef3db4a5SMichael S. Tsirkin return -EINVAL; 2089ef3db4a5SMichael S. 
Tsirkin } 2090f43798c2SRusty Russell 2091e0b46d0eSHerbert Xu if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) 2092f43798c2SRusty Russell return -EFAULT; 20938c847d25SJason Wang 20948c847d25SJason Wang iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 2095f43798c2SRusty Russell } 2096f43798c2SRusty Russell 2097a8f9bfdfSHerbert Xu if (vlan_hlen) { 2098e0b46d0eSHerbert Xu int ret; 2099aff3d70aSJason Wang struct veth veth; 21001da177e4SLinus Torvalds 21016680ec68SJason Wang veth.h_vlan_proto = skb->vlan_proto; 2102df8a39deSJiri Pirko veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); 21031da177e4SLinus Torvalds 21046680ec68SJason Wang vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 21056680ec68SJason Wang 2106e0b46d0eSHerbert Xu ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); 2107e0b46d0eSHerbert Xu if (ret || !iov_iter_count(iter)) 21086680ec68SJason Wang goto done; 21096680ec68SJason Wang 2110e0b46d0eSHerbert Xu ret = copy_to_iter(&veth, sizeof(veth), iter); 2111e0b46d0eSHerbert Xu if (ret != sizeof(veth) || !iov_iter_count(iter)) 21126680ec68SJason Wang goto done; 21136680ec68SJason Wang } 21146680ec68SJason Wang 2115e0b46d0eSHerbert Xu skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); 21166680ec68SJason Wang 21176680ec68SJason Wang done: 2118608b9977SPaolo Abeni /* caller is in process context, */ 2119608b9977SPaolo Abeni stats = get_cpu_ptr(tun->pcpu_stats); 2120608b9977SPaolo Abeni u64_stats_update_begin(&stats->syncp); 2121608b9977SPaolo Abeni stats->tx_packets++; 2122608b9977SPaolo Abeni stats->tx_bytes += skb->len + vlan_hlen; 2123608b9977SPaolo Abeni u64_stats_update_end(&stats->syncp); 2124608b9977SPaolo Abeni put_cpu_ptr(tun->pcpu_stats); 21251da177e4SLinus Torvalds 21261da177e4SLinus Torvalds return total; 21271da177e4SLinus Torvalds } 21281da177e4SLinus Torvalds 2129fc72d1d5SJason Wang static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) 21301576d986SJason Wang { 21311576d986SJason Wang DECLARE_WAITQUEUE(wait, current); 2132fc72d1d5SJason Wang void *ptr = NULL; 2133f48cc6b2SJason Wang int error = 0; 21341576d986SJason Wang 2135fc72d1d5SJason Wang ptr = ptr_ring_consume(&tfile->tx_ring); 2136fc72d1d5SJason Wang if (ptr) 21371576d986SJason Wang goto out; 21381576d986SJason Wang if (noblock) { 2139f48cc6b2SJason Wang error = -EAGAIN; 21401576d986SJason Wang goto out; 21411576d986SJason Wang } 21421576d986SJason Wang 21431576d986SJason Wang add_wait_queue(&tfile->wq.wait, &wait); 21441576d986SJason Wang current->state = TASK_INTERRUPTIBLE; 21451576d986SJason Wang 21461576d986SJason Wang while (1) { 2147fc72d1d5SJason Wang ptr = ptr_ring_consume(&tfile->tx_ring); 2148fc72d1d5SJason Wang if (ptr) 21491576d986SJason Wang break; 21501576d986SJason Wang if (signal_pending(current)) { 2151f48cc6b2SJason Wang error = -ERESTARTSYS; 21521576d986SJason Wang break; 21531576d986SJason Wang } 21541576d986SJason Wang if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { 2155f48cc6b2SJason Wang error = -EFAULT; 21561576d986SJason Wang break; 21571576d986SJason Wang } 21581576d986SJason Wang 21591576d986SJason Wang schedule(); 21601576d986SJason Wang } 21611576d986SJason Wang 21621576d986SJason Wang current->state = TASK_RUNNING; 21631576d986SJason Wang remove_wait_queue(&tfile->wq.wait, &wait); 21641576d986SJason Wang 21651576d986SJason Wang out: 2166f48cc6b2SJason Wang *err = error; 2167fc72d1d5SJason Wang return ptr; 21681576d986SJason Wang } 21691576d986SJason Wang 217054f968d6SJason Wang static ssize_t tun_do_read(struct 
tun_struct *tun, struct tun_file *tfile, 21719b067034SAl Viro struct iov_iter *to, 2172fc72d1d5SJason Wang int noblock, void *ptr) 21731da177e4SLinus Torvalds { 21749b067034SAl Viro ssize_t ret; 21751576d986SJason Wang int err; 21761da177e4SLinus Torvalds 21773872baf6SRami Rosen tun_debug(KERN_INFO, tun, "tun_do_read\n"); 21781da177e4SLinus Torvalds 2179c33ee15bSWei Xu if (!iov_iter_count(to)) { 2180fc72d1d5SJason Wang tun_ptr_free(ptr); 21819b067034SAl Viro return 0; 2182c33ee15bSWei Xu } 21831da177e4SLinus Torvalds 2184fc72d1d5SJason Wang if (!ptr) { 21851576d986SJason Wang /* Read frames from ring */ 2186fc72d1d5SJason Wang ptr = tun_ring_recv(tfile, noblock, &err); 2187fc72d1d5SJason Wang if (!ptr) 2188957f094fSAlex Gartrell return err; 2189ac77cfd4SJason Wang } 2190e0b46d0eSHerbert Xu 21911ffcbc85SJesper Dangaard Brouer if (tun_is_xdp_frame(ptr)) { 21921ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); 2193fc72d1d5SJason Wang 21941ffcbc85SJesper Dangaard Brouer ret = tun_put_user_xdp(tun, tfile, xdpf, to); 219503993094SJesper Dangaard Brouer xdp_return_frame(xdpf); 2196fc72d1d5SJason Wang } else { 2197fc72d1d5SJason Wang struct sk_buff *skb = ptr; 2198fc72d1d5SJason Wang 21999b067034SAl Viro ret = tun_put_user(tun, tfile, skb, to); 2200f51a5e82SJason Wang if (unlikely(ret < 0)) 22011da177e4SLinus Torvalds kfree_skb(skb); 2202f51a5e82SJason Wang else 2203f51a5e82SJason Wang consume_skb(skb); 2204fc72d1d5SJason Wang } 22051da177e4SLinus Torvalds 220605c2828cSMichael S. Tsirkin return ret; 220705c2828cSMichael S. Tsirkin } 220805c2828cSMichael S. Tsirkin 22099b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) 221005c2828cSMichael S. Tsirkin { 221105c2828cSMichael S. Tsirkin struct file *file = iocb->ki_filp; 221205c2828cSMichael S. Tsirkin struct tun_file *tfile = file->private_data; 22139484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 22149b067034SAl Viro ssize_t len = iov_iter_count(to), ret; 221505c2828cSMichael S. Tsirkin 221605c2828cSMichael S. Tsirkin if (!tun) 221705c2828cSMichael S. Tsirkin return -EBADFD; 2218ac77cfd4SJason Wang ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL); 221942404c09SDavid S. Miller ret = min_t(ssize_t, ret, len); 2220d0b7da8aSZhi Yong Wu if (ret > 0) 2221d0b7da8aSZhi Yong Wu iocb->ki_pos = ret; 2222631ab46bSEric W. 
Biederman tun_put(tun); 22231da177e4SLinus Torvalds return ret; 22241da177e4SLinus Torvalds } 22251da177e4SLinus Torvalds 2226cd5681d7SJason Wang static void tun_prog_free(struct rcu_head *rcu) 222796f84061SJason Wang { 2228cd5681d7SJason Wang struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu); 222996f84061SJason Wang 223096f84061SJason Wang bpf_prog_destroy(prog->prog); 223196f84061SJason Wang kfree(prog); 223296f84061SJason Wang } 223396f84061SJason Wang 22349d6474e4SJason Wang static int __tun_set_ebpf(struct tun_struct *tun, 22359d6474e4SJason Wang struct tun_prog __rcu **prog_p, 223696f84061SJason Wang struct bpf_prog *prog) 223796f84061SJason Wang { 2238cd5681d7SJason Wang struct tun_prog *old, *new = NULL; 223996f84061SJason Wang 224096f84061SJason Wang if (prog) { 224196f84061SJason Wang new = kmalloc(sizeof(*new), GFP_KERNEL); 224296f84061SJason Wang if (!new) 224396f84061SJason Wang return -ENOMEM; 224496f84061SJason Wang new->prog = prog; 224596f84061SJason Wang } 224696f84061SJason Wang 2247124da8f6SJason Wang spin_lock_bh(&tun->lock); 2248cd5681d7SJason Wang old = rcu_dereference_protected(*prog_p, 2249124da8f6SJason Wang lockdep_is_held(&tun->lock)); 2250cd5681d7SJason Wang rcu_assign_pointer(*prog_p, new); 2251124da8f6SJason Wang spin_unlock_bh(&tun->lock); 225296f84061SJason Wang 225396f84061SJason Wang if (old) 2254cd5681d7SJason Wang call_rcu(&old->rcu, tun_prog_free); 225596f84061SJason Wang 225696f84061SJason Wang return 0; 225796f84061SJason Wang } 225896f84061SJason Wang 225996442e42SJason Wang static void tun_free_netdev(struct net_device *dev) 226096442e42SJason Wang { 226196442e42SJason Wang struct tun_struct *tun = netdev_priv(dev); 226296442e42SJason Wang 22634008e97fSJason Wang BUG_ON(!(list_empty(&tun->disabled))); 2264608b9977SPaolo Abeni free_percpu(tun->pcpu_stats); 226596442e42SJason Wang tun_flow_uninit(tun); 22665dbbaf2dSPaul Moore security_tun_dev_free_security(tun->security); 2267cd5681d7SJason Wang __tun_set_ebpf(tun, &tun->steering_prog, NULL); 2268aff3d70aSJason Wang __tun_set_ebpf(tun, &tun->filter_prog, NULL); 226996442e42SJason Wang } 227096442e42SJason Wang 22711da177e4SLinus Torvalds static void tun_setup(struct net_device *dev) 22721da177e4SLinus Torvalds { 22731da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 22741da177e4SLinus Torvalds 22750625c883SEric W. Biederman tun->owner = INVALID_UID; 22760625c883SEric W. Biederman tun->group = INVALID_GID; 22774e24f2ddSChas Williams tun_default_link_ksettings(dev, &tun->link_ksettings); 22781da177e4SLinus Torvalds 22791da177e4SLinus Torvalds dev->ethtool_ops = &tun_ethtool_ops; 2280cf124db5SDavid S. Miller dev->needs_free_netdev = true; 2281cf124db5SDavid S. Miller dev->priv_destructor = tun_free_netdev; 2282016adb72SJason Wang /* We prefer our own queue length */ 2283016adb72SJason Wang dev->tx_queue_len = TUN_READQ_SIZE; 22841da177e4SLinus Torvalds } 22851da177e4SLinus Torvalds 2286f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting tun or tap 2287f019a7a5SEric W. Biederman * device with netlink. 2288f019a7a5SEric W. Biederman */ 2289a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[], 2290a8b8a889SMatthias Schiffer struct netlink_ext_ack *extack) 2291f019a7a5SEric W. Biederman { 2292f019a7a5SEric W. Biederman return -EINVAL; 2293f019a7a5SEric W. Biederman } 2294f019a7a5SEric W. 
Biederman 22951ec010e7SSabrina Dubroca static size_t tun_get_size(const struct net_device *dev) 22961ec010e7SSabrina Dubroca { 22971ec010e7SSabrina Dubroca BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t)); 22981ec010e7SSabrina Dubroca BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t)); 22991ec010e7SSabrina Dubroca 23001ec010e7SSabrina Dubroca return nla_total_size(sizeof(uid_t)) + /* OWNER */ 23011ec010e7SSabrina Dubroca nla_total_size(sizeof(gid_t)) + /* GROUP */ 23021ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* TYPE */ 23031ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* PI */ 23041ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* VNET_HDR */ 23051ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* PERSIST */ 23061ec010e7SSabrina Dubroca nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */ 23071ec010e7SSabrina Dubroca nla_total_size(sizeof(u32)) + /* NUM_QUEUES */ 23081ec010e7SSabrina Dubroca nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */ 23091ec010e7SSabrina Dubroca 0; 23101ec010e7SSabrina Dubroca } 23111ec010e7SSabrina Dubroca 23121ec010e7SSabrina Dubroca static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev) 23131ec010e7SSabrina Dubroca { 23141ec010e7SSabrina Dubroca struct tun_struct *tun = netdev_priv(dev); 23151ec010e7SSabrina Dubroca 23161ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK)) 23171ec010e7SSabrina Dubroca goto nla_put_failure; 23181ec010e7SSabrina Dubroca if (uid_valid(tun->owner) && 23191ec010e7SSabrina Dubroca nla_put_u32(skb, IFLA_TUN_OWNER, 23201ec010e7SSabrina Dubroca from_kuid_munged(current_user_ns(), tun->owner))) 23211ec010e7SSabrina Dubroca goto nla_put_failure; 23221ec010e7SSabrina Dubroca if (gid_valid(tun->group) && 23231ec010e7SSabrina Dubroca nla_put_u32(skb, IFLA_TUN_GROUP, 23241ec010e7SSabrina Dubroca from_kgid_munged(current_user_ns(), tun->group))) 23251ec010e7SSabrina Dubroca goto nla_put_failure; 23261ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI))) 23271ec010e7SSabrina Dubroca goto nla_put_failure; 23281ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR))) 23291ec010e7SSabrina Dubroca goto nla_put_failure; 23301ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST))) 23311ec010e7SSabrina Dubroca goto nla_put_failure; 23321ec010e7SSabrina Dubroca if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE, 23331ec010e7SSabrina Dubroca !!(tun->flags & IFF_MULTI_QUEUE))) 23341ec010e7SSabrina Dubroca goto nla_put_failure; 23351ec010e7SSabrina Dubroca if (tun->flags & IFF_MULTI_QUEUE) { 23361ec010e7SSabrina Dubroca if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues)) 23371ec010e7SSabrina Dubroca goto nla_put_failure; 23381ec010e7SSabrina Dubroca if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES, 23391ec010e7SSabrina Dubroca tun->numdisabled)) 23401ec010e7SSabrina Dubroca goto nla_put_failure; 23411ec010e7SSabrina Dubroca } 23421ec010e7SSabrina Dubroca 23431ec010e7SSabrina Dubroca return 0; 23441ec010e7SSabrina Dubroca 23451ec010e7SSabrina Dubroca nla_put_failure: 23461ec010e7SSabrina Dubroca return -EMSGSIZE; 23471ec010e7SSabrina Dubroca } 23481ec010e7SSabrina Dubroca 2349f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = { 2350f019a7a5SEric W. Biederman .kind = DRV_NAME, 2351f019a7a5SEric W. Biederman .priv_size = sizeof(struct tun_struct), 2352f019a7a5SEric W. Biederman .setup = tun_setup, 2353f019a7a5SEric W. 
Biederman .validate = tun_validate, 23541ec010e7SSabrina Dubroca .get_size = tun_get_size, 23551ec010e7SSabrina Dubroca .fill_info = tun_fill_info, 2356f019a7a5SEric W. Biederman }; 2357f019a7a5SEric W. Biederman 235833dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk) 235933dccbb0SHerbert Xu { 236054f968d6SJason Wang struct tun_file *tfile; 236143815482SEric Dumazet wait_queue_head_t *wqueue; 236233dccbb0SHerbert Xu 236333dccbb0SHerbert Xu if (!sock_writeable(sk)) 236433dccbb0SHerbert Xu return; 236533dccbb0SHerbert Xu 23669cd3e072SEric Dumazet if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) 236733dccbb0SHerbert Xu return; 236833dccbb0SHerbert Xu 236943815482SEric Dumazet wqueue = sk_sleep(sk); 237043815482SEric Dumazet if (wqueue && waitqueue_active(wqueue)) 2371a9a08845SLinus Torvalds wake_up_interruptible_sync_poll(wqueue, EPOLLOUT | 2372a9a08845SLinus Torvalds EPOLLWRNORM | EPOLLWRBAND); 2373c722c625SHerbert Xu 237454f968d6SJason Wang tfile = container_of(sk, struct tun_file, sk); 237554f968d6SJason Wang kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); 237633dccbb0SHerbert Xu } 237733dccbb0SHerbert Xu 2378043d222fSJason Wang static int tun_xdp_one(struct tun_struct *tun, 2379043d222fSJason Wang struct tun_file *tfile, 2380043d222fSJason Wang struct xdp_buff *xdp, int *flush) 2381043d222fSJason Wang { 2382043d222fSJason Wang struct tun_xdp_hdr *hdr = xdp->data_hard_start; 2383043d222fSJason Wang struct virtio_net_hdr *gso = &hdr->gso; 2384043d222fSJason Wang struct tun_pcpu_stats *stats; 2385043d222fSJason Wang struct bpf_prog *xdp_prog; 2386043d222fSJason Wang struct sk_buff *skb = NULL; 2387043d222fSJason Wang u32 rxhash = 0, act; 2388043d222fSJason Wang int buflen = hdr->buflen; 2389043d222fSJason Wang int err = 0; 2390043d222fSJason Wang bool skb_xdp = false; 2391043d222fSJason Wang 2392043d222fSJason Wang xdp_prog = rcu_dereference(tun->xdp_prog); 2393043d222fSJason Wang if (xdp_prog) { 2394043d222fSJason Wang if (gso->gso_type) { 2395043d222fSJason Wang skb_xdp = true; 2396043d222fSJason Wang goto build; 2397043d222fSJason Wang } 2398043d222fSJason Wang xdp_set_data_meta_invalid(xdp); 2399043d222fSJason Wang xdp->rxq = &tfile->xdp_rxq; 2400043d222fSJason Wang 2401043d222fSJason Wang act = bpf_prog_run_xdp(xdp_prog, xdp); 2402043d222fSJason Wang err = tun_xdp_act(tun, xdp_prog, xdp, act); 2403043d222fSJason Wang if (err < 0) { 2404043d222fSJason Wang put_page(virt_to_head_page(xdp->data)); 2405043d222fSJason Wang return err; 2406043d222fSJason Wang } 2407043d222fSJason Wang 2408043d222fSJason Wang switch (err) { 2409043d222fSJason Wang case XDP_REDIRECT: 2410043d222fSJason Wang *flush = true; 2411043d222fSJason Wang /* fall through */ 2412043d222fSJason Wang case XDP_TX: 2413043d222fSJason Wang return 0; 2414043d222fSJason Wang case XDP_PASS: 2415043d222fSJason Wang break; 2416043d222fSJason Wang default: 2417043d222fSJason Wang put_page(virt_to_head_page(xdp->data)); 2418043d222fSJason Wang return 0; 2419043d222fSJason Wang } 2420043d222fSJason Wang } 2421043d222fSJason Wang 2422043d222fSJason Wang build: 2423043d222fSJason Wang skb = build_skb(xdp->data_hard_start, buflen); 2424043d222fSJason Wang if (!skb) { 2425043d222fSJason Wang err = -ENOMEM; 2426043d222fSJason Wang goto out; 2427043d222fSJason Wang } 2428043d222fSJason Wang 2429043d222fSJason Wang skb_reserve(skb, xdp->data - xdp->data_hard_start); 2430043d222fSJason Wang skb_put(skb, xdp->data_end - xdp->data); 2431043d222fSJason Wang 2432043d222fSJason Wang if 
(virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) { 2433043d222fSJason Wang this_cpu_inc(tun->pcpu_stats->rx_frame_errors); 2434043d222fSJason Wang kfree_skb(skb); 2435043d222fSJason Wang err = -EINVAL; 2436043d222fSJason Wang goto out; 2437043d222fSJason Wang } 2438043d222fSJason Wang 2439043d222fSJason Wang skb->protocol = eth_type_trans(skb, tun->dev); 2440043d222fSJason Wang skb_reset_network_header(skb); 2441043d222fSJason Wang skb_probe_transport_header(skb, 0); 2442043d222fSJason Wang 2443043d222fSJason Wang if (skb_xdp) { 2444043d222fSJason Wang err = do_xdp_generic(xdp_prog, skb); 2445043d222fSJason Wang if (err != XDP_PASS) 2446043d222fSJason Wang goto out; 2447043d222fSJason Wang } 2448043d222fSJason Wang 2449043d222fSJason Wang if (!rcu_dereference(tun->steering_prog)) 2450043d222fSJason Wang rxhash = __skb_get_hash_symmetric(skb); 2451043d222fSJason Wang 2452043d222fSJason Wang netif_receive_skb(skb); 2453043d222fSJason Wang 2454043d222fSJason Wang stats = get_cpu_ptr(tun->pcpu_stats); 2455043d222fSJason Wang u64_stats_update_begin(&stats->syncp); 2456043d222fSJason Wang stats->rx_packets++; 2457043d222fSJason Wang stats->rx_bytes += skb->len; 2458043d222fSJason Wang u64_stats_update_end(&stats->syncp); 2459043d222fSJason Wang put_cpu_ptr(stats); 2460043d222fSJason Wang 2461043d222fSJason Wang if (rxhash) 2462043d222fSJason Wang tun_flow_update(tun, rxhash, tfile); 2463043d222fSJason Wang 2464043d222fSJason Wang out: 2465043d222fSJason Wang return err; 2466043d222fSJason Wang } 2467043d222fSJason Wang 24681b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) 246905c2828cSMichael S. Tsirkin { 2470043d222fSJason Wang int ret, i; 247154f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 24729484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 2473fe8dd45bSJason Wang struct tun_msg_ctl *ctl = m->msg_control; 2474043d222fSJason Wang struct xdp_buff *xdp; 247554f968d6SJason Wang 247654f968d6SJason Wang if (!tun) 247754f968d6SJason Wang return -EBADFD; 2478f5ff53b4SAl Viro 2479043d222fSJason Wang if (ctl && (ctl->type == TUN_MSG_PTR)) { 2480043d222fSJason Wang int n = ctl->num; 2481043d222fSJason Wang int flush = 0; 2482043d222fSJason Wang 2483043d222fSJason Wang local_bh_disable(); 2484043d222fSJason Wang rcu_read_lock(); 2485043d222fSJason Wang 2486043d222fSJason Wang for (i = 0; i < n; i++) { 2487043d222fSJason Wang xdp = &((struct xdp_buff *)ctl->ptr)[i]; 2488043d222fSJason Wang tun_xdp_one(tun, tfile, xdp, &flush); 2489043d222fSJason Wang } 2490043d222fSJason Wang 2491043d222fSJason Wang if (flush) 2492043d222fSJason Wang xdp_do_flush_map(); 2493043d222fSJason Wang 2494043d222fSJason Wang rcu_read_unlock(); 2495043d222fSJason Wang local_bh_enable(); 2496043d222fSJason Wang 2497043d222fSJason Wang ret = total_len; 2498043d222fSJason Wang goto out; 2499043d222fSJason Wang } 2500fe8dd45bSJason Wang 2501fe8dd45bSJason Wang ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter, 25025503fcecSJason Wang m->msg_flags & MSG_DONTWAIT, 25035503fcecSJason Wang m->msg_flags & MSG_MORE); 2504043d222fSJason Wang out: 250554f968d6SJason Wang tun_put(tun); 250654f968d6SJason Wang return ret; 250705c2828cSMichael S. Tsirkin } 250805c2828cSMichael S. Tsirkin 25091b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, 251005c2828cSMichael S. Tsirkin int flags) 251105c2828cSMichael S. 
Tsirkin { 251254f968d6SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 25139484dc74Syuan linyu struct tun_struct *tun = tun_get(tfile); 2514fc72d1d5SJason Wang void *ptr = m->msg_control; 251505c2828cSMichael S. Tsirkin int ret; 251654f968d6SJason Wang 2517c33ee15bSWei Xu if (!tun) { 2518c33ee15bSWei Xu ret = -EBADFD; 2519fc72d1d5SJason Wang goto out_free; 2520c33ee15bSWei Xu } 252154f968d6SJason Wang 2522eda29772SRichard Cochran if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { 25233811ae76SGao feng ret = -EINVAL; 2524c33ee15bSWei Xu goto out_put_tun; 25253811ae76SGao feng } 2526eda29772SRichard Cochran if (flags & MSG_ERRQUEUE) { 2527eda29772SRichard Cochran ret = sock_recv_errqueue(sock->sk, m, total_len, 2528eda29772SRichard Cochran SOL_PACKET, TUN_TX_TIMESTAMP); 2529eda29772SRichard Cochran goto out; 2530eda29772SRichard Cochran } 2531fc72d1d5SJason Wang ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr); 253287897931SAlex Gartrell if (ret > (ssize_t)total_len) { 253342404c09SDavid S. Miller m->msg_flags |= MSG_TRUNC; 253442404c09SDavid S. Miller ret = flags & MSG_TRUNC ? ret : total_len; 253542404c09SDavid S. Miller } 25363811ae76SGao feng out: 253754f968d6SJason Wang tun_put(tun); 253805c2828cSMichael S. Tsirkin return ret; 2539c33ee15bSWei Xu 2540c33ee15bSWei Xu out_put_tun: 2541c33ee15bSWei Xu tun_put(tun); 2542fc72d1d5SJason Wang out_free: 2543fc72d1d5SJason Wang tun_ptr_free(ptr); 2544c33ee15bSWei Xu return ret; 254505c2828cSMichael S. Tsirkin } 254605c2828cSMichael S. Tsirkin 2547fc72d1d5SJason Wang static int tun_ptr_peek_len(void *ptr) 2548fc72d1d5SJason Wang { 2549fc72d1d5SJason Wang if (likely(ptr)) { 25501ffcbc85SJesper Dangaard Brouer if (tun_is_xdp_frame(ptr)) { 25511ffcbc85SJesper Dangaard Brouer struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); 2552fc72d1d5SJason Wang 25531ffcbc85SJesper Dangaard Brouer return xdpf->len; 2554fc72d1d5SJason Wang } 2555fc72d1d5SJason Wang return __skb_array_len_with_tag(ptr); 2556fc72d1d5SJason Wang } else { 2557fc72d1d5SJason Wang return 0; 2558fc72d1d5SJason Wang } 2559fc72d1d5SJason Wang } 2560fc72d1d5SJason Wang 25611576d986SJason Wang static int tun_peek_len(struct socket *sock) 25621576d986SJason Wang { 25631576d986SJason Wang struct tun_file *tfile = container_of(sock, struct tun_file, socket); 25641576d986SJason Wang struct tun_struct *tun; 25651576d986SJason Wang int ret = 0; 25661576d986SJason Wang 25679484dc74Syuan linyu tun = tun_get(tfile); 25681576d986SJason Wang if (!tun) 25691576d986SJason Wang return 0; 25701576d986SJason Wang 2571fc72d1d5SJason Wang ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len); 25721576d986SJason Wang tun_put(tun); 25731576d986SJason Wang 25741576d986SJason Wang return ret; 25751576d986SJason Wang } 25761576d986SJason Wang 257705c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */ 257805c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = { 25791576d986SJason Wang .peek_len = tun_peek_len, 258005c2828cSMichael S. Tsirkin .sendmsg = tun_sendmsg, 258105c2828cSMichael S. Tsirkin .recvmsg = tun_recvmsg, 258205c2828cSMichael S. Tsirkin }; 258305c2828cSMichael S. Tsirkin 258433dccbb0SHerbert Xu static struct proto tun_proto = { 258533dccbb0SHerbert Xu .name = "tun", 258633dccbb0SHerbert Xu .owner = THIS_MODULE, 258754f968d6SJason Wang .obj_size = sizeof(struct tun_file), 258833dccbb0SHerbert Xu }; 2589f019a7a5SEric W. 
Biederman 2590980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun) 2591980c9e8cSDavid Woodhouse { 2592031f5e03SMichael S. Tsirkin return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); 2593980c9e8cSDavid Woodhouse } 2594980c9e8cSDavid Woodhouse 2595980c9e8cSDavid Woodhouse static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr, 2596980c9e8cSDavid Woodhouse char *buf) 2597980c9e8cSDavid Woodhouse { 2598980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2599980c9e8cSDavid Woodhouse return sprintf(buf, "0x%x\n", tun_flags(tun)); 2600980c9e8cSDavid Woodhouse } 2601980c9e8cSDavid Woodhouse 2602980c9e8cSDavid Woodhouse static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, 2603980c9e8cSDavid Woodhouse char *buf) 2604980c9e8cSDavid Woodhouse { 2605980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 26060625c883SEric W. Biederman return uid_valid(tun->owner)? 26070625c883SEric W. Biederman sprintf(buf, "%u\n", 26080625c883SEric W. Biederman from_kuid_munged(current_user_ns(), tun->owner)): 26090625c883SEric W. Biederman sprintf(buf, "-1\n"); 2610980c9e8cSDavid Woodhouse } 2611980c9e8cSDavid Woodhouse 2612980c9e8cSDavid Woodhouse static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, 2613980c9e8cSDavid Woodhouse char *buf) 2614980c9e8cSDavid Woodhouse { 2615980c9e8cSDavid Woodhouse struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 26160625c883SEric W. Biederman return gid_valid(tun->group) ? 26170625c883SEric W. Biederman sprintf(buf, "%u\n", 26180625c883SEric W. Biederman from_kgid_munged(current_user_ns(), tun->group)): 26190625c883SEric W. Biederman sprintf(buf, "-1\n"); 2620980c9e8cSDavid Woodhouse } 2621980c9e8cSDavid Woodhouse 2622980c9e8cSDavid Woodhouse static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); 2623980c9e8cSDavid Woodhouse static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL); 2624980c9e8cSDavid Woodhouse static DEVICE_ATTR(group, 0444, tun_show_group, NULL); 2625980c9e8cSDavid Woodhouse 2626c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = { 2627c4d33e24STakashi Iwai &dev_attr_tun_flags.attr, 2628c4d33e24STakashi Iwai &dev_attr_owner.attr, 2629c4d33e24STakashi Iwai &dev_attr_group.attr, 2630c4d33e24STakashi Iwai NULL 2631c4d33e24STakashi Iwai }; 2632c4d33e24STakashi Iwai 2633c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = { 2634c4d33e24STakashi Iwai .attrs = tun_dev_attrs 2635c4d33e24STakashi Iwai }; 2636c4d33e24STakashi Iwai 2637d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 26381da177e4SLinus Torvalds { 26391da177e4SLinus Torvalds struct tun_struct *tun; 264054f968d6SJason Wang struct tun_file *tfile = file->private_data; 26411da177e4SLinus Torvalds struct net_device *dev; 26421da177e4SLinus Torvalds int err; 26431da177e4SLinus Torvalds 26447c0c3b1aSJason Wang if (tfile->detached) 26457c0c3b1aSJason Wang return -EINVAL; 26467c0c3b1aSJason Wang 264790e33d45SPetar Penkov if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { 264890e33d45SPetar Penkov if (!capable(CAP_NET_ADMIN)) 264990e33d45SPetar Penkov return -EPERM; 265090e33d45SPetar Penkov 265190e33d45SPetar Penkov if (!(ifr->ifr_flags & IFF_NAPI) || 265290e33d45SPetar Penkov (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) 265390e33d45SPetar Penkov return -EINVAL; 265490e33d45SPetar Penkov } 265590e33d45SPetar Penkov 265674a3e5a7SEric W. 
Biederman dev = __dev_get_by_name(net, ifr->ifr_name); 265774a3e5a7SEric W. Biederman if (dev) { 2658f85ba780SDavid Woodhouse if (ifr->ifr_flags & IFF_TUN_EXCL) 2659f85ba780SDavid Woodhouse return -EBUSY; 266074a3e5a7SEric W. Biederman if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) 266174a3e5a7SEric W. Biederman tun = netdev_priv(dev); 266274a3e5a7SEric W. Biederman else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) 266374a3e5a7SEric W. Biederman tun = netdev_priv(dev); 266474a3e5a7SEric W. Biederman else 266574a3e5a7SEric W. Biederman return -EINVAL; 266674a3e5a7SEric W. Biederman 26678e6d91aeSJason Wang if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != 266840630b82SMichael S. Tsirkin !!(tun->flags & IFF_MULTI_QUEUE)) 26698e6d91aeSJason Wang return -EINVAL; 26708e6d91aeSJason Wang 2671cde8b15fSJason Wang if (tun_not_capable(tun)) 26722b980dbdSPaul Moore return -EPERM; 26735dbbaf2dSPaul Moore err = security_tun_dev_open(tun->security); 26742b980dbdSPaul Moore if (err < 0) 26752b980dbdSPaul Moore return err; 26762b980dbdSPaul Moore 267794317099SPetar Penkov err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, 2678af3fb24eSEric Dumazet ifr->ifr_flags & IFF_NAPI, 2679af3fb24eSEric Dumazet ifr->ifr_flags & IFF_NAPI_FRAGS); 2680a7385ba2SEric W. Biederman if (err < 0) 2681a7385ba2SEric W. Biederman return err; 26824008e97fSJason Wang 268340630b82SMichael S. Tsirkin if (tun->flags & IFF_MULTI_QUEUE && 2684e8dbad66SJason Wang (tun->numqueues + tun->numdisabled > 1)) { 2685e8dbad66SJason Wang /* One or more queue has already been attached, no need 2686e8dbad66SJason Wang * to initialize the device again. 2687e8dbad66SJason Wang */ 268883c1f36fSSabrina Dubroca netdev_state_change(dev); 2689e8dbad66SJason Wang return 0; 2690e8dbad66SJason Wang } 26919fffc5c6SSabrina Dubroca 26929fffc5c6SSabrina Dubroca tun->flags = (tun->flags & ~TUN_FEATURES) | 26939fffc5c6SSabrina Dubroca (ifr->ifr_flags & TUN_FEATURES); 269483c1f36fSSabrina Dubroca 269583c1f36fSSabrina Dubroca netdev_state_change(dev); 269683c1f36fSSabrina Dubroca } else { 26971da177e4SLinus Torvalds char *name; 26981da177e4SLinus Torvalds unsigned long flags = 0; 2699edfb6a14SJason Wang int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? 2700edfb6a14SJason Wang MAX_TAP_QUEUES : 1; 27011da177e4SLinus Torvalds 2702c260b772SEric W. Biederman if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2703ca6bb5d7SDavid Woodhouse return -EPERM; 27042b980dbdSPaul Moore err = security_tun_dev_create(); 27052b980dbdSPaul Moore if (err < 0) 27062b980dbdSPaul Moore return err; 2707ca6bb5d7SDavid Woodhouse 27081da177e4SLinus Torvalds /* Set dev type */ 27091da177e4SLinus Torvalds if (ifr->ifr_flags & IFF_TUN) { 27101da177e4SLinus Torvalds /* TUN device */ 271140630b82SMichael S. Tsirkin flags |= IFF_TUN; 27121da177e4SLinus Torvalds name = "tun%d"; 27131da177e4SLinus Torvalds } else if (ifr->ifr_flags & IFF_TAP) { 27141da177e4SLinus Torvalds /* TAP device */ 271540630b82SMichael S. 
Tsirkin flags |= IFF_TAP; 27161da177e4SLinus Torvalds name = "tap%d"; 27171da177e4SLinus Torvalds } else 271836989b90SKusanagi Kouichi return -EINVAL; 27191da177e4SLinus Torvalds 27201da177e4SLinus Torvalds if (*ifr->ifr_name) 27211da177e4SLinus Torvalds name = ifr->ifr_name; 27221da177e4SLinus Torvalds 2723c8d68e6bSJason Wang dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, 2724c835a677STom Gundersen NET_NAME_UNKNOWN, tun_setup, queues, 2725c835a677STom Gundersen queues); 2726edfb6a14SJason Wang 27271da177e4SLinus Torvalds if (!dev) 27281da177e4SLinus Torvalds return -ENOMEM; 27290ad646c8SCong Wang err = dev_get_valid_name(net, dev, name); 27305c25f65fSJulien Gomes if (err < 0) 27310ad646c8SCong Wang goto err_free_dev; 27321da177e4SLinus Torvalds 2733fc54c658SPavel Emelyanov dev_net_set(dev, net); 2734f019a7a5SEric W. Biederman dev->rtnl_link_ops = &tun_link_ops; 2735fb7589a1SPavel Emelyanov dev->ifindex = tfile->ifindex; 2736c4d33e24STakashi Iwai dev->sysfs_groups[0] = &tun_attr_group; 2737758e43b7SStephen Hemminger 27381da177e4SLinus Torvalds tun = netdev_priv(dev); 27391da177e4SLinus Torvalds tun->dev = dev; 27401da177e4SLinus Torvalds tun->flags = flags; 2741f271b2ccSMax Krasnyansky tun->txflt.count = 0; 2742d9d52b51SMichael S. Tsirkin tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); 27431da177e4SLinus Torvalds 2744eaea34b2SPaolo Abeni tun->align = NET_SKB_PAD; 274554f968d6SJason Wang tun->filter_attached = false; 274654f968d6SJason Wang tun->sndbuf = tfile->socket.sk->sk_sndbuf; 27475503fcecSJason Wang tun->rx_batched = 0; 274896f84061SJason Wang RCU_INIT_POINTER(tun->steering_prog, NULL); 274933dccbb0SHerbert Xu 2750608b9977SPaolo Abeni tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); 2751608b9977SPaolo Abeni if (!tun->pcpu_stats) { 2752608b9977SPaolo Abeni err = -ENOMEM; 2753608b9977SPaolo Abeni goto err_free_dev; 2754608b9977SPaolo Abeni } 2755608b9977SPaolo Abeni 275696442e42SJason Wang spin_lock_init(&tun->lock); 275796442e42SJason Wang 27585dbbaf2dSPaul Moore err = security_tun_dev_alloc_security(&tun->security); 27595dbbaf2dSPaul Moore if (err < 0) 2760608b9977SPaolo Abeni goto err_free_stat; 27612b980dbdSPaul Moore 27621da177e4SLinus Torvalds tun_net_init(dev); 2763944a1376SPavel Emelyanov tun_flow_init(tun); 276496442e42SJason Wang 276588255375SMichał Mirosław dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | 27666680ec68SJason Wang TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | 27676680ec68SJason Wang NETIF_F_HW_VLAN_STAG_TX; 27682a2bbf17SPaolo Abeni dev->features = dev->hw_features | NETIF_F_LLTX; 27696671b224SFernando Luis Vazquez Cao dev->vlan_features = dev->features & 27706671b224SFernando Luis Vazquez Cao ~(NETIF_F_HW_VLAN_CTAG_TX | 27716671b224SFernando Luis Vazquez Cao NETIF_F_HW_VLAN_STAG_TX); 277288255375SMichał Mirosław 27739fffc5c6SSabrina Dubroca tun->flags = (tun->flags & ~TUN_FEATURES) | 27749fffc5c6SSabrina Dubroca (ifr->ifr_flags & TUN_FEATURES); 27759fffc5c6SSabrina Dubroca 27764008e97fSJason Wang INIT_LIST_HEAD(&tun->disabled); 2777af3fb24eSEric Dumazet err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI, 2778af3fb24eSEric Dumazet ifr->ifr_flags & IFF_NAPI_FRAGS); 2779eb0fb363SJason Wang if (err < 0) 2780662ca437SJason Wang goto err_free_flow; 2781eb0fb363SJason Wang 27821da177e4SLinus Torvalds err = register_netdevice(tun->dev); 27831da177e4SLinus Torvalds if (err < 0) 2784662ca437SJason Wang goto err_detach; 2785af668b3cSMichael S. 
Tsirkin } 2786980c9e8cSDavid Woodhouse 2787eb0fb363SJason Wang netif_carrier_on(tun->dev); 27881da177e4SLinus Torvalds 27896b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_set_iff\n"); 27901da177e4SLinus Torvalds 2791e35259a9SMax Krasnyansky /* Make sure persistent devices do not get stuck in 2792e35259a9SMax Krasnyansky * xoff state. 2793e35259a9SMax Krasnyansky */ 2794e35259a9SMax Krasnyansky if (netif_running(tun->dev)) 2795c8d68e6bSJason Wang netif_tx_wake_all_queues(tun->dev); 2796e35259a9SMax Krasnyansky 27971da177e4SLinus Torvalds strcpy(ifr->ifr_name, tun->dev->name); 27981da177e4SLinus Torvalds return 0; 27991da177e4SLinus Torvalds 2800662ca437SJason Wang err_detach: 2801662ca437SJason Wang tun_detach_all(dev); 2802ff244c6bSEric Dumazet /* register_netdevice() already called tun_free_netdev() */ 2803ff244c6bSEric Dumazet goto err_free_dev; 2804ff244c6bSEric Dumazet 2805662ca437SJason Wang err_free_flow: 2806662ca437SJason Wang tun_flow_uninit(tun); 2807662ca437SJason Wang security_tun_dev_free_security(tun->security); 2808608b9977SPaolo Abeni err_free_stat: 2809608b9977SPaolo Abeni free_percpu(tun->pcpu_stats); 28101da177e4SLinus Torvalds err_free_dev: 28111da177e4SLinus Torvalds free_netdev(dev); 28121da177e4SLinus Torvalds return err; 28131da177e4SLinus Torvalds } 28141da177e4SLinus Torvalds 28159ce99cf6SRami Rosen static void tun_get_iff(struct net *net, struct tun_struct *tun, 2816876bfd4dSHerbert Xu struct ifreq *ifr) 2817e3b99556SMark McLoughlin { 28186b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "tun_get_iff\n"); 2819e3b99556SMark McLoughlin 2820e3b99556SMark McLoughlin strcpy(ifr->ifr_name, tun->dev->name); 2821e3b99556SMark McLoughlin 2822980c9e8cSDavid Woodhouse ifr->ifr_flags = tun_flags(tun); 2823e3b99556SMark McLoughlin 2824e3b99556SMark McLoughlin } 2825e3b99556SMark McLoughlin 28265228ddc9SRusty Russell /* This is like a cut-down ethtool ops, except done via tun fd so no 28275228ddc9SRusty Russell * privs required. */ 282888255375SMichał Mirosław static int set_offload(struct tun_struct *tun, unsigned long arg) 28295228ddc9SRusty Russell { 2830c8f44affSMichał Mirosław netdev_features_t features = 0; 28315228ddc9SRusty Russell 28325228ddc9SRusty Russell if (arg & TUN_F_CSUM) { 283388255375SMichał Mirosław features |= NETIF_F_HW_CSUM; 28345228ddc9SRusty Russell arg &= ~TUN_F_CSUM; 28355228ddc9SRusty Russell 28365228ddc9SRusty Russell if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { 28375228ddc9SRusty Russell if (arg & TUN_F_TSO_ECN) { 28385228ddc9SRusty Russell features |= NETIF_F_TSO_ECN; 28395228ddc9SRusty Russell arg &= ~TUN_F_TSO_ECN; 28405228ddc9SRusty Russell } 28415228ddc9SRusty Russell if (arg & TUN_F_TSO4) 28425228ddc9SRusty Russell features |= NETIF_F_TSO; 28435228ddc9SRusty Russell if (arg & TUN_F_TSO6) 28445228ddc9SRusty Russell features |= NETIF_F_TSO6; 28455228ddc9SRusty Russell arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 28465228ddc9SRusty Russell } 28470c19f846SWillem de Bruijn 28480c19f846SWillem de Bruijn arg &= ~TUN_F_UFO; 28495228ddc9SRusty Russell } 28505228ddc9SRusty Russell 28515228ddc9SRusty Russell /* This gives the user a way to test for new features in future by 28525228ddc9SRusty Russell * trying to set them. 
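 *
 * As an illustration, a userspace caller that has already set up a tap fd
 * with TUNSETIFF (the name "tap_fd" is arbitrary here) would request
 * offloads with something like
 *
 *	ioctl(tap_fd, TUNSETOFFLOAD, TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6);
 *
 * and can probe for as-yet-unknown TUN_F_* bits simply by setting them and
 * checking for -EINVAL below.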
*/ 28535228ddc9SRusty Russell if (arg) 28545228ddc9SRusty Russell return -EINVAL; 28555228ddc9SRusty Russell 285688255375SMichał Mirosław tun->set_features = features; 285709050957SYaroslav Isakov tun->dev->wanted_features &= ~TUN_USER_FEATURES; 285809050957SYaroslav Isakov tun->dev->wanted_features |= features; 285988255375SMichał Mirosław netdev_update_features(tun->dev); 28605228ddc9SRusty Russell 28615228ddc9SRusty Russell return 0; 28625228ddc9SRusty Russell } 28635228ddc9SRusty Russell 2864c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n) 2865c8d68e6bSJason Wang { 2866c8d68e6bSJason Wang int i; 2867c8d68e6bSJason Wang struct tun_file *tfile; 2868c8d68e6bSJason Wang 2869c8d68e6bSJason Wang for (i = 0; i < n; i++) { 2870b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 28718ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 28728ced425eSHannes Frederic Sowa sk_detach_filter(tfile->socket.sk); 28738ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2874c8d68e6bSJason Wang } 2875c8d68e6bSJason Wang 2876c8d68e6bSJason Wang tun->filter_attached = false; 2877c8d68e6bSJason Wang } 2878c8d68e6bSJason Wang 2879c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun) 2880c8d68e6bSJason Wang { 2881c8d68e6bSJason Wang int i, ret = 0; 2882c8d68e6bSJason Wang struct tun_file *tfile; 2883c8d68e6bSJason Wang 2884c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2885b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 28868ced425eSHannes Frederic Sowa lock_sock(tfile->socket.sk); 28878ced425eSHannes Frederic Sowa ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); 28888ced425eSHannes Frederic Sowa release_sock(tfile->socket.sk); 2889c8d68e6bSJason Wang if (ret) { 2890c8d68e6bSJason Wang tun_detach_filter(tun, i); 2891c8d68e6bSJason Wang return ret; 2892c8d68e6bSJason Wang } 2893c8d68e6bSJason Wang } 2894c8d68e6bSJason Wang 2895c8d68e6bSJason Wang tun->filter_attached = true; 2896c8d68e6bSJason Wang return ret; 2897c8d68e6bSJason Wang } 2898c8d68e6bSJason Wang 2899c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun) 2900c8d68e6bSJason Wang { 2901c8d68e6bSJason Wang struct tun_file *tfile; 2902c8d68e6bSJason Wang int i; 2903c8d68e6bSJason Wang 2904c8d68e6bSJason Wang for (i = 0; i < tun->numqueues; i++) { 2905b8deabd3SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 2906c8d68e6bSJason Wang tfile->socket.sk->sk_sndbuf = tun->sndbuf; 2907c8d68e6bSJason Wang } 2908c8d68e6bSJason Wang } 2909c8d68e6bSJason Wang 2910cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr) 2911cde8b15fSJason Wang { 2912cde8b15fSJason Wang struct tun_file *tfile = file->private_data; 2913cde8b15fSJason Wang struct tun_struct *tun; 2914cde8b15fSJason Wang int ret = 0; 2915cde8b15fSJason Wang 2916cde8b15fSJason Wang rtnl_lock(); 2917cde8b15fSJason Wang 2918cde8b15fSJason Wang if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { 29194008e97fSJason Wang tun = tfile->detached; 29205dbbaf2dSPaul Moore if (!tun) { 2921cde8b15fSJason Wang ret = -EINVAL; 29225dbbaf2dSPaul Moore goto unlock; 29235dbbaf2dSPaul Moore } 29245dbbaf2dSPaul Moore ret = security_tun_dev_attach_queue(tun->security); 29255dbbaf2dSPaul Moore if (ret < 0) 29265dbbaf2dSPaul Moore goto unlock; 2927af3fb24eSEric Dumazet ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, 2928af3fb24eSEric Dumazet tun->flags & IFF_NAPI_FRAGS); 29294008e97fSJason Wang } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 2930b8deabd3SJason Wang tun = 
rtnl_dereference(tfile->tun); 293140630b82SMichael S. Tsirkin if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) 29324008e97fSJason Wang ret = -EINVAL; 2933cde8b15fSJason Wang else 29344008e97fSJason Wang __tun_detach(tfile, false); 29354008e97fSJason Wang } else 2936cde8b15fSJason Wang ret = -EINVAL; 2937cde8b15fSJason Wang 293883c1f36fSSabrina Dubroca if (ret >= 0) 293983c1f36fSSabrina Dubroca netdev_state_change(tun->dev); 294083c1f36fSSabrina Dubroca 29415dbbaf2dSPaul Moore unlock: 2942cde8b15fSJason Wang rtnl_unlock(); 2943cde8b15fSJason Wang return ret; 2944cde8b15fSJason Wang } 2945cde8b15fSJason Wang 2946cd5681d7SJason Wang static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p, 2947cd5681d7SJason Wang void __user *data) 294896f84061SJason Wang { 294996f84061SJason Wang struct bpf_prog *prog; 295096f84061SJason Wang int fd; 295196f84061SJason Wang 295296f84061SJason Wang if (copy_from_user(&fd, data, sizeof(fd))) 295396f84061SJason Wang return -EFAULT; 295496f84061SJason Wang 295596f84061SJason Wang if (fd == -1) { 295696f84061SJason Wang prog = NULL; 295796f84061SJason Wang } else { 295896f84061SJason Wang prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); 295996f84061SJason Wang if (IS_ERR(prog)) 296096f84061SJason Wang return PTR_ERR(prog); 296196f84061SJason Wang } 296296f84061SJason Wang 2963cd5681d7SJason Wang return __tun_set_ebpf(tun, prog_p, prog); 296496f84061SJason Wang } 296596f84061SJason Wang 296650857e2aSArnd Bergmann static long __tun_chr_ioctl(struct file *file, unsigned int cmd, 296750857e2aSArnd Bergmann unsigned long arg, int ifreq_len) 29681da177e4SLinus Torvalds { 296936b50babSEric W. Biederman struct tun_file *tfile = file->private_data; 2970f663706aSKirill Tkhai struct net *net = sock_net(&tfile->sk); 2971631ab46bSEric W. Biederman struct tun_struct *tun; 29721da177e4SLinus Torvalds void __user* argp = (void __user*)arg; 29731da177e4SLinus Torvalds struct ifreq ifr; 29740625c883SEric W. Biederman kuid_t owner; 29750625c883SEric W. Biederman kgid_t group; 297633dccbb0SHerbert Xu int sndbuf; 2977d9d52b51SMichael S. Tsirkin int vnet_hdr_sz; 2978fb7589a1SPavel Emelyanov unsigned int ifindex; 29791cf8e410SMichael S. Tsirkin int le; 2980f271b2ccSMax Krasnyansky int ret; 298183c1f36fSSabrina Dubroca bool do_notify = false; 29821da177e4SLinus Torvalds 2983f2780d6dSKirill Tkhai if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || 2984f2780d6dSKirill Tkhai (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) { 298550857e2aSArnd Bergmann if (copy_from_user(&ifr, argp, ifreq_len)) 29861da177e4SLinus Torvalds return -EFAULT; 29878bbb1813SDavid S. Miller } else { 2988a117dacdSMathias Krause memset(&ifr, 0, sizeof(ifr)); 29898bbb1813SDavid S. Miller } 2990631ab46bSEric W. Biederman if (cmd == TUNGETFEATURES) { 2991631ab46bSEric W. Biederman /* Currently this just means: "what IFF flags are valid?". 2992631ab46bSEric W. Biederman * This is needed because we never checked for invalid flags on 2993031f5e03SMichael S. Tsirkin * TUNSETIFF. 2994031f5e03SMichael S. Tsirkin */ 2995031f5e03SMichael S. Tsirkin return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES, 2996631ab46bSEric W. 
Biederman (unsigned int __user*)argp); 2997f663706aSKirill Tkhai } else if (cmd == TUNSETQUEUE) { 2998cde8b15fSJason Wang return tun_set_queue(file, &ifr); 2999f663706aSKirill Tkhai } else if (cmd == SIOCGSKNS) { 3000f663706aSKirill Tkhai if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 3001f663706aSKirill Tkhai return -EPERM; 3002f663706aSKirill Tkhai return open_related_ns(&net->ns, get_net_ns); 3003f663706aSKirill Tkhai } 3004631ab46bSEric W. Biederman 3005c8d68e6bSJason Wang ret = 0; 3006876bfd4dSHerbert Xu rtnl_lock(); 3007876bfd4dSHerbert Xu 30089484dc74Syuan linyu tun = tun_get(tfile); 30090f16bc13SGao Feng if (cmd == TUNSETIFF) { 30100f16bc13SGao Feng ret = -EEXIST; 30110f16bc13SGao Feng if (tun) 30120f16bc13SGao Feng goto unlock; 30130f16bc13SGao Feng 30141da177e4SLinus Torvalds ifr.ifr_name[IFNAMSIZ-1] = '\0'; 30151da177e4SLinus Torvalds 3016f2780d6dSKirill Tkhai ret = tun_set_iff(net, file, &ifr); 30171da177e4SLinus Torvalds 3018876bfd4dSHerbert Xu if (ret) 3019876bfd4dSHerbert Xu goto unlock; 30201da177e4SLinus Torvalds 302150857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 3022876bfd4dSHerbert Xu ret = -EFAULT; 3023876bfd4dSHerbert Xu goto unlock; 30241da177e4SLinus Torvalds } 3025fb7589a1SPavel Emelyanov if (cmd == TUNSETIFINDEX) { 3026fb7589a1SPavel Emelyanov ret = -EPERM; 3027fb7589a1SPavel Emelyanov if (tun) 3028fb7589a1SPavel Emelyanov goto unlock; 3029fb7589a1SPavel Emelyanov 3030fb7589a1SPavel Emelyanov ret = -EFAULT; 3031fb7589a1SPavel Emelyanov if (copy_from_user(&ifindex, argp, sizeof(ifindex))) 3032fb7589a1SPavel Emelyanov goto unlock; 3033fb7589a1SPavel Emelyanov 3034fb7589a1SPavel Emelyanov ret = 0; 3035fb7589a1SPavel Emelyanov tfile->ifindex = ifindex; 3036fb7589a1SPavel Emelyanov goto unlock; 3037fb7589a1SPavel Emelyanov } 30381da177e4SLinus Torvalds 3039876bfd4dSHerbert Xu ret = -EBADFD; 30401da177e4SLinus Torvalds if (!tun) 3041876bfd4dSHerbert Xu goto unlock; 30421da177e4SLinus Torvalds 30431e588338SJason Wang tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd); 30441da177e4SLinus Torvalds 3045631ab46bSEric W. Biederman ret = 0; 30461da177e4SLinus Torvalds switch (cmd) { 3047e3b99556SMark McLoughlin case TUNGETIFF: 30489ce99cf6SRami Rosen tun_get_iff(current->nsproxy->net_ns, tun, &ifr); 3049e3b99556SMark McLoughlin 30503d407a80SPavel Emelyanov if (tfile->detached) 30513d407a80SPavel Emelyanov ifr.ifr_flags |= IFF_DETACH_QUEUE; 3052849c9b6fSPavel Emelyanov if (!tfile->socket.sk->sk_filter) 3053849c9b6fSPavel Emelyanov ifr.ifr_flags |= IFF_NOFILTER; 30543d407a80SPavel Emelyanov 305550857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 3056631ab46bSEric W. Biederman ret = -EFAULT; 3057e3b99556SMark McLoughlin break; 3058e3b99556SMark McLoughlin 30591da177e4SLinus Torvalds case TUNSETNOCSUM: 30601da177e4SLinus Torvalds /* Disable/Enable checksum */ 30611da177e4SLinus Torvalds 306288255375SMichał Mirosław /* [unimplemented] */ 306388255375SMichał Mirosław tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n", 30646b8a66eeSJoe Perches arg ? "disabled" : "enabled"); 30651da177e4SLinus Torvalds break; 30661da177e4SLinus Torvalds 30671da177e4SLinus Torvalds case TUNSETPERSIST: 306854f968d6SJason Wang /* Disable/Enable persist mode. Keep an extra reference to the 306954f968d6SJason Wang * module to prevent the module being unprobed. 307054f968d6SJason Wang */ 307140630b82SMichael S. Tsirkin if (arg && !(tun->flags & IFF_PERSIST)) { 307240630b82SMichael S. 
Tsirkin tun->flags |= IFF_PERSIST; 307354f968d6SJason Wang __module_get(THIS_MODULE); 307483c1f36fSSabrina Dubroca do_notify = true; 3075dd38bd85SJason Wang } 307640630b82SMichael S. Tsirkin if (!arg && (tun->flags & IFF_PERSIST)) { 307740630b82SMichael S. Tsirkin tun->flags &= ~IFF_PERSIST; 307854f968d6SJason Wang module_put(THIS_MODULE); 307983c1f36fSSabrina Dubroca do_notify = true; 308054f968d6SJason Wang } 30811da177e4SLinus Torvalds 30826b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "persist %s\n", 30836b8a66eeSJoe Perches arg ? "enabled" : "disabled"); 30841da177e4SLinus Torvalds break; 30851da177e4SLinus Torvalds 30861da177e4SLinus Torvalds case TUNSETOWNER: 30871da177e4SLinus Torvalds /* Set owner of the device */ 30880625c883SEric W. Biederman owner = make_kuid(current_user_ns(), arg); 30890625c883SEric W. Biederman if (!uid_valid(owner)) { 30900625c883SEric W. Biederman ret = -EINVAL; 30910625c883SEric W. Biederman break; 30920625c883SEric W. Biederman } 30930625c883SEric W. Biederman tun->owner = owner; 309483c1f36fSSabrina Dubroca do_notify = true; 30951e588338SJason Wang tun_debug(KERN_INFO, tun, "owner set to %u\n", 30960625c883SEric W. Biederman from_kuid(&init_user_ns, tun->owner)); 30971da177e4SLinus Torvalds break; 30981da177e4SLinus Torvalds 30998c644623SGuido Guenther case TUNSETGROUP: 31008c644623SGuido Guenther /* Set group of the device */ 31010625c883SEric W. Biederman group = make_kgid(current_user_ns(), arg); 31020625c883SEric W. Biederman if (!gid_valid(group)) { 31030625c883SEric W. Biederman ret = -EINVAL; 31040625c883SEric W. Biederman break; 31050625c883SEric W. Biederman } 31060625c883SEric W. Biederman tun->group = group; 310783c1f36fSSabrina Dubroca do_notify = true; 31081e588338SJason Wang tun_debug(KERN_INFO, tun, "group set to %u\n", 31090625c883SEric W. Biederman from_kgid(&init_user_ns, tun->group)); 31108c644623SGuido Guenther break; 31118c644623SGuido Guenther 3112ff4cc3acSMike Kershaw case TUNSETLINK: 3113ff4cc3acSMike Kershaw /* Only allow setting the type when the interface is down */ 3114ff4cc3acSMike Kershaw if (tun->dev->flags & IFF_UP) { 31156b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, 31166b8a66eeSJoe Perches "Linktype set failed because interface is up\n"); 311748abfe05SDavid S. Miller ret = -EBUSY; 3118ff4cc3acSMike Kershaw } else { 3119ff4cc3acSMike Kershaw tun->dev->type = (int) arg; 31206b8a66eeSJoe Perches tun_debug(KERN_INFO, tun, "linktype set to %d\n", 31216b8a66eeSJoe Perches tun->dev->type); 312248abfe05SDavid S. Miller ret = 0; 3123ff4cc3acSMike Kershaw } 3124631ab46bSEric W. Biederman break; 3125ff4cc3acSMike Kershaw 31261da177e4SLinus Torvalds #ifdef TUN_DEBUG 31271da177e4SLinus Torvalds case TUNSETDEBUG: 31281da177e4SLinus Torvalds tun->debug = arg; 31291da177e4SLinus Torvalds break; 31301da177e4SLinus Torvalds #endif 31315228ddc9SRusty Russell case TUNSETOFFLOAD: 313288255375SMichał Mirosław ret = set_offload(tun, arg); 3133631ab46bSEric W. Biederman break; 31345228ddc9SRusty Russell 3135f271b2ccSMax Krasnyansky case TUNSETTXFILTER: 3136f271b2ccSMax Krasnyansky /* Can be set only for TAPs */ 3137631ab46bSEric W. Biederman ret = -EINVAL; 313840630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 3139631ab46bSEric W. Biederman break; 3140c0e5a8c2SHarvey Harrison ret = update_filter(&tun->txflt, (void __user *)arg); 3141631ab46bSEric W. 
Biederman break; 31421da177e4SLinus Torvalds 31431da177e4SLinus Torvalds case SIOCGIFHWADDR: 3144b595076aSUwe Kleine-König /* Get hw address */ 3145f271b2ccSMax Krasnyansky memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); 3146f271b2ccSMax Krasnyansky ifr.ifr_hwaddr.sa_family = tun->dev->type; 314750857e2aSArnd Bergmann if (copy_to_user(argp, &ifr, ifreq_len)) 3148631ab46bSEric W. Biederman ret = -EFAULT; 3149631ab46bSEric W. Biederman break; 31501da177e4SLinus Torvalds 31511da177e4SLinus Torvalds case SIOCSIFHWADDR: 3152f271b2ccSMax Krasnyansky /* Set hw address */ 31536b8a66eeSJoe Perches tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n", 31546b8a66eeSJoe Perches ifr.ifr_hwaddr.sa_data); 315540102371SKim B. Heino 315640102371SKim B. Heino ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 3157631ab46bSEric W. Biederman break; 315833dccbb0SHerbert Xu 315933dccbb0SHerbert Xu case TUNGETSNDBUF: 316054f968d6SJason Wang sndbuf = tfile->socket.sk->sk_sndbuf; 316133dccbb0SHerbert Xu if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) 316233dccbb0SHerbert Xu ret = -EFAULT; 316333dccbb0SHerbert Xu break; 316433dccbb0SHerbert Xu 316533dccbb0SHerbert Xu case TUNSETSNDBUF: 316633dccbb0SHerbert Xu if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { 316733dccbb0SHerbert Xu ret = -EFAULT; 316833dccbb0SHerbert Xu break; 316933dccbb0SHerbert Xu } 317093161922SCraig Gallek if (sndbuf <= 0) { 317193161922SCraig Gallek ret = -EINVAL; 317293161922SCraig Gallek break; 317393161922SCraig Gallek } 317433dccbb0SHerbert Xu 3175c8d68e6bSJason Wang tun->sndbuf = sndbuf; 3176c8d68e6bSJason Wang tun_set_sndbuf(tun); 317733dccbb0SHerbert Xu break; 317833dccbb0SHerbert Xu 3179d9d52b51SMichael S. Tsirkin case TUNGETVNETHDRSZ: 3180d9d52b51SMichael S. Tsirkin vnet_hdr_sz = tun->vnet_hdr_sz; 3181d9d52b51SMichael S. Tsirkin if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) 3182d9d52b51SMichael S. Tsirkin ret = -EFAULT; 3183d9d52b51SMichael S. Tsirkin break; 3184d9d52b51SMichael S. Tsirkin 3185d9d52b51SMichael S. Tsirkin case TUNSETVNETHDRSZ: 3186d9d52b51SMichael S. Tsirkin if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { 3187d9d52b51SMichael S. Tsirkin ret = -EFAULT; 3188d9d52b51SMichael S. Tsirkin break; 3189d9d52b51SMichael S. Tsirkin } 3190d9d52b51SMichael S. Tsirkin if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { 3191d9d52b51SMichael S. Tsirkin ret = -EINVAL; 3192d9d52b51SMichael S. Tsirkin break; 3193d9d52b51SMichael S. Tsirkin } 3194d9d52b51SMichael S. Tsirkin 3195d9d52b51SMichael S. Tsirkin tun->vnet_hdr_sz = vnet_hdr_sz; 3196d9d52b51SMichael S. Tsirkin break; 3197d9d52b51SMichael S. Tsirkin 31981cf8e410SMichael S. Tsirkin case TUNGETVNETLE: 31991cf8e410SMichael S. Tsirkin le = !!(tun->flags & TUN_VNET_LE); 32001cf8e410SMichael S. Tsirkin if (put_user(le, (int __user *)argp)) 32011cf8e410SMichael S. Tsirkin ret = -EFAULT; 32021cf8e410SMichael S. Tsirkin break; 32031cf8e410SMichael S. Tsirkin 32041cf8e410SMichael S. Tsirkin case TUNSETVNETLE: 32051cf8e410SMichael S. Tsirkin if (get_user(le, (int __user *)argp)) { 32061cf8e410SMichael S. Tsirkin ret = -EFAULT; 32071cf8e410SMichael S. Tsirkin break; 32081cf8e410SMichael S. Tsirkin } 32091cf8e410SMichael S. Tsirkin if (le) 32101cf8e410SMichael S. Tsirkin tun->flags |= TUN_VNET_LE; 32111cf8e410SMichael S. Tsirkin else 32121cf8e410SMichael S. Tsirkin tun->flags &= ~TUN_VNET_LE; 32131cf8e410SMichael S. Tsirkin break; 32141cf8e410SMichael S. 
Tsirkin 32158b8e658bSGreg Kurz case TUNGETVNETBE: 32168b8e658bSGreg Kurz ret = tun_get_vnet_be(tun, argp); 32178b8e658bSGreg Kurz break; 32188b8e658bSGreg Kurz 32198b8e658bSGreg Kurz case TUNSETVNETBE: 32208b8e658bSGreg Kurz ret = tun_set_vnet_be(tun, argp); 32218b8e658bSGreg Kurz break; 32228b8e658bSGreg Kurz 322399405162SMichael S. Tsirkin case TUNATTACHFILTER: 322499405162SMichael S. Tsirkin /* Can be set only for TAPs */ 322599405162SMichael S. Tsirkin ret = -EINVAL; 322640630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 322799405162SMichael S. Tsirkin break; 322899405162SMichael S. Tsirkin ret = -EFAULT; 322954f968d6SJason Wang if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) 323099405162SMichael S. Tsirkin break; 323199405162SMichael S. Tsirkin 3232c8d68e6bSJason Wang ret = tun_attach_filter(tun); 323399405162SMichael S. Tsirkin break; 323499405162SMichael S. Tsirkin 323599405162SMichael S. Tsirkin case TUNDETACHFILTER: 323699405162SMichael S. Tsirkin /* Can be set only for TAPs */ 323799405162SMichael S. Tsirkin ret = -EINVAL; 323840630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 323999405162SMichael S. Tsirkin break; 3240c8d68e6bSJason Wang ret = 0; 3241c8d68e6bSJason Wang tun_detach_filter(tun, tun->numqueues); 324299405162SMichael S. Tsirkin break; 324399405162SMichael S. Tsirkin 324476975e9cSPavel Emelyanov case TUNGETFILTER: 324576975e9cSPavel Emelyanov ret = -EINVAL; 324640630b82SMichael S. Tsirkin if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 324776975e9cSPavel Emelyanov break; 324876975e9cSPavel Emelyanov ret = -EFAULT; 324976975e9cSPavel Emelyanov if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) 325076975e9cSPavel Emelyanov break; 325176975e9cSPavel Emelyanov ret = 0; 325276975e9cSPavel Emelyanov break; 325376975e9cSPavel Emelyanov 325496f84061SJason Wang case TUNSETSTEERINGEBPF: 3255cd5681d7SJason Wang ret = tun_set_ebpf(tun, &tun->steering_prog, argp); 325696f84061SJason Wang break; 325796f84061SJason Wang 3258aff3d70aSJason Wang case TUNSETFILTEREBPF: 3259aff3d70aSJason Wang ret = tun_set_ebpf(tun, &tun->filter_prog, argp); 3260aff3d70aSJason Wang break; 3261aff3d70aSJason Wang 32621da177e4SLinus Torvalds default: 3263631ab46bSEric W. Biederman ret = -EINVAL; 3264631ab46bSEric W. Biederman break; 3265ee289b64SJoe Perches } 32661da177e4SLinus Torvalds 326783c1f36fSSabrina Dubroca if (do_notify) 326883c1f36fSSabrina Dubroca netdev_state_change(tun->dev); 326983c1f36fSSabrina Dubroca 3270876bfd4dSHerbert Xu unlock: 3271876bfd4dSHerbert Xu rtnl_unlock(); 3272876bfd4dSHerbert Xu if (tun) 3273631ab46bSEric W. Biederman tun_put(tun); 3274631ab46bSEric W. 
Biederman return ret; 32751da177e4SLinus Torvalds } 32761da177e4SLinus Torvalds 327750857e2aSArnd Bergmann static long tun_chr_ioctl(struct file *file, 327850857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 327950857e2aSArnd Bergmann { 328050857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); 328150857e2aSArnd Bergmann } 328250857e2aSArnd Bergmann 328350857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 328450857e2aSArnd Bergmann static long tun_chr_compat_ioctl(struct file *file, 328550857e2aSArnd Bergmann unsigned int cmd, unsigned long arg) 328650857e2aSArnd Bergmann { 328750857e2aSArnd Bergmann switch (cmd) { 328850857e2aSArnd Bergmann case TUNSETIFF: 328950857e2aSArnd Bergmann case TUNGETIFF: 329050857e2aSArnd Bergmann case TUNSETTXFILTER: 329150857e2aSArnd Bergmann case TUNGETSNDBUF: 329250857e2aSArnd Bergmann case TUNSETSNDBUF: 329350857e2aSArnd Bergmann case SIOCGIFHWADDR: 329450857e2aSArnd Bergmann case SIOCSIFHWADDR: 329550857e2aSArnd Bergmann arg = (unsigned long)compat_ptr(arg); 329650857e2aSArnd Bergmann break; 329750857e2aSArnd Bergmann default: 329850857e2aSArnd Bergmann arg = (compat_ulong_t)arg; 329950857e2aSArnd Bergmann break; 330050857e2aSArnd Bergmann } 330150857e2aSArnd Bergmann 330250857e2aSArnd Bergmann /* 330350857e2aSArnd Bergmann * compat_ifreq is shorter than ifreq, so we must not access beyond 330450857e2aSArnd Bergmann * the end of that structure. All fields that are used in this 330550857e2aSArnd Bergmann * driver are compatible though, we don't need to convert the 330650857e2aSArnd Bergmann * contents. 330750857e2aSArnd Bergmann */ 330850857e2aSArnd Bergmann return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); 330950857e2aSArnd Bergmann } 331050857e2aSArnd Bergmann #endif /* CONFIG_COMPAT */ 331150857e2aSArnd Bergmann 33121da177e4SLinus Torvalds static int tun_chr_fasync(int fd, struct file *file, int on) 33131da177e4SLinus Torvalds { 331454f968d6SJason Wang struct tun_file *tfile = file->private_data; 33151da177e4SLinus Torvalds int ret; 33161da177e4SLinus Torvalds 331754f968d6SJason Wang if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) 33189d319522SJonathan Corbet goto out; 33191da177e4SLinus Torvalds 33201da177e4SLinus Torvalds if (on) { 332101919134SEric W. Biederman __f_setown(file, task_pid(current), PIDTYPE_TGID, 0); 332254f968d6SJason Wang tfile->flags |= TUN_FASYNC; 33231da177e4SLinus Torvalds } else 332454f968d6SJason Wang tfile->flags &= ~TUN_FASYNC; 33259d319522SJonathan Corbet ret = 0; 33269d319522SJonathan Corbet out: 33279d319522SJonathan Corbet return ret; 33281da177e4SLinus Torvalds } 33291da177e4SLinus Torvalds 33301da177e4SLinus Torvalds static int tun_chr_open(struct inode *inode, struct file * file) 33311da177e4SLinus Torvalds { 3332140e807dSEric W. Biederman struct net *net = current->nsproxy->net_ns; 3333631ab46bSEric W. Biederman struct tun_file *tfile; 3334deed49fbSThomas Gleixner 33356b8a66eeSJoe Perches DBG1(KERN_INFO, "tunX: tun_chr_open\n"); 3336631ab46bSEric W. Biederman 3337140e807dSEric W. Biederman tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, 333811aa9c28SEric W. Biederman &tun_proto, 0); 3339631ab46bSEric W. Biederman if (!tfile) 3340631ab46bSEric W. 
Biederman return -ENOMEM; 3341b196d88aSJason Wang if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) { 3342b196d88aSJason Wang sk_free(&tfile->sk); 3343b196d88aSJason Wang return -ENOMEM; 3344b196d88aSJason Wang } 3345b196d88aSJason Wang 3346c7256f57SEric Dumazet mutex_init(&tfile->napi_mutex); 3347c956674bSMonam Agarwal RCU_INIT_POINTER(tfile->tun, NULL); 334854f968d6SJason Wang tfile->flags = 0; 3349fb7589a1SPavel Emelyanov tfile->ifindex = 0; 335054f968d6SJason Wang 335154f968d6SJason Wang init_waitqueue_head(&tfile->wq.wait); 33529e641bdcSXi Wang RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq); 335354f968d6SJason Wang 335454f968d6SJason Wang tfile->socket.file = file; 335554f968d6SJason Wang tfile->socket.ops = &tun_socket_ops; 335654f968d6SJason Wang 335754f968d6SJason Wang sock_init_data(&tfile->socket, &tfile->sk); 335854f968d6SJason Wang 335954f968d6SJason Wang tfile->sk.sk_write_space = tun_sock_write_space; 336054f968d6SJason Wang tfile->sk.sk_sndbuf = INT_MAX; 336154f968d6SJason Wang 3362631ab46bSEric W. Biederman file->private_data = tfile; 33634008e97fSJason Wang INIT_LIST_HEAD(&tfile->next); 336454f968d6SJason Wang 336519a6afb2SJason Wang sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); 336619a6afb2SJason Wang 33671da177e4SLinus Torvalds return 0; 33681da177e4SLinus Torvalds } 33691da177e4SLinus Torvalds 33701da177e4SLinus Torvalds static int tun_chr_close(struct inode *inode, struct file *file) 33711da177e4SLinus Torvalds { 3372631ab46bSEric W. Biederman struct tun_file *tfile = file->private_data; 33731da177e4SLinus Torvalds 3374c8d68e6bSJason Wang tun_detach(tfile, true); 33751da177e4SLinus Torvalds 33761da177e4SLinus Torvalds return 0; 33771da177e4SLinus Torvalds } 33781da177e4SLinus Torvalds 337993e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 33809484dc74Syuan linyu static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) 338193e14b6dSMasatake YAMATO { 33829484dc74Syuan linyu struct tun_file *tfile = file->private_data; 338393e14b6dSMasatake YAMATO struct tun_struct *tun; 338493e14b6dSMasatake YAMATO struct ifreq ifr; 338593e14b6dSMasatake YAMATO 338693e14b6dSMasatake YAMATO memset(&ifr, 0, sizeof(ifr)); 338793e14b6dSMasatake YAMATO 338893e14b6dSMasatake YAMATO rtnl_lock(); 33899484dc74Syuan linyu tun = tun_get(tfile); 339093e14b6dSMasatake YAMATO if (tun) 339193e14b6dSMasatake YAMATO tun_get_iff(current->nsproxy->net_ns, tun, &ifr); 339293e14b6dSMasatake YAMATO rtnl_unlock(); 339393e14b6dSMasatake YAMATO 339493e14b6dSMasatake YAMATO if (tun) 339593e14b6dSMasatake YAMATO tun_put(tun); 339693e14b6dSMasatake YAMATO 3397a3816ab0SJoe Perches seq_printf(m, "iff:\t%s\n", ifr.ifr_name); 339893e14b6dSMasatake YAMATO } 339993e14b6dSMasatake YAMATO #endif 340093e14b6dSMasatake YAMATO 3401d54b1fdbSArjan van de Ven static const struct file_operations tun_fops = { 34021da177e4SLinus Torvalds .owner = THIS_MODULE, 34031da177e4SLinus Torvalds .llseek = no_llseek, 34049b067034SAl Viro .read_iter = tun_chr_read_iter, 3405f5ff53b4SAl Viro .write_iter = tun_chr_write_iter, 34061da177e4SLinus Torvalds .poll = tun_chr_poll, 3407876bfd4dSHerbert Xu .unlocked_ioctl = tun_chr_ioctl, 340850857e2aSArnd Bergmann #ifdef CONFIG_COMPAT 340950857e2aSArnd Bergmann .compat_ioctl = tun_chr_compat_ioctl, 341050857e2aSArnd Bergmann #endif 34111da177e4SLinus Torvalds .open = tun_chr_open, 34121da177e4SLinus Torvalds .release = tun_chr_close, 341393e14b6dSMasatake YAMATO .fasync = tun_chr_fasync, 341493e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS 341593e14b6dSMasatake YAMATO .show_fdinfo = 
tun_chr_show_fdinfo, 341693e14b6dSMasatake YAMATO #endif 34171da177e4SLinus Torvalds }; 34181da177e4SLinus Torvalds 34191da177e4SLinus Torvalds static struct miscdevice tun_miscdev = { 34201da177e4SLinus Torvalds .minor = TUN_MINOR, 34211da177e4SLinus Torvalds .name = "tun", 3422e454cea2SKay Sievers .nodename = "net/tun", 34231da177e4SLinus Torvalds .fops = &tun_fops, 34241da177e4SLinus Torvalds }; 34251da177e4SLinus Torvalds 34261da177e4SLinus Torvalds /* ethtool interface */ 34271da177e4SLinus Torvalds 34284e24f2ddSChas Williams static void tun_default_link_ksettings(struct net_device *dev, 342929ccc49dSPhilippe Reynes struct ethtool_link_ksettings *cmd) 34301da177e4SLinus Torvalds { 343129ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, supported); 343229ccc49dSPhilippe Reynes ethtool_link_ksettings_zero_link_mode(cmd, advertising); 343329ccc49dSPhilippe Reynes cmd->base.speed = SPEED_10; 343429ccc49dSPhilippe Reynes cmd->base.duplex = DUPLEX_FULL; 343529ccc49dSPhilippe Reynes cmd->base.port = PORT_TP; 343629ccc49dSPhilippe Reynes cmd->base.phy_address = 0; 343729ccc49dSPhilippe Reynes cmd->base.autoneg = AUTONEG_DISABLE; 34384e24f2ddSChas Williams } 34394e24f2ddSChas Williams 34404e24f2ddSChas Williams static int tun_get_link_ksettings(struct net_device *dev, 34414e24f2ddSChas Williams struct ethtool_link_ksettings *cmd) 34424e24f2ddSChas Williams { 34434e24f2ddSChas Williams struct tun_struct *tun = netdev_priv(dev); 34444e24f2ddSChas Williams 34454e24f2ddSChas Williams memcpy(cmd, &tun->link_ksettings, sizeof(*cmd)); 34464e24f2ddSChas Williams return 0; 34474e24f2ddSChas Williams } 34484e24f2ddSChas Williams 34494e24f2ddSChas Williams static int tun_set_link_ksettings(struct net_device *dev, 34504e24f2ddSChas Williams const struct ethtool_link_ksettings *cmd) 34514e24f2ddSChas Williams { 34524e24f2ddSChas Williams struct tun_struct *tun = netdev_priv(dev); 34534e24f2ddSChas Williams 34544e24f2ddSChas Williams memcpy(&tun->link_ksettings, cmd, sizeof(*cmd)); 34551da177e4SLinus Torvalds return 0; 34561da177e4SLinus Torvalds } 34571da177e4SLinus Torvalds 34581da177e4SLinus Torvalds static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 34591da177e4SLinus Torvalds { 34601da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 34611da177e4SLinus Torvalds 346233a5ba14SRick Jones strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 346333a5ba14SRick Jones strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 34641da177e4SLinus Torvalds 34651da177e4SLinus Torvalds switch (tun->flags & TUN_TYPE_MASK) { 346640630b82SMichael S. Tsirkin case IFF_TUN: 346733a5ba14SRick Jones strlcpy(info->bus_info, "tun", sizeof(info->bus_info)); 34681da177e4SLinus Torvalds break; 346940630b82SMichael S. 
Tsirkin case IFF_TAP: 347033a5ba14SRick Jones strlcpy(info->bus_info, "tap", sizeof(info->bus_info)); 34711da177e4SLinus Torvalds break; 34721da177e4SLinus Torvalds } 34731da177e4SLinus Torvalds } 34741da177e4SLinus Torvalds 34751da177e4SLinus Torvalds static u32 tun_get_msglevel(struct net_device *dev) 34761da177e4SLinus Torvalds { 34771da177e4SLinus Torvalds #ifdef TUN_DEBUG 34781da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 34791da177e4SLinus Torvalds return tun->debug; 34801da177e4SLinus Torvalds #else 34811da177e4SLinus Torvalds return -EOPNOTSUPP; 34821da177e4SLinus Torvalds #endif 34831da177e4SLinus Torvalds } 34841da177e4SLinus Torvalds 34851da177e4SLinus Torvalds static void tun_set_msglevel(struct net_device *dev, u32 value) 34861da177e4SLinus Torvalds { 34871da177e4SLinus Torvalds #ifdef TUN_DEBUG 34881da177e4SLinus Torvalds struct tun_struct *tun = netdev_priv(dev); 34891da177e4SLinus Torvalds tun->debug = value; 34901da177e4SLinus Torvalds #endif 34911da177e4SLinus Torvalds } 34921da177e4SLinus Torvalds 34935503fcecSJason Wang static int tun_get_coalesce(struct net_device *dev, 34945503fcecSJason Wang struct ethtool_coalesce *ec) 34955503fcecSJason Wang { 34965503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 34975503fcecSJason Wang 34985503fcecSJason Wang ec->rx_max_coalesced_frames = tun->rx_batched; 34995503fcecSJason Wang 35005503fcecSJason Wang return 0; 35015503fcecSJason Wang } 35025503fcecSJason Wang 35035503fcecSJason Wang static int tun_set_coalesce(struct net_device *dev, 35045503fcecSJason Wang struct ethtool_coalesce *ec) 35055503fcecSJason Wang { 35065503fcecSJason Wang struct tun_struct *tun = netdev_priv(dev); 35075503fcecSJason Wang 35085503fcecSJason Wang if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) 35095503fcecSJason Wang tun->rx_batched = NAPI_POLL_WEIGHT; 35105503fcecSJason Wang else 35115503fcecSJason Wang tun->rx_batched = ec->rx_max_coalesced_frames; 35125503fcecSJason Wang 35135503fcecSJason Wang return 0; 35145503fcecSJason Wang } 35155503fcecSJason Wang 35167282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops = { 35171da177e4SLinus Torvalds .get_drvinfo = tun_get_drvinfo, 35181da177e4SLinus Torvalds .get_msglevel = tun_get_msglevel, 35191da177e4SLinus Torvalds .set_msglevel = tun_set_msglevel, 3520bee31369SNolan Leake .get_link = ethtool_op_get_link, 3521eda29772SRichard Cochran .get_ts_info = ethtool_op_get_ts_info, 35225503fcecSJason Wang .get_coalesce = tun_get_coalesce, 35235503fcecSJason Wang .set_coalesce = tun_set_coalesce, 352429ccc49dSPhilippe Reynes .get_link_ksettings = tun_get_link_ksettings, 35254e24f2ddSChas Williams .set_link_ksettings = tun_set_link_ksettings, 35261da177e4SLinus Torvalds }; 35271da177e4SLinus Torvalds 35281576d986SJason Wang static int tun_queue_resize(struct tun_struct *tun) 35291576d986SJason Wang { 35301576d986SJason Wang struct net_device *dev = tun->dev; 35311576d986SJason Wang struct tun_file *tfile; 35325990a305SJason Wang struct ptr_ring **rings; 35331576d986SJason Wang int n = tun->numqueues + tun->numdisabled; 35341576d986SJason Wang int ret, i; 35351576d986SJason Wang 35365990a305SJason Wang rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); 35375990a305SJason Wang if (!rings) 35381576d986SJason Wang return -ENOMEM; 35391576d986SJason Wang 35401576d986SJason Wang for (i = 0; i < tun->numqueues; i++) { 35411576d986SJason Wang tfile = rtnl_dereference(tun->tfiles[i]); 35425990a305SJason Wang rings[i] = &tfile->tx_ring; 35431576d986SJason Wang } 
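	/* Detached (disabled) queues keep their tx_ring as well, so include
	 * them in the resize and keep every ring sized to dev->tx_queue_len.
	 */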
35441576d986SJason Wang list_for_each_entry(tfile, &tun->disabled, next) 35455990a305SJason Wang rings[i++] = &tfile->tx_ring; 35461576d986SJason Wang 35475990a305SJason Wang ret = ptr_ring_resize_multiple(rings, n, 35485990a305SJason Wang dev->tx_queue_len, GFP_KERNEL, 3549fc72d1d5SJason Wang tun_ptr_free); 35501576d986SJason Wang 35515990a305SJason Wang kfree(rings); 35521576d986SJason Wang return ret; 35531576d986SJason Wang } 35541576d986SJason Wang 35551576d986SJason Wang static int tun_device_event(struct notifier_block *unused, 35561576d986SJason Wang unsigned long event, void *ptr) 35571576d986SJason Wang { 35581576d986SJason Wang struct net_device *dev = netdev_notifier_info_to_dev(ptr); 35591576d986SJason Wang struct tun_struct *tun = netdev_priv(dev); 35601576d986SJason Wang 356186dfb4acSCraig Gallek if (dev->rtnl_link_ops != &tun_link_ops) 356286dfb4acSCraig Gallek return NOTIFY_DONE; 356386dfb4acSCraig Gallek 35641576d986SJason Wang switch (event) { 35651576d986SJason Wang case NETDEV_CHANGE_TX_QUEUE_LEN: 35661576d986SJason Wang if (tun_queue_resize(tun)) 35671576d986SJason Wang return NOTIFY_BAD; 35681576d986SJason Wang break; 35691576d986SJason Wang default: 35701576d986SJason Wang break; 35711576d986SJason Wang } 35721576d986SJason Wang 35731576d986SJason Wang return NOTIFY_DONE; 35741576d986SJason Wang } 35751576d986SJason Wang 35761576d986SJason Wang static struct notifier_block tun_notifier_block __read_mostly = { 35771576d986SJason Wang .notifier_call = tun_device_event, 35781576d986SJason Wang }; 357979d17604SPavel Emelyanov 35801da177e4SLinus Torvalds static int __init tun_init(void) 35811da177e4SLinus Torvalds { 35821da177e4SLinus Torvalds int ret = 0; 35831da177e4SLinus Torvalds 35846b8a66eeSJoe Perches pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 35851da177e4SLinus Torvalds 3586f019a7a5SEric W. Biederman ret = rtnl_link_register(&tun_link_ops); 358779d17604SPavel Emelyanov if (ret) { 35886b8a66eeSJoe Perches pr_err("Can't register link_ops\n"); 3589f019a7a5SEric W. Biederman goto err_linkops; 359079d17604SPavel Emelyanov } 359179d17604SPavel Emelyanov 35921da177e4SLinus Torvalds ret = misc_register(&tun_miscdev); 359379d17604SPavel Emelyanov if (ret) { 35946b8a66eeSJoe Perches pr_err("Can't register misc device %d\n", TUN_MINOR); 359579d17604SPavel Emelyanov goto err_misc; 359679d17604SPavel Emelyanov } 35971576d986SJason Wang 35985edfbd3cSTonghao Zhang ret = register_netdevice_notifier(&tun_notifier_block); 35995edfbd3cSTonghao Zhang if (ret) { 36005edfbd3cSTonghao Zhang pr_err("Can't register netdevice notifier\n"); 36015edfbd3cSTonghao Zhang goto err_notifier; 36025edfbd3cSTonghao Zhang } 36035edfbd3cSTonghao Zhang 360479d17604SPavel Emelyanov return 0; 36055edfbd3cSTonghao Zhang 36065edfbd3cSTonghao Zhang err_notifier: 36075edfbd3cSTonghao Zhang misc_deregister(&tun_miscdev); 360879d17604SPavel Emelyanov err_misc: 3609f019a7a5SEric W. Biederman rtnl_link_unregister(&tun_link_ops); 3610f019a7a5SEric W. Biederman err_linkops: 36111da177e4SLinus Torvalds return ret; 36121da177e4SLinus Torvalds } 36131da177e4SLinus Torvalds 36141da177e4SLinus Torvalds static void tun_cleanup(void) 36151da177e4SLinus Torvalds { 36161da177e4SLinus Torvalds misc_deregister(&tun_miscdev); 3617f019a7a5SEric W. Biederman rtnl_link_unregister(&tun_link_ops); 36181576d986SJason Wang unregister_netdevice_notifier(&tun_notifier_block); 36191da177e4SLinus Torvalds } 36201da177e4SLinus Torvalds 362105c2828cSMichael S. Tsirkin /* Get an underlying socket object from tun file. 
Returns error unless file is 362205c2828cSMichael S. Tsirkin * attached to a device. The returned object works like a packet socket, it 362305c2828cSMichael S. Tsirkin * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for 362405c2828cSMichael S. Tsirkin * holding a reference to the file for as long as the socket is in use. */ 362505c2828cSMichael S. Tsirkin struct socket *tun_get_socket(struct file *file) 362605c2828cSMichael S. Tsirkin { 36276e914fc7SJason Wang struct tun_file *tfile; 362805c2828cSMichael S. Tsirkin if (file->f_op != &tun_fops) 362905c2828cSMichael S. Tsirkin return ERR_PTR(-EINVAL); 36306e914fc7SJason Wang tfile = file->private_data; 36316e914fc7SJason Wang if (!tfile) 363205c2828cSMichael S. Tsirkin return ERR_PTR(-EBADFD); 363354f968d6SJason Wang return &tfile->socket; 363405c2828cSMichael S. Tsirkin } 363505c2828cSMichael S. Tsirkin EXPORT_SYMBOL_GPL(tun_get_socket); 363605c2828cSMichael S. Tsirkin 36375990a305SJason Wang struct ptr_ring *tun_get_tx_ring(struct file *file) 363883339c6bSJason Wang { 363983339c6bSJason Wang struct tun_file *tfile; 364083339c6bSJason Wang 364183339c6bSJason Wang if (file->f_op != &tun_fops) 364283339c6bSJason Wang return ERR_PTR(-EINVAL); 364383339c6bSJason Wang tfile = file->private_data; 364483339c6bSJason Wang if (!tfile) 364583339c6bSJason Wang return ERR_PTR(-EBADFD); 36465990a305SJason Wang return &tfile->tx_ring; 364783339c6bSJason Wang } 36485990a305SJason Wang EXPORT_SYMBOL_GPL(tun_get_tx_ring); 364983339c6bSJason Wang 36501da177e4SLinus Torvalds module_init(tun_init); 36511da177e4SLinus Torvalds module_exit(tun_cleanup); 36521da177e4SLinus Torvalds MODULE_DESCRIPTION(DRV_DESCRIPTION); 36531da177e4SLinus Torvalds MODULE_AUTHOR(DRV_COPYRIGHT); 36541da177e4SLinus Torvalds MODULE_LICENSE("GPL"); 36551da177e4SLinus Torvalds MODULE_ALIAS_MISCDEV(TUN_MINOR); 3656578454ffSKay Sievers MODULE_ALIAS("devname:net/tun"); 3657
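For reference, a minimal userspace sketch of the character-device interface implemented above: the canonical open of /dev/net/tun followed by TUNSETIFF. The helper name tap_open() and its error handling are illustrative only, not part of the driver; in-kernel consumers such as vhost-net instead obtain the backing socket of an already-configured fd via the exported tun_get_socket().

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Create or attach to a TAP interface; returns an fd whose read()/write()
 * carry raw Ethernet frames (IFF_NO_PI suppresses the packet-info header).
 */
static int tap_open(char name[IFNAMSIZ])
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	/* The kernel may have completed a "tap%d" template; report it back. */
	strncpy(name, ifr.ifr_name, IFNAMSIZ);
	return fd;
}

With the returned descriptor, read() and write() move frames between the application and the network stack, and further ioctls such as TUNSETOFFLOAD, TUNSETPERSIST or TUNSETQUEUE operate on the same fd.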