xref: /openbmc/linux/drivers/net/tun.c (revision fc72d1d54dd9ffe2552c76b17e9129803ca7b255)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  *  TUN - Universal TUN/TAP device driver.
31da177e4SLinus Torvalds  *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  *  This program is free software; you can redistribute it and/or modify
61da177e4SLinus Torvalds  *  it under the terms of the GNU General Public License as published by
71da177e4SLinus Torvalds  *  the Free Software Foundation; either version 2 of the License, or
81da177e4SLinus Torvalds  *  (at your option) any later version.
91da177e4SLinus Torvalds  *
101da177e4SLinus Torvalds  *  This program is distributed in the hope that it will be useful,
111da177e4SLinus Torvalds  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
121da177e4SLinus Torvalds  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
131da177e4SLinus Torvalds  *  GNU General Public License for more details.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
161da177e4SLinus Torvalds  */
171da177e4SLinus Torvalds 
181da177e4SLinus Torvalds /*
191da177e4SLinus Torvalds  *  Changes:
201da177e4SLinus Torvalds  *
21ff4cc3acSMike Kershaw  *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
22ff4cc3acSMike Kershaw  *    Add TUNSETLINK ioctl to set the link encapsulation
23ff4cc3acSMike Kershaw  *
241da177e4SLinus Torvalds  *  Mark Smith <markzzzsmith@yahoo.com.au>
25344dc8edSJoe Perches  *    Use eth_random_addr() for tap MAC address.
261da177e4SLinus Torvalds  *
271da177e4SLinus Torvalds  *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
281da177e4SLinus Torvalds  *    Fixes in packet dropping, queue length setting and queue wakeup.
291da177e4SLinus Torvalds  *    Increased default tx queue length.
301da177e4SLinus Torvalds  *    Added ethtool API.
311da177e4SLinus Torvalds  *    Minor cleanups
321da177e4SLinus Torvalds  *
331da177e4SLinus Torvalds  *  Daniel Podlejski <underley@underley.eu.org>
341da177e4SLinus Torvalds  *    Modifications for 2.3.99-pre5 kernel.
351da177e4SLinus Torvalds  */
361da177e4SLinus Torvalds 
376b8a66eeSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
386b8a66eeSJoe Perches 
391da177e4SLinus Torvalds #define DRV_NAME	"tun"
401da177e4SLinus Torvalds #define DRV_VERSION	"1.6"
411da177e4SLinus Torvalds #define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
421da177e4SLinus Torvalds #define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
431da177e4SLinus Torvalds 
441da177e4SLinus Torvalds #include <linux/module.h>
451da177e4SLinus Torvalds #include <linux/errno.h>
461da177e4SLinus Torvalds #include <linux/kernel.h>
47174cd4b1SIngo Molnar #include <linux/sched/signal.h>
481da177e4SLinus Torvalds #include <linux/major.h>
491da177e4SLinus Torvalds #include <linux/slab.h>
501da177e4SLinus Torvalds #include <linux/poll.h>
511da177e4SLinus Torvalds #include <linux/fcntl.h>
521da177e4SLinus Torvalds #include <linux/init.h>
531da177e4SLinus Torvalds #include <linux/skbuff.h>
541da177e4SLinus Torvalds #include <linux/netdevice.h>
551da177e4SLinus Torvalds #include <linux/etherdevice.h>
561da177e4SLinus Torvalds #include <linux/miscdevice.h>
571da177e4SLinus Torvalds #include <linux/ethtool.h>
581da177e4SLinus Torvalds #include <linux/rtnetlink.h>
5950857e2aSArnd Bergmann #include <linux/compat.h>
601da177e4SLinus Torvalds #include <linux/if.h>
611da177e4SLinus Torvalds #include <linux/if_arp.h>
621da177e4SLinus Torvalds #include <linux/if_ether.h>
631da177e4SLinus Torvalds #include <linux/if_tun.h>
646680ec68SJason Wang #include <linux/if_vlan.h>
651da177e4SLinus Torvalds #include <linux/crc32.h>
66d647a591SPavel Emelyanov #include <linux/nsproxy.h>
67f43798c2SRusty Russell #include <linux/virtio_net.h>
6899405162SMichael S. Tsirkin #include <linux/rcupdate.h>
69881d966bSEric W. Biederman #include <net/net_namespace.h>
7079d17604SPavel Emelyanov #include <net/netns/generic.h>
71f019a7a5SEric W. Biederman #include <net/rtnetlink.h>
7233dccbb0SHerbert Xu #include <net/sock.h>
7393e14b6dSMasatake YAMATO #include <linux/seq_file.h>
74e0b46d0eSHerbert Xu #include <linux/uio.h>
751576d986SJason Wang #include <linux/skb_array.h>
76761876c8SJason Wang #include <linux/bpf.h>
77761876c8SJason Wang #include <linux/bpf_trace.h>
7890e33d45SPetar Penkov #include <linux/mutex.h>
791da177e4SLinus Torvalds 
807c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
811da177e4SLinus Torvalds 
8214daa021SRusty Russell /* Uncomment to enable debugging */
8314daa021SRusty Russell /* #define TUN_DEBUG 1 */
8414daa021SRusty Russell 
851da177e4SLinus Torvalds #ifdef TUN_DEBUG
861da177e4SLinus Torvalds static int debug;
8714daa021SRusty Russell 
886b8a66eeSJoe Perches #define tun_debug(level, tun, fmt, args...)			\
896b8a66eeSJoe Perches do {								\
906b8a66eeSJoe Perches 	if (tun->debug)						\
916b8a66eeSJoe Perches 		netdev_printk(level, tun->dev, fmt, ##args);	\
926b8a66eeSJoe Perches } while (0)
936b8a66eeSJoe Perches #define DBG1(level, fmt, args...)				\
946b8a66eeSJoe Perches do {								\
956b8a66eeSJoe Perches 	if (debug == 2)						\
966b8a66eeSJoe Perches 		printk(level fmt, ##args);			\
976b8a66eeSJoe Perches } while (0)
9814daa021SRusty Russell #else
996b8a66eeSJoe Perches #define tun_debug(level, tun, fmt, args...)			\
1006b8a66eeSJoe Perches do {								\
1016b8a66eeSJoe Perches 	if (0)							\
1026b8a66eeSJoe Perches 		netdev_printk(level, tun->dev, fmt, ##args);	\
1036b8a66eeSJoe Perches } while (0)
1046b8a66eeSJoe Perches #define DBG1(level, fmt, args...)				\
1056b8a66eeSJoe Perches do {								\
1066b8a66eeSJoe Perches 	if (0)							\
1076b8a66eeSJoe Perches 		printk(level fmt, ##args);			\
1086b8a66eeSJoe Perches } while (0)
1091da177e4SLinus Torvalds #endif
1101da177e4SLinus Torvalds 
111761876c8SJason Wang #define TUN_HEADROOM 256
1127df13219SJason Wang #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
11366ccbc9cSJason Wang 
114031f5e03SMichael S. Tsirkin /* TUN device flags */
115031f5e03SMichael S. Tsirkin 
116031f5e03SMichael S. Tsirkin /* IFF_ATTACH_QUEUE is never stored in device flags, so it is
117031f5e03SMichael S. Tsirkin  * overloaded to mean fasync when stored in a tun_file's flags.
118031f5e03SMichael S. Tsirkin  */
119031f5e03SMichael S. Tsirkin #define TUN_FASYNC	IFF_ATTACH_QUEUE
1201cf8e410SMichael S. Tsirkin /* High bits in flags field are unused. */
1211cf8e410SMichael S. Tsirkin #define TUN_VNET_LE     0x80000000
1228b8e658bSGreg Kurz #define TUN_VNET_BE     0x40000000
123031f5e03SMichael S. Tsirkin 
124031f5e03SMichael S. Tsirkin #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
12590e33d45SPetar Penkov 		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
12690e33d45SPetar Penkov 
1270690899bSMichael S. Tsirkin #define GOODCOPY_LEN 128
1280690899bSMichael S. Tsirkin 
129f271b2ccSMax Krasnyansky #define FLT_EXACT_COUNT 8
130f271b2ccSMax Krasnyansky struct tap_filter {
131f271b2ccSMax Krasnyansky 	unsigned int    count;    /* Number of addrs. Zero means disabled */
132f271b2ccSMax Krasnyansky 	u32             mask[2];  /* Mask of the hashed addrs */
133f271b2ccSMax Krasnyansky 	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
134f271b2ccSMax Krasnyansky };
135f271b2ccSMax Krasnyansky 
136baf71c5cSPankaj Gupta /* MAX_TAP_QUEUES 256 is chosen to allow the rx/tx queue count to equal
137baf71c5cSPankaj Gupta  * the maximum number of vCPUs in a guest. */
138baf71c5cSPankaj Gupta #define MAX_TAP_QUEUES 256
139b8732fb7SJason Wang #define MAX_TAP_FLOWS  4096
140c8d68e6bSJason Wang 
14196442e42SJason Wang #define TUN_FLOW_EXPIRE (3 * HZ)
14296442e42SJason Wang 
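/* Per-cpu packet counters.  The 64-bit rx/tx packet and byte counters are
 * written under the u64_stats_sync sequence so that tun_net_get_stats64()
 * can take a consistent snapshot; the 32-bit drop/error counters are
 * bumped with this_cpu_inc() and summed without extra synchronization.
 */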
143608b9977SPaolo Abeni struct tun_pcpu_stats {
144608b9977SPaolo Abeni 	u64 rx_packets;
145608b9977SPaolo Abeni 	u64 rx_bytes;
146608b9977SPaolo Abeni 	u64 tx_packets;
147608b9977SPaolo Abeni 	u64 tx_bytes;
148608b9977SPaolo Abeni 	struct u64_stats_sync syncp;
149608b9977SPaolo Abeni 	u32 rx_dropped;
150608b9977SPaolo Abeni 	u32 tx_dropped;
151608b9977SPaolo Abeni 	u32 rx_frame_errors;
152608b9977SPaolo Abeni };
153608b9977SPaolo Abeni 
15454f968d6SJason Wang /* A tun_file connects an open character device to a tuntap netdevice. It
15592d4ea6eSstephen hemminger  * also contains all socket related structures (except sock_fprog and tap_filter)
15654f968d6SJason Wang  * and serves as one transmit queue for the tuntap device. The sock_fprog and
15754f968d6SJason Wang  * tap_filter are kept in tun_struct since they are used for filtering on the
15836fe8c09SRami Rosen  * netdevice, not for a specific queue (at least no such requirement has come
15954f968d6SJason Wang  * up).
1606e914fc7SJason Wang  *
1616e914fc7SJason Wang  * RCU usage:
16236fe8c09SRami Rosen  * The tun_file and tun_struct are loosely coupled, the pointer from one to the
1636e914fc7SJason Wang  * other can only be read while rcu_read_lock or rtnl_lock is held.
16454f968d6SJason Wang  */
165631ab46bSEric W. Biederman struct tun_file {
16654f968d6SJason Wang 	struct sock sk;
16754f968d6SJason Wang 	struct socket socket;
16854f968d6SJason Wang 	struct socket_wq wq;
1696e914fc7SJason Wang 	struct tun_struct __rcu *tun;
17054f968d6SJason Wang 	struct fasync_struct *fasync;
17154f968d6SJason Wang 	/* only used for fasync */
17254f968d6SJason Wang 	unsigned int flags;
173fb7589a1SPavel Emelyanov 	union {
174c8d68e6bSJason Wang 		u16 queue_index;
175fb7589a1SPavel Emelyanov 		unsigned int ifindex;
176fb7589a1SPavel Emelyanov 	};
17794317099SPetar Penkov 	struct napi_struct napi;
178aec72f33SEric Dumazet 	bool napi_enabled;
17990e33d45SPetar Penkov 	struct mutex napi_mutex;	/* Protects access to the above napi */
1804008e97fSJason Wang 	struct list_head next;
1814008e97fSJason Wang 	struct tun_struct *detached;
1825990a305SJason Wang 	struct ptr_ring tx_ring;
1838bf5c4eeSJesper Dangaard Brouer 	struct xdp_rxq_info xdp_rxq;
184631ab46bSEric W. Biederman };
185631ab46bSEric W. Biederman 
18696442e42SJason Wang struct tun_flow_entry {
18796442e42SJason Wang 	struct hlist_node hash_link;
18896442e42SJason Wang 	struct rcu_head rcu;
18996442e42SJason Wang 	struct tun_struct *tun;
19096442e42SJason Wang 
19196442e42SJason Wang 	u32 rxhash;
1929bc88939STom Herbert 	u32 rps_rxhash;
19396442e42SJason Wang 	int queue_index;
19496442e42SJason Wang 	unsigned long updated;
19596442e42SJason Wang };
19696442e42SJason Wang 
19796442e42SJason Wang #define TUN_NUM_FLOW_ENTRIES 1024
19896442e42SJason Wang 
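/* Optional BPF program that selects the transmit queue for a packet.
 * The pointer is RCU-protected: tun_ebpf_select_queue() runs the program
 * when one is attached, otherwise tun_select_queue() falls back to the
 * automatic flow-based steering in tun_automq_select_queue().
 */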
19996f84061SJason Wang struct tun_steering_prog {
20096f84061SJason Wang 	struct rcu_head rcu;
20196f84061SJason Wang 	struct bpf_prog *prog;
20296f84061SJason Wang };
20396f84061SJason Wang 
20454f968d6SJason Wang /* Since the socket was moved to tun_file, the socket filter, sndbuf and
20536fe8c09SRami Rosen  * vnet header size are restored when a file is attached to a persistent
20654f968d6SJason Wang  * device, in order to preserve the behavior of persistent devices.
20754f968d6SJason Wang  */
20814daa021SRusty Russell struct tun_struct {
209c8d68e6bSJason Wang 	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
210c8d68e6bSJason Wang 	unsigned int            numqueues;
211f271b2ccSMax Krasnyansky 	unsigned int 		flags;
2120625c883SEric W. Biederman 	kuid_t			owner;
2130625c883SEric W. Biederman 	kgid_t			group;
21414daa021SRusty Russell 
21514daa021SRusty Russell 	struct net_device	*dev;
216c8f44affSMichał Mirosław 	netdev_features_t	set_features;
21788255375SMichał Mirosław #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
218d591a1f3SDavid S. Miller 			  NETIF_F_TSO6)
219d9d52b51SMichael S. Tsirkin 
220eaea34b2SPaolo Abeni 	int			align;
221d9d52b51SMichael S. Tsirkin 	int			vnet_hdr_sz;
22254f968d6SJason Wang 	int			sndbuf;
22354f968d6SJason Wang 	struct tap_filter	txflt;
22454f968d6SJason Wang 	struct sock_fprog	fprog;
22554f968d6SJason Wang 	/* protected by rtnl lock */
22654f968d6SJason Wang 	bool			filter_attached;
22714daa021SRusty Russell #ifdef TUN_DEBUG
22814daa021SRusty Russell 	int debug;
22914daa021SRusty Russell #endif
23096442e42SJason Wang 	spinlock_t lock;
23196442e42SJason Wang 	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
23296442e42SJason Wang 	struct timer_list flow_gc_timer;
23396442e42SJason Wang 	unsigned long ageing_time;
2344008e97fSJason Wang 	unsigned int numdisabled;
2354008e97fSJason Wang 	struct list_head disabled;
2365dbbaf2dSPaul Moore 	void *security;
237b8732fb7SJason Wang 	u32 flow_count;
2385503fcecSJason Wang 	u32 rx_batched;
239608b9977SPaolo Abeni 	struct tun_pcpu_stats __percpu *pcpu_stats;
240761876c8SJason Wang 	struct bpf_prog __rcu *xdp_prog;
24196f84061SJason Wang 	struct tun_steering_prog __rcu *steering_prog;
24214daa021SRusty Russell };
24314daa021SRusty Russell 
244*fc72d1d5SJason Wang bool tun_is_xdp_buff(void *ptr)
245*fc72d1d5SJason Wang {
246*fc72d1d5SJason Wang 	return (unsigned long)ptr & TUN_XDP_FLAG;
247*fc72d1d5SJason Wang }
248*fc72d1d5SJason Wang EXPORT_SYMBOL(tun_is_xdp_buff);
249*fc72d1d5SJason Wang 
250*fc72d1d5SJason Wang void *tun_xdp_to_ptr(void *ptr)
251*fc72d1d5SJason Wang {
252*fc72d1d5SJason Wang 	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
253*fc72d1d5SJason Wang }
254*fc72d1d5SJason Wang EXPORT_SYMBOL(tun_xdp_to_ptr);
255*fc72d1d5SJason Wang 
256*fc72d1d5SJason Wang void *tun_ptr_to_xdp(void *ptr)
257*fc72d1d5SJason Wang {
258*fc72d1d5SJason Wang 	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
259*fc72d1d5SJason Wang }
260*fc72d1d5SJason Wang EXPORT_SYMBOL(tun_ptr_to_xdp);
261*fc72d1d5SJason Wang 
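/* IFF_NAPI receive path.  The poll callback below splices the skbs that
 * the rx path queued on sk_write_queue, feeds up to @budget of them into
 * GRO, and splices any leftovers back for the next poll.
 */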
26294317099SPetar Penkov static int tun_napi_receive(struct napi_struct *napi, int budget)
26394317099SPetar Penkov {
26494317099SPetar Penkov 	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
26594317099SPetar Penkov 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
26694317099SPetar Penkov 	struct sk_buff_head process_queue;
26794317099SPetar Penkov 	struct sk_buff *skb;
26894317099SPetar Penkov 	int received = 0;
26994317099SPetar Penkov 
27094317099SPetar Penkov 	__skb_queue_head_init(&process_queue);
27194317099SPetar Penkov 
27294317099SPetar Penkov 	spin_lock(&queue->lock);
27394317099SPetar Penkov 	skb_queue_splice_tail_init(queue, &process_queue);
27494317099SPetar Penkov 	spin_unlock(&queue->lock);
27594317099SPetar Penkov 
27694317099SPetar Penkov 	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
27794317099SPetar Penkov 		napi_gro_receive(napi, skb);
27894317099SPetar Penkov 		++received;
27994317099SPetar Penkov 	}
28094317099SPetar Penkov 
28194317099SPetar Penkov 	if (!skb_queue_empty(&process_queue)) {
28294317099SPetar Penkov 		spin_lock(&queue->lock);
28394317099SPetar Penkov 		skb_queue_splice(&process_queue, queue);
28494317099SPetar Penkov 		spin_unlock(&queue->lock);
28594317099SPetar Penkov 	}
28694317099SPetar Penkov 
28794317099SPetar Penkov 	return received;
28894317099SPetar Penkov }
28994317099SPetar Penkov 
29094317099SPetar Penkov static int tun_napi_poll(struct napi_struct *napi, int budget)
29194317099SPetar Penkov {
29294317099SPetar Penkov 	unsigned int received;
29394317099SPetar Penkov 
29494317099SPetar Penkov 	received = tun_napi_receive(napi, budget);
29594317099SPetar Penkov 
29694317099SPetar Penkov 	if (received < budget)
29794317099SPetar Penkov 		napi_complete_done(napi, received);
29894317099SPetar Penkov 
29994317099SPetar Penkov 	return received;
30094317099SPetar Penkov }
30194317099SPetar Penkov 
30294317099SPetar Penkov static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
30394317099SPetar Penkov 			  bool napi_en)
30494317099SPetar Penkov {
305aec72f33SEric Dumazet 	tfile->napi_enabled = napi_en;
30694317099SPetar Penkov 	if (napi_en) {
30794317099SPetar Penkov 		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
30894317099SPetar Penkov 			       NAPI_POLL_WEIGHT);
30994317099SPetar Penkov 		napi_enable(&tfile->napi);
31090e33d45SPetar Penkov 		mutex_init(&tfile->napi_mutex);
31194317099SPetar Penkov 	}
31294317099SPetar Penkov }
31394317099SPetar Penkov 
31494317099SPetar Penkov static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
31594317099SPetar Penkov {
316aec72f33SEric Dumazet 	if (tfile->napi_enabled)
31794317099SPetar Penkov 		napi_disable(&tfile->napi);
31894317099SPetar Penkov }
31994317099SPetar Penkov 
32094317099SPetar Penkov static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
32194317099SPetar Penkov {
322aec72f33SEric Dumazet 	if (tfile->napi_enabled)
32394317099SPetar Penkov 		netif_napi_del(&tfile->napi);
32494317099SPetar Penkov }
32594317099SPetar Penkov 
32690e33d45SPetar Penkov static bool tun_napi_frags_enabled(const struct tun_struct *tun)
32790e33d45SPetar Penkov {
32890e33d45SPetar Penkov 	return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
32990e33d45SPetar Penkov }
33090e33d45SPetar Penkov 
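/* With CONFIG_TUN_VNET_CROSS_LE user space may set TUN_VNET_BE to force
 * big-endian interpretation of the virtio-net header regardless of host
 * endianness; without the option the request is rejected with -EINVAL and
 * only the legacy virtio endianness (plus TUN_VNET_LE) applies.
 */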
3318b8e658bSGreg Kurz #ifdef CONFIG_TUN_VNET_CROSS_LE
3328b8e658bSGreg Kurz static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
3338b8e658bSGreg Kurz {
3348b8e658bSGreg Kurz 	return tun->flags & TUN_VNET_BE ? false :
3358b8e658bSGreg Kurz 		virtio_legacy_is_little_endian();
3368b8e658bSGreg Kurz }
3378b8e658bSGreg Kurz 
3388b8e658bSGreg Kurz static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
3398b8e658bSGreg Kurz {
3408b8e658bSGreg Kurz 	int be = !!(tun->flags & TUN_VNET_BE);
3418b8e658bSGreg Kurz 
3428b8e658bSGreg Kurz 	if (put_user(be, argp))
3438b8e658bSGreg Kurz 		return -EFAULT;
3448b8e658bSGreg Kurz 
3458b8e658bSGreg Kurz 	return 0;
3468b8e658bSGreg Kurz }
3478b8e658bSGreg Kurz 
3488b8e658bSGreg Kurz static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
3498b8e658bSGreg Kurz {
3508b8e658bSGreg Kurz 	int be;
3518b8e658bSGreg Kurz 
3528b8e658bSGreg Kurz 	if (get_user(be, argp))
3538b8e658bSGreg Kurz 		return -EFAULT;
3548b8e658bSGreg Kurz 
3558b8e658bSGreg Kurz 	if (be)
3568b8e658bSGreg Kurz 		tun->flags |= TUN_VNET_BE;
3578b8e658bSGreg Kurz 	else
3588b8e658bSGreg Kurz 		tun->flags &= ~TUN_VNET_BE;
3598b8e658bSGreg Kurz 
3608b8e658bSGreg Kurz 	return 0;
3618b8e658bSGreg Kurz }
3628b8e658bSGreg Kurz #else
3638b8e658bSGreg Kurz static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
3648b8e658bSGreg Kurz {
3658b8e658bSGreg Kurz 	return virtio_legacy_is_little_endian();
3668b8e658bSGreg Kurz }
3678b8e658bSGreg Kurz 
3688b8e658bSGreg Kurz static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
3698b8e658bSGreg Kurz {
3708b8e658bSGreg Kurz 	return -EINVAL;
3718b8e658bSGreg Kurz }
3728b8e658bSGreg Kurz 
3738b8e658bSGreg Kurz static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
3748b8e658bSGreg Kurz {
3758b8e658bSGreg Kurz 	return -EINVAL;
3768b8e658bSGreg Kurz }
3778b8e658bSGreg Kurz #endif /* CONFIG_TUN_VNET_CROSS_LE */
3788b8e658bSGreg Kurz 
37925bd55bbSGreg Kurz static inline bool tun_is_little_endian(struct tun_struct *tun)
38025bd55bbSGreg Kurz {
3817d824109SGreg Kurz 	return tun->flags & TUN_VNET_LE ||
3828b8e658bSGreg Kurz 		tun_legacy_is_little_endian(tun);
38325bd55bbSGreg Kurz }
38425bd55bbSGreg Kurz 
38556f0dcc5SMichael S. Tsirkin static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
38656f0dcc5SMichael S. Tsirkin {
38725bd55bbSGreg Kurz 	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
38856f0dcc5SMichael S. Tsirkin }
38956f0dcc5SMichael S. Tsirkin 
39056f0dcc5SMichael S. Tsirkin static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
39156f0dcc5SMichael S. Tsirkin {
39225bd55bbSGreg Kurz 	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
39356f0dcc5SMichael S. Tsirkin }
39456f0dcc5SMichael S. Tsirkin 
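/* Flow hash table helpers.  The table has TUN_NUM_FLOW_ENTRIES (1024)
 * buckets, so tun_hashfn() keeps just the low 10 bits of the rxhash
 * (0x3ff == TUN_NUM_FLOW_ENTRIES - 1).
 */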
39596442e42SJason Wang static inline u32 tun_hashfn(u32 rxhash)
39696442e42SJason Wang {
39796442e42SJason Wang 	return rxhash & 0x3ff;
39896442e42SJason Wang }
39996442e42SJason Wang 
40096442e42SJason Wang static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
40196442e42SJason Wang {
40296442e42SJason Wang 	struct tun_flow_entry *e;
40396442e42SJason Wang 
404b67bfe0dSSasha Levin 	hlist_for_each_entry_rcu(e, head, hash_link) {
40596442e42SJason Wang 		if (e->rxhash == rxhash)
40696442e42SJason Wang 			return e;
40796442e42SJason Wang 	}
40896442e42SJason Wang 	return NULL;
40996442e42SJason Wang }
41096442e42SJason Wang 
41196442e42SJason Wang static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
41296442e42SJason Wang 					      struct hlist_head *head,
41396442e42SJason Wang 					      u32 rxhash, u16 queue_index)
41496442e42SJason Wang {
4159fdc6befSEric Dumazet 	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
4169fdc6befSEric Dumazet 
41796442e42SJason Wang 	if (e) {
41896442e42SJason Wang 		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
41996442e42SJason Wang 			  rxhash, queue_index);
42096442e42SJason Wang 		e->updated = jiffies;
42196442e42SJason Wang 		e->rxhash = rxhash;
4229bc88939STom Herbert 		e->rps_rxhash = 0;
42396442e42SJason Wang 		e->queue_index = queue_index;
42496442e42SJason Wang 		e->tun = tun;
42596442e42SJason Wang 		hlist_add_head_rcu(&e->hash_link, head);
426b8732fb7SJason Wang 		++tun->flow_count;
42796442e42SJason Wang 	}
42896442e42SJason Wang 	return e;
42996442e42SJason Wang }
43096442e42SJason Wang 
43196442e42SJason Wang static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
43296442e42SJason Wang {
43396442e42SJason Wang 	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
43496442e42SJason Wang 		  e->rxhash, e->queue_index);
43596442e42SJason Wang 	hlist_del_rcu(&e->hash_link);
4369fdc6befSEric Dumazet 	kfree_rcu(e, rcu);
437b8732fb7SJason Wang 	--tun->flow_count;
43896442e42SJason Wang }
43996442e42SJason Wang 
44096442e42SJason Wang static void tun_flow_flush(struct tun_struct *tun)
44196442e42SJason Wang {
44296442e42SJason Wang 	int i;
44396442e42SJason Wang 
44496442e42SJason Wang 	spin_lock_bh(&tun->lock);
44596442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
44696442e42SJason Wang 		struct tun_flow_entry *e;
447b67bfe0dSSasha Levin 		struct hlist_node *n;
44896442e42SJason Wang 
449b67bfe0dSSasha Levin 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
45096442e42SJason Wang 			tun_flow_delete(tun, e);
45196442e42SJason Wang 	}
45296442e42SJason Wang 	spin_unlock_bh(&tun->lock);
45396442e42SJason Wang }
45496442e42SJason Wang 
45596442e42SJason Wang static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
45696442e42SJason Wang {
45796442e42SJason Wang 	int i;
45896442e42SJason Wang 
45996442e42SJason Wang 	spin_lock_bh(&tun->lock);
46096442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
46196442e42SJason Wang 		struct tun_flow_entry *e;
462b67bfe0dSSasha Levin 		struct hlist_node *n;
46396442e42SJason Wang 
464b67bfe0dSSasha Levin 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
46596442e42SJason Wang 			if (e->queue_index == queue_index)
46696442e42SJason Wang 				tun_flow_delete(tun, e);
46796442e42SJason Wang 		}
46896442e42SJason Wang 	}
46996442e42SJason Wang 	spin_unlock_bh(&tun->lock);
47096442e42SJason Wang }
47196442e42SJason Wang 
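/* Timer callback that ages out idle flow entries: entries not updated
 * within ageing_time are deleted, and the timer is re-armed for the
 * earliest remaining expiry when any entries are left.
 */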
472e99e88a9SKees Cook static void tun_flow_cleanup(struct timer_list *t)
47396442e42SJason Wang {
474e99e88a9SKees Cook 	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
47596442e42SJason Wang 	unsigned long delay = tun->ageing_time;
47696442e42SJason Wang 	unsigned long next_timer = jiffies + delay;
47796442e42SJason Wang 	unsigned long count = 0;
47896442e42SJason Wang 	int i;
47996442e42SJason Wang 
48096442e42SJason Wang 	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
48196442e42SJason Wang 
4827dbfb4efSEric Dumazet 	spin_lock(&tun->lock);
48396442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
48496442e42SJason Wang 		struct tun_flow_entry *e;
485b67bfe0dSSasha Levin 		struct hlist_node *n;
48696442e42SJason Wang 
487b67bfe0dSSasha Levin 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
48896442e42SJason Wang 			unsigned long this_timer;
48981d98fa4SEric Dumazet 
49096442e42SJason Wang 			this_timer = e->updated + delay;
49181d98fa4SEric Dumazet 			if (time_before_eq(this_timer, jiffies)) {
49296442e42SJason Wang 				tun_flow_delete(tun, e);
49381d98fa4SEric Dumazet 				continue;
49481d98fa4SEric Dumazet 			}
49581d98fa4SEric Dumazet 			count++;
49681d98fa4SEric Dumazet 			if (time_before(this_timer, next_timer))
49796442e42SJason Wang 				next_timer = this_timer;
49896442e42SJason Wang 		}
49996442e42SJason Wang 	}
50096442e42SJason Wang 
50196442e42SJason Wang 	if (count)
50296442e42SJason Wang 		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
5037dbfb4efSEric Dumazet 	spin_unlock(&tun->lock);
50496442e42SJason Wang }
50596442e42SJason Wang 
50649974420SEric Dumazet static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
5079e85722dSJason Wang 			    struct tun_file *tfile)
50896442e42SJason Wang {
50996442e42SJason Wang 	struct hlist_head *head;
51096442e42SJason Wang 	struct tun_flow_entry *e;
51196442e42SJason Wang 	unsigned long delay = tun->ageing_time;
5129e85722dSJason Wang 	u16 queue_index = tfile->queue_index;
51396442e42SJason Wang 
51496442e42SJason Wang 	if (!rxhash)
51596442e42SJason Wang 		return;
51696442e42SJason Wang 	else
51796442e42SJason Wang 		head = &tun->flows[tun_hashfn(rxhash)];
51896442e42SJason Wang 
51996442e42SJason Wang 	rcu_read_lock();
52096442e42SJason Wang 
5219e85722dSJason Wang 	/* There is a small chance of out-of-order delivery while switching
5229e85722dSJason Wang 	 * queues; it is not worth optimizing for. */
5239e85722dSJason Wang 	if (tun->numqueues == 1 || tfile->detached)
52496442e42SJason Wang 		goto unlock;
52596442e42SJason Wang 
52696442e42SJason Wang 	e = tun_flow_find(head, rxhash);
52796442e42SJason Wang 	if (likely(e)) {
52896442e42SJason Wang 		/* TODO: keep queueing to old queue until it's empty? */
52996442e42SJason Wang 		e->queue_index = queue_index;
53096442e42SJason Wang 		e->updated = jiffies;
5319bc88939STom Herbert 		sock_rps_record_flow_hash(e->rps_rxhash);
53296442e42SJason Wang 	} else {
53396442e42SJason Wang 		spin_lock_bh(&tun->lock);
534b8732fb7SJason Wang 		if (!tun_flow_find(head, rxhash) &&
535b8732fb7SJason Wang 		    tun->flow_count < MAX_TAP_FLOWS)
53696442e42SJason Wang 			tun_flow_create(tun, head, rxhash, queue_index);
53796442e42SJason Wang 
53896442e42SJason Wang 		if (!timer_pending(&tun->flow_gc_timer))
53996442e42SJason Wang 			mod_timer(&tun->flow_gc_timer,
54096442e42SJason Wang 				  round_jiffies_up(jiffies + delay));
54196442e42SJason Wang 		spin_unlock_bh(&tun->lock);
54296442e42SJason Wang 	}
54396442e42SJason Wang 
54496442e42SJason Wang unlock:
54596442e42SJason Wang 	rcu_read_unlock();
54696442e42SJason Wang }
54796442e42SJason Wang 
5489bc88939STom Herbert /**
5499bc88939STom Herbert  * Save the hash received in the stack receive path and update the
5509bc88939STom Herbert  * flow_hash table accordingly.
5519bc88939STom Herbert  */
5529bc88939STom Herbert static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
5539bc88939STom Herbert {
554567e4b79SEric Dumazet 	if (unlikely(e->rps_rxhash != hash))
5559bc88939STom Herbert 		e->rps_rxhash = hash;
5569bc88939STom Herbert }
5579bc88939STom Herbert 
558c8d68e6bSJason Wang /* We try to identify a flow through its rxhash first. The reason we
55992d4ea6eSstephen hemminger  * do not check the rxq no. is that some cards (e.g. 82599) choose
560c8d68e6bSJason Wang  * the rxq based on the txq where the last packet of the flow was sent.
561c8d68e6bSJason Wang  * As the userspace application moves between processors, we may get a
562c8d68e6bSJason Wang  * different rxq no. here. If we cannot get an rxhash, then we hope
563c8d68e6bSJason Wang  * the rxq no. may help.
564c8d68e6bSJason Wang  */
56596f84061SJason Wang static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
566c8d68e6bSJason Wang {
56796442e42SJason Wang 	struct tun_flow_entry *e;
568c8d68e6bSJason Wang 	u32 txq = 0;
569c8d68e6bSJason Wang 	u32 numqueues = 0;
570c8d68e6bSJason Wang 
5716aa7de05SMark Rutland 	numqueues = READ_ONCE(tun->numqueues);
572c8d68e6bSJason Wang 
573feec084aSJason Wang 	txq = __skb_get_hash_symmetric(skb);
574c8d68e6bSJason Wang 	if (txq) {
57596442e42SJason Wang 		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
5769bc88939STom Herbert 		if (e) {
5779bc88939STom Herbert 			tun_flow_save_rps_rxhash(e, txq);
578fbe4d456SZhi Yong Wu 			txq = e->queue_index;
5799bc88939STom Herbert 		} else
580c8d68e6bSJason Wang 			/* use multiply and shift instead of expensive divide */
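			/* ((u64)hash * numqueues) >> 32 maps the 32-bit
			 * hash uniformly onto [0, numqueues).
			 */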
581c8d68e6bSJason Wang 			txq = ((u64)txq * numqueues) >> 32;
582c8d68e6bSJason Wang 	} else if (likely(skb_rx_queue_recorded(skb))) {
583c8d68e6bSJason Wang 		txq = skb_get_rx_queue(skb);
584c8d68e6bSJason Wang 		while (unlikely(txq >= numqueues))
585c8d68e6bSJason Wang 			txq -= numqueues;
586c8d68e6bSJason Wang 	}
587c8d68e6bSJason Wang 
588c8d68e6bSJason Wang 	return txq;
589c8d68e6bSJason Wang }
590c8d68e6bSJason Wang 
59196f84061SJason Wang static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
59296f84061SJason Wang {
59396f84061SJason Wang 	struct tun_steering_prog *prog;
59496f84061SJason Wang 	u16 ret = 0;
59596f84061SJason Wang 
59696f84061SJason Wang 	prog = rcu_dereference(tun->steering_prog);
59796f84061SJason Wang 	if (prog)
59896f84061SJason Wang 		ret = bpf_prog_run_clear_cb(prog->prog, skb);
59996f84061SJason Wang 
60096f84061SJason Wang 	return ret % tun->numqueues;
60196f84061SJason Wang }
60296f84061SJason Wang 
60396f84061SJason Wang static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
60496f84061SJason Wang 			    void *accel_priv, select_queue_fallback_t fallback)
60596f84061SJason Wang {
60696f84061SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
60796f84061SJason Wang 	u16 ret;
60896f84061SJason Wang 
60996f84061SJason Wang 	rcu_read_lock();
61096f84061SJason Wang 	if (rcu_dereference(tun->steering_prog))
61196f84061SJason Wang 		ret = tun_ebpf_select_queue(tun, skb);
61296f84061SJason Wang 	else
61396f84061SJason Wang 		ret = tun_automq_select_queue(tun, skb);
61496f84061SJason Wang 	rcu_read_unlock();
61596f84061SJason Wang 
61696f84061SJason Wang 	return ret;
61796f84061SJason Wang }
61896f84061SJason Wang 
619cde8b15fSJason Wang static inline bool tun_not_capable(struct tun_struct *tun)
620cde8b15fSJason Wang {
621cde8b15fSJason Wang 	const struct cred *cred = current_cred();
622c260b772SEric W. Biederman 	struct net *net = dev_net(tun->dev);
623cde8b15fSJason Wang 
624cde8b15fSJason Wang 	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
625cde8b15fSJason Wang 		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
626c260b772SEric W. Biederman 		!ns_capable(net->user_ns, CAP_NET_ADMIN);
627cde8b15fSJason Wang }
628cde8b15fSJason Wang 
629c8d68e6bSJason Wang static void tun_set_real_num_queues(struct tun_struct *tun)
630c8d68e6bSJason Wang {
631c8d68e6bSJason Wang 	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
632c8d68e6bSJason Wang 	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
633c8d68e6bSJason Wang }
634c8d68e6bSJason Wang 
6354008e97fSJason Wang static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
6364008e97fSJason Wang {
6374008e97fSJason Wang 	tfile->detached = tun;
6384008e97fSJason Wang 	list_add_tail(&tfile->next, &tun->disabled);
6394008e97fSJason Wang 	++tun->numdisabled;
6404008e97fSJason Wang }
6414008e97fSJason Wang 
642d32649d1SJason Wang static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
6434008e97fSJason Wang {
6444008e97fSJason Wang 	struct tun_struct *tun = tfile->detached;
6454008e97fSJason Wang 
6464008e97fSJason Wang 	tfile->detached = NULL;
6474008e97fSJason Wang 	list_del_init(&tfile->next);
6484008e97fSJason Wang 	--tun->numdisabled;
6494008e97fSJason Wang 	return tun;
6504008e97fSJason Wang }
6514008e97fSJason Wang 
652*fc72d1d5SJason Wang static void tun_ptr_free(void *ptr)
653*fc72d1d5SJason Wang {
654*fc72d1d5SJason Wang 	if (!ptr)
655*fc72d1d5SJason Wang 		return;
656*fc72d1d5SJason Wang 	if (tun_is_xdp_buff(ptr)) {
657*fc72d1d5SJason Wang 		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
658*fc72d1d5SJason Wang 
659*fc72d1d5SJason Wang 		put_page(virt_to_head_page(xdp->data));
660*fc72d1d5SJason Wang 	} else {
661*fc72d1d5SJason Wang 		__skb_array_destroy_skb(ptr);
662*fc72d1d5SJason Wang 	}
663*fc72d1d5SJason Wang }
664*fc72d1d5SJason Wang 
6654bfb0513SJason Wang static void tun_queue_purge(struct tun_file *tfile)
6664bfb0513SJason Wang {
667*fc72d1d5SJason Wang 	void *ptr;
6681576d986SJason Wang 
669*fc72d1d5SJason Wang 	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
670*fc72d1d5SJason Wang 		tun_ptr_free(ptr);
6711576d986SJason Wang 
6725503fcecSJason Wang 	skb_queue_purge(&tfile->sk.sk_write_queue);
6734bfb0513SJason Wang 	skb_queue_purge(&tfile->sk.sk_error_queue);
6744bfb0513SJason Wang }
6754bfb0513SJason Wang 
676c8d68e6bSJason Wang static void __tun_detach(struct tun_file *tfile, bool clean)
677c8d68e6bSJason Wang {
678c8d68e6bSJason Wang 	struct tun_file *ntfile;
679c8d68e6bSJason Wang 	struct tun_struct *tun;
680c8d68e6bSJason Wang 
681b8deabd3SJason Wang 	tun = rtnl_dereference(tfile->tun);
682b8deabd3SJason Wang 
68394317099SPetar Penkov 	if (tun && clean) {
68494317099SPetar Penkov 		tun_napi_disable(tun, tfile);
68594317099SPetar Penkov 		tun_napi_del(tun, tfile);
68694317099SPetar Penkov 	}
68794317099SPetar Penkov 
6889e85722dSJason Wang 	if (tun && !tfile->detached) {
689c8d68e6bSJason Wang 		u16 index = tfile->queue_index;
690c8d68e6bSJason Wang 		BUG_ON(index >= tun->numqueues);
691c8d68e6bSJason Wang 
692c8d68e6bSJason Wang 		rcu_assign_pointer(tun->tfiles[index],
693c8d68e6bSJason Wang 				   tun->tfiles[tun->numqueues - 1]);
694b8deabd3SJason Wang 		ntfile = rtnl_dereference(tun->tfiles[index]);
695c8d68e6bSJason Wang 		ntfile->queue_index = index;
696c8d68e6bSJason Wang 
697c8d68e6bSJason Wang 		--tun->numqueues;
6989e85722dSJason Wang 		if (clean) {
699c956674bSMonam Agarwal 			RCU_INIT_POINTER(tfile->tun, NULL);
700c8d68e6bSJason Wang 			sock_put(&tfile->sk);
7019e85722dSJason Wang 		} else
7024008e97fSJason Wang 			tun_disable_queue(tun, tfile);
703c8d68e6bSJason Wang 
704c8d68e6bSJason Wang 		synchronize_net();
70596442e42SJason Wang 		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
706c8d68e6bSJason Wang 		/* Drop read queue */
7074bfb0513SJason Wang 		tun_queue_purge(tfile);
708c8d68e6bSJason Wang 		tun_set_real_num_queues(tun);
709dd38bd85SJason Wang 	} else if (tfile->detached && clean) {
7104008e97fSJason Wang 		tun = tun_enable_queue(tfile);
711dd38bd85SJason Wang 		sock_put(&tfile->sk);
712dd38bd85SJason Wang 	}
713c8d68e6bSJason Wang 
714c8d68e6bSJason Wang 	if (clean) {
715af668b3cSMichael S. Tsirkin 		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
716af668b3cSMichael S. Tsirkin 			netif_carrier_off(tun->dev);
717af668b3cSMichael S. Tsirkin 
71840630b82SMichael S. Tsirkin 			if (!(tun->flags & IFF_PERSIST) &&
719af668b3cSMichael S. Tsirkin 			    tun->dev->reg_state == NETREG_REGISTERED)
7204008e97fSJason Wang 				unregister_netdevice(tun->dev);
721af668b3cSMichael S. Tsirkin 		}
7228bf5c4eeSJesper Dangaard Brouer 		if (tun) {
723*fc72d1d5SJason Wang 			ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
7248bf5c4eeSJesper Dangaard Brouer 			xdp_rxq_info_unreg(&tfile->xdp_rxq);
7258bf5c4eeSJesper Dangaard Brouer 		}
726140e807dSEric W. Biederman 		sock_put(&tfile->sk);
727c8d68e6bSJason Wang 	}
728c8d68e6bSJason Wang }
729c8d68e6bSJason Wang 
730c8d68e6bSJason Wang static void tun_detach(struct tun_file *tfile, bool clean)
731c8d68e6bSJason Wang {
732c8d68e6bSJason Wang 	rtnl_lock();
733c8d68e6bSJason Wang 	__tun_detach(tfile, clean);
734c8d68e6bSJason Wang 	rtnl_unlock();
735c8d68e6bSJason Wang }
736c8d68e6bSJason Wang 
737c8d68e6bSJason Wang static void tun_detach_all(struct net_device *dev)
738c8d68e6bSJason Wang {
739c8d68e6bSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
7404008e97fSJason Wang 	struct tun_file *tfile, *tmp;
741c8d68e6bSJason Wang 	int i, n = tun->numqueues;
742c8d68e6bSJason Wang 
743c8d68e6bSJason Wang 	for (i = 0; i < n; i++) {
744b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
745c8d68e6bSJason Wang 		BUG_ON(!tfile);
74694317099SPetar Penkov 		tun_napi_disable(tun, tfile);
747addf8fc4SJason Wang 		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
7489e641bdcSXi Wang 		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
749c956674bSMonam Agarwal 		RCU_INIT_POINTER(tfile->tun, NULL);
750c8d68e6bSJason Wang 		--tun->numqueues;
751c8d68e6bSJason Wang 	}
7529e85722dSJason Wang 	list_for_each_entry(tfile, &tun->disabled, next) {
753addf8fc4SJason Wang 		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
7549e641bdcSXi Wang 		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
755c956674bSMonam Agarwal 		RCU_INIT_POINTER(tfile->tun, NULL);
7569e85722dSJason Wang 	}
757c8d68e6bSJason Wang 	BUG_ON(tun->numqueues != 0);
758c8d68e6bSJason Wang 
759c8d68e6bSJason Wang 	synchronize_net();
760c8d68e6bSJason Wang 	for (i = 0; i < n; i++) {
761b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
76294317099SPetar Penkov 		tun_napi_del(tun, tfile);
763c8d68e6bSJason Wang 		/* Drop read queue */
7644bfb0513SJason Wang 		tun_queue_purge(tfile);
7658bf5c4eeSJesper Dangaard Brouer 		xdp_rxq_info_unreg(&tfile->xdp_rxq);
766c8d68e6bSJason Wang 		sock_put(&tfile->sk);
767c8d68e6bSJason Wang 	}
7684008e97fSJason Wang 	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
7694008e97fSJason Wang 		tun_enable_queue(tfile);
7704bfb0513SJason Wang 		tun_queue_purge(tfile);
7718bf5c4eeSJesper Dangaard Brouer 		xdp_rxq_info_unreg(&tfile->xdp_rxq);
7724008e97fSJason Wang 		sock_put(&tfile->sk);
7734008e97fSJason Wang 	}
7744008e97fSJason Wang 	BUG_ON(tun->numdisabled != 0);
775dd38bd85SJason Wang 
77640630b82SMichael S. Tsirkin 	if (tun->flags & IFF_PERSIST)
777dd38bd85SJason Wang 		module_put(THIS_MODULE);
778c8d68e6bSJason Wang }
779c8d68e6bSJason Wang 
78094317099SPetar Penkov static int tun_attach(struct tun_struct *tun, struct file *file,
78194317099SPetar Penkov 		      bool skip_filter, bool napi)
782a7385ba2SEric W. Biederman {
783631ab46bSEric W. Biederman 	struct tun_file *tfile = file->private_data;
7841576d986SJason Wang 	struct net_device *dev = tun->dev;
78538231b7aSEric W. Biederman 	int err;
786a7385ba2SEric W. Biederman 
7875dbbaf2dSPaul Moore 	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
7885dbbaf2dSPaul Moore 	if (err < 0)
7895dbbaf2dSPaul Moore 		goto out;
7905dbbaf2dSPaul Moore 
79138231b7aSEric W. Biederman 	err = -EINVAL;
7929e85722dSJason Wang 	if (rtnl_dereference(tfile->tun) && !tfile->detached)
79338231b7aSEric W. Biederman 		goto out;
79438231b7aSEric W. Biederman 
79538231b7aSEric W. Biederman 	err = -EBUSY;
79640630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
797c8d68e6bSJason Wang 		goto out;
798c8d68e6bSJason Wang 
799c8d68e6bSJason Wang 	err = -E2BIG;
8004008e97fSJason Wang 	if (!tfile->detached &&
8014008e97fSJason Wang 	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
80238231b7aSEric W. Biederman 		goto out;
80338231b7aSEric W. Biederman 
80438231b7aSEric W. Biederman 	err = 0;
80554f968d6SJason Wang 
80692d4ea6eSstephen hemminger 	/* Re-attach the filter to the persistent device */
807849c9b6fSPavel Emelyanov 	if (!skip_filter && (tun->filter_attached == true)) {
8088ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
8098ced425eSHannes Frederic Sowa 		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
8108ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
81154f968d6SJason Wang 		if (!err)
81254f968d6SJason Wang 			goto out;
81354f968d6SJason Wang 	}
8141576d986SJason Wang 
8151576d986SJason Wang 	if (!tfile->detached &&
8165990a305SJason Wang 	    ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
8171576d986SJason Wang 		err = -ENOMEM;
8181576d986SJason Wang 		goto out;
8191576d986SJason Wang 	}
8201576d986SJason Wang 
821c8d68e6bSJason Wang 	tfile->queue_index = tun->numqueues;
822addf8fc4SJason Wang 	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
8238bf5c4eeSJesper Dangaard Brouer 
8248bf5c4eeSJesper Dangaard Brouer 	if (tfile->detached) {
8258bf5c4eeSJesper Dangaard Brouer 		/* Re-attach detached tfile, updating XDP queue_index */
8268bf5c4eeSJesper Dangaard Brouer 		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
8278bf5c4eeSJesper Dangaard Brouer 
8288bf5c4eeSJesper Dangaard Brouer 		if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
8298bf5c4eeSJesper Dangaard Brouer 			tfile->xdp_rxq.queue_index = tfile->queue_index;
8308bf5c4eeSJesper Dangaard Brouer 	} else {
8318bf5c4eeSJesper Dangaard Brouer 		/* Setup XDP RX-queue info, for new tfile getting attached */
8328bf5c4eeSJesper Dangaard Brouer 		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
8338bf5c4eeSJesper Dangaard Brouer 				       tun->dev, tfile->queue_index);
8348bf5c4eeSJesper Dangaard Brouer 		if (err < 0)
8358bf5c4eeSJesper Dangaard Brouer 			goto out;
8368bf5c4eeSJesper Dangaard Brouer 		err = 0;
8378bf5c4eeSJesper Dangaard Brouer 	}
8388bf5c4eeSJesper Dangaard Brouer 
8396e914fc7SJason Wang 	rcu_assign_pointer(tfile->tun, tun);
840c8d68e6bSJason Wang 	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
841c8d68e6bSJason Wang 	tun->numqueues++;
842c8d68e6bSJason Wang 
84394317099SPetar Penkov 	if (tfile->detached) {
8444008e97fSJason Wang 		tun_enable_queue(tfile);
84594317099SPetar Penkov 	} else {
8464008e97fSJason Wang 		sock_hold(&tfile->sk);
84794317099SPetar Penkov 		tun_napi_init(tun, tfile, napi);
84894317099SPetar Penkov 	}
8494008e97fSJason Wang 
850c8d68e6bSJason Wang 	tun_set_real_num_queues(tun);
851c8d68e6bSJason Wang 
852c8d68e6bSJason Wang 	/* device is allowed to go away first, so no need to hold extra
853c8d68e6bSJason Wang 	 * refcnt.
854c8d68e6bSJason Wang 	 */
855a7385ba2SEric W. Biederman 
85638231b7aSEric W. Biederman out:
85738231b7aSEric W. Biederman 	return err;
858a7385ba2SEric W. Biederman }
859a7385ba2SEric W. Biederman 
8609484dc74Syuan linyu static struct tun_struct *tun_get(struct tun_file *tfile)
861631ab46bSEric W. Biederman {
8626e914fc7SJason Wang 	struct tun_struct *tun;
863c70f1829SEric W. Biederman 
8646e914fc7SJason Wang 	rcu_read_lock();
8656e914fc7SJason Wang 	tun = rcu_dereference(tfile->tun);
8666e914fc7SJason Wang 	if (tun)
8676e914fc7SJason Wang 		dev_hold(tun->dev);
8686e914fc7SJason Wang 	rcu_read_unlock();
869c70f1829SEric W. Biederman 
870c70f1829SEric W. Biederman 	return tun;
871631ab46bSEric W. Biederman }
872631ab46bSEric W. Biederman 
873631ab46bSEric W. Biederman static void tun_put(struct tun_struct *tun)
874631ab46bSEric W. Biederman {
8756e914fc7SJason Wang 	dev_put(tun->dev);
876631ab46bSEric W. Biederman }
877631ab46bSEric W. Biederman 
8786b8a66eeSJoe Perches /* TAP filtering */
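/* Multicast addresses beyond the exact-match slots are tracked in a
 * 64-bit hash bitmap: the top 6 bits of the Ethernet CRC of an address
 * select one of the 64 bits spread over mask[2].
 */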
879f271b2ccSMax Krasnyansky static void addr_hash_set(u32 *mask, const u8 *addr)
880f271b2ccSMax Krasnyansky {
881f271b2ccSMax Krasnyansky 	int n = ether_crc(ETH_ALEN, addr) >> 26;
882f271b2ccSMax Krasnyansky 	mask[n >> 5] |= (1 << (n & 31));
883f271b2ccSMax Krasnyansky }
884f271b2ccSMax Krasnyansky 
885f271b2ccSMax Krasnyansky static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
886f271b2ccSMax Krasnyansky {
887f271b2ccSMax Krasnyansky 	int n = ether_crc(ETH_ALEN, addr) >> 26;
888f271b2ccSMax Krasnyansky 	return mask[n >> 5] & (1 << (n & 31));
889f271b2ccSMax Krasnyansky }
890f271b2ccSMax Krasnyansky 
891f271b2ccSMax Krasnyansky static int update_filter(struct tap_filter *filter, void __user *arg)
892f271b2ccSMax Krasnyansky {
893f271b2ccSMax Krasnyansky 	struct { u8 u[ETH_ALEN]; } *addr;
894f271b2ccSMax Krasnyansky 	struct tun_filter uf;
895f271b2ccSMax Krasnyansky 	int err, alen, n, nexact;
896f271b2ccSMax Krasnyansky 
897f271b2ccSMax Krasnyansky 	if (copy_from_user(&uf, arg, sizeof(uf)))
898f271b2ccSMax Krasnyansky 		return -EFAULT;
899f271b2ccSMax Krasnyansky 
900f271b2ccSMax Krasnyansky 	if (!uf.count) {
901f271b2ccSMax Krasnyansky 		/* Disabled */
902f271b2ccSMax Krasnyansky 		filter->count = 0;
903f271b2ccSMax Krasnyansky 		return 0;
904f271b2ccSMax Krasnyansky 	}
905f271b2ccSMax Krasnyansky 
906f271b2ccSMax Krasnyansky 	alen = ETH_ALEN * uf.count;
90728e8190dSMarkus Elfring 	addr = memdup_user(arg + sizeof(uf), alen);
90828e8190dSMarkus Elfring 	if (IS_ERR(addr))
90928e8190dSMarkus Elfring 		return PTR_ERR(addr);
910f271b2ccSMax Krasnyansky 
911f271b2ccSMax Krasnyansky 	/* The filter is updated without holding any locks. Which is
912f271b2ccSMax Krasnyansky 	 * perfectly safe. We disable it first and in the worst
913f271b2ccSMax Krasnyansky 	 * case we'll accept a few undesired packets. */
914f271b2ccSMax Krasnyansky 	filter->count = 0;
915f271b2ccSMax Krasnyansky 	wmb();
916f271b2ccSMax Krasnyansky 
917f271b2ccSMax Krasnyansky 	/* Use first set of addresses as an exact filter */
918f271b2ccSMax Krasnyansky 	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
919f271b2ccSMax Krasnyansky 		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
920f271b2ccSMax Krasnyansky 
921f271b2ccSMax Krasnyansky 	nexact = n;
922f271b2ccSMax Krasnyansky 
923cfbf84fcSAlex Williamson 	/* Remaining multicast addresses are hashed; a unicast address
924cfbf84fcSAlex Williamson 	 * will leave the filter disabled. */
925f271b2ccSMax Krasnyansky 	memset(filter->mask, 0, sizeof(filter->mask));
926cfbf84fcSAlex Williamson 	for (; n < uf.count; n++) {
927cfbf84fcSAlex Williamson 		if (!is_multicast_ether_addr(addr[n].u)) {
928cfbf84fcSAlex Williamson 			err = 0; /* no filter */
9293b8d2a69SMarkus Elfring 			goto free_addr;
930cfbf84fcSAlex Williamson 		}
931f271b2ccSMax Krasnyansky 		addr_hash_set(filter->mask, addr[n].u);
932cfbf84fcSAlex Williamson 	}
933f271b2ccSMax Krasnyansky 
934f271b2ccSMax Krasnyansky 	/* For ALLMULTI just set the mask to all ones.
935f271b2ccSMax Krasnyansky 	 * This overrides the mask populated above. */
936f271b2ccSMax Krasnyansky 	if ((uf.flags & TUN_FLT_ALLMULTI))
937f271b2ccSMax Krasnyansky 		memset(filter->mask, ~0, sizeof(filter->mask));
938f271b2ccSMax Krasnyansky 
939f271b2ccSMax Krasnyansky 	/* Now enable the filter */
940f271b2ccSMax Krasnyansky 	wmb();
941f271b2ccSMax Krasnyansky 	filter->count = nexact;
942f271b2ccSMax Krasnyansky 
943f271b2ccSMax Krasnyansky 	/* Return the number of exact filters */
944f271b2ccSMax Krasnyansky 	err = nexact;
9453b8d2a69SMarkus Elfring free_addr:
946f271b2ccSMax Krasnyansky 	kfree(addr);
947f271b2ccSMax Krasnyansky 	return err;
948f271b2ccSMax Krasnyansky }
949f271b2ccSMax Krasnyansky 
950f271b2ccSMax Krasnyansky /* Returns: 0 - drop, !=0 - accept */
951f271b2ccSMax Krasnyansky static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
952f271b2ccSMax Krasnyansky {
953f271b2ccSMax Krasnyansky 	/* Cannot use eth_hdr(skb) here because skb_mac_header() is not
954f271b2ccSMax Krasnyansky 	 * set correctly at this point. */
955f271b2ccSMax Krasnyansky 	struct ethhdr *eh = (struct ethhdr *) skb->data;
956f271b2ccSMax Krasnyansky 	int i;
957f271b2ccSMax Krasnyansky 
958f271b2ccSMax Krasnyansky 	/* Exact match */
959f271b2ccSMax Krasnyansky 	for (i = 0; i < filter->count; i++)
9602e42e474SJoe Perches 		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
961f271b2ccSMax Krasnyansky 			return 1;
962f271b2ccSMax Krasnyansky 
963f271b2ccSMax Krasnyansky 	/* Inexact match (multicast only) */
964f271b2ccSMax Krasnyansky 	if (is_multicast_ether_addr(eh->h_dest))
965f271b2ccSMax Krasnyansky 		return addr_hash_test(filter->mask, eh->h_dest);
966f271b2ccSMax Krasnyansky 
967f271b2ccSMax Krasnyansky 	return 0;
968f271b2ccSMax Krasnyansky }
969f271b2ccSMax Krasnyansky 
970f271b2ccSMax Krasnyansky /*
971f271b2ccSMax Krasnyansky  * Checks whether the packet is accepted or not.
972f271b2ccSMax Krasnyansky  * Returns: 0 - drop, !=0 - accept
973f271b2ccSMax Krasnyansky  */
974f271b2ccSMax Krasnyansky static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
975f271b2ccSMax Krasnyansky {
976f271b2ccSMax Krasnyansky 	if (!filter->count)
977f271b2ccSMax Krasnyansky 		return 1;
978f271b2ccSMax Krasnyansky 
979f271b2ccSMax Krasnyansky 	return run_filter(filter, skb);
980f271b2ccSMax Krasnyansky }
981f271b2ccSMax Krasnyansky 
9821da177e4SLinus Torvalds /* Network device part of the driver */
9831da177e4SLinus Torvalds 
9847282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops;
9851da177e4SLinus Torvalds 
986c70f1829SEric W. Biederman /* Net device detach from fd. */
987c70f1829SEric W. Biederman static void tun_net_uninit(struct net_device *dev)
988c70f1829SEric W. Biederman {
989c8d68e6bSJason Wang 	tun_detach_all(dev);
990c70f1829SEric W. Biederman }
991c70f1829SEric W. Biederman 
9921da177e4SLinus Torvalds /* Net device open. */
9931da177e4SLinus Torvalds static int tun_net_open(struct net_device *dev)
9941da177e4SLinus Torvalds {
995b20e2d54SHannes Frederic Sowa 	struct tun_struct *tun = netdev_priv(dev);
996b20e2d54SHannes Frederic Sowa 	int i;
997b20e2d54SHannes Frederic Sowa 
998c8d68e6bSJason Wang 	netif_tx_start_all_queues(dev);
999b20e2d54SHannes Frederic Sowa 
1000b20e2d54SHannes Frederic Sowa 	for (i = 0; i < tun->numqueues; i++) {
1001b20e2d54SHannes Frederic Sowa 		struct tun_file *tfile;
1002b20e2d54SHannes Frederic Sowa 
1003b20e2d54SHannes Frederic Sowa 		tfile = rtnl_dereference(tun->tfiles[i]);
1004b20e2d54SHannes Frederic Sowa 		tfile->socket.sk->sk_write_space(tfile->socket.sk);
1005b20e2d54SHannes Frederic Sowa 	}
1006b20e2d54SHannes Frederic Sowa 
10071da177e4SLinus Torvalds 	return 0;
10081da177e4SLinus Torvalds }
10091da177e4SLinus Torvalds 
10101da177e4SLinus Torvalds /* Net device close. */
10111da177e4SLinus Torvalds static int tun_net_close(struct net_device *dev)
10121da177e4SLinus Torvalds {
1013c8d68e6bSJason Wang 	netif_tx_stop_all_queues(dev);
10141da177e4SLinus Torvalds 	return 0;
10151da177e4SLinus Torvalds }
10161da177e4SLinus Torvalds 
10171da177e4SLinus Torvalds /* Record the flow's RPS hash for automatic queue steering */
101896f84061SJason Wang static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
10191da177e4SLinus Torvalds {
10203df97ba8SJason Wang #ifdef CONFIG_RPS
102196f84061SJason Wang 	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
10229bc88939STom Herbert 		/* Select queue was not called for the skbuff, so we extract the
10239bc88939STom Herbert 		 * RPS hash and save it into the flow_table here.
10249bc88939STom Herbert 		 */
10259bc88939STom Herbert 		__u32 rxhash;
10269bc88939STom Herbert 
1027feec084aSJason Wang 		rxhash = __skb_get_hash_symmetric(skb);
10289bc88939STom Herbert 		if (rxhash) {
10299bc88939STom Herbert 			struct tun_flow_entry *e;
10309bc88939STom Herbert 			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
10319bc88939STom Herbert 					rxhash);
10329bc88939STom Herbert 			if (e)
10339bc88939STom Herbert 				tun_flow_save_rps_rxhash(e, rxhash);
10349bc88939STom Herbert 		}
10359bc88939STom Herbert 	}
10363df97ba8SJason Wang #endif
103796f84061SJason Wang }
103896f84061SJason Wang 
103996f84061SJason Wang /* Net device start xmit */
104096f84061SJason Wang static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
104196f84061SJason Wang {
104296f84061SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
104396f84061SJason Wang 	int txq = skb->queue_mapping;
104496f84061SJason Wang 	struct tun_file *tfile;
104596f84061SJason Wang 
104696f84061SJason Wang 	rcu_read_lock();
104796f84061SJason Wang 	tfile = rcu_dereference(tun->tfiles[txq]);
104896f84061SJason Wang 
104996f84061SJason Wang 	/* Drop packet if interface is not attached */
1050cc166427SWillem de Bruijn 	if (txq >= tun->numqueues)
105196f84061SJason Wang 		goto drop;
105296f84061SJason Wang 
105396f84061SJason Wang 	if (!rcu_dereference(tun->steering_prog))
105496f84061SJason Wang 		tun_automq_xmit(tun, skb);
10559bc88939STom Herbert 
10566e914fc7SJason Wang 	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
10576e914fc7SJason Wang 
1058c8d68e6bSJason Wang 	BUG_ON(!tfile);
1059c8d68e6bSJason Wang 
1060f271b2ccSMax Krasnyansky 	/* Drop if the filter does not like it.
1061f271b2ccSMax Krasnyansky 	 * This is a noop if the filter is disabled.
1062f271b2ccSMax Krasnyansky 	 * Filter can be enabled only for the TAP devices. */
1063f271b2ccSMax Krasnyansky 	if (!check_filter(&tun->txflt, skb))
1064f271b2ccSMax Krasnyansky 		goto drop;
1065f271b2ccSMax Krasnyansky 
106654f968d6SJason Wang 	if (tfile->socket.sk->sk_filter &&
106754f968d6SJason Wang 	    sk_filter(tfile->socket.sk, skb))
106899405162SMichael S. Tsirkin 		goto drop;
106999405162SMichael S. Tsirkin 
10701f8b977aSWillem de Bruijn 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
10717bf66305SJason Wang 		goto drop;
10727bf66305SJason Wang 
10737b996243SSoheil Hassas Yeganeh 	skb_tx_timestamp(skb);
1074eda29772SRichard Cochran 
10750110d6f2SMichael S. Tsirkin 	/* Orphan the skb - required as we might hang on to it
10767bf66305SJason Wang 	 * for indefinite time.
10777bf66305SJason Wang 	 */
10780110d6f2SMichael S. Tsirkin 	skb_orphan(skb);
10790110d6f2SMichael S. Tsirkin 
1080f8af75f3SEric Dumazet 	nf_reset(skb);
1081f8af75f3SEric Dumazet 
10825990a305SJason Wang 	if (ptr_ring_produce(&tfile->tx_ring, skb))
10831576d986SJason Wang 		goto drop;
10841da177e4SLinus Torvalds 
10851da177e4SLinus Torvalds 	/* Notify and wake up reader process */
108654f968d6SJason Wang 	if (tfile->flags & TUN_FASYNC)
108754f968d6SJason Wang 		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
10889e641bdcSXi Wang 	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
10896e914fc7SJason Wang 
10906e914fc7SJason Wang 	rcu_read_unlock();
10916ed10654SPatrick McHardy 	return NETDEV_TX_OK;
10921da177e4SLinus Torvalds 
10931da177e4SLinus Torvalds drop:
1094608b9977SPaolo Abeni 	this_cpu_inc(tun->pcpu_stats->tx_dropped);
1095149d36f7SMichael S. Tsirkin 	skb_tx_error(skb);
10961da177e4SLinus Torvalds 	kfree_skb(skb);
10976e914fc7SJason Wang 	rcu_read_unlock();
1098baeababbSJason Wang 	return NET_XMIT_DROP;
10991da177e4SLinus Torvalds }
11001da177e4SLinus Torvalds 
1101f271b2ccSMax Krasnyansky static void tun_net_mclist(struct net_device *dev)
11021da177e4SLinus Torvalds {
1103f271b2ccSMax Krasnyansky 	/*
1104f271b2ccSMax Krasnyansky 	 * This callback is supposed to deal with mc filter in
1105f271b2ccSMax Krasnyansky 	 * _rx_ path and has nothing to do with the _tx_ path.
1106f271b2ccSMax Krasnyansky 	 * In rx path we always accept everything userspace gives us.
1107f271b2ccSMax Krasnyansky 	 */
11081da177e4SLinus Torvalds }
11091da177e4SLinus Torvalds 
1110c8f44affSMichał Mirosław static netdev_features_t tun_net_fix_features(struct net_device *dev,
1111c8f44affSMichał Mirosław 	netdev_features_t features)
111288255375SMichał Mirosław {
111388255375SMichał Mirosław 	struct tun_struct *tun = netdev_priv(dev);
111488255375SMichał Mirosław 
111588255375SMichał Mirosław 	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
111688255375SMichał Mirosław }
1117bebd097aSNeil Horman #ifdef CONFIG_NET_POLL_CONTROLLER
1118bebd097aSNeil Horman static void tun_poll_controller(struct net_device *dev)
1119bebd097aSNeil Horman {
1120bebd097aSNeil Horman 	/*
1121bebd097aSNeil Horman 	 * Tun only receives frames when:
1122bebd097aSNeil Horman 	 * 1) the char device endpoint gets data from user space
1123bebd097aSNeil Horman 	 * 2) the tun socket gets a sendmsg call from user space
112494317099SPetar Penkov 	 * If NAPI is not enabled, since both of those are synchronous
112594317099SPetar Penkov 	 * operations, we are guaranteed never to have pending data when we poll
112694317099SPetar Penkov 	 * for it so there is nothing to do here but return.
1127bebd097aSNeil Horman 	 * We need this though so netpoll recognizes us as an interface that
1128bebd097aSNeil Horman 	 * supports polling, which enables bridge devices in virt setups to
1129bebd097aSNeil Horman 	 * still use netconsole
113094317099SPetar Penkov 	 * If NAPI is enabled, however, we need to schedule polling for all
113190e33d45SPetar Penkov 	 * queues unless we are using napi_gro_frags(), which we call in
113290e33d45SPetar Penkov 	 * process context and not in NAPI context.
1133bebd097aSNeil Horman 	 */
113494317099SPetar Penkov 	struct tun_struct *tun = netdev_priv(dev);
113594317099SPetar Penkov 
113694317099SPetar Penkov 	if (tun->flags & IFF_NAPI) {
113794317099SPetar Penkov 		struct tun_file *tfile;
113894317099SPetar Penkov 		int i;
113994317099SPetar Penkov 
114090e33d45SPetar Penkov 		if (tun_napi_frags_enabled(tun))
114190e33d45SPetar Penkov 			return;
114290e33d45SPetar Penkov 
114394317099SPetar Penkov 		rcu_read_lock();
114494317099SPetar Penkov 		for (i = 0; i < tun->numqueues; i++) {
114594317099SPetar Penkov 			tfile = rcu_dereference(tun->tfiles[i]);
1146aec72f33SEric Dumazet 			if (tfile->napi_enabled)
114794317099SPetar Penkov 				napi_schedule(&tfile->napi);
114894317099SPetar Penkov 		}
114994317099SPetar Penkov 		rcu_read_unlock();
115094317099SPetar Penkov 	}
1151bebd097aSNeil Horman 	return;
1152bebd097aSNeil Horman }
1153bebd097aSNeil Horman #endif
1154eaea34b2SPaolo Abeni 
1155eaea34b2SPaolo Abeni static void tun_set_headroom(struct net_device *dev, int new_hr)
1156eaea34b2SPaolo Abeni {
1157eaea34b2SPaolo Abeni 	struct tun_struct *tun = netdev_priv(dev);
1158eaea34b2SPaolo Abeni 
1159eaea34b2SPaolo Abeni 	if (new_hr < NET_SKB_PAD)
1160eaea34b2SPaolo Abeni 		new_hr = NET_SKB_PAD;
1161eaea34b2SPaolo Abeni 
1162eaea34b2SPaolo Abeni 	tun->align = new_hr;
1163eaea34b2SPaolo Abeni }
1164eaea34b2SPaolo Abeni 
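/* Fold the per-cpu counters into a single rtnl_link_stats64. The 64-bit
 * packet/byte counters are read under the u64_stats seqcount and re-read if
 * an update raced with us; the u32 drop/error counters are simply summed.
 */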
1165bc1f4470Sstephen hemminger static void
1166608b9977SPaolo Abeni tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1167608b9977SPaolo Abeni {
1168608b9977SPaolo Abeni 	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
1169608b9977SPaolo Abeni 	struct tun_struct *tun = netdev_priv(dev);
1170608b9977SPaolo Abeni 	struct tun_pcpu_stats *p;
1171608b9977SPaolo Abeni 	int i;
1172608b9977SPaolo Abeni 
1173608b9977SPaolo Abeni 	for_each_possible_cpu(i) {
1174608b9977SPaolo Abeni 		u64 rxpackets, rxbytes, txpackets, txbytes;
1175608b9977SPaolo Abeni 		unsigned int start;
1176608b9977SPaolo Abeni 
1177608b9977SPaolo Abeni 		p = per_cpu_ptr(tun->pcpu_stats, i);
1178608b9977SPaolo Abeni 		do {
1179608b9977SPaolo Abeni 			start = u64_stats_fetch_begin(&p->syncp);
1180608b9977SPaolo Abeni 			rxpackets	= p->rx_packets;
1181608b9977SPaolo Abeni 			rxbytes		= p->rx_bytes;
1182608b9977SPaolo Abeni 			txpackets	= p->tx_packets;
1183608b9977SPaolo Abeni 			txbytes		= p->tx_bytes;
1184608b9977SPaolo Abeni 		} while (u64_stats_fetch_retry(&p->syncp, start));
1185608b9977SPaolo Abeni 
1186608b9977SPaolo Abeni 		stats->rx_packets	+= rxpackets;
1187608b9977SPaolo Abeni 		stats->rx_bytes		+= rxbytes;
1188608b9977SPaolo Abeni 		stats->tx_packets	+= txpackets;
1189608b9977SPaolo Abeni 		stats->tx_bytes		+= txbytes;
1190608b9977SPaolo Abeni 
1191608b9977SPaolo Abeni 		/* u32 counters */
1192608b9977SPaolo Abeni 		rx_dropped	+= p->rx_dropped;
1193608b9977SPaolo Abeni 		rx_frame_errors	+= p->rx_frame_errors;
1194608b9977SPaolo Abeni 		tx_dropped	+= p->tx_dropped;
1195608b9977SPaolo Abeni 	}
1196608b9977SPaolo Abeni 	stats->rx_dropped  = rx_dropped;
1197608b9977SPaolo Abeni 	stats->rx_frame_errors = rx_frame_errors;
1198608b9977SPaolo Abeni 	stats->tx_dropped = tx_dropped;
1199608b9977SPaolo Abeni }
1200608b9977SPaolo Abeni 
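/* Install (or clear, when prog is NULL) the XDP program. The previous
 * program, if any, is released with bpf_prog_put() once the new pointer has
 * been published.
 */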
1201761876c8SJason Wang static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1202761876c8SJason Wang 		       struct netlink_ext_ack *extack)
1203761876c8SJason Wang {
1204761876c8SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1205761876c8SJason Wang 	struct bpf_prog *old_prog;
1206761876c8SJason Wang 
1207761876c8SJason Wang 	old_prog = rtnl_dereference(tun->xdp_prog);
1208761876c8SJason Wang 	rcu_assign_pointer(tun->xdp_prog, prog);
1209761876c8SJason Wang 	if (old_prog)
1210761876c8SJason Wang 		bpf_prog_put(old_prog);
1211761876c8SJason Wang 
1212761876c8SJason Wang 	return 0;
1213761876c8SJason Wang }
1214761876c8SJason Wang 
1215761876c8SJason Wang static u32 tun_xdp_query(struct net_device *dev)
1216761876c8SJason Wang {
1217761876c8SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1218761876c8SJason Wang 	const struct bpf_prog *xdp_prog;
1219761876c8SJason Wang 
1220761876c8SJason Wang 	xdp_prog = rtnl_dereference(tun->xdp_prog);
1221761876c8SJason Wang 	if (xdp_prog)
1222761876c8SJason Wang 		return xdp_prog->aux->id;
1223761876c8SJason Wang 
1224761876c8SJason Wang 	return 0;
1225761876c8SJason Wang }
1226761876c8SJason Wang 
1227f4e63525SJakub Kicinski static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1228761876c8SJason Wang {
1229761876c8SJason Wang 	switch (xdp->command) {
1230761876c8SJason Wang 	case XDP_SETUP_PROG:
1231761876c8SJason Wang 		return tun_xdp_set(dev, xdp->prog, xdp->extack);
1232761876c8SJason Wang 	case XDP_QUERY_PROG:
1233761876c8SJason Wang 		xdp->prog_id = tun_xdp_query(dev);
1234761876c8SJason Wang 		xdp->prog_attached = !!xdp->prog_id;
1235761876c8SJason Wang 		return 0;
1236761876c8SJason Wang 	default:
1237761876c8SJason Wang 		return -EINVAL;
1238761876c8SJason Wang 	}
1239761876c8SJason Wang }
1240761876c8SJason Wang 
1241758e43b7SStephen Hemminger static const struct net_device_ops tun_netdev_ops = {
1242c70f1829SEric W. Biederman 	.ndo_uninit		= tun_net_uninit,
1243758e43b7SStephen Hemminger 	.ndo_open		= tun_net_open,
1244758e43b7SStephen Hemminger 	.ndo_stop		= tun_net_close,
124500829823SStephen Hemminger 	.ndo_start_xmit		= tun_net_xmit,
124688255375SMichał Mirosław 	.ndo_fix_features	= tun_net_fix_features,
1247c8d68e6bSJason Wang 	.ndo_select_queue	= tun_select_queue,
1248bebd097aSNeil Horman #ifdef CONFIG_NET_POLL_CONTROLLER
1249bebd097aSNeil Horman 	.ndo_poll_controller	= tun_poll_controller,
1250bebd097aSNeil Horman #endif
1251eaea34b2SPaolo Abeni 	.ndo_set_rx_headroom	= tun_set_headroom,
1252608b9977SPaolo Abeni 	.ndo_get_stats64	= tun_net_get_stats64,
1253758e43b7SStephen Hemminger };
1254758e43b7SStephen Hemminger 
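/* ndo_xdp_xmit for redirected XDP frames. A copy of the xdp_buff descriptor
 * is stored in the buffer's own headroom and the bit-tagged pointer (see the
 * comment below) is pushed onto the selected queue's tx ptr_ring;
 * tun_xdp_flush() later wakes the reader. Dropped frames are accounted as
 * tx_dropped.
 */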
1255*fc72d1d5SJason Wang static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
1256*fc72d1d5SJason Wang {
1257*fc72d1d5SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1258*fc72d1d5SJason Wang 	struct xdp_buff *buff = xdp->data_hard_start;
1259*fc72d1d5SJason Wang 	int headroom = xdp->data - xdp->data_hard_start;
1260*fc72d1d5SJason Wang 	struct tun_file *tfile;
1261*fc72d1d5SJason Wang 	u32 numqueues;
1262*fc72d1d5SJason Wang 	int ret = 0;
1263*fc72d1d5SJason Wang 
1264*fc72d1d5SJason Wang 	/* Ensure headroom is available and buff is properly aligned */
1265*fc72d1d5SJason Wang 	if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp)))
1266*fc72d1d5SJason Wang 		return -ENOSPC;
1267*fc72d1d5SJason Wang 
1268*fc72d1d5SJason Wang 	*buff = *xdp;
1269*fc72d1d5SJason Wang 
1270*fc72d1d5SJason Wang 	rcu_read_lock();
1271*fc72d1d5SJason Wang 
1272*fc72d1d5SJason Wang 	numqueues = READ_ONCE(tun->numqueues);
1273*fc72d1d5SJason Wang 	if (!numqueues) {
1274*fc72d1d5SJason Wang 		ret = -ENOSPC;
1275*fc72d1d5SJason Wang 		goto out;
1276*fc72d1d5SJason Wang 	}
1277*fc72d1d5SJason Wang 
1278*fc72d1d5SJason Wang 	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1279*fc72d1d5SJason Wang 					    numqueues]);
1280*fc72d1d5SJason Wang 	/* Encode the XDP flag into the lowest bit so the consumer can
1281*fc72d1d5SJason Wang 	 * distinguish an XDP buffer from an sk_buff.
1282*fc72d1d5SJason Wang 	 */
1283*fc72d1d5SJason Wang 	if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) {
1284*fc72d1d5SJason Wang 		this_cpu_inc(tun->pcpu_stats->tx_dropped);
1285*fc72d1d5SJason Wang 		ret = -ENOSPC;
1286*fc72d1d5SJason Wang 	}
1287*fc72d1d5SJason Wang 
1288*fc72d1d5SJason Wang out:
1289*fc72d1d5SJason Wang 	rcu_read_unlock();
1290*fc72d1d5SJason Wang 	return ret;
1291*fc72d1d5SJason Wang }
1292*fc72d1d5SJason Wang 
1293*fc72d1d5SJason Wang static void tun_xdp_flush(struct net_device *dev)
1294*fc72d1d5SJason Wang {
1295*fc72d1d5SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1296*fc72d1d5SJason Wang 	struct tun_file *tfile;
1297*fc72d1d5SJason Wang 	u32 numqueues;
1298*fc72d1d5SJason Wang 
1299*fc72d1d5SJason Wang 	rcu_read_lock();
1300*fc72d1d5SJason Wang 
1301*fc72d1d5SJason Wang 	numqueues = READ_ONCE(tun->numqueues);
1302*fc72d1d5SJason Wang 	if (!numqueues)
1303*fc72d1d5SJason Wang 		goto out;
1304*fc72d1d5SJason Wang 
1305*fc72d1d5SJason Wang 	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1306*fc72d1d5SJason Wang 					    numqueues]);
1307*fc72d1d5SJason Wang 	/* Notify and wake up reader process */
1308*fc72d1d5SJason Wang 	if (tfile->flags & TUN_FASYNC)
1309*fc72d1d5SJason Wang 		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1310*fc72d1d5SJason Wang 	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1311*fc72d1d5SJason Wang 
1312*fc72d1d5SJason Wang out:
1313*fc72d1d5SJason Wang 	rcu_read_unlock();
1314*fc72d1d5SJason Wang }
1315*fc72d1d5SJason Wang 
1316758e43b7SStephen Hemminger static const struct net_device_ops tap_netdev_ops = {
1317c70f1829SEric W. Biederman 	.ndo_uninit		= tun_net_uninit,
1318758e43b7SStephen Hemminger 	.ndo_open		= tun_net_open,
1319758e43b7SStephen Hemminger 	.ndo_stop		= tun_net_close,
132000829823SStephen Hemminger 	.ndo_start_xmit		= tun_net_xmit,
132188255375SMichał Mirosław 	.ndo_fix_features	= tun_net_fix_features,
1322afc4b13dSJiri Pirko 	.ndo_set_rx_mode	= tun_net_mclist,
1323758e43b7SStephen Hemminger 	.ndo_set_mac_address	= eth_mac_addr,
1324758e43b7SStephen Hemminger 	.ndo_validate_addr	= eth_validate_addr,
1325c8d68e6bSJason Wang 	.ndo_select_queue	= tun_select_queue,
1326bebd097aSNeil Horman #ifdef CONFIG_NET_POLL_CONTROLLER
1327bebd097aSNeil Horman 	.ndo_poll_controller	= tun_poll_controller,
1328bebd097aSNeil Horman #endif
13295e52796aSToshiaki Makita 	.ndo_features_check	= passthru_features_check,
1330eaea34b2SPaolo Abeni 	.ndo_set_rx_headroom	= tun_set_headroom,
1331608b9977SPaolo Abeni 	.ndo_get_stats64	= tun_net_get_stats64,
1332f4e63525SJakub Kicinski 	.ndo_bpf		= tun_xdp,
1333*fc72d1d5SJason Wang 	.ndo_xdp_xmit		= tun_xdp_xmit,
1334*fc72d1d5SJason Wang 	.ndo_xdp_flush		= tun_xdp_flush,
1335758e43b7SStephen Hemminger };
1336758e43b7SStephen Hemminger 
1337944a1376SPavel Emelyanov static void tun_flow_init(struct tun_struct *tun)
133896442e42SJason Wang {
133996442e42SJason Wang 	int i;
134096442e42SJason Wang 
134196442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
134296442e42SJason Wang 		INIT_HLIST_HEAD(&tun->flows[i]);
134396442e42SJason Wang 
134496442e42SJason Wang 	tun->ageing_time = TUN_FLOW_EXPIRE;
1345e99e88a9SKees Cook 	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1346e99e88a9SKees Cook 	mod_timer(&tun->flow_gc_timer,
1347e99e88a9SKees Cook 		  round_jiffies_up(jiffies + tun->ageing_time));
134896442e42SJason Wang }
134996442e42SJason Wang 
135096442e42SJason Wang static void tun_flow_uninit(struct tun_struct *tun)
135196442e42SJason Wang {
135296442e42SJason Wang 	del_timer_sync(&tun->flow_gc_timer);
135396442e42SJason Wang 	tun_flow_flush(tun);
135496442e42SJason Wang }
135596442e42SJason Wang 
135691572088SJarod Wilson #define MIN_MTU 68
135791572088SJarod Wilson #define MAX_MTU 65535
135891572088SJarod Wilson 
13591da177e4SLinus Torvalds /* Initialize net device. */
13601da177e4SLinus Torvalds static void tun_net_init(struct net_device *dev)
13611da177e4SLinus Torvalds {
13621da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
13631da177e4SLinus Torvalds 
13641da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
136540630b82SMichael S. Tsirkin 	case IFF_TUN:
1366758e43b7SStephen Hemminger 		dev->netdev_ops = &tun_netdev_ops;
1367758e43b7SStephen Hemminger 
13681da177e4SLinus Torvalds 		/* Point-to-Point TUN Device */
13691da177e4SLinus Torvalds 		dev->hard_header_len = 0;
13701da177e4SLinus Torvalds 		dev->addr_len = 0;
13711da177e4SLinus Torvalds 		dev->mtu = 1500;
13721da177e4SLinus Torvalds 
13731da177e4SLinus Torvalds 		/* Zero header length */
13741da177e4SLinus Torvalds 		dev->type = ARPHRD_NONE;
13751da177e4SLinus Torvalds 		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
13761da177e4SLinus Torvalds 		break;
13771da177e4SLinus Torvalds 
137840630b82SMichael S. Tsirkin 	case IFF_TAP:
13797a0a9608SKusanagi Kouichi 		dev->netdev_ops = &tap_netdev_ops;
13801da177e4SLinus Torvalds 		/* Ethernet TAP Device */
13811da177e4SLinus Torvalds 		ether_setup(dev);
1382550fd08cSNeil Horman 		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1383a676847bSstephen hemminger 		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
138436226a8dSBrian Braunstein 
1385f2cedb63SDanny Kukawka 		eth_hw_addr_random(dev);
138636226a8dSBrian Braunstein 
13871da177e4SLinus Torvalds 		break;
13881da177e4SLinus Torvalds 	}
138991572088SJarod Wilson 
139091572088SJarod Wilson 	dev->min_mtu = MIN_MTU;
139191572088SJarod Wilson 	dev->max_mtu = MAX_MTU - dev->hard_header_len;
13921da177e4SLinus Torvalds }
13931da177e4SLinus Torvalds 
13941da177e4SLinus Torvalds /* Character device part */
13951da177e4SLinus Torvalds 
13961da177e4SLinus Torvalds /* Poll */
13971da177e4SLinus Torvalds static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
13981da177e4SLinus Torvalds {
1399b2430de3SEric W. Biederman 	struct tun_file *tfile = file->private_data;
14009484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
14013c8a9c63SMariusz Kozlowski 	struct sock *sk;
140233dccbb0SHerbert Xu 	unsigned int mask = 0;
14031da177e4SLinus Torvalds 
14041da177e4SLinus Torvalds 	if (!tun)
1405eac9e902SEric W. Biederman 		return POLLERR;
14061da177e4SLinus Torvalds 
140754f968d6SJason Wang 	sk = tfile->socket.sk;
14083c8a9c63SMariusz Kozlowski 
14096b8a66eeSJoe Perches 	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
14101da177e4SLinus Torvalds 
14119e641bdcSXi Wang 	poll_wait(file, sk_sleep(sk), wait);
14121da177e4SLinus Torvalds 
14135990a305SJason Wang 	if (!ptr_ring_empty(&tfile->tx_ring))
14141da177e4SLinus Torvalds 		mask |= POLLIN | POLLRDNORM;
14151da177e4SLinus Torvalds 
1416b20e2d54SHannes Frederic Sowa 	if (tun->dev->flags & IFF_UP &&
1417b20e2d54SHannes Frederic Sowa 	    (sock_writeable(sk) ||
14189cd3e072SEric Dumazet 	     (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1419b20e2d54SHannes Frederic Sowa 	      sock_writeable(sk))))
142033dccbb0SHerbert Xu 		mask |= POLLOUT | POLLWRNORM;
142133dccbb0SHerbert Xu 
1422c70f1829SEric W. Biederman 	if (tun->dev->reg_state != NETREG_REGISTERED)
1423c70f1829SEric W. Biederman 		mask = POLLERR;
1424c70f1829SEric W. Biederman 
1425631ab46bSEric W. Biederman 	tun_put(tun);
14261da177e4SLinus Torvalds 	return mask;
14271da177e4SLinus Torvalds }
14281da177e4SLinus Torvalds 
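/* Build an skb for the IFF_NAPI_FRAGS path: grow the napi_get_frags() skb to
 * cover the first iovec segment and attach a freshly allocated page fragment
 * for each remaining segment; the payload itself is copied in later by the
 * caller.
 */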
142990e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
143090e33d45SPetar Penkov 					    size_t len,
143190e33d45SPetar Penkov 					    const struct iov_iter *it)
143290e33d45SPetar Penkov {
143390e33d45SPetar Penkov 	struct sk_buff *skb;
143490e33d45SPetar Penkov 	size_t linear;
143590e33d45SPetar Penkov 	int err;
143690e33d45SPetar Penkov 	int i;
143790e33d45SPetar Penkov 
143890e33d45SPetar Penkov 	if (it->nr_segs > MAX_SKB_FRAGS + 1)
143990e33d45SPetar Penkov 		return ERR_PTR(-ENOMEM);
144090e33d45SPetar Penkov 
144190e33d45SPetar Penkov 	local_bh_disable();
144290e33d45SPetar Penkov 	skb = napi_get_frags(&tfile->napi);
144390e33d45SPetar Penkov 	local_bh_enable();
144490e33d45SPetar Penkov 	if (!skb)
144590e33d45SPetar Penkov 		return ERR_PTR(-ENOMEM);
144690e33d45SPetar Penkov 
144790e33d45SPetar Penkov 	linear = iov_iter_single_seg_count(it);
144890e33d45SPetar Penkov 	err = __skb_grow(skb, linear);
144990e33d45SPetar Penkov 	if (err)
145090e33d45SPetar Penkov 		goto free;
145190e33d45SPetar Penkov 
145290e33d45SPetar Penkov 	skb->len = len;
145390e33d45SPetar Penkov 	skb->data_len = len - linear;
145490e33d45SPetar Penkov 	skb->truesize += skb->data_len;
145590e33d45SPetar Penkov 
145690e33d45SPetar Penkov 	for (i = 1; i < it->nr_segs; i++) {
145790e33d45SPetar Penkov 		size_t fragsz = it->iov[i].iov_len;
145890e33d45SPetar Penkov 		unsigned long offset;
145990e33d45SPetar Penkov 		struct page *page;
146090e33d45SPetar Penkov 		void *data;
146190e33d45SPetar Penkov 
146290e33d45SPetar Penkov 		if (fragsz == 0 || fragsz > PAGE_SIZE) {
146390e33d45SPetar Penkov 			err = -EINVAL;
146490e33d45SPetar Penkov 			goto free;
146590e33d45SPetar Penkov 		}
146690e33d45SPetar Penkov 
146790e33d45SPetar Penkov 		local_bh_disable();
146890e33d45SPetar Penkov 		data = napi_alloc_frag(fragsz);
146990e33d45SPetar Penkov 		local_bh_enable();
147090e33d45SPetar Penkov 		if (!data) {
147190e33d45SPetar Penkov 			err = -ENOMEM;
147290e33d45SPetar Penkov 			goto free;
147390e33d45SPetar Penkov 		}
147490e33d45SPetar Penkov 
147590e33d45SPetar Penkov 		page = virt_to_head_page(data);
147690e33d45SPetar Penkov 		offset = data - page_address(page);
147790e33d45SPetar Penkov 		skb_fill_page_desc(skb, i - 1, page, offset, fragsz);
147890e33d45SPetar Penkov 	}
147990e33d45SPetar Penkov 
148090e33d45SPetar Penkov 	return skb;
148190e33d45SPetar Penkov free:
148290e33d45SPetar Penkov 	/* frees skb and all frags allocated with napi_alloc_frag() */
148390e33d45SPetar Penkov 	napi_free_frags(&tfile->napi);
148490e33d45SPetar Penkov 	return ERR_PTR(err);
148590e33d45SPetar Penkov }
148690e33d45SPetar Penkov 
1487f42157cbSRusty Russell /* prepad is the amount to reserve at front.  len is length after that.
1488f42157cbSRusty Russell  * linear is a hint as to how much to copy (usually headers). */
148954f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
149033dccbb0SHerbert Xu 				     size_t prepad, size_t len,
149133dccbb0SHerbert Xu 				     size_t linear, int noblock)
1492f42157cbSRusty Russell {
149354f968d6SJason Wang 	struct sock *sk = tfile->socket.sk;
1494f42157cbSRusty Russell 	struct sk_buff *skb;
149533dccbb0SHerbert Xu 	int err;
1496f42157cbSRusty Russell 
1497f42157cbSRusty Russell 	/* Under a page?  Don't bother with paged skb. */
14980eca93bcSHerbert Xu 	if (prepad + len < PAGE_SIZE || !linear)
149933dccbb0SHerbert Xu 		linear = len;
1500f42157cbSRusty Russell 
150133dccbb0SHerbert Xu 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
150228d64271SEric Dumazet 				   &err, 0);
1503f42157cbSRusty Russell 	if (!skb)
150433dccbb0SHerbert Xu 		return ERR_PTR(err);
1505f42157cbSRusty Russell 
1506f42157cbSRusty Russell 	skb_reserve(skb, prepad);
1507f42157cbSRusty Russell 	skb_put(skb, linear);
150833dccbb0SHerbert Xu 	skb->data_len = len - linear;
150933dccbb0SHerbert Xu 	skb->len += len - linear;
1510f42157cbSRusty Russell 
1511f42157cbSRusty Russell 	return skb;
1512f42157cbSRusty Russell }
1513f42157cbSRusty Russell 
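/* Deliver received skbs to the stack, optionally coalescing them. When
 * rx_batched is set and the sender signalled more data to come, skbs are
 * parked on sk_write_queue and flushed in one batch once the queue reaches
 * rx_batched entries or the final segment arrives.
 */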
15145503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
15155503fcecSJason Wang 			   struct sk_buff *skb, int more)
15165503fcecSJason Wang {
15175503fcecSJason Wang 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
15185503fcecSJason Wang 	struct sk_buff_head process_queue;
15195503fcecSJason Wang 	u32 rx_batched = tun->rx_batched;
15205503fcecSJason Wang 	bool rcv = false;
15215503fcecSJason Wang 
15225503fcecSJason Wang 	if (!rx_batched || (!more && skb_queue_empty(queue))) {
15235503fcecSJason Wang 		local_bh_disable();
15245503fcecSJason Wang 		netif_receive_skb(skb);
15255503fcecSJason Wang 		local_bh_enable();
15265503fcecSJason Wang 		return;
15275503fcecSJason Wang 	}
15285503fcecSJason Wang 
15295503fcecSJason Wang 	spin_lock(&queue->lock);
15305503fcecSJason Wang 	if (!more || skb_queue_len(queue) == rx_batched) {
15315503fcecSJason Wang 		__skb_queue_head_init(&process_queue);
15325503fcecSJason Wang 		skb_queue_splice_tail_init(queue, &process_queue);
15335503fcecSJason Wang 		rcv = true;
15345503fcecSJason Wang 	} else {
15355503fcecSJason Wang 		__skb_queue_tail(queue, skb);
15365503fcecSJason Wang 	}
15375503fcecSJason Wang 	spin_unlock(&queue->lock);
15385503fcecSJason Wang 
15395503fcecSJason Wang 	if (rcv) {
15405503fcecSJason Wang 		struct sk_buff *nskb;
15415503fcecSJason Wang 
15425503fcecSJason Wang 		local_bh_disable();
15435503fcecSJason Wang 		while ((nskb = __skb_dequeue(&process_queue)))
15445503fcecSJason Wang 			netif_receive_skb(nskb);
15455503fcecSJason Wang 		netif_receive_skb(skb);
15465503fcecSJason Wang 		local_bh_enable();
15475503fcecSJason Wang 	}
15485503fcecSJason Wang }
15495503fcecSJason Wang 
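/* Decide whether tun_build_skb() can be used for this packet: only in TAP
 * mode, with the default sndbuf, non-blocking writes, no zerocopy, and only
 * if the frame plus padding and shared info still fits in a single page.
 */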
155066ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
155166ccbc9cSJason Wang 			      int len, int noblock, bool zerocopy)
155266ccbc9cSJason Wang {
155366ccbc9cSJason Wang 	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
155466ccbc9cSJason Wang 		return false;
155566ccbc9cSJason Wang 
155666ccbc9cSJason Wang 	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
155766ccbc9cSJason Wang 		return false;
155866ccbc9cSJason Wang 
155966ccbc9cSJason Wang 	if (!noblock)
156066ccbc9cSJason Wang 		return false;
156166ccbc9cSJason Wang 
156266ccbc9cSJason Wang 	if (zerocopy)
156366ccbc9cSJason Wang 		return false;
156466ccbc9cSJason Wang 
156566ccbc9cSJason Wang 	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
156666ccbc9cSJason Wang 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
156766ccbc9cSJason Wang 		return false;
156866ccbc9cSJason Wang 
156966ccbc9cSJason Wang 	return true;
157066ccbc9cSJason Wang }
157166ccbc9cSJason Wang 
1572761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1573761876c8SJason Wang 				     struct tun_file *tfile,
157466ccbc9cSJason Wang 				     struct iov_iter *from,
1575761876c8SJason Wang 				     struct virtio_net_hdr *hdr,
15761cfe6e93SJason Wang 				     int len, int *skb_xdp)
157766ccbc9cSJason Wang {
15780bbd7dadSEric Dumazet 	struct page_frag *alloc_frag = &current->task_frag;
157966ccbc9cSJason Wang 	struct sk_buff *skb;
1580761876c8SJason Wang 	struct bpf_prog *xdp_prog;
15817df13219SJason Wang 	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1582761876c8SJason Wang 	unsigned int delta = 0;
158366ccbc9cSJason Wang 	char *buf;
158466ccbc9cSJason Wang 	size_t copied;
1585761876c8SJason Wang 	bool xdp_xmit = false;
15867df13219SJason Wang 	int err, pad = TUN_RX_PAD;
15877df13219SJason Wang 
15887df13219SJason Wang 	rcu_read_lock();
15897df13219SJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
15907df13219SJason Wang 	if (xdp_prog)
15917df13219SJason Wang 		pad += TUN_HEADROOM;
15927df13219SJason Wang 	buflen += SKB_DATA_ALIGN(len + pad);
15937df13219SJason Wang 	rcu_read_unlock();
159466ccbc9cSJason Wang 
159563b9ab65SJason Wang 	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
159666ccbc9cSJason Wang 	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
159766ccbc9cSJason Wang 		return ERR_PTR(-ENOMEM);
159866ccbc9cSJason Wang 
159966ccbc9cSJason Wang 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
160066ccbc9cSJason Wang 	copied = copy_page_from_iter(alloc_frag->page,
16017df13219SJason Wang 				     alloc_frag->offset + pad,
160266ccbc9cSJason Wang 				     len, from);
160366ccbc9cSJason Wang 	if (copied != len)
160466ccbc9cSJason Wang 		return ERR_PTR(-EFAULT);
160566ccbc9cSJason Wang 
16067df13219SJason Wang 	/* There's a small window in which XDP may be set after the check
16077df13219SJason Wang 	 * of xdp_prog above; this should be rare, and for simplicity we
16087df13219SJason Wang 	 * do XDP on the skb in case the headroom is not enough.
16097df13219SJason Wang 	 */
16107df13219SJason Wang 	if (hdr->gso_type || !xdp_prog)
16111cfe6e93SJason Wang 		*skb_xdp = 1;
1612761876c8SJason Wang 	else
16131cfe6e93SJason Wang 		*skb_xdp = 0;
161466ccbc9cSJason Wang 
1615761876c8SJason Wang 	rcu_read_lock();
1616761876c8SJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
16171cfe6e93SJason Wang 	if (xdp_prog && !*skb_xdp) {
1618761876c8SJason Wang 		struct xdp_buff xdp;
1619761876c8SJason Wang 		void *orig_data;
1620761876c8SJason Wang 		u32 act;
1621761876c8SJason Wang 
1622761876c8SJason Wang 		xdp.data_hard_start = buf;
16237df13219SJason Wang 		xdp.data = buf + pad;
1624de8f3a83SDaniel Borkmann 		xdp_set_data_meta_invalid(&xdp);
1625761876c8SJason Wang 		xdp.data_end = xdp.data + len;
16268bf5c4eeSJesper Dangaard Brouer 		xdp.rxq = &tfile->xdp_rxq;
1627761876c8SJason Wang 		orig_data = xdp.data;
1628761876c8SJason Wang 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
1629761876c8SJason Wang 
1630761876c8SJason Wang 		switch (act) {
1631761876c8SJason Wang 		case XDP_REDIRECT:
1632761876c8SJason Wang 			get_page(alloc_frag->page);
1633761876c8SJason Wang 			alloc_frag->offset += buflen;
1634761876c8SJason Wang 			err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
1635761876c8SJason Wang 			if (err)
1636761876c8SJason Wang 				goto err_redirect;
1637654d5738SXin Long 			rcu_read_unlock();
1638761876c8SJason Wang 			return NULL;
1639761876c8SJason Wang 		case XDP_TX:
1640761876c8SJason Wang 			xdp_xmit = true;
1641761876c8SJason Wang 			/* fall through */
1642761876c8SJason Wang 		case XDP_PASS:
1643761876c8SJason Wang 			delta = orig_data - xdp.data;
1644761876c8SJason Wang 			break;
1645761876c8SJason Wang 		default:
1646761876c8SJason Wang 			bpf_warn_invalid_xdp_action(act);
1647761876c8SJason Wang 			/* fall through */
1648761876c8SJason Wang 		case XDP_ABORTED:
1649761876c8SJason Wang 			trace_xdp_exception(tun->dev, xdp_prog, act);
1650761876c8SJason Wang 			/* fall through */
1651761876c8SJason Wang 		case XDP_DROP:
1652761876c8SJason Wang 			goto err_xdp;
1653761876c8SJason Wang 		}
1654761876c8SJason Wang 	}
1655761876c8SJason Wang 
1656761876c8SJason Wang 	skb = build_skb(buf, buflen);
1657761876c8SJason Wang 	if (!skb) {
1658761876c8SJason Wang 		rcu_read_unlock();
1659761876c8SJason Wang 		return ERR_PTR(-ENOMEM);
1660761876c8SJason Wang 	}
1661761876c8SJason Wang 
16627df13219SJason Wang 	skb_reserve(skb, pad - delta);
1663761876c8SJason Wang 	skb_put(skb, len + delta);
166466ccbc9cSJason Wang 	get_page(alloc_frag->page);
166566ccbc9cSJason Wang 	alloc_frag->offset += buflen;
166666ccbc9cSJason Wang 
1667761876c8SJason Wang 	if (xdp_xmit) {
1668761876c8SJason Wang 		skb->dev = tun->dev;
1669761876c8SJason Wang 		generic_xdp_tx(skb, xdp_prog);
1670654d5738SXin Long 		rcu_read_unlock();
1671761876c8SJason Wang 		return NULL;
1672761876c8SJason Wang 	}
1673761876c8SJason Wang 
1674761876c8SJason Wang 	rcu_read_unlock();
1675761876c8SJason Wang 
167666ccbc9cSJason Wang 	return skb;
1677761876c8SJason Wang 
1678761876c8SJason Wang err_redirect:
1679761876c8SJason Wang 	put_page(alloc_frag->page);
1680761876c8SJason Wang err_xdp:
1681761876c8SJason Wang 	rcu_read_unlock();
1682761876c8SJason Wang 	this_cpu_inc(tun->pcpu_stats->rx_dropped);
1683761876c8SJason Wang 	return NULL;
168466ccbc9cSJason Wang }
168566ccbc9cSJason Wang 
16861da177e4SLinus Torvalds /* Get packet from user space buffer */
168754f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1688f5ff53b4SAl Viro 			    void *msg_control, struct iov_iter *from,
16895503fcecSJason Wang 			    int noblock, bool more)
16901da177e4SLinus Torvalds {
169109640e63SHarvey Harrison 	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
16921da177e4SLinus Torvalds 	struct sk_buff *skb;
1693f5ff53b4SAl Viro 	size_t total_len = iov_iter_count(from);
1694eaea34b2SPaolo Abeni 	size_t len = total_len, align = tun->align, linear;
1695f43798c2SRusty Russell 	struct virtio_net_hdr gso = { 0 };
1696608b9977SPaolo Abeni 	struct tun_pcpu_stats *stats;
169796f8d9ecSJason Wang 	int good_linear;
16980690899bSMichael S. Tsirkin 	int copylen;
16990690899bSMichael S. Tsirkin 	bool zerocopy = false;
17000690899bSMichael S. Tsirkin 	int err;
170196f84061SJason Wang 	u32 rxhash = 0;
17021cfe6e93SJason Wang 	int skb_xdp = 1;
170390e33d45SPetar Penkov 	bool frags = tun_napi_frags_enabled(tun);
17041da177e4SLinus Torvalds 
17051bd4978aSEric Dumazet 	if (!(tun->dev->flags & IFF_UP))
17061bd4978aSEric Dumazet 		return -EIO;
17071bd4978aSEric Dumazet 
170840630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_NO_PI)) {
170915718ea0SDan Carpenter 		if (len < sizeof(pi))
17101da177e4SLinus Torvalds 			return -EINVAL;
171115718ea0SDan Carpenter 		len -= sizeof(pi);
17121da177e4SLinus Torvalds 
1713cbbd26b8SAl Viro 		if (!copy_from_iter_full(&pi, sizeof(pi), from))
17141da177e4SLinus Torvalds 			return -EFAULT;
17151da177e4SLinus Torvalds 	}
17161da177e4SLinus Torvalds 
171740630b82SMichael S. Tsirkin 	if (tun->flags & IFF_VNET_HDR) {
1718e1edab87SWillem de Bruijn 		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1719e1edab87SWillem de Bruijn 
1720e1edab87SWillem de Bruijn 		if (len < vnet_hdr_sz)
1721f43798c2SRusty Russell 			return -EINVAL;
1722e1edab87SWillem de Bruijn 		len -= vnet_hdr_sz;
1723f43798c2SRusty Russell 
1724cbbd26b8SAl Viro 		if (!copy_from_iter_full(&gso, sizeof(gso), from))
1725f43798c2SRusty Russell 			return -EFAULT;
1726f43798c2SRusty Russell 
17274909122fSHerbert Xu 		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
172856f0dcc5SMichael S. Tsirkin 		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
172956f0dcc5SMichael S. Tsirkin 			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
17304909122fSHerbert Xu 
173156f0dcc5SMichael S. Tsirkin 		if (tun16_to_cpu(tun, gso.hdr_len) > len)
1732f43798c2SRusty Russell 			return -EINVAL;
1733e1edab87SWillem de Bruijn 		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1734f43798c2SRusty Russell 	}
1735f43798c2SRusty Russell 
173640630b82SMichael S. Tsirkin 	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1737a504b86eSstephen hemminger 		align += NET_IP_ALIGN;
17380eca93bcSHerbert Xu 		if (unlikely(len < ETH_HLEN ||
173956f0dcc5SMichael S. Tsirkin 			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1740e01bf1c8SRusty Russell 			return -EINVAL;
1741e01bf1c8SRusty Russell 	}
17421da177e4SLinus Torvalds 
174396f8d9ecSJason Wang 	good_linear = SKB_MAX_HEAD(align);
174496f8d9ecSJason Wang 
174588529176SJason Wang 	if (msg_control) {
1746f5ff53b4SAl Viro 		struct iov_iter i = *from;
1747f5ff53b4SAl Viro 
174888529176SJason Wang 		/* There are 256 bytes to be copied into the skb, so there is
174988529176SJason Wang 		 * enough room to expand the skb head in case it is needed.
17500690899bSMichael S. Tsirkin 		 * The rest of the buffer is mapped from userspace.
17510690899bSMichael S. Tsirkin 		 */
175256f0dcc5SMichael S. Tsirkin 		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
175396f8d9ecSJason Wang 		if (copylen > good_linear)
175496f8d9ecSJason Wang 			copylen = good_linear;
17553dd5c330SJason Wang 		linear = copylen;
1756f5ff53b4SAl Viro 		iov_iter_advance(&i, copylen);
1757f5ff53b4SAl Viro 		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
175888529176SJason Wang 			zerocopy = true;
175988529176SJason Wang 	}
176088529176SJason Wang 
176190e33d45SPetar Penkov 	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
17621cfe6e93SJason Wang 		/* For packets that are not easy to process
17631cfe6e93SJason Wang 		 * (e.g. gso or jumbo packets), we do XDP after the
17641cfe6e93SJason Wang 		 * skb has been created, using the generic XDP routine.
17651cfe6e93SJason Wang 		 */
17661cfe6e93SJason Wang 		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
176766ccbc9cSJason Wang 		if (IS_ERR(skb)) {
176866ccbc9cSJason Wang 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
176966ccbc9cSJason Wang 			return PTR_ERR(skb);
177066ccbc9cSJason Wang 		}
1771761876c8SJason Wang 		if (!skb)
1772761876c8SJason Wang 			return total_len;
177366ccbc9cSJason Wang 	} else {
177488529176SJason Wang 		if (!zerocopy) {
17750690899bSMichael S. Tsirkin 			copylen = len;
177656f0dcc5SMichael S. Tsirkin 			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
177796f8d9ecSJason Wang 				linear = good_linear;
177896f8d9ecSJason Wang 			else
177956f0dcc5SMichael S. Tsirkin 				linear = tun16_to_cpu(tun, gso.hdr_len);
17803dd5c330SJason Wang 		}
17810690899bSMichael S. Tsirkin 
178290e33d45SPetar Penkov 		if (frags) {
178390e33d45SPetar Penkov 			mutex_lock(&tfile->napi_mutex);
178490e33d45SPetar Penkov 			skb = tun_napi_alloc_frags(tfile, copylen, from);
178590e33d45SPetar Penkov 			/* tun_napi_alloc_frags() enforces a layout for the skb.
178690e33d45SPetar Penkov 			 * If zerocopy is enabled, then this layout will be
178790e33d45SPetar Penkov 			 * overwritten by zerocopy_sg_from_iter().
178890e33d45SPetar Penkov 			 */
178990e33d45SPetar Penkov 			zerocopy = false;
179090e33d45SPetar Penkov 		} else {
179190e33d45SPetar Penkov 			skb = tun_alloc_skb(tfile, align, copylen, linear,
179290e33d45SPetar Penkov 					    noblock);
179390e33d45SPetar Penkov 		}
179490e33d45SPetar Penkov 
179533dccbb0SHerbert Xu 		if (IS_ERR(skb)) {
179633dccbb0SHerbert Xu 			if (PTR_ERR(skb) != -EAGAIN)
1797608b9977SPaolo Abeni 				this_cpu_inc(tun->pcpu_stats->rx_dropped);
179890e33d45SPetar Penkov 			if (frags)
179990e33d45SPetar Penkov 				mutex_unlock(&tfile->napi_mutex);
180033dccbb0SHerbert Xu 			return PTR_ERR(skb);
18011da177e4SLinus Torvalds 		}
18021da177e4SLinus Torvalds 
18030690899bSMichael S. Tsirkin 		if (zerocopy)
1804f5ff53b4SAl Viro 			err = zerocopy_sg_from_iter(skb, from);
1805af1cc7a2SJason Wang 		else
1806f5ff53b4SAl Viro 			err = skb_copy_datagram_from_iter(skb, 0, from, len);
18070690899bSMichael S. Tsirkin 
18080690899bSMichael S. Tsirkin 		if (err) {
1809608b9977SPaolo Abeni 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
18108f22757eSDave Jones 			kfree_skb(skb);
181190e33d45SPetar Penkov 			if (frags) {
181290e33d45SPetar Penkov 				tfile->napi.skb = NULL;
181390e33d45SPetar Penkov 				mutex_unlock(&tfile->napi_mutex);
181490e33d45SPetar Penkov 			}
181590e33d45SPetar Penkov 
18161da177e4SLinus Torvalds 			return -EFAULT;
18178f22757eSDave Jones 		}
181866ccbc9cSJason Wang 	}
18191da177e4SLinus Torvalds 
18203e9e40e7SJarno Rajahalme 	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1821df10db98SPaolo Abeni 		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1822df10db98SPaolo Abeni 		kfree_skb(skb);
182390e33d45SPetar Penkov 		if (frags) {
182490e33d45SPetar Penkov 			tfile->napi.skb = NULL;
182590e33d45SPetar Penkov 			mutex_unlock(&tfile->napi_mutex);
182690e33d45SPetar Penkov 		}
182790e33d45SPetar Penkov 
1828df10db98SPaolo Abeni 		return -EINVAL;
1829df10db98SPaolo Abeni 	}
1830df10db98SPaolo Abeni 
18311da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
183240630b82SMichael S. Tsirkin 	case IFF_TUN:
183340630b82SMichael S. Tsirkin 		if (tun->flags & IFF_NO_PI) {
18342580c4c1SAlexander Potapenko 			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
18352580c4c1SAlexander Potapenko 
18362580c4c1SAlexander Potapenko 			switch (ip_version) {
18372580c4c1SAlexander Potapenko 			case 4:
1838f09f7ee2SAng Way Chuang 				pi.proto = htons(ETH_P_IP);
1839f09f7ee2SAng Way Chuang 				break;
18402580c4c1SAlexander Potapenko 			case 6:
1841f09f7ee2SAng Way Chuang 				pi.proto = htons(ETH_P_IPV6);
1842f09f7ee2SAng Way Chuang 				break;
1843f09f7ee2SAng Way Chuang 			default:
1844608b9977SPaolo Abeni 				this_cpu_inc(tun->pcpu_stats->rx_dropped);
1845f09f7ee2SAng Way Chuang 				kfree_skb(skb);
1846f09f7ee2SAng Way Chuang 				return -EINVAL;
1847f09f7ee2SAng Way Chuang 			}
1848f09f7ee2SAng Way Chuang 		}
1849f09f7ee2SAng Way Chuang 
1850459a98edSArnaldo Carvalho de Melo 		skb_reset_mac_header(skb);
18511da177e4SLinus Torvalds 		skb->protocol = pi.proto;
18524c13eb66SArnaldo Carvalho de Melo 		skb->dev = tun->dev;
18531da177e4SLinus Torvalds 		break;
185440630b82SMichael S. Tsirkin 	case IFF_TAP:
185590e33d45SPetar Penkov 		if (!frags)
18561da177e4SLinus Torvalds 			skb->protocol = eth_type_trans(skb, tun->dev);
18571da177e4SLinus Torvalds 		break;
18586403eab1SJoe Perches 	}
18591da177e4SLinus Torvalds 
18600690899bSMichael S. Tsirkin 	/* copy skb_ubuf_info for callback when skb has no error */
18610690899bSMichael S. Tsirkin 	if (zerocopy) {
18620690899bSMichael S. Tsirkin 		skb_shinfo(skb)->destructor_arg = msg_control;
18630690899bSMichael S. Tsirkin 		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1864c9af6db4SPravin B Shelar 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1865af1cc7a2SJason Wang 	} else if (msg_control) {
1866af1cc7a2SJason Wang 		struct ubuf_info *uarg = msg_control;
1867af1cc7a2SJason Wang 		uarg->callback(uarg, false);
18680690899bSMichael S. Tsirkin 	}
18690690899bSMichael S. Tsirkin 
187072f65107SVlad Yasevich 	skb_reset_network_header(skb);
187140893fd0SJason Wang 	skb_probe_transport_header(skb, 0);
187238502af7SJason Wang 
18731cfe6e93SJason Wang 	if (skb_xdp) {
1874761876c8SJason Wang 		struct bpf_prog *xdp_prog;
1875761876c8SJason Wang 		int ret;
1876761876c8SJason Wang 
1877761876c8SJason Wang 		rcu_read_lock();
1878761876c8SJason Wang 		xdp_prog = rcu_dereference(tun->xdp_prog);
1879761876c8SJason Wang 		if (xdp_prog) {
1880761876c8SJason Wang 			ret = do_xdp_generic(xdp_prog, skb);
1881761876c8SJason Wang 			if (ret != XDP_PASS) {
1882761876c8SJason Wang 				rcu_read_unlock();
1883761876c8SJason Wang 				return total_len;
1884761876c8SJason Wang 			}
1885761876c8SJason Wang 		}
1886761876c8SJason Wang 		rcu_read_unlock();
1887761876c8SJason Wang 	}
1888761876c8SJason Wang 
188996f84061SJason Wang 	rcu_read_lock();
189096f84061SJason Wang 	if (!rcu_dereference(tun->steering_prog))
1891feec084aSJason Wang 		rxhash = __skb_get_hash_symmetric(skb);
189296f84061SJason Wang 	rcu_read_unlock();
189394317099SPetar Penkov 
189490e33d45SPetar Penkov 	if (frags) {
189590e33d45SPetar Penkov 		/* Exercise flow dissector code path. */
189690e33d45SPetar Penkov 		u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
189790e33d45SPetar Penkov 
1898010f245bSEric Dumazet 		if (unlikely(headlen > skb_headlen(skb))) {
189990e33d45SPetar Penkov 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
190090e33d45SPetar Penkov 			napi_free_frags(&tfile->napi);
190190e33d45SPetar Penkov 			mutex_unlock(&tfile->napi_mutex);
190290e33d45SPetar Penkov 			WARN_ON(1);
190390e33d45SPetar Penkov 			return -ENOMEM;
190490e33d45SPetar Penkov 		}
190590e33d45SPetar Penkov 
190690e33d45SPetar Penkov 		local_bh_disable();
190790e33d45SPetar Penkov 		napi_gro_frags(&tfile->napi);
190890e33d45SPetar Penkov 		local_bh_enable();
190990e33d45SPetar Penkov 		mutex_unlock(&tfile->napi_mutex);
1910aec72f33SEric Dumazet 	} else if (tfile->napi_enabled) {
191194317099SPetar Penkov 		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
191294317099SPetar Penkov 		int queue_len;
191394317099SPetar Penkov 
191494317099SPetar Penkov 		spin_lock_bh(&queue->lock);
191594317099SPetar Penkov 		__skb_queue_tail(queue, skb);
191694317099SPetar Penkov 		queue_len = skb_queue_len(queue);
191794317099SPetar Penkov 		spin_unlock(&queue->lock);
191894317099SPetar Penkov 
191994317099SPetar Penkov 		if (!more || queue_len > NAPI_POLL_WEIGHT)
192094317099SPetar Penkov 			napi_schedule(&tfile->napi);
192194317099SPetar Penkov 
192294317099SPetar Penkov 		local_bh_enable();
192394317099SPetar Penkov 	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
19245503fcecSJason Wang 		tun_rx_batched(tun, tfile, skb, more);
192594317099SPetar Penkov 	} else {
19261da177e4SLinus Torvalds 		netif_rx_ni(skb);
192794317099SPetar Penkov 	}
19281da177e4SLinus Torvalds 
1929608b9977SPaolo Abeni 	stats = get_cpu_ptr(tun->pcpu_stats);
1930608b9977SPaolo Abeni 	u64_stats_update_begin(&stats->syncp);
1931608b9977SPaolo Abeni 	stats->rx_packets++;
1932608b9977SPaolo Abeni 	stats->rx_bytes += len;
1933608b9977SPaolo Abeni 	u64_stats_update_end(&stats->syncp);
1934608b9977SPaolo Abeni 	put_cpu_ptr(stats);
19351da177e4SLinus Torvalds 
193696f84061SJason Wang 	if (rxhash)
19379e85722dSJason Wang 		tun_flow_update(tun, rxhash, tfile);
193896f84061SJason Wang 
19390690899bSMichael S. Tsirkin 	return total_len;
19401da177e4SLinus Torvalds }
19411da177e4SLinus Torvalds 
1942f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
19431da177e4SLinus Torvalds {
194433dccbb0SHerbert Xu 	struct file *file = iocb->ki_filp;
194554f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
19469484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
1947631ab46bSEric W. Biederman 	ssize_t result;
19481da177e4SLinus Torvalds 
19491da177e4SLinus Torvalds 	if (!tun)
19501da177e4SLinus Torvalds 		return -EBADFD;
19511da177e4SLinus Torvalds 
19525503fcecSJason Wang 	result = tun_get_user(tun, tfile, NULL, from,
19535503fcecSJason Wang 			      file->f_flags & O_NONBLOCK, false);
1954631ab46bSEric W. Biederman 
1955631ab46bSEric W. Biederman 	tun_put(tun);
1956631ab46bSEric W. Biederman 	return result;
19571da177e4SLinus Torvalds }
19581da177e4SLinus Torvalds 
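/* Copy an XDP buffer to user space: emit a zeroed virtio_net_hdr first when
 * IFF_VNET_HDR is set, then the packet data. The returned length includes
 * the vnet header, and the bytes are accounted as tx stats.
 */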
1959*fc72d1d5SJason Wang static ssize_t tun_put_user_xdp(struct tun_struct *tun,
1960*fc72d1d5SJason Wang 				struct tun_file *tfile,
1961*fc72d1d5SJason Wang 				struct xdp_buff *xdp,
1962*fc72d1d5SJason Wang 				struct iov_iter *iter)
1963*fc72d1d5SJason Wang {
1964*fc72d1d5SJason Wang 	int vnet_hdr_sz = 0;
1965*fc72d1d5SJason Wang 	size_t size = xdp->data_end - xdp->data;
1966*fc72d1d5SJason Wang 	struct tun_pcpu_stats *stats;
1967*fc72d1d5SJason Wang 	size_t ret;
1968*fc72d1d5SJason Wang 
1969*fc72d1d5SJason Wang 	if (tun->flags & IFF_VNET_HDR) {
1970*fc72d1d5SJason Wang 		struct virtio_net_hdr gso = { 0 };
1971*fc72d1d5SJason Wang 
1972*fc72d1d5SJason Wang 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1973*fc72d1d5SJason Wang 		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
1974*fc72d1d5SJason Wang 			return -EINVAL;
1975*fc72d1d5SJason Wang 		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
1976*fc72d1d5SJason Wang 			     sizeof(gso)))
1977*fc72d1d5SJason Wang 			return -EFAULT;
1978*fc72d1d5SJason Wang 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
1979*fc72d1d5SJason Wang 	}
1980*fc72d1d5SJason Wang 
1981*fc72d1d5SJason Wang 	ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz;
1982*fc72d1d5SJason Wang 
1983*fc72d1d5SJason Wang 	stats = get_cpu_ptr(tun->pcpu_stats);
1984*fc72d1d5SJason Wang 	u64_stats_update_begin(&stats->syncp);
1985*fc72d1d5SJason Wang 	stats->tx_packets++;
1986*fc72d1d5SJason Wang 	stats->tx_bytes += ret;
1987*fc72d1d5SJason Wang 	u64_stats_update_end(&stats->syncp);
1988*fc72d1d5SJason Wang 	put_cpu_ptr(tun->pcpu_stats);
1989*fc72d1d5SJason Wang 
1990*fc72d1d5SJason Wang 	return ret;
1991*fc72d1d5SJason Wang }
1992*fc72d1d5SJason Wang 
19931da177e4SLinus Torvalds /* Put packet to the user space buffer */
19946f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun,
199554f968d6SJason Wang 			    struct tun_file *tfile,
19961da177e4SLinus Torvalds 			    struct sk_buff *skb,
1997e0b46d0eSHerbert Xu 			    struct iov_iter *iter)
19981da177e4SLinus Torvalds {
19991da177e4SLinus Torvalds 	struct tun_pi pi = { 0, skb->protocol };
2000608b9977SPaolo Abeni 	struct tun_pcpu_stats *stats;
2001e0b46d0eSHerbert Xu 	ssize_t total;
20028c847d25SJason Wang 	int vlan_offset = 0;
2003a8f9bfdfSHerbert Xu 	int vlan_hlen = 0;
20042eb783c4SHerbert Xu 	int vnet_hdr_sz = 0;
2005a8f9bfdfSHerbert Xu 
2006df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
2007a8f9bfdfSHerbert Xu 		vlan_hlen = VLAN_HLEN;
20081da177e4SLinus Torvalds 
200940630b82SMichael S. Tsirkin 	if (tun->flags & IFF_VNET_HDR)
2010e1edab87SWillem de Bruijn 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
20111da177e4SLinus Torvalds 
2012e0b46d0eSHerbert Xu 	total = skb->len + vlan_hlen + vnet_hdr_sz;
2013e0b46d0eSHerbert Xu 
201440630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_NO_PI)) {
2015e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < sizeof(pi))
20161da177e4SLinus Torvalds 			return -EINVAL;
20171da177e4SLinus Torvalds 
2018e0b46d0eSHerbert Xu 		total += sizeof(pi);
2019e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < total) {
20201da177e4SLinus Torvalds 			/* Packet will be stripped */
20211da177e4SLinus Torvalds 			pi.flags |= TUN_PKT_STRIP;
20221da177e4SLinus Torvalds 		}
20231da177e4SLinus Torvalds 
2024e0b46d0eSHerbert Xu 		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
20251da177e4SLinus Torvalds 			return -EFAULT;
20261da177e4SLinus Torvalds 	}
20271da177e4SLinus Torvalds 
20282eb783c4SHerbert Xu 	if (vnet_hdr_sz) {
20299403cd7cSJarno Rajahalme 		struct virtio_net_hdr gso;
203034166093SMike Rapoport 
2031e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < vnet_hdr_sz)
2032f43798c2SRusty Russell 			return -EINVAL;
2033f43798c2SRusty Russell 
20343e9e40e7SJarno Rajahalme 		if (virtio_net_hdr_from_skb(skb, &gso,
20356391a448SJason Wang 					    tun_is_little_endian(tun), true)) {
2036f43798c2SRusty Russell 			struct skb_shared_info *sinfo = skb_shinfo(skb);
20376b8a66eeSJoe Perches 			pr_err("unexpected GSO type: "
2038ef3db4a5SMichael S. Tsirkin 			       "0x%x, gso_size %d, hdr_len %d\n",
203956f0dcc5SMichael S. Tsirkin 			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
204056f0dcc5SMichael S. Tsirkin 			       tun16_to_cpu(tun, gso.hdr_len));
2041ef3db4a5SMichael S. Tsirkin 			print_hex_dump(KERN_ERR, "tun: ",
2042ef3db4a5SMichael S. Tsirkin 				       DUMP_PREFIX_NONE,
2043ef3db4a5SMichael S. Tsirkin 				       16, 1, skb->head,
204456f0dcc5SMichael S. Tsirkin 				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2045ef3db4a5SMichael S. Tsirkin 			WARN_ON_ONCE(1);
2046ef3db4a5SMichael S. Tsirkin 			return -EINVAL;
2047ef3db4a5SMichael S. Tsirkin 		}
2048f43798c2SRusty Russell 
2049e0b46d0eSHerbert Xu 		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2050f43798c2SRusty Russell 			return -EFAULT;
20518c847d25SJason Wang 
20528c847d25SJason Wang 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2053f43798c2SRusty Russell 	}
2054f43798c2SRusty Russell 
2055a8f9bfdfSHerbert Xu 	if (vlan_hlen) {
2056e0b46d0eSHerbert Xu 		int ret;
20576680ec68SJason Wang 		struct {
20586680ec68SJason Wang 			__be16 h_vlan_proto;
20596680ec68SJason Wang 			__be16 h_vlan_TCI;
20606680ec68SJason Wang 		} veth;
20611da177e4SLinus Torvalds 
20626680ec68SJason Wang 		veth.h_vlan_proto = skb->vlan_proto;
2063df8a39deSJiri Pirko 		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
20641da177e4SLinus Torvalds 
20656680ec68SJason Wang 		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
20666680ec68SJason Wang 
2067e0b46d0eSHerbert Xu 		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2068e0b46d0eSHerbert Xu 		if (ret || !iov_iter_count(iter))
20696680ec68SJason Wang 			goto done;
20706680ec68SJason Wang 
2071e0b46d0eSHerbert Xu 		ret = copy_to_iter(&veth, sizeof(veth), iter);
2072e0b46d0eSHerbert Xu 		if (ret != sizeof(veth) || !iov_iter_count(iter))
20736680ec68SJason Wang 			goto done;
20746680ec68SJason Wang 	}
20756680ec68SJason Wang 
2076e0b46d0eSHerbert Xu 	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
20776680ec68SJason Wang 
20786680ec68SJason Wang done:
2079608b9977SPaolo Abeni 	/* caller is in process context */
2080608b9977SPaolo Abeni 	stats = get_cpu_ptr(tun->pcpu_stats);
2081608b9977SPaolo Abeni 	u64_stats_update_begin(&stats->syncp);
2082608b9977SPaolo Abeni 	stats->tx_packets++;
2083608b9977SPaolo Abeni 	stats->tx_bytes += skb->len + vlan_hlen;
2084608b9977SPaolo Abeni 	u64_stats_update_end(&stats->syncp);
2085608b9977SPaolo Abeni 	put_cpu_ptr(tun->pcpu_stats);
20861da177e4SLinus Torvalds 
20871da177e4SLinus Torvalds 	return total;
20881da177e4SLinus Torvalds }
20891da177e4SLinus Torvalds 
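/* Consume one entry (an sk_buff or a tagged xdp_buff pointer) from the
 * queue's tx ptr_ring, optionally sleeping in TASK_INTERRUPTIBLE until the
 * producer wakes us, a signal arrives, or the socket is shut down.
 */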
2090*fc72d1d5SJason Wang static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
20911576d986SJason Wang {
20921576d986SJason Wang 	DECLARE_WAITQUEUE(wait, current);
2093*fc72d1d5SJason Wang 	void *ptr = NULL;
2094f48cc6b2SJason Wang 	int error = 0;
20951576d986SJason Wang 
2096*fc72d1d5SJason Wang 	ptr = ptr_ring_consume(&tfile->tx_ring);
2097*fc72d1d5SJason Wang 	if (ptr)
20981576d986SJason Wang 		goto out;
20991576d986SJason Wang 	if (noblock) {
2100f48cc6b2SJason Wang 		error = -EAGAIN;
21011576d986SJason Wang 		goto out;
21021576d986SJason Wang 	}
21031576d986SJason Wang 
21041576d986SJason Wang 	add_wait_queue(&tfile->wq.wait, &wait);
21051576d986SJason Wang 	current->state = TASK_INTERRUPTIBLE;
21061576d986SJason Wang 
21071576d986SJason Wang 	while (1) {
2108*fc72d1d5SJason Wang 		ptr = ptr_ring_consume(&tfile->tx_ring);
2109*fc72d1d5SJason Wang 		if (ptr)
21101576d986SJason Wang 			break;
21111576d986SJason Wang 		if (signal_pending(current)) {
2112f48cc6b2SJason Wang 			error = -ERESTARTSYS;
21131576d986SJason Wang 			break;
21141576d986SJason Wang 		}
21151576d986SJason Wang 		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2116f48cc6b2SJason Wang 			error = -EFAULT;
21171576d986SJason Wang 			break;
21181576d986SJason Wang 		}
21191576d986SJason Wang 
21201576d986SJason Wang 		schedule();
21211576d986SJason Wang 	}
21221576d986SJason Wang 
21231576d986SJason Wang 	current->state = TASK_RUNNING;
21241576d986SJason Wang 	remove_wait_queue(&tfile->wq.wait, &wait);
21251576d986SJason Wang 
21261576d986SJason Wang out:
2127f48cc6b2SJason Wang 	*err = error;
2128*fc72d1d5SJason Wang 	return ptr;
21291576d986SJason Wang }
21301576d986SJason Wang 
213154f968d6SJason Wang static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
21329b067034SAl Viro 			   struct iov_iter *to,
2133*fc72d1d5SJason Wang 			   int noblock, void *ptr)
21341da177e4SLinus Torvalds {
21359b067034SAl Viro 	ssize_t ret;
21361576d986SJason Wang 	int err;
21371da177e4SLinus Torvalds 
21383872baf6SRami Rosen 	tun_debug(KERN_INFO, tun, "tun_do_read\n");
21391da177e4SLinus Torvalds 
2140c33ee15bSWei Xu 	if (!iov_iter_count(to)) {
2141*fc72d1d5SJason Wang 		tun_ptr_free(ptr);
21429b067034SAl Viro 		return 0;
2143c33ee15bSWei Xu 	}
21441da177e4SLinus Torvalds 
2145*fc72d1d5SJason Wang 	if (!ptr) {
21461576d986SJason Wang 		/* Read frames from ring */
2147*fc72d1d5SJason Wang 		ptr = tun_ring_recv(tfile, noblock, &err);
2148*fc72d1d5SJason Wang 		if (!ptr)
2149957f094fSAlex Gartrell 			return err;
2150ac77cfd4SJason Wang 	}
2151e0b46d0eSHerbert Xu 
2152*fc72d1d5SJason Wang 	if (tun_is_xdp_buff(ptr)) {
2153*fc72d1d5SJason Wang 		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
2154*fc72d1d5SJason Wang 
2155*fc72d1d5SJason Wang 		ret = tun_put_user_xdp(tun, tfile, xdp, to);
2156*fc72d1d5SJason Wang 		put_page(virt_to_head_page(xdp->data));
2157*fc72d1d5SJason Wang 	} else {
2158*fc72d1d5SJason Wang 		struct sk_buff *skb = ptr;
2159*fc72d1d5SJason Wang 
21609b067034SAl Viro 		ret = tun_put_user(tun, tfile, skb, to);
2161f51a5e82SJason Wang 		if (unlikely(ret < 0))
21621da177e4SLinus Torvalds 			kfree_skb(skb);
2163f51a5e82SJason Wang 		else
2164f51a5e82SJason Wang 			consume_skb(skb);
2165*fc72d1d5SJason Wang 	}
21661da177e4SLinus Torvalds 
216705c2828cSMichael S. Tsirkin 	return ret;
216805c2828cSMichael S. Tsirkin }
216905c2828cSMichael S. Tsirkin 
21709b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
217105c2828cSMichael S. Tsirkin {
217205c2828cSMichael S. Tsirkin 	struct file *file = iocb->ki_filp;
217305c2828cSMichael S. Tsirkin 	struct tun_file *tfile = file->private_data;
21749484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
21759b067034SAl Viro 	ssize_t len = iov_iter_count(to), ret;
217605c2828cSMichael S. Tsirkin 
217705c2828cSMichael S. Tsirkin 	if (!tun)
217805c2828cSMichael S. Tsirkin 		return -EBADFD;
2179ac77cfd4SJason Wang 	ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
218042404c09SDavid S. Miller 	ret = min_t(ssize_t, ret, len);
2181d0b7da8aSZhi Yong Wu 	if (ret > 0)
2182d0b7da8aSZhi Yong Wu 		iocb->ki_pos = ret;
2183631ab46bSEric W. Biederman 	tun_put(tun);
21841da177e4SLinus Torvalds 	return ret;
21851da177e4SLinus Torvalds }
21861da177e4SLinus Torvalds 
218796f84061SJason Wang static void tun_steering_prog_free(struct rcu_head *rcu)
218896f84061SJason Wang {
218996f84061SJason Wang 	struct tun_steering_prog *prog = container_of(rcu,
219096f84061SJason Wang 					 struct tun_steering_prog, rcu);
219196f84061SJason Wang 
219296f84061SJason Wang 	bpf_prog_destroy(prog->prog);
219396f84061SJason Wang 	kfree(prog);
219496f84061SJason Wang }
219596f84061SJason Wang 
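/* Swap in a new steering program (or clear it when prog is NULL) under
 * tun->lock; the old program is freed only after an RCU grace period, so
 * readers dereferencing tun->steering_prog never see a freed program.
 */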
219696f84061SJason Wang static int __tun_set_steering_ebpf(struct tun_struct *tun,
219796f84061SJason Wang 				   struct bpf_prog *prog)
219896f84061SJason Wang {
219996f84061SJason Wang 	struct tun_steering_prog *old, *new = NULL;
220096f84061SJason Wang 
220196f84061SJason Wang 	if (prog) {
220296f84061SJason Wang 		new = kmalloc(sizeof(*new), GFP_KERNEL);
220396f84061SJason Wang 		if (!new)
220496f84061SJason Wang 			return -ENOMEM;
220596f84061SJason Wang 		new->prog = prog;
220696f84061SJason Wang 	}
220796f84061SJason Wang 
2208124da8f6SJason Wang 	spin_lock_bh(&tun->lock);
2209124da8f6SJason Wang 	old = rcu_dereference_protected(tun->steering_prog,
2210124da8f6SJason Wang 					lockdep_is_held(&tun->lock));
221196f84061SJason Wang 	rcu_assign_pointer(tun->steering_prog, new);
2212124da8f6SJason Wang 	spin_unlock_bh(&tun->lock);
221396f84061SJason Wang 
221496f84061SJason Wang 	if (old)
221596f84061SJason Wang 		call_rcu(&old->rcu, tun_steering_prog_free);
221696f84061SJason Wang 
221796f84061SJason Wang 	return 0;
221896f84061SJason Wang }
221996f84061SJason Wang 
222096442e42SJason Wang static void tun_free_netdev(struct net_device *dev)
222196442e42SJason Wang {
222296442e42SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
222396442e42SJason Wang 
22244008e97fSJason Wang 	BUG_ON(!(list_empty(&tun->disabled)));
2225608b9977SPaolo Abeni 	free_percpu(tun->pcpu_stats);
222696442e42SJason Wang 	tun_flow_uninit(tun);
22275dbbaf2dSPaul Moore 	security_tun_dev_free_security(tun->security);
222896f84061SJason Wang 	__tun_set_steering_ebpf(tun, NULL);
222996442e42SJason Wang }
223096442e42SJason Wang 
22311da177e4SLinus Torvalds static void tun_setup(struct net_device *dev)
22321da177e4SLinus Torvalds {
22331da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
22341da177e4SLinus Torvalds 
22350625c883SEric W. Biederman 	tun->owner = INVALID_UID;
22360625c883SEric W. Biederman 	tun->group = INVALID_GID;
22371da177e4SLinus Torvalds 
22381da177e4SLinus Torvalds 	dev->ethtool_ops = &tun_ethtool_ops;
2239cf124db5SDavid S. Miller 	dev->needs_free_netdev = true;
2240cf124db5SDavid S. Miller 	dev->priv_destructor = tun_free_netdev;
2241016adb72SJason Wang 	/* We prefer our own queue length */
2242016adb72SJason Wang 	dev->tx_queue_len = TUN_READQ_SIZE;
22431da177e4SLinus Torvalds }
22441da177e4SLinus Torvalds 
2245f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting a tun or tap
2246f019a7a5SEric W. Biederman  * device with netlink.
2247f019a7a5SEric W. Biederman  */
2248a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2249a8b8a889SMatthias Schiffer 			struct netlink_ext_ack *extack)
2250f019a7a5SEric W. Biederman {
2251f019a7a5SEric W. Biederman 	return -EINVAL;
2252f019a7a5SEric W. Biederman }
2253f019a7a5SEric W. Biederman 
2254f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = {
2255f019a7a5SEric W. Biederman 	.kind		= DRV_NAME,
2256f019a7a5SEric W. Biederman 	.priv_size	= sizeof(struct tun_struct),
2257f019a7a5SEric W. Biederman 	.setup		= tun_setup,
2258f019a7a5SEric W. Biederman 	.validate	= tun_validate,
2259f019a7a5SEric W. Biederman };
2260f019a7a5SEric W. Biederman 
226133dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk)
226233dccbb0SHerbert Xu {
226354f968d6SJason Wang 	struct tun_file *tfile;
226443815482SEric Dumazet 	wait_queue_head_t *wqueue;
226533dccbb0SHerbert Xu 
226633dccbb0SHerbert Xu 	if (!sock_writeable(sk))
226733dccbb0SHerbert Xu 		return;
226833dccbb0SHerbert Xu 
22699cd3e072SEric Dumazet 	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
227033dccbb0SHerbert Xu 		return;
227133dccbb0SHerbert Xu 
227243815482SEric Dumazet 	wqueue = sk_sleep(sk);
227343815482SEric Dumazet 	if (wqueue && waitqueue_active(wqueue))
227443815482SEric Dumazet 		wake_up_interruptible_sync_poll(wqueue, POLLOUT |
227505c2828cSMichael S. Tsirkin 						POLLWRNORM | POLLWRBAND);
2276c722c625SHerbert Xu 
227754f968d6SJason Wang 	tfile = container_of(sk, struct tun_file, sk);
227854f968d6SJason Wang 	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
227933dccbb0SHerbert Xu }
228033dccbb0SHerbert Xu 
22811b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
228205c2828cSMichael S. Tsirkin {
228354f968d6SJason Wang 	int ret;
228454f968d6SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
22859484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
228654f968d6SJason Wang 
228754f968d6SJason Wang 	if (!tun)
228854f968d6SJason Wang 		return -EBADFD;
2289f5ff53b4SAl Viro 
2290c0371da6SAl Viro 	ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
22915503fcecSJason Wang 			   m->msg_flags & MSG_DONTWAIT,
22925503fcecSJason Wang 			   m->msg_flags & MSG_MORE);
229354f968d6SJason Wang 	tun_put(tun);
229454f968d6SJason Wang 	return ret;
229505c2828cSMichael S. Tsirkin }
229605c2828cSMichael S. Tsirkin 
22971b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
229805c2828cSMichael S. Tsirkin 		       int flags)
229905c2828cSMichael S. Tsirkin {
230054f968d6SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
23019484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
2302*fc72d1d5SJason Wang 	void *ptr = m->msg_control;
230305c2828cSMichael S. Tsirkin 	int ret;
230454f968d6SJason Wang 
2305c33ee15bSWei Xu 	if (!tun) {
2306c33ee15bSWei Xu 		ret = -EBADFD;
2307*fc72d1d5SJason Wang 		goto out_free;
2308c33ee15bSWei Xu 	}
230954f968d6SJason Wang 
2310eda29772SRichard Cochran 	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
23113811ae76SGao feng 		ret = -EINVAL;
2312c33ee15bSWei Xu 		goto out_put_tun;
23133811ae76SGao feng 	}
2314eda29772SRichard Cochran 	if (flags & MSG_ERRQUEUE) {
2315eda29772SRichard Cochran 		ret = sock_recv_errqueue(sock->sk, m, total_len,
2316eda29772SRichard Cochran 					 SOL_PACKET, TUN_TX_TIMESTAMP);
2317eda29772SRichard Cochran 		goto out;
2318eda29772SRichard Cochran 	}
2319*fc72d1d5SJason Wang 	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
232087897931SAlex Gartrell 	if (ret > (ssize_t)total_len) {
232142404c09SDavid S. Miller 		m->msg_flags |= MSG_TRUNC;
232242404c09SDavid S. Miller 		ret = flags & MSG_TRUNC ? ret : total_len;
232342404c09SDavid S. Miller 	}
23243811ae76SGao feng out:
232554f968d6SJason Wang 	tun_put(tun);
232605c2828cSMichael S. Tsirkin 	return ret;
2327c33ee15bSWei Xu 
2328c33ee15bSWei Xu out_put_tun:
2329c33ee15bSWei Xu 	tun_put(tun);
2330*fc72d1d5SJason Wang out_free:
2331*fc72d1d5SJason Wang 	tun_ptr_free(ptr);
2332c33ee15bSWei Xu 	return ret;
233305c2828cSMichael S. Tsirkin }
233405c2828cSMichael S. Tsirkin 
2335*fc72d1d5SJason Wang static int tun_ptr_peek_len(void *ptr)
2336*fc72d1d5SJason Wang {
2337*fc72d1d5SJason Wang 	if (likely(ptr)) {
2338*fc72d1d5SJason Wang 		if (tun_is_xdp_buff(ptr)) {
2339*fc72d1d5SJason Wang 			struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
2340*fc72d1d5SJason Wang 
2341*fc72d1d5SJason Wang 			return xdp->data_end - xdp->data;
2342*fc72d1d5SJason Wang 		}
2343*fc72d1d5SJason Wang 		return __skb_array_len_with_tag(ptr);
2344*fc72d1d5SJason Wang 	} else {
2345*fc72d1d5SJason Wang 		return 0;
2346*fc72d1d5SJason Wang 	}
2347*fc72d1d5SJason Wang }
2348*fc72d1d5SJason Wang 
23491576d986SJason Wang static int tun_peek_len(struct socket *sock)
23501576d986SJason Wang {
23511576d986SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
23521576d986SJason Wang 	struct tun_struct *tun;
23531576d986SJason Wang 	int ret = 0;
23541576d986SJason Wang 
23559484dc74Syuan linyu 	tun = tun_get(tfile);
23561576d986SJason Wang 	if (!tun)
23571576d986SJason Wang 		return 0;
23581576d986SJason Wang 
2359*fc72d1d5SJason Wang 	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
23601576d986SJason Wang 	tun_put(tun);
23611576d986SJason Wang 
23621576d986SJason Wang 	return ret;
23631576d986SJason Wang }
23641576d986SJason Wang 
236505c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */
236605c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = {
23671576d986SJason Wang 	.peek_len = tun_peek_len,
236805c2828cSMichael S. Tsirkin 	.sendmsg = tun_sendmsg,
236905c2828cSMichael S. Tsirkin 	.recvmsg = tun_recvmsg,
237005c2828cSMichael S. Tsirkin };
237105c2828cSMichael S. Tsirkin 
237233dccbb0SHerbert Xu static struct proto tun_proto = {
237333dccbb0SHerbert Xu 	.name		= "tun",
237433dccbb0SHerbert Xu 	.owner		= THIS_MODULE,
237554f968d6SJason Wang 	.obj_size	= sizeof(struct tun_file),
237633dccbb0SHerbert Xu };
2377f019a7a5SEric W. Biederman 
2378980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun)
2379980c9e8cSDavid Woodhouse {
2380031f5e03SMichael S. Tsirkin 	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2381980c9e8cSDavid Woodhouse }
2382980c9e8cSDavid Woodhouse 
2383980c9e8cSDavid Woodhouse static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
2384980c9e8cSDavid Woodhouse 			      char *buf)
2385980c9e8cSDavid Woodhouse {
2386980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2387980c9e8cSDavid Woodhouse 	return sprintf(buf, "0x%x\n", tun_flags(tun));
2388980c9e8cSDavid Woodhouse }
2389980c9e8cSDavid Woodhouse 
2390980c9e8cSDavid Woodhouse static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
2391980c9e8cSDavid Woodhouse 			      char *buf)
2392980c9e8cSDavid Woodhouse {
2393980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
23940625c883SEric W. Biederman 	return uid_valid(tun->owner)?
23950625c883SEric W. Biederman 		sprintf(buf, "%u\n",
23960625c883SEric W. Biederman 			from_kuid_munged(current_user_ns(), tun->owner)):
23970625c883SEric W. Biederman 		sprintf(buf, "-1\n");
2398980c9e8cSDavid Woodhouse }
2399980c9e8cSDavid Woodhouse 
2400980c9e8cSDavid Woodhouse static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
2401980c9e8cSDavid Woodhouse 			      char *buf)
2402980c9e8cSDavid Woodhouse {
2403980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
24040625c883SEric W. Biederman 	return gid_valid(tun->group) ?
24050625c883SEric W. Biederman 		sprintf(buf, "%u\n",
24060625c883SEric W. Biederman 			from_kgid_munged(current_user_ns(), tun->group)):
24070625c883SEric W. Biederman 		sprintf(buf, "-1\n");
2408980c9e8cSDavid Woodhouse }
2409980c9e8cSDavid Woodhouse 
2410980c9e8cSDavid Woodhouse static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
2411980c9e8cSDavid Woodhouse static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
2412980c9e8cSDavid Woodhouse static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
2413980c9e8cSDavid Woodhouse 
2414c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = {
2415c4d33e24STakashi Iwai 	&dev_attr_tun_flags.attr,
2416c4d33e24STakashi Iwai 	&dev_attr_owner.attr,
2417c4d33e24STakashi Iwai 	&dev_attr_group.attr,
2418c4d33e24STakashi Iwai 	NULL
2419c4d33e24STakashi Iwai };
2420c4d33e24STakashi Iwai 
2421c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = {
2422c4d33e24STakashi Iwai 	.attrs = tun_dev_attrs
2423c4d33e24STakashi Iwai };
2424c4d33e24STakashi Iwai 
2425d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
24261da177e4SLinus Torvalds {
24271da177e4SLinus Torvalds 	struct tun_struct *tun;
242854f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
24291da177e4SLinus Torvalds 	struct net_device *dev;
24301da177e4SLinus Torvalds 	int err;
24311da177e4SLinus Torvalds 
24327c0c3b1aSJason Wang 	if (tfile->detached)
24337c0c3b1aSJason Wang 		return -EINVAL;
24347c0c3b1aSJason Wang 
243590e33d45SPetar Penkov 	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
243690e33d45SPetar Penkov 		if (!capable(CAP_NET_ADMIN))
243790e33d45SPetar Penkov 			return -EPERM;
243890e33d45SPetar Penkov 
243990e33d45SPetar Penkov 		if (!(ifr->ifr_flags & IFF_NAPI) ||
244090e33d45SPetar Penkov 		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
244190e33d45SPetar Penkov 			return -EINVAL;
244290e33d45SPetar Penkov 	}
244390e33d45SPetar Penkov 
244474a3e5a7SEric W. Biederman 	dev = __dev_get_by_name(net, ifr->ifr_name);
244574a3e5a7SEric W. Biederman 	if (dev) {
2446f85ba780SDavid Woodhouse 		if (ifr->ifr_flags & IFF_TUN_EXCL)
2447f85ba780SDavid Woodhouse 			return -EBUSY;
244874a3e5a7SEric W. Biederman 		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
244974a3e5a7SEric W. Biederman 			tun = netdev_priv(dev);
245074a3e5a7SEric W. Biederman 		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
245174a3e5a7SEric W. Biederman 			tun = netdev_priv(dev);
245274a3e5a7SEric W. Biederman 		else
245374a3e5a7SEric W. Biederman 			return -EINVAL;
245474a3e5a7SEric W. Biederman 
24558e6d91aeSJason Wang 		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
245640630b82SMichael S. Tsirkin 		    !!(tun->flags & IFF_MULTI_QUEUE))
24578e6d91aeSJason Wang 			return -EINVAL;
24588e6d91aeSJason Wang 
2459cde8b15fSJason Wang 		if (tun_not_capable(tun))
24602b980dbdSPaul Moore 			return -EPERM;
24615dbbaf2dSPaul Moore 		err = security_tun_dev_open(tun->security);
24622b980dbdSPaul Moore 		if (err < 0)
24632b980dbdSPaul Moore 			return err;
24642b980dbdSPaul Moore 
246594317099SPetar Penkov 		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
246694317099SPetar Penkov 				 ifr->ifr_flags & IFF_NAPI);
2467a7385ba2SEric W. Biederman 		if (err < 0)
2468a7385ba2SEric W. Biederman 			return err;
24694008e97fSJason Wang 
247040630b82SMichael S. Tsirkin 		if (tun->flags & IFF_MULTI_QUEUE &&
2471e8dbad66SJason Wang 		    (tun->numqueues + tun->numdisabled > 1)) {
2472e8dbad66SJason Wang 			/* One or more queues have already been attached, no need
2473e8dbad66SJason Wang 			 * to initialize the device again.
2474e8dbad66SJason Wang 			 */
2475e8dbad66SJason Wang 			return 0;
2476e8dbad66SJason Wang 		}
247786a264abSDavid Howells 	}
24781da177e4SLinus Torvalds 	else {
24791da177e4SLinus Torvalds 		char *name;
24801da177e4SLinus Torvalds 		unsigned long flags = 0;
2481edfb6a14SJason Wang 		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2482edfb6a14SJason Wang 			     MAX_TAP_QUEUES : 1;
24831da177e4SLinus Torvalds 
2484c260b772SEric W. Biederman 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2485ca6bb5d7SDavid Woodhouse 			return -EPERM;
24862b980dbdSPaul Moore 		err = security_tun_dev_create();
24872b980dbdSPaul Moore 		if (err < 0)
24882b980dbdSPaul Moore 			return err;
2489ca6bb5d7SDavid Woodhouse 
24901da177e4SLinus Torvalds 		/* Set dev type */
24911da177e4SLinus Torvalds 		if (ifr->ifr_flags & IFF_TUN) {
24921da177e4SLinus Torvalds 			/* TUN device */
249340630b82SMichael S. Tsirkin 			flags |= IFF_TUN;
24941da177e4SLinus Torvalds 			name = "tun%d";
24951da177e4SLinus Torvalds 		} else if (ifr->ifr_flags & IFF_TAP) {
24961da177e4SLinus Torvalds 			/* TAP device */
249740630b82SMichael S. Tsirkin 			flags |= IFF_TAP;
24981da177e4SLinus Torvalds 			name = "tap%d";
24991da177e4SLinus Torvalds 		} else
250036989b90SKusanagi Kouichi 			return -EINVAL;
25011da177e4SLinus Torvalds 
25021da177e4SLinus Torvalds 		if (*ifr->ifr_name)
25031da177e4SLinus Torvalds 			name = ifr->ifr_name;
25041da177e4SLinus Torvalds 
2505c8d68e6bSJason Wang 		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2506c835a677STom Gundersen 				       NET_NAME_UNKNOWN, tun_setup, queues,
2507c835a677STom Gundersen 				       queues);
2508edfb6a14SJason Wang 
25091da177e4SLinus Torvalds 		if (!dev)
25101da177e4SLinus Torvalds 			return -ENOMEM;
25110ad646c8SCong Wang 		err = dev_get_valid_name(net, dev, name);
25125c25f65fSJulien Gomes 		if (err < 0)
25130ad646c8SCong Wang 			goto err_free_dev;
25141da177e4SLinus Torvalds 
2515fc54c658SPavel Emelyanov 		dev_net_set(dev, net);
2516f019a7a5SEric W. Biederman 		dev->rtnl_link_ops = &tun_link_ops;
2517fb7589a1SPavel Emelyanov 		dev->ifindex = tfile->ifindex;
2518c4d33e24STakashi Iwai 		dev->sysfs_groups[0] = &tun_attr_group;
2519758e43b7SStephen Hemminger 
25201da177e4SLinus Torvalds 		tun = netdev_priv(dev);
25211da177e4SLinus Torvalds 		tun->dev = dev;
25221da177e4SLinus Torvalds 		tun->flags = flags;
2523f271b2ccSMax Krasnyansky 		tun->txflt.count = 0;
2524d9d52b51SMichael S. Tsirkin 		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
25251da177e4SLinus Torvalds 
2526eaea34b2SPaolo Abeni 		tun->align = NET_SKB_PAD;
252754f968d6SJason Wang 		tun->filter_attached = false;
252854f968d6SJason Wang 		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
25295503fcecSJason Wang 		tun->rx_batched = 0;
253096f84061SJason Wang 		RCU_INIT_POINTER(tun->steering_prog, NULL);
253133dccbb0SHerbert Xu 
2532608b9977SPaolo Abeni 		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
2533608b9977SPaolo Abeni 		if (!tun->pcpu_stats) {
2534608b9977SPaolo Abeni 			err = -ENOMEM;
2535608b9977SPaolo Abeni 			goto err_free_dev;
2536608b9977SPaolo Abeni 		}
2537608b9977SPaolo Abeni 
253896442e42SJason Wang 		spin_lock_init(&tun->lock);
253996442e42SJason Wang 
25405dbbaf2dSPaul Moore 		err = security_tun_dev_alloc_security(&tun->security);
25415dbbaf2dSPaul Moore 		if (err < 0)
2542608b9977SPaolo Abeni 			goto err_free_stat;
25432b980dbdSPaul Moore 
25441da177e4SLinus Torvalds 		tun_net_init(dev);
2545944a1376SPavel Emelyanov 		tun_flow_init(tun);
254696442e42SJason Wang 
254788255375SMichał Mirosław 		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
25486680ec68SJason Wang 				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
25496680ec68SJason Wang 				   NETIF_F_HW_VLAN_STAG_TX;
25502a2bbf17SPaolo Abeni 		dev->features = dev->hw_features | NETIF_F_LLTX;
25516671b224SFernando Luis Vazquez Cao 		dev->vlan_features = dev->features &
25526671b224SFernando Luis Vazquez Cao 				     ~(NETIF_F_HW_VLAN_CTAG_TX |
25536671b224SFernando Luis Vazquez Cao 				       NETIF_F_HW_VLAN_STAG_TX);
255488255375SMichał Mirosław 
25554008e97fSJason Wang 		INIT_LIST_HEAD(&tun->disabled);
255694317099SPetar Penkov 		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
2557eb0fb363SJason Wang 		if (err < 0)
2558662ca437SJason Wang 			goto err_free_flow;
2559eb0fb363SJason Wang 
25601da177e4SLinus Torvalds 		err = register_netdevice(tun->dev);
25611da177e4SLinus Torvalds 		if (err < 0)
2562662ca437SJason Wang 			goto err_detach;
2563af668b3cSMichael S. Tsirkin 	}
2564980c9e8cSDavid Woodhouse 
2565eb0fb363SJason Wang 	netif_carrier_on(tun->dev);
25661da177e4SLinus Torvalds 
25676b8a66eeSJoe Perches 	tun_debug(KERN_INFO, tun, "tun_set_iff\n");
25681da177e4SLinus Torvalds 
2569031f5e03SMichael S. Tsirkin 	tun->flags = (tun->flags & ~TUN_FEATURES) |
2570031f5e03SMichael S. Tsirkin 		(ifr->ifr_flags & TUN_FEATURES);
2571c8d68e6bSJason Wang 
2572e35259a9SMax Krasnyansky 	/* Make sure persistent devices do not get stuck in
2573e35259a9SMax Krasnyansky 	 * xoff state.
2574e35259a9SMax Krasnyansky 	 */
2575e35259a9SMax Krasnyansky 	if (netif_running(tun->dev))
2576c8d68e6bSJason Wang 		netif_tx_wake_all_queues(tun->dev);
2577e35259a9SMax Krasnyansky 
25781da177e4SLinus Torvalds 	strcpy(ifr->ifr_name, tun->dev->name);
25791da177e4SLinus Torvalds 	return 0;
25801da177e4SLinus Torvalds 
2581662ca437SJason Wang err_detach:
2582662ca437SJason Wang 	tun_detach_all(dev);
2583ff244c6bSEric Dumazet 	/* register_netdevice() already called tun_free_netdev() */
2584ff244c6bSEric Dumazet 	goto err_free_dev;
2585ff244c6bSEric Dumazet 
2586662ca437SJason Wang err_free_flow:
2587662ca437SJason Wang 	tun_flow_uninit(tun);
2588662ca437SJason Wang 	security_tun_dev_free_security(tun->security);
2589608b9977SPaolo Abeni err_free_stat:
2590608b9977SPaolo Abeni 	free_percpu(tun->pcpu_stats);
25911da177e4SLinus Torvalds err_free_dev:
25921da177e4SLinus Torvalds 	free_netdev(dev);
25931da177e4SLinus Torvalds 	return err;
25941da177e4SLinus Torvalds }
25951da177e4SLinus Torvalds 
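/*
 * Illustrative userspace sketch, not part of this driver: the typical
 * TUNSETIFF handshake that lands in tun_set_iff() above, plus the
 * optional TUNSETPERSIST step.  Names and flags are example values.
 */
#if 0	/* example only, never built as part of tun.c */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* 'name' is an in/out buffer of at least IFNAMSIZ bytes */
static int tun_example_open(char *name)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	/* L3 device, no packet info */
	if (*name)
		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	strcpy(name, ifr.ifr_name);	/* kernel may have expanded "tun%d" */

	/* Optional: keep the interface after the last fd is closed */
	ioctl(fd, TUNSETPERSIST, 1);

	/* read()/write() on fd now carry raw IP packets */
	return fd;
}
#endif
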
25969ce99cf6SRami Rosen static void tun_get_iff(struct net *net, struct tun_struct *tun,
2597876bfd4dSHerbert Xu 		       struct ifreq *ifr)
2598e3b99556SMark McLoughlin {
25996b8a66eeSJoe Perches 	tun_debug(KERN_INFO, tun, "tun_get_iff\n");
2600e3b99556SMark McLoughlin 
2601e3b99556SMark McLoughlin 	strcpy(ifr->ifr_name, tun->dev->name);
2602e3b99556SMark McLoughlin 
2603980c9e8cSDavid Woodhouse 	ifr->ifr_flags = tun_flags(tun);
2604e3b99556SMark McLoughlin 
2605e3b99556SMark McLoughlin }
2606e3b99556SMark McLoughlin 
26075228ddc9SRusty Russell /* This is like a cut-down set of ethtool ops, except done via the tun fd
26085228ddc9SRusty Russell  * so no privs are required. */
260988255375SMichał Mirosław static int set_offload(struct tun_struct *tun, unsigned long arg)
26105228ddc9SRusty Russell {
2611c8f44affSMichał Mirosław 	netdev_features_t features = 0;
26125228ddc9SRusty Russell 
26135228ddc9SRusty Russell 	if (arg & TUN_F_CSUM) {
261488255375SMichał Mirosław 		features |= NETIF_F_HW_CSUM;
26155228ddc9SRusty Russell 		arg &= ~TUN_F_CSUM;
26165228ddc9SRusty Russell 
26175228ddc9SRusty Russell 		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
26185228ddc9SRusty Russell 			if (arg & TUN_F_TSO_ECN) {
26195228ddc9SRusty Russell 				features |= NETIF_F_TSO_ECN;
26205228ddc9SRusty Russell 				arg &= ~TUN_F_TSO_ECN;
26215228ddc9SRusty Russell 			}
26225228ddc9SRusty Russell 			if (arg & TUN_F_TSO4)
26235228ddc9SRusty Russell 				features |= NETIF_F_TSO;
26245228ddc9SRusty Russell 			if (arg & TUN_F_TSO6)
26255228ddc9SRusty Russell 				features |= NETIF_F_TSO6;
26265228ddc9SRusty Russell 			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
26275228ddc9SRusty Russell 		}
26280c19f846SWillem de Bruijn 
26290c19f846SWillem de Bruijn 		arg &= ~TUN_F_UFO;
26305228ddc9SRusty Russell 	}
26315228ddc9SRusty Russell 
26325228ddc9SRusty Russell 	/* This gives the user a way to test for new features in the future by
26335228ddc9SRusty Russell 	 * trying to set them. */
26345228ddc9SRusty Russell 	if (arg)
26355228ddc9SRusty Russell 		return -EINVAL;
26365228ddc9SRusty Russell 
263788255375SMichał Mirosław 	tun->set_features = features;
263809050957SYaroslav Isakov 	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
263909050957SYaroslav Isakov 	tun->dev->wanted_features |= features;
264088255375SMichał Mirosław 	netdev_update_features(tun->dev);
26415228ddc9SRusty Russell 
26425228ddc9SRusty Russell 	return 0;
26435228ddc9SRusty Russell }
26445228ddc9SRusty Russell 
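/*
 * Illustrative userspace sketch, not part of this driver: how a
 * consumer such as a VMM typically drives TUNSETOFFLOAD, and how the
 * "leftover bits return -EINVAL" rule above doubles as a probe for
 * offload flags the running kernel does not know about.
 */
#if 0	/* example only, never built as part of tun.c */
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static void tun_example_offload(int tunfd)
{
	unsigned long want = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;

	/* TUNSETOFFLOAD takes the flag word by value, not by pointer */
	if (ioctl(tunfd, TUNSETOFFLOAD, want) < 0)
		ioctl(tunfd, TUNSETOFFLOAD, TUN_F_CSUM);	/* fall back */

	/* An unknown future TUN_F_* bit simply makes the ioctl fail */
}
#endif
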
2645c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n)
2646c8d68e6bSJason Wang {
2647c8d68e6bSJason Wang 	int i;
2648c8d68e6bSJason Wang 	struct tun_file *tfile;
2649c8d68e6bSJason Wang 
2650c8d68e6bSJason Wang 	for (i = 0; i < n; i++) {
2651b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
26528ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
26538ced425eSHannes Frederic Sowa 		sk_detach_filter(tfile->socket.sk);
26548ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
2655c8d68e6bSJason Wang 	}
2656c8d68e6bSJason Wang 
2657c8d68e6bSJason Wang 	tun->filter_attached = false;
2658c8d68e6bSJason Wang }
2659c8d68e6bSJason Wang 
2660c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun)
2661c8d68e6bSJason Wang {
2662c8d68e6bSJason Wang 	int i, ret = 0;
2663c8d68e6bSJason Wang 	struct tun_file *tfile;
2664c8d68e6bSJason Wang 
2665c8d68e6bSJason Wang 	for (i = 0; i < tun->numqueues; i++) {
2666b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
26678ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
26688ced425eSHannes Frederic Sowa 		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
26698ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
2670c8d68e6bSJason Wang 		if (ret) {
2671c8d68e6bSJason Wang 			tun_detach_filter(tun, i);
2672c8d68e6bSJason Wang 			return ret;
2673c8d68e6bSJason Wang 		}
2674c8d68e6bSJason Wang 	}
2675c8d68e6bSJason Wang 
2676c8d68e6bSJason Wang 	tun->filter_attached = true;
2677c8d68e6bSJason Wang 	return ret;
2678c8d68e6bSJason Wang }
2679c8d68e6bSJason Wang 
2680c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun)
2681c8d68e6bSJason Wang {
2682c8d68e6bSJason Wang 	struct tun_file *tfile;
2683c8d68e6bSJason Wang 	int i;
2684c8d68e6bSJason Wang 
2685c8d68e6bSJason Wang 	for (i = 0; i < tun->numqueues; i++) {
2686b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
2687c8d68e6bSJason Wang 		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2688c8d68e6bSJason Wang 	}
2689c8d68e6bSJason Wang }
2690c8d68e6bSJason Wang 
2691cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr)
2692cde8b15fSJason Wang {
2693cde8b15fSJason Wang 	struct tun_file *tfile = file->private_data;
2694cde8b15fSJason Wang 	struct tun_struct *tun;
2695cde8b15fSJason Wang 	int ret = 0;
2696cde8b15fSJason Wang 
2697cde8b15fSJason Wang 	rtnl_lock();
2698cde8b15fSJason Wang 
2699cde8b15fSJason Wang 	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
27004008e97fSJason Wang 		tun = tfile->detached;
27015dbbaf2dSPaul Moore 		if (!tun) {
2702cde8b15fSJason Wang 			ret = -EINVAL;
27035dbbaf2dSPaul Moore 			goto unlock;
27045dbbaf2dSPaul Moore 		}
27055dbbaf2dSPaul Moore 		ret = security_tun_dev_attach_queue(tun->security);
27065dbbaf2dSPaul Moore 		if (ret < 0)
27075dbbaf2dSPaul Moore 			goto unlock;
270894317099SPetar Penkov 		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
27094008e97fSJason Wang 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2710b8deabd3SJason Wang 		tun = rtnl_dereference(tfile->tun);
271140630b82SMichael S. Tsirkin 		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
27124008e97fSJason Wang 			ret = -EINVAL;
2713cde8b15fSJason Wang 		else
27144008e97fSJason Wang 			__tun_detach(tfile, false);
27154008e97fSJason Wang 	} else
2716cde8b15fSJason Wang 		ret = -EINVAL;
2717cde8b15fSJason Wang 
27185dbbaf2dSPaul Moore unlock:
2719cde8b15fSJason Wang 	rtnl_unlock();
2720cde8b15fSJason Wang 	return ret;
2721cde8b15fSJason Wang }
2722cde8b15fSJason Wang 
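/*
 * Illustrative userspace sketch, not part of this driver: multiqueue
 * attach/detach as seen from the other side of tun_set_queue().  Each
 * fd is one queue; TUNSETQUEUE with IFF_DETACH_QUEUE parks it on the
 * disabled list, IFF_ATTACH_QUEUE brings it back.  Names are examples.
 */
#if 0	/* example only, never built as part of tun.c */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Call repeatedly with the same name; each call yields one queue fd */
static int tun_example_mq_open(const char *name)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

static int tun_example_queue_enable(int queuefd, int on)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = on ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE;
	return ioctl(queuefd, TUNSETQUEUE, &ifr);
}
#endif
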
272396f84061SJason Wang static int tun_set_steering_ebpf(struct tun_struct *tun, void __user *data)
272496f84061SJason Wang {
272596f84061SJason Wang 	struct bpf_prog *prog;
272696f84061SJason Wang 	int fd;
272796f84061SJason Wang 
272896f84061SJason Wang 	if (copy_from_user(&fd, data, sizeof(fd)))
272996f84061SJason Wang 		return -EFAULT;
273096f84061SJason Wang 
273196f84061SJason Wang 	if (fd == -1) {
273296f84061SJason Wang 		prog = NULL;
273396f84061SJason Wang 	} else {
273496f84061SJason Wang 		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
273596f84061SJason Wang 		if (IS_ERR(prog))
273696f84061SJason Wang 			return PTR_ERR(prog);
273796f84061SJason Wang 	}
273896f84061SJason Wang 
273996f84061SJason Wang 	return __tun_set_steering_ebpf(tun, prog);
274096f84061SJason Wang }
274196f84061SJason Wang 
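/*
 * Illustrative userspace sketch, not part of this driver: installing a
 * trivial steering program through TUNSETSTEERINGEBPF.  The program
 * below always selects queue 0; a real one would hash the packet.
 * Passing fd == -1 removes the program again.  Raw bpf(2) is assumed
 * here purely to keep the example self-contained.
 */
#if 0	/* example only, never built as part of tun.c */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/bpf.h>
#include <linux/if_tun.h>

static int tun_example_set_steering(int tunfd)
{
	struct bpf_insn insns[] = {
		{ BPF_ALU64 | BPF_MOV | BPF_K, BPF_REG_0, 0, 0, 0 },	/* r0 = 0 */
		{ BPF_JMP | BPF_EXIT, 0, 0, 0, 0 },			/* return r0 */
	};
	union bpf_attr attr;
	int prog_fd, ret;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns     = (unsigned long)insns;
	attr.insn_cnt  = sizeof(insns) / sizeof(insns[0]);
	attr.license   = (unsigned long)"GPL";

	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	if (prog_fd < 0)
		return -1;

	/* The ioctl copies the int fd from the pointer argument */
	ret = ioctl(tunfd, TUNSETSTEERINGEBPF, &prog_fd);
	close(prog_fd);	/* tun took its own reference on success */
	return ret;
}
#endif
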
274250857e2aSArnd Bergmann static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
274350857e2aSArnd Bergmann 			    unsigned long arg, int ifreq_len)
27441da177e4SLinus Torvalds {
274536b50babSEric W. Biederman 	struct tun_file *tfile = file->private_data;
2746631ab46bSEric W. Biederman 	struct tun_struct *tun;
27471da177e4SLinus Torvalds 	void __user* argp = (void __user*)arg;
27481da177e4SLinus Torvalds 	struct ifreq ifr;
27490625c883SEric W. Biederman 	kuid_t owner;
27500625c883SEric W. Biederman 	kgid_t group;
275133dccbb0SHerbert Xu 	int sndbuf;
2752d9d52b51SMichael S. Tsirkin 	int vnet_hdr_sz;
2753fb7589a1SPavel Emelyanov 	unsigned int ifindex;
27541cf8e410SMichael S. Tsirkin 	int le;
2755f271b2ccSMax Krasnyansky 	int ret;
27561da177e4SLinus Torvalds 
275720861f26SGao Feng 	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == SOCK_IOC_TYPE) {
275850857e2aSArnd Bergmann 		if (copy_from_user(&ifr, argp, ifreq_len))
27591da177e4SLinus Torvalds 			return -EFAULT;
27608bbb1813SDavid S. Miller 	} else {
2761a117dacdSMathias Krause 		memset(&ifr, 0, sizeof(ifr));
27628bbb1813SDavid S. Miller 	}
2763631ab46bSEric W. Biederman 	if (cmd == TUNGETFEATURES) {
2764631ab46bSEric W. Biederman 		/* Currently this just means: "what IFF flags are valid?".
2765631ab46bSEric W. Biederman 		 * This is needed because we never checked for invalid flags on
2766031f5e03SMichael S. Tsirkin 		 * TUNSETIFF.
2767031f5e03SMichael S. Tsirkin 		 */
2768031f5e03SMichael S. Tsirkin 		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
2769631ab46bSEric W. Biederman 				(unsigned int __user*)argp);
2770cde8b15fSJason Wang 	} else if (cmd == TUNSETQUEUE)
2771cde8b15fSJason Wang 		return tun_set_queue(file, &ifr);
2772631ab46bSEric W. Biederman 
2773c8d68e6bSJason Wang 	ret = 0;
2774876bfd4dSHerbert Xu 	rtnl_lock();
2775876bfd4dSHerbert Xu 
27769484dc74Syuan linyu 	tun = tun_get(tfile);
27770f16bc13SGao Feng 	if (cmd == TUNSETIFF) {
27780f16bc13SGao Feng 		ret = -EEXIST;
27790f16bc13SGao Feng 		if (tun)
27800f16bc13SGao Feng 			goto unlock;
27810f16bc13SGao Feng 
27821da177e4SLinus Torvalds 		ifr.ifr_name[IFNAMSIZ-1] = '\0';
27831da177e4SLinus Torvalds 
2784140e807dSEric W. Biederman 		ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr);
27851da177e4SLinus Torvalds 
2786876bfd4dSHerbert Xu 		if (ret)
2787876bfd4dSHerbert Xu 			goto unlock;
27881da177e4SLinus Torvalds 
278950857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
2790876bfd4dSHerbert Xu 			ret = -EFAULT;
2791876bfd4dSHerbert Xu 		goto unlock;
27921da177e4SLinus Torvalds 	}
2793fb7589a1SPavel Emelyanov 	if (cmd == TUNSETIFINDEX) {
2794fb7589a1SPavel Emelyanov 		ret = -EPERM;
2795fb7589a1SPavel Emelyanov 		if (tun)
2796fb7589a1SPavel Emelyanov 			goto unlock;
2797fb7589a1SPavel Emelyanov 
2798fb7589a1SPavel Emelyanov 		ret = -EFAULT;
2799fb7589a1SPavel Emelyanov 		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
2800fb7589a1SPavel Emelyanov 			goto unlock;
2801fb7589a1SPavel Emelyanov 
2802fb7589a1SPavel Emelyanov 		ret = 0;
2803fb7589a1SPavel Emelyanov 		tfile->ifindex = ifindex;
2804fb7589a1SPavel Emelyanov 		goto unlock;
2805fb7589a1SPavel Emelyanov 	}
28061da177e4SLinus Torvalds 
2807876bfd4dSHerbert Xu 	ret = -EBADFD;
28081da177e4SLinus Torvalds 	if (!tun)
2809876bfd4dSHerbert Xu 		goto unlock;
28101da177e4SLinus Torvalds 
28111e588338SJason Wang 	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
28121da177e4SLinus Torvalds 
2813631ab46bSEric W. Biederman 	ret = 0;
28141da177e4SLinus Torvalds 	switch (cmd) {
2815e3b99556SMark McLoughlin 	case TUNGETIFF:
28169ce99cf6SRami Rosen 		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
2817e3b99556SMark McLoughlin 
28183d407a80SPavel Emelyanov 		if (tfile->detached)
28193d407a80SPavel Emelyanov 			ifr.ifr_flags |= IFF_DETACH_QUEUE;
2820849c9b6fSPavel Emelyanov 		if (!tfile->socket.sk->sk_filter)
2821849c9b6fSPavel Emelyanov 			ifr.ifr_flags |= IFF_NOFILTER;
28223d407a80SPavel Emelyanov 
282350857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
2824631ab46bSEric W. Biederman 			ret = -EFAULT;
2825e3b99556SMark McLoughlin 		break;
2826e3b99556SMark McLoughlin 
28271da177e4SLinus Torvalds 	case TUNSETNOCSUM:
28281da177e4SLinus Torvalds 		/* Disable/Enable checksum */
28291da177e4SLinus Torvalds 
283088255375SMichał Mirosław 		/* [unimplemented] */
283188255375SMichał Mirosław 		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
28326b8a66eeSJoe Perches 			  arg ? "disabled" : "enabled");
28331da177e4SLinus Torvalds 		break;
28341da177e4SLinus Torvalds 
28351da177e4SLinus Torvalds 	case TUNSETPERSIST:
283654f968d6SJason Wang 		/* Disable/Enable persist mode. Keep an extra reference to the
283854f968d6SJason Wang 		 * module to prevent the module from being unloaded.
283854f968d6SJason Wang 		 */
283940630b82SMichael S. Tsirkin 		if (arg && !(tun->flags & IFF_PERSIST)) {
284040630b82SMichael S. Tsirkin 			tun->flags |= IFF_PERSIST;
284154f968d6SJason Wang 			__module_get(THIS_MODULE);
2842dd38bd85SJason Wang 		}
284340630b82SMichael S. Tsirkin 		if (!arg && (tun->flags & IFF_PERSIST)) {
284440630b82SMichael S. Tsirkin 			tun->flags &= ~IFF_PERSIST;
284554f968d6SJason Wang 			module_put(THIS_MODULE);
284654f968d6SJason Wang 		}
28471da177e4SLinus Torvalds 
28486b8a66eeSJoe Perches 		tun_debug(KERN_INFO, tun, "persist %s\n",
28496b8a66eeSJoe Perches 			  arg ? "enabled" : "disabled");
28501da177e4SLinus Torvalds 		break;
28511da177e4SLinus Torvalds 
28521da177e4SLinus Torvalds 	case TUNSETOWNER:
28531da177e4SLinus Torvalds 		/* Set owner of the device */
28540625c883SEric W. Biederman 		owner = make_kuid(current_user_ns(), arg);
28550625c883SEric W. Biederman 		if (!uid_valid(owner)) {
28560625c883SEric W. Biederman 			ret = -EINVAL;
28570625c883SEric W. Biederman 			break;
28580625c883SEric W. Biederman 		}
28590625c883SEric W. Biederman 		tun->owner = owner;
28601e588338SJason Wang 		tun_debug(KERN_INFO, tun, "owner set to %u\n",
28610625c883SEric W. Biederman 			  from_kuid(&init_user_ns, tun->owner));
28621da177e4SLinus Torvalds 		break;
28631da177e4SLinus Torvalds 
28648c644623SGuido Guenther 	case TUNSETGROUP:
28658c644623SGuido Guenther 		/* Set group of the device */
28660625c883SEric W. Biederman 		group = make_kgid(current_user_ns(), arg);
28670625c883SEric W. Biederman 		if (!gid_valid(group)) {
28680625c883SEric W. Biederman 			ret = -EINVAL;
28690625c883SEric W. Biederman 			break;
28700625c883SEric W. Biederman 		}
28710625c883SEric W. Biederman 		tun->group = group;
28721e588338SJason Wang 		tun_debug(KERN_INFO, tun, "group set to %u\n",
28730625c883SEric W. Biederman 			  from_kgid(&init_user_ns, tun->group));
28748c644623SGuido Guenther 		break;
28758c644623SGuido Guenther 
2876ff4cc3acSMike Kershaw 	case TUNSETLINK:
2877ff4cc3acSMike Kershaw 		/* Only allow setting the type when the interface is down */
2878ff4cc3acSMike Kershaw 		if (tun->dev->flags & IFF_UP) {
28796b8a66eeSJoe Perches 			tun_debug(KERN_INFO, tun,
28806b8a66eeSJoe Perches 				  "Linktype set failed because interface is up\n");
288148abfe05SDavid S. Miller 			ret = -EBUSY;
2882ff4cc3acSMike Kershaw 		} else {
2883ff4cc3acSMike Kershaw 			tun->dev->type = (int) arg;
28846b8a66eeSJoe Perches 			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
28856b8a66eeSJoe Perches 				  tun->dev->type);
288648abfe05SDavid S. Miller 			ret = 0;
2887ff4cc3acSMike Kershaw 		}
2888631ab46bSEric W. Biederman 		break;
2889ff4cc3acSMike Kershaw 
28901da177e4SLinus Torvalds #ifdef TUN_DEBUG
28911da177e4SLinus Torvalds 	case TUNSETDEBUG:
28921da177e4SLinus Torvalds 		tun->debug = arg;
28931da177e4SLinus Torvalds 		break;
28941da177e4SLinus Torvalds #endif
28955228ddc9SRusty Russell 	case TUNSETOFFLOAD:
289688255375SMichał Mirosław 		ret = set_offload(tun, arg);
2897631ab46bSEric W. Biederman 		break;
28985228ddc9SRusty Russell 
2899f271b2ccSMax Krasnyansky 	case TUNSETTXFILTER:
2900f271b2ccSMax Krasnyansky 		/* Can be set only for TAPs */
2901631ab46bSEric W. Biederman 		ret = -EINVAL;
290240630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2903631ab46bSEric W. Biederman 			break;
2904c0e5a8c2SHarvey Harrison 		ret = update_filter(&tun->txflt, (void __user *)arg);
2905631ab46bSEric W. Biederman 		break;
29061da177e4SLinus Torvalds 
29071da177e4SLinus Torvalds 	case SIOCGIFHWADDR:
2908b595076aSUwe Kleine-König 		/* Get hw address */
2909f271b2ccSMax Krasnyansky 		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
2910f271b2ccSMax Krasnyansky 		ifr.ifr_hwaddr.sa_family = tun->dev->type;
291150857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
2912631ab46bSEric W. Biederman 			ret = -EFAULT;
2913631ab46bSEric W. Biederman 		break;
29141da177e4SLinus Torvalds 
29151da177e4SLinus Torvalds 	case SIOCSIFHWADDR:
2916f271b2ccSMax Krasnyansky 		/* Set hw address */
29176b8a66eeSJoe Perches 		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
29186b8a66eeSJoe Perches 			  ifr.ifr_hwaddr.sa_data);
291940102371SKim B. Heino 
292040102371SKim B. Heino 		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
2921631ab46bSEric W. Biederman 		break;
292233dccbb0SHerbert Xu 
292333dccbb0SHerbert Xu 	case TUNGETSNDBUF:
292454f968d6SJason Wang 		sndbuf = tfile->socket.sk->sk_sndbuf;
292533dccbb0SHerbert Xu 		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
292633dccbb0SHerbert Xu 			ret = -EFAULT;
292733dccbb0SHerbert Xu 		break;
292833dccbb0SHerbert Xu 
292933dccbb0SHerbert Xu 	case TUNSETSNDBUF:
293033dccbb0SHerbert Xu 		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
293133dccbb0SHerbert Xu 			ret = -EFAULT;
293233dccbb0SHerbert Xu 			break;
293333dccbb0SHerbert Xu 		}
293493161922SCraig Gallek 		if (sndbuf <= 0) {
293593161922SCraig Gallek 			ret = -EINVAL;
293693161922SCraig Gallek 			break;
293793161922SCraig Gallek 		}
293833dccbb0SHerbert Xu 
2939c8d68e6bSJason Wang 		tun->sndbuf = sndbuf;
2940c8d68e6bSJason Wang 		tun_set_sndbuf(tun);
294133dccbb0SHerbert Xu 		break;
294233dccbb0SHerbert Xu 
2943d9d52b51SMichael S. Tsirkin 	case TUNGETVNETHDRSZ:
2944d9d52b51SMichael S. Tsirkin 		vnet_hdr_sz = tun->vnet_hdr_sz;
2945d9d52b51SMichael S. Tsirkin 		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
2946d9d52b51SMichael S. Tsirkin 			ret = -EFAULT;
2947d9d52b51SMichael S. Tsirkin 		break;
2948d9d52b51SMichael S. Tsirkin 
2949d9d52b51SMichael S. Tsirkin 	case TUNSETVNETHDRSZ:
2950d9d52b51SMichael S. Tsirkin 		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
2951d9d52b51SMichael S. Tsirkin 			ret = -EFAULT;
2952d9d52b51SMichael S. Tsirkin 			break;
2953d9d52b51SMichael S. Tsirkin 		}
2954d9d52b51SMichael S. Tsirkin 		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
2955d9d52b51SMichael S. Tsirkin 			ret = -EINVAL;
2956d9d52b51SMichael S. Tsirkin 			break;
2957d9d52b51SMichael S. Tsirkin 		}
2958d9d52b51SMichael S. Tsirkin 
2959d9d52b51SMichael S. Tsirkin 		tun->vnet_hdr_sz = vnet_hdr_sz;
2960d9d52b51SMichael S. Tsirkin 		break;
2961d9d52b51SMichael S. Tsirkin 
29621cf8e410SMichael S. Tsirkin 	case TUNGETVNETLE:
29631cf8e410SMichael S. Tsirkin 		le = !!(tun->flags & TUN_VNET_LE);
29641cf8e410SMichael S. Tsirkin 		if (put_user(le, (int __user *)argp))
29651cf8e410SMichael S. Tsirkin 			ret = -EFAULT;
29661cf8e410SMichael S. Tsirkin 		break;
29671cf8e410SMichael S. Tsirkin 
29681cf8e410SMichael S. Tsirkin 	case TUNSETVNETLE:
29691cf8e410SMichael S. Tsirkin 		if (get_user(le, (int __user *)argp)) {
29701cf8e410SMichael S. Tsirkin 			ret = -EFAULT;
29711cf8e410SMichael S. Tsirkin 			break;
29721cf8e410SMichael S. Tsirkin 		}
29731cf8e410SMichael S. Tsirkin 		if (le)
29741cf8e410SMichael S. Tsirkin 			tun->flags |= TUN_VNET_LE;
29751cf8e410SMichael S. Tsirkin 		else
29761cf8e410SMichael S. Tsirkin 			tun->flags &= ~TUN_VNET_LE;
29771cf8e410SMichael S. Tsirkin 		break;
29781cf8e410SMichael S. Tsirkin 
29798b8e658bSGreg Kurz 	case TUNGETVNETBE:
29808b8e658bSGreg Kurz 		ret = tun_get_vnet_be(tun, argp);
29818b8e658bSGreg Kurz 		break;
29828b8e658bSGreg Kurz 
29838b8e658bSGreg Kurz 	case TUNSETVNETBE:
29848b8e658bSGreg Kurz 		ret = tun_set_vnet_be(tun, argp);
29858b8e658bSGreg Kurz 		break;
29868b8e658bSGreg Kurz 
298799405162SMichael S. Tsirkin 	case TUNATTACHFILTER:
298899405162SMichael S. Tsirkin 		/* Can be set only for TAPs */
298999405162SMichael S. Tsirkin 		ret = -EINVAL;
299040630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
299199405162SMichael S. Tsirkin 			break;
299299405162SMichael S. Tsirkin 		ret = -EFAULT;
299354f968d6SJason Wang 		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
299499405162SMichael S. Tsirkin 			break;
299599405162SMichael S. Tsirkin 
2996c8d68e6bSJason Wang 		ret = tun_attach_filter(tun);
299799405162SMichael S. Tsirkin 		break;
299899405162SMichael S. Tsirkin 
299999405162SMichael S. Tsirkin 	case TUNDETACHFILTER:
300099405162SMichael S. Tsirkin 		/* Can be set only for TAPs */
300199405162SMichael S. Tsirkin 		ret = -EINVAL;
300240630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
300399405162SMichael S. Tsirkin 			break;
3004c8d68e6bSJason Wang 		ret = 0;
3005c8d68e6bSJason Wang 		tun_detach_filter(tun, tun->numqueues);
300699405162SMichael S. Tsirkin 		break;
300799405162SMichael S. Tsirkin 
300876975e9cSPavel Emelyanov 	case TUNGETFILTER:
300976975e9cSPavel Emelyanov 		ret = -EINVAL;
301040630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
301176975e9cSPavel Emelyanov 			break;
301276975e9cSPavel Emelyanov 		ret = -EFAULT;
301376975e9cSPavel Emelyanov 		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
301476975e9cSPavel Emelyanov 			break;
301576975e9cSPavel Emelyanov 		ret = 0;
301676975e9cSPavel Emelyanov 		break;
301776975e9cSPavel Emelyanov 
301896f84061SJason Wang 	case TUNSETSTEERINGEBPF:
301996f84061SJason Wang 		ret = tun_set_steering_ebpf(tun, argp);
302096f84061SJason Wang 		break;
302196f84061SJason Wang 
30221da177e4SLinus Torvalds 	default:
3023631ab46bSEric W. Biederman 		ret = -EINVAL;
3024631ab46bSEric W. Biederman 		break;
3025ee289b64SJoe Perches 	}
30261da177e4SLinus Torvalds 
3027876bfd4dSHerbert Xu unlock:
3028876bfd4dSHerbert Xu 	rtnl_unlock();
3029876bfd4dSHerbert Xu 	if (tun)
3030631ab46bSEric W. Biederman 		tun_put(tun);
3031631ab46bSEric W. Biederman 	return ret;
30321da177e4SLinus Torvalds }
30331da177e4SLinus Torvalds 
303450857e2aSArnd Bergmann static long tun_chr_ioctl(struct file *file,
303550857e2aSArnd Bergmann 			  unsigned int cmd, unsigned long arg)
303650857e2aSArnd Bergmann {
303750857e2aSArnd Bergmann 	return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
303850857e2aSArnd Bergmann }
303950857e2aSArnd Bergmann 
304050857e2aSArnd Bergmann #ifdef CONFIG_COMPAT
304150857e2aSArnd Bergmann static long tun_chr_compat_ioctl(struct file *file,
304250857e2aSArnd Bergmann 			 unsigned int cmd, unsigned long arg)
304350857e2aSArnd Bergmann {
304450857e2aSArnd Bergmann 	switch (cmd) {
304550857e2aSArnd Bergmann 	case TUNSETIFF:
304650857e2aSArnd Bergmann 	case TUNGETIFF:
304750857e2aSArnd Bergmann 	case TUNSETTXFILTER:
304850857e2aSArnd Bergmann 	case TUNGETSNDBUF:
304950857e2aSArnd Bergmann 	case TUNSETSNDBUF:
305050857e2aSArnd Bergmann 	case SIOCGIFHWADDR:
305150857e2aSArnd Bergmann 	case SIOCSIFHWADDR:
305250857e2aSArnd Bergmann 		arg = (unsigned long)compat_ptr(arg);
305350857e2aSArnd Bergmann 		break;
305450857e2aSArnd Bergmann 	default:
305550857e2aSArnd Bergmann 		arg = (compat_ulong_t)arg;
305650857e2aSArnd Bergmann 		break;
305750857e2aSArnd Bergmann 	}
305850857e2aSArnd Bergmann 
305950857e2aSArnd Bergmann 	/*
306050857e2aSArnd Bergmann 	 * compat_ifreq is shorter than ifreq, so we must not access beyond
306150857e2aSArnd Bergmann 	 * the end of that structure. All fields that are used in this
306250857e2aSArnd Bergmann 	 * driver are compatible though, we don't need to convert the
306350857e2aSArnd Bergmann 	 * contents.
306450857e2aSArnd Bergmann 	 */
306550857e2aSArnd Bergmann 	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
306650857e2aSArnd Bergmann }
306750857e2aSArnd Bergmann #endif /* CONFIG_COMPAT */
306850857e2aSArnd Bergmann 
30691da177e4SLinus Torvalds static int tun_chr_fasync(int fd, struct file *file, int on)
30701da177e4SLinus Torvalds {
307154f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
30721da177e4SLinus Torvalds 	int ret;
30731da177e4SLinus Torvalds 
307454f968d6SJason Wang 	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
30759d319522SJonathan Corbet 		goto out;
30761da177e4SLinus Torvalds 
30771da177e4SLinus Torvalds 	if (on) {
3078e0b93eddSJeff Layton 		__f_setown(file, task_pid(current), PIDTYPE_PID, 0);
307954f968d6SJason Wang 		tfile->flags |= TUN_FASYNC;
30801da177e4SLinus Torvalds 	} else
308154f968d6SJason Wang 		tfile->flags &= ~TUN_FASYNC;
30829d319522SJonathan Corbet 	ret = 0;
30839d319522SJonathan Corbet out:
30849d319522SJonathan Corbet 	return ret;
30851da177e4SLinus Torvalds }
30861da177e4SLinus Torvalds 
30871da177e4SLinus Torvalds static int tun_chr_open(struct inode *inode, struct file * file)
30881da177e4SLinus Torvalds {
3089140e807dSEric W. Biederman 	struct net *net = current->nsproxy->net_ns;
3090631ab46bSEric W. Biederman 	struct tun_file *tfile;
3091deed49fbSThomas Gleixner 
30926b8a66eeSJoe Perches 	DBG1(KERN_INFO, "tunX: tun_chr_open\n");
3093631ab46bSEric W. Biederman 
3094140e807dSEric W. Biederman 	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
309511aa9c28SEric W. Biederman 					    &tun_proto, 0);
3096631ab46bSEric W. Biederman 	if (!tfile)
3097631ab46bSEric W. Biederman 		return -ENOMEM;
3098c956674bSMonam Agarwal 	RCU_INIT_POINTER(tfile->tun, NULL);
309954f968d6SJason Wang 	tfile->flags = 0;
3100fb7589a1SPavel Emelyanov 	tfile->ifindex = 0;
310154f968d6SJason Wang 
310254f968d6SJason Wang 	init_waitqueue_head(&tfile->wq.wait);
31039e641bdcSXi Wang 	RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);
310454f968d6SJason Wang 
310554f968d6SJason Wang 	tfile->socket.file = file;
310654f968d6SJason Wang 	tfile->socket.ops = &tun_socket_ops;
310754f968d6SJason Wang 
310854f968d6SJason Wang 	sock_init_data(&tfile->socket, &tfile->sk);
310954f968d6SJason Wang 
311054f968d6SJason Wang 	tfile->sk.sk_write_space = tun_sock_write_space;
311154f968d6SJason Wang 	tfile->sk.sk_sndbuf = INT_MAX;
311254f968d6SJason Wang 
3113631ab46bSEric W. Biederman 	file->private_data = tfile;
31144008e97fSJason Wang 	INIT_LIST_HEAD(&tfile->next);
311554f968d6SJason Wang 
311619a6afb2SJason Wang 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
311719a6afb2SJason Wang 
31181da177e4SLinus Torvalds 	return 0;
31191da177e4SLinus Torvalds }
31201da177e4SLinus Torvalds 
31211da177e4SLinus Torvalds static int tun_chr_close(struct inode *inode, struct file *file)
31221da177e4SLinus Torvalds {
3123631ab46bSEric W. Biederman 	struct tun_file *tfile = file->private_data;
31241da177e4SLinus Torvalds 
3125c8d68e6bSJason Wang 	tun_detach(tfile, true);
31261da177e4SLinus Torvalds 
31271da177e4SLinus Torvalds 	return 0;
31281da177e4SLinus Torvalds }
31291da177e4SLinus Torvalds 
313093e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS
31319484dc74Syuan linyu static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
313293e14b6dSMasatake YAMATO {
31339484dc74Syuan linyu 	struct tun_file *tfile = file->private_data;
313493e14b6dSMasatake YAMATO 	struct tun_struct *tun;
313593e14b6dSMasatake YAMATO 	struct ifreq ifr;
313693e14b6dSMasatake YAMATO 
313793e14b6dSMasatake YAMATO 	memset(&ifr, 0, sizeof(ifr));
313893e14b6dSMasatake YAMATO 
313993e14b6dSMasatake YAMATO 	rtnl_lock();
31409484dc74Syuan linyu 	tun = tun_get(tfile);
314193e14b6dSMasatake YAMATO 	if (tun)
314293e14b6dSMasatake YAMATO 		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
314393e14b6dSMasatake YAMATO 	rtnl_unlock();
314493e14b6dSMasatake YAMATO 
314593e14b6dSMasatake YAMATO 	if (tun)
314693e14b6dSMasatake YAMATO 		tun_put(tun);
314793e14b6dSMasatake YAMATO 
3148a3816ab0SJoe Perches 	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
314993e14b6dSMasatake YAMATO }
315093e14b6dSMasatake YAMATO #endif
315193e14b6dSMasatake YAMATO 
3152d54b1fdbSArjan van de Ven static const struct file_operations tun_fops = {
31531da177e4SLinus Torvalds 	.owner	= THIS_MODULE,
31541da177e4SLinus Torvalds 	.llseek = no_llseek,
31559b067034SAl Viro 	.read_iter  = tun_chr_read_iter,
3156f5ff53b4SAl Viro 	.write_iter = tun_chr_write_iter,
31571da177e4SLinus Torvalds 	.poll	= tun_chr_poll,
3158876bfd4dSHerbert Xu 	.unlocked_ioctl	= tun_chr_ioctl,
315950857e2aSArnd Bergmann #ifdef CONFIG_COMPAT
316050857e2aSArnd Bergmann 	.compat_ioctl = tun_chr_compat_ioctl,
316150857e2aSArnd Bergmann #endif
31621da177e4SLinus Torvalds 	.open	= tun_chr_open,
31631da177e4SLinus Torvalds 	.release = tun_chr_close,
316493e14b6dSMasatake YAMATO 	.fasync = tun_chr_fasync,
316593e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS
316693e14b6dSMasatake YAMATO 	.show_fdinfo = tun_chr_show_fdinfo,
316793e14b6dSMasatake YAMATO #endif
31681da177e4SLinus Torvalds };
31691da177e4SLinus Torvalds 
31701da177e4SLinus Torvalds static struct miscdevice tun_miscdev = {
31711da177e4SLinus Torvalds 	.minor = TUN_MINOR,
31721da177e4SLinus Torvalds 	.name = "tun",
3173e454cea2SKay Sievers 	.nodename = "net/tun",
31741da177e4SLinus Torvalds 	.fops = &tun_fops,
31751da177e4SLinus Torvalds };
31761da177e4SLinus Torvalds 
31771da177e4SLinus Torvalds /* ethtool interface */
31781da177e4SLinus Torvalds 
317929ccc49dSPhilippe Reynes static int tun_get_link_ksettings(struct net_device *dev,
318029ccc49dSPhilippe Reynes 				  struct ethtool_link_ksettings *cmd)
31811da177e4SLinus Torvalds {
318229ccc49dSPhilippe Reynes 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
318329ccc49dSPhilippe Reynes 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
318429ccc49dSPhilippe Reynes 	cmd->base.speed		= SPEED_10;
318529ccc49dSPhilippe Reynes 	cmd->base.duplex	= DUPLEX_FULL;
318629ccc49dSPhilippe Reynes 	cmd->base.port		= PORT_TP;
318729ccc49dSPhilippe Reynes 	cmd->base.phy_address	= 0;
318829ccc49dSPhilippe Reynes 	cmd->base.autoneg	= AUTONEG_DISABLE;
31891da177e4SLinus Torvalds 	return 0;
31901da177e4SLinus Torvalds }
31911da177e4SLinus Torvalds 
31921da177e4SLinus Torvalds static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
31931da177e4SLinus Torvalds {
31941da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
31951da177e4SLinus Torvalds 
319633a5ba14SRick Jones 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
319733a5ba14SRick Jones 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
31981da177e4SLinus Torvalds 
31991da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
320040630b82SMichael S. Tsirkin 	case IFF_TUN:
320133a5ba14SRick Jones 		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
32021da177e4SLinus Torvalds 		break;
320340630b82SMichael S. Tsirkin 	case IFF_TAP:
320433a5ba14SRick Jones 		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
32051da177e4SLinus Torvalds 		break;
32061da177e4SLinus Torvalds 	}
32071da177e4SLinus Torvalds }
32081da177e4SLinus Torvalds 
32091da177e4SLinus Torvalds static u32 tun_get_msglevel(struct net_device *dev)
32101da177e4SLinus Torvalds {
32111da177e4SLinus Torvalds #ifdef TUN_DEBUG
32121da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
32131da177e4SLinus Torvalds 	return tun->debug;
32141da177e4SLinus Torvalds #else
32151da177e4SLinus Torvalds 	return -EOPNOTSUPP;
32161da177e4SLinus Torvalds #endif
32171da177e4SLinus Torvalds }
32181da177e4SLinus Torvalds 
32191da177e4SLinus Torvalds static void tun_set_msglevel(struct net_device *dev, u32 value)
32201da177e4SLinus Torvalds {
32211da177e4SLinus Torvalds #ifdef TUN_DEBUG
32221da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
32231da177e4SLinus Torvalds 	tun->debug = value;
32241da177e4SLinus Torvalds #endif
32251da177e4SLinus Torvalds }
32261da177e4SLinus Torvalds 
32275503fcecSJason Wang static int tun_get_coalesce(struct net_device *dev,
32285503fcecSJason Wang 			    struct ethtool_coalesce *ec)
32295503fcecSJason Wang {
32305503fcecSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
32315503fcecSJason Wang 
32325503fcecSJason Wang 	ec->rx_max_coalesced_frames = tun->rx_batched;
32335503fcecSJason Wang 
32345503fcecSJason Wang 	return 0;
32355503fcecSJason Wang }
32365503fcecSJason Wang 
32375503fcecSJason Wang static int tun_set_coalesce(struct net_device *dev,
32385503fcecSJason Wang 			    struct ethtool_coalesce *ec)
32395503fcecSJason Wang {
32405503fcecSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
32415503fcecSJason Wang 
32425503fcecSJason Wang 	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
32435503fcecSJason Wang 		tun->rx_batched = NAPI_POLL_WEIGHT;
32445503fcecSJason Wang 	else
32455503fcecSJason Wang 		tun->rx_batched = ec->rx_max_coalesced_frames;
32465503fcecSJason Wang 
32475503fcecSJason Wang 	return 0;
32485503fcecSJason Wang }
32495503fcecSJason Wang 
32507282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops = {
32511da177e4SLinus Torvalds 	.get_drvinfo	= tun_get_drvinfo,
32521da177e4SLinus Torvalds 	.get_msglevel	= tun_get_msglevel,
32531da177e4SLinus Torvalds 	.set_msglevel	= tun_set_msglevel,
3254bee31369SNolan Leake 	.get_link	= ethtool_op_get_link,
3255eda29772SRichard Cochran 	.get_ts_info	= ethtool_op_get_ts_info,
32565503fcecSJason Wang 	.get_coalesce   = tun_get_coalesce,
32575503fcecSJason Wang 	.set_coalesce   = tun_set_coalesce,
325829ccc49dSPhilippe Reynes 	.get_link_ksettings = tun_get_link_ksettings,
32591da177e4SLinus Torvalds };
32601da177e4SLinus Torvalds 
32611576d986SJason Wang static int tun_queue_resize(struct tun_struct *tun)
32621576d986SJason Wang {
32631576d986SJason Wang 	struct net_device *dev = tun->dev;
32641576d986SJason Wang 	struct tun_file *tfile;
32655990a305SJason Wang 	struct ptr_ring **rings;
32661576d986SJason Wang 	int n = tun->numqueues + tun->numdisabled;
32671576d986SJason Wang 	int ret, i;
32681576d986SJason Wang 
32695990a305SJason Wang 	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
32705990a305SJason Wang 	if (!rings)
32711576d986SJason Wang 		return -ENOMEM;
32721576d986SJason Wang 
32731576d986SJason Wang 	for (i = 0; i < tun->numqueues; i++) {
32741576d986SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
32755990a305SJason Wang 		rings[i] = &tfile->tx_ring;
32761576d986SJason Wang 	}
32771576d986SJason Wang 	list_for_each_entry(tfile, &tun->disabled, next)
32785990a305SJason Wang 		rings[i++] = &tfile->tx_ring;
32791576d986SJason Wang 
32805990a305SJason Wang 	ret = ptr_ring_resize_multiple(rings, n,
32815990a305SJason Wang 				       dev->tx_queue_len, GFP_KERNEL,
3282*fc72d1d5SJason Wang 				       tun_ptr_free);
32831576d986SJason Wang 
32845990a305SJason Wang 	kfree(rings);
32851576d986SJason Wang 	return ret;
32861576d986SJason Wang }
32871576d986SJason Wang 
32881576d986SJason Wang static int tun_device_event(struct notifier_block *unused,
32891576d986SJason Wang 			    unsigned long event, void *ptr)
32901576d986SJason Wang {
32911576d986SJason Wang 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
32921576d986SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
32931576d986SJason Wang 
329486dfb4acSCraig Gallek 	if (dev->rtnl_link_ops != &tun_link_ops)
329586dfb4acSCraig Gallek 		return NOTIFY_DONE;
329686dfb4acSCraig Gallek 
32971576d986SJason Wang 	switch (event) {
32981576d986SJason Wang 	case NETDEV_CHANGE_TX_QUEUE_LEN:
32991576d986SJason Wang 		if (tun_queue_resize(tun))
33001576d986SJason Wang 			return NOTIFY_BAD;
33011576d986SJason Wang 		break;
33021576d986SJason Wang 	default:
33031576d986SJason Wang 		break;
33041576d986SJason Wang 	}
33051576d986SJason Wang 
33061576d986SJason Wang 	return NOTIFY_DONE;
33071576d986SJason Wang }
33081576d986SJason Wang 
33091576d986SJason Wang static struct notifier_block tun_notifier_block __read_mostly = {
33101576d986SJason Wang 	.notifier_call	= tun_device_event,
33111576d986SJason Wang };
331279d17604SPavel Emelyanov 
33131da177e4SLinus Torvalds static int __init tun_init(void)
33141da177e4SLinus Torvalds {
33151da177e4SLinus Torvalds 	int ret = 0;
33161da177e4SLinus Torvalds 
33176b8a66eeSJoe Perches 	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
33181da177e4SLinus Torvalds 
3319f019a7a5SEric W. Biederman 	ret = rtnl_link_register(&tun_link_ops);
332079d17604SPavel Emelyanov 	if (ret) {
33216b8a66eeSJoe Perches 		pr_err("Can't register link_ops\n");
3322f019a7a5SEric W. Biederman 		goto err_linkops;
332379d17604SPavel Emelyanov 	}
332479d17604SPavel Emelyanov 
33251da177e4SLinus Torvalds 	ret = misc_register(&tun_miscdev);
332679d17604SPavel Emelyanov 	if (ret) {
33276b8a66eeSJoe Perches 		pr_err("Can't register misc device %d\n", TUN_MINOR);
332879d17604SPavel Emelyanov 		goto err_misc;
332979d17604SPavel Emelyanov 	}
33301576d986SJason Wang 
33315edfbd3cSTonghao Zhang 	ret = register_netdevice_notifier(&tun_notifier_block);
33325edfbd3cSTonghao Zhang 	if (ret) {
33335edfbd3cSTonghao Zhang 		pr_err("Can't register netdevice notifier\n");
33345edfbd3cSTonghao Zhang 		goto err_notifier;
33355edfbd3cSTonghao Zhang 	}
33365edfbd3cSTonghao Zhang 
333779d17604SPavel Emelyanov 	return  0;
33385edfbd3cSTonghao Zhang 
33395edfbd3cSTonghao Zhang err_notifier:
33405edfbd3cSTonghao Zhang 	misc_deregister(&tun_miscdev);
334179d17604SPavel Emelyanov err_misc:
3342f019a7a5SEric W. Biederman 	rtnl_link_unregister(&tun_link_ops);
3343f019a7a5SEric W. Biederman err_linkops:
33441da177e4SLinus Torvalds 	return ret;
33451da177e4SLinus Torvalds }
33461da177e4SLinus Torvalds 
33471da177e4SLinus Torvalds static void tun_cleanup(void)
33481da177e4SLinus Torvalds {
33491da177e4SLinus Torvalds 	misc_deregister(&tun_miscdev);
3350f019a7a5SEric W. Biederman 	rtnl_link_unregister(&tun_link_ops);
33511576d986SJason Wang 	unregister_netdevice_notifier(&tun_notifier_block);
33521da177e4SLinus Torvalds }
33531da177e4SLinus Torvalds 
335405c2828cSMichael S. Tsirkin /* Get an underlying socket object from tun file.  Returns error unless file is
335605c2828cSMichael S. Tsirkin  * attached to a device.  The returned object works like a packet socket; it
335605c2828cSMichael S. Tsirkin  * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
335705c2828cSMichael S. Tsirkin  * holding a reference to the file for as long as the socket is in use. */
335805c2828cSMichael S. Tsirkin struct socket *tun_get_socket(struct file *file)
335905c2828cSMichael S. Tsirkin {
33606e914fc7SJason Wang 	struct tun_file *tfile;
336105c2828cSMichael S. Tsirkin 	if (file->f_op != &tun_fops)
336205c2828cSMichael S. Tsirkin 		return ERR_PTR(-EINVAL);
33636e914fc7SJason Wang 	tfile = file->private_data;
33646e914fc7SJason Wang 	if (!tfile)
336505c2828cSMichael S. Tsirkin 		return ERR_PTR(-EBADFD);
336654f968d6SJason Wang 	return &tfile->socket;
336705c2828cSMichael S. Tsirkin }
336805c2828cSMichael S. Tsirkin EXPORT_SYMBOL_GPL(tun_get_socket);
336905c2828cSMichael S. Tsirkin 
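/*
 * Illustrative in-kernel sketch, not part of this file: roughly how a
 * consumer such as vhost-net turns a tun fd handed in from userspace
 * into the socket above.  Names are examples, not the vhost code.
 */
#if 0	/* example only, never built as part of tun.c */
static struct socket *example_get_tun_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);

	sock = tun_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	/* On success the caller keeps 'file' pinned for as long as it
	 * uses 'sock' (sock_sendmsg()/sock_recvmsg()), then fput()s it.
	 */
	return sock;
}
#endif
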
33705990a305SJason Wang struct ptr_ring *tun_get_tx_ring(struct file *file)
337183339c6bSJason Wang {
337283339c6bSJason Wang 	struct tun_file *tfile;
337383339c6bSJason Wang 
337483339c6bSJason Wang 	if (file->f_op != &tun_fops)
337583339c6bSJason Wang 		return ERR_PTR(-EINVAL);
337683339c6bSJason Wang 	tfile = file->private_data;
337783339c6bSJason Wang 	if (!tfile)
337883339c6bSJason Wang 		return ERR_PTR(-EBADFD);
33795990a305SJason Wang 	return &tfile->tx_ring;
338083339c6bSJason Wang }
33815990a305SJason Wang EXPORT_SYMBOL_GPL(tun_get_tx_ring);
338283339c6bSJason Wang 
33831da177e4SLinus Torvalds module_init(tun_init);
33841da177e4SLinus Torvalds module_exit(tun_cleanup);
33851da177e4SLinus Torvalds MODULE_DESCRIPTION(DRV_DESCRIPTION);
33861da177e4SLinus Torvalds MODULE_AUTHOR(DRV_COPYRIGHT);
33871da177e4SLinus Torvalds MODULE_LICENSE("GPL");
33881da177e4SLinus Torvalds MODULE_ALIAS_MISCDEV(TUN_MINOR);
3389578454ffSKay Sievers MODULE_ALIAS("devname:net/tun");
3390