xref: /openbmc/linux/drivers/net/tun.c (revision aff3d70a07fffc0abb53663e4a4acb059d2f36af)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  *  TUN - Universal TUN/TAP device driver.
31da177e4SLinus Torvalds  *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  *  This program is free software; you can redistribute it and/or modify
61da177e4SLinus Torvalds  *  it under the terms of the GNU General Public License as published by
71da177e4SLinus Torvalds  *  the Free Software Foundation; either version 2 of the License, or
81da177e4SLinus Torvalds  *  (at your option) any later version.
91da177e4SLinus Torvalds  *
101da177e4SLinus Torvalds  *  This program is distributed in the hope that it will be useful,
111da177e4SLinus Torvalds  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
121da177e4SLinus Torvalds  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
131da177e4SLinus Torvalds  *  GNU General Public License for more details.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
161da177e4SLinus Torvalds  */
171da177e4SLinus Torvalds 
181da177e4SLinus Torvalds /*
191da177e4SLinus Torvalds  *  Changes:
201da177e4SLinus Torvalds  *
21ff4cc3acSMike Kershaw  *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
22ff4cc3acSMike Kershaw  *    Add TUNSETLINK ioctl to set the link encapsulation
23ff4cc3acSMike Kershaw  *
241da177e4SLinus Torvalds  *  Mark Smith <markzzzsmith@yahoo.com.au>
25344dc8edSJoe Perches  *    Use eth_random_addr() for tap MAC address.
261da177e4SLinus Torvalds  *
271da177e4SLinus Torvalds  *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
281da177e4SLinus Torvalds  *    Fixes in packet dropping, queue length setting and queue wakeup.
291da177e4SLinus Torvalds  *    Increased default tx queue length.
301da177e4SLinus Torvalds  *    Added ethtool API.
311da177e4SLinus Torvalds  *    Minor cleanups
321da177e4SLinus Torvalds  *
331da177e4SLinus Torvalds  *  Daniel Podlejski <underley@underley.eu.org>
341da177e4SLinus Torvalds  *    Modifications for 2.3.99-pre5 kernel.
351da177e4SLinus Torvalds  */
361da177e4SLinus Torvalds 
376b8a66eeSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
386b8a66eeSJoe Perches 
391da177e4SLinus Torvalds #define DRV_NAME	"tun"
401da177e4SLinus Torvalds #define DRV_VERSION	"1.6"
411da177e4SLinus Torvalds #define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
421da177e4SLinus Torvalds #define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
431da177e4SLinus Torvalds 
441da177e4SLinus Torvalds #include <linux/module.h>
451da177e4SLinus Torvalds #include <linux/errno.h>
461da177e4SLinus Torvalds #include <linux/kernel.h>
47174cd4b1SIngo Molnar #include <linux/sched/signal.h>
481da177e4SLinus Torvalds #include <linux/major.h>
491da177e4SLinus Torvalds #include <linux/slab.h>
501da177e4SLinus Torvalds #include <linux/poll.h>
511da177e4SLinus Torvalds #include <linux/fcntl.h>
521da177e4SLinus Torvalds #include <linux/init.h>
531da177e4SLinus Torvalds #include <linux/skbuff.h>
541da177e4SLinus Torvalds #include <linux/netdevice.h>
551da177e4SLinus Torvalds #include <linux/etherdevice.h>
561da177e4SLinus Torvalds #include <linux/miscdevice.h>
571da177e4SLinus Torvalds #include <linux/ethtool.h>
581da177e4SLinus Torvalds #include <linux/rtnetlink.h>
5950857e2aSArnd Bergmann #include <linux/compat.h>
601da177e4SLinus Torvalds #include <linux/if.h>
611da177e4SLinus Torvalds #include <linux/if_arp.h>
621da177e4SLinus Torvalds #include <linux/if_ether.h>
631da177e4SLinus Torvalds #include <linux/if_tun.h>
646680ec68SJason Wang #include <linux/if_vlan.h>
651da177e4SLinus Torvalds #include <linux/crc32.h>
66d647a591SPavel Emelyanov #include <linux/nsproxy.h>
67f43798c2SRusty Russell #include <linux/virtio_net.h>
6899405162SMichael S. Tsirkin #include <linux/rcupdate.h>
69881d966bSEric W. Biederman #include <net/net_namespace.h>
7079d17604SPavel Emelyanov #include <net/netns/generic.h>
71f019a7a5SEric W. Biederman #include <net/rtnetlink.h>
7233dccbb0SHerbert Xu #include <net/sock.h>
7393e14b6dSMasatake YAMATO #include <linux/seq_file.h>
74e0b46d0eSHerbert Xu #include <linux/uio.h>
751576d986SJason Wang #include <linux/skb_array.h>
76761876c8SJason Wang #include <linux/bpf.h>
77761876c8SJason Wang #include <linux/bpf_trace.h>
7890e33d45SPetar Penkov #include <linux/mutex.h>
791da177e4SLinus Torvalds 
807c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
811da177e4SLinus Torvalds 
8214daa021SRusty Russell /* Uncomment to enable debugging */
8314daa021SRusty Russell /* #define TUN_DEBUG 1 */
8414daa021SRusty Russell 
851da177e4SLinus Torvalds #ifdef TUN_DEBUG
861da177e4SLinus Torvalds static int debug;
8714daa021SRusty Russell 
886b8a66eeSJoe Perches #define tun_debug(level, tun, fmt, args...)			\
896b8a66eeSJoe Perches do {								\
906b8a66eeSJoe Perches 	if (tun->debug)						\
916b8a66eeSJoe Perches 		netdev_printk(level, tun->dev, fmt, ##args);	\
926b8a66eeSJoe Perches } while (0)
936b8a66eeSJoe Perches #define DBG1(level, fmt, args...)				\
946b8a66eeSJoe Perches do {								\
956b8a66eeSJoe Perches 	if (debug == 2)						\
966b8a66eeSJoe Perches 		printk(level fmt, ##args);			\
976b8a66eeSJoe Perches } while (0)
9814daa021SRusty Russell #else
996b8a66eeSJoe Perches #define tun_debug(level, tun, fmt, args...)			\
1006b8a66eeSJoe Perches do {								\
1016b8a66eeSJoe Perches 	if (0)							\
1026b8a66eeSJoe Perches 		netdev_printk(level, tun->dev, fmt, ##args);	\
1036b8a66eeSJoe Perches } while (0)
1046b8a66eeSJoe Perches #define DBG1(level, fmt, args...)				\
1056b8a66eeSJoe Perches do {								\
1066b8a66eeSJoe Perches 	if (0)							\
1076b8a66eeSJoe Perches 		printk(level fmt, ##args);			\
1086b8a66eeSJoe Perches } while (0)
1091da177e4SLinus Torvalds #endif
1101da177e4SLinus Torvalds 
111761876c8SJason Wang #define TUN_HEADROOM 256
1127df13219SJason Wang #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
11366ccbc9cSJason Wang 
114031f5e03SMichael S. Tsirkin /* TUN device flags */
115031f5e03SMichael S. Tsirkin 
116031f5e03SMichael S. Tsirkin /* IFF_ATTACH_QUEUE is never stored in device flags,
117031f5e03SMichael S. Tsirkin  * overload it to mean fasync when stored there.
118031f5e03SMichael S. Tsirkin  */
119031f5e03SMichael S. Tsirkin #define TUN_FASYNC	IFF_ATTACH_QUEUE
1201cf8e410SMichael S. Tsirkin /* High bits in flags field are unused. */
1211cf8e410SMichael S. Tsirkin #define TUN_VNET_LE     0x80000000
1228b8e658bSGreg Kurz #define TUN_VNET_BE     0x40000000
123031f5e03SMichael S. Tsirkin 
124031f5e03SMichael S. Tsirkin #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
12590e33d45SPetar Penkov 		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
12690e33d45SPetar Penkov 
1270690899bSMichael S. Tsirkin #define GOODCOPY_LEN 128
1280690899bSMichael S. Tsirkin 
129f271b2ccSMax Krasnyansky #define FLT_EXACT_COUNT 8
130f271b2ccSMax Krasnyansky struct tap_filter {
131f271b2ccSMax Krasnyansky 	unsigned int    count;    /* Number of addrs. Zero means disabled */
132f271b2ccSMax Krasnyansky 	u32             mask[2];  /* Mask of the hashed addrs */
133f271b2ccSMax Krasnyansky 	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
134f271b2ccSMax Krasnyansky };
135f271b2ccSMax Krasnyansky 
136baf71c5cSPankaj Gupta /* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
137baf71c5cSPankaj Gupta  * to max number of VCPUs in guest. */
138baf71c5cSPankaj Gupta #define MAX_TAP_QUEUES 256
139b8732fb7SJason Wang #define MAX_TAP_FLOWS  4096
140c8d68e6bSJason Wang 
14196442e42SJason Wang #define TUN_FLOW_EXPIRE (3 * HZ)
14296442e42SJason Wang 
143608b9977SPaolo Abeni struct tun_pcpu_stats {
144608b9977SPaolo Abeni 	u64 rx_packets;
145608b9977SPaolo Abeni 	u64 rx_bytes;
146608b9977SPaolo Abeni 	u64 tx_packets;
147608b9977SPaolo Abeni 	u64 tx_bytes;
148608b9977SPaolo Abeni 	struct u64_stats_sync syncp;
149608b9977SPaolo Abeni 	u32 rx_dropped;
150608b9977SPaolo Abeni 	u32 tx_dropped;
151608b9977SPaolo Abeni 	u32 rx_frame_errors;
152608b9977SPaolo Abeni };
153608b9977SPaolo Abeni 
15454f968d6SJason Wang /* A tun_file connects an open character device to a tuntap netdevice. It
15592d4ea6eSstephen hemminger  * also contains all socket related structures (except sock_fprog and tap_filter)
15654f968d6SJason Wang  * to serve as one transmit queue for tuntap device. The sock_fprog and
15754f968d6SJason Wang  * tap_filter were kept in tun_struct since they were used for filtering for the
15836fe8c09SRami Rosen  * netdevice not for a specific queue (at least I didn't see the requirement for
15954f968d6SJason Wang  * this).
1606e914fc7SJason Wang  *
1616e914fc7SJason Wang  * RCU usage:
16236fe8c09SRami Rosen  * The tun_file and tun_struct are loosely coupled, the pointer from one to the
1636e914fc7SJason Wang  * other can only be read while rcu_read_lock or rtnl_lock is held.
16454f968d6SJason Wang  */
165631ab46bSEric W. Biederman struct tun_file {
16654f968d6SJason Wang 	struct sock sk;
16754f968d6SJason Wang 	struct socket socket;
16854f968d6SJason Wang 	struct socket_wq wq;
1696e914fc7SJason Wang 	struct tun_struct __rcu *tun;
17054f968d6SJason Wang 	struct fasync_struct *fasync;
17154f968d6SJason Wang 	/* only used for fasnyc */
17254f968d6SJason Wang 	unsigned int flags;
173fb7589a1SPavel Emelyanov 	union {
174c8d68e6bSJason Wang 		u16 queue_index;
175fb7589a1SPavel Emelyanov 		unsigned int ifindex;
176fb7589a1SPavel Emelyanov 	};
17794317099SPetar Penkov 	struct napi_struct napi;
178aec72f33SEric Dumazet 	bool napi_enabled;
17990e33d45SPetar Penkov 	struct mutex napi_mutex;	/* Protects access to the above napi */
1804008e97fSJason Wang 	struct list_head next;
1814008e97fSJason Wang 	struct tun_struct *detached;
1825990a305SJason Wang 	struct ptr_ring tx_ring;
1838bf5c4eeSJesper Dangaard Brouer 	struct xdp_rxq_info xdp_rxq;
184631ab46bSEric W. Biederman };
185631ab46bSEric W. Biederman 
18696442e42SJason Wang struct tun_flow_entry {
18796442e42SJason Wang 	struct hlist_node hash_link;
18896442e42SJason Wang 	struct rcu_head rcu;
18996442e42SJason Wang 	struct tun_struct *tun;
19096442e42SJason Wang 
19196442e42SJason Wang 	u32 rxhash;
1929bc88939STom Herbert 	u32 rps_rxhash;
19396442e42SJason Wang 	int queue_index;
19496442e42SJason Wang 	unsigned long updated;
19596442e42SJason Wang };
19696442e42SJason Wang 
19796442e42SJason Wang #define TUN_NUM_FLOW_ENTRIES 1024
19896442e42SJason Wang 
199cd5681d7SJason Wang struct tun_prog {
20096f84061SJason Wang 	struct rcu_head rcu;
20196f84061SJason Wang 	struct bpf_prog *prog;
20296f84061SJason Wang };
20396f84061SJason Wang 
20454f968d6SJason Wang /* Since the socket were moved to tun_file, to preserve the behavior of persist
20536fe8c09SRami Rosen  * device, socket filter, sndbuf and vnet header size were restore when the
20654f968d6SJason Wang  * file were attached to a persist device.
20754f968d6SJason Wang  */
20814daa021SRusty Russell struct tun_struct {
209c8d68e6bSJason Wang 	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
210c8d68e6bSJason Wang 	unsigned int            numqueues;
211f271b2ccSMax Krasnyansky 	unsigned int 		flags;
2120625c883SEric W. Biederman 	kuid_t			owner;
2130625c883SEric W. Biederman 	kgid_t			group;
21414daa021SRusty Russell 
21514daa021SRusty Russell 	struct net_device	*dev;
216c8f44affSMichał Mirosław 	netdev_features_t	set_features;
21788255375SMichał Mirosław #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
218d591a1f3SDavid S. Miller 			  NETIF_F_TSO6)
219d9d52b51SMichael S. Tsirkin 
220eaea34b2SPaolo Abeni 	int			align;
221d9d52b51SMichael S. Tsirkin 	int			vnet_hdr_sz;
22254f968d6SJason Wang 	int			sndbuf;
22354f968d6SJason Wang 	struct tap_filter	txflt;
22454f968d6SJason Wang 	struct sock_fprog	fprog;
22554f968d6SJason Wang 	/* protected by rtnl lock */
22654f968d6SJason Wang 	bool			filter_attached;
22714daa021SRusty Russell #ifdef TUN_DEBUG
22814daa021SRusty Russell 	int debug;
22914daa021SRusty Russell #endif
23096442e42SJason Wang 	spinlock_t lock;
23196442e42SJason Wang 	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
23296442e42SJason Wang 	struct timer_list flow_gc_timer;
23396442e42SJason Wang 	unsigned long ageing_time;
2344008e97fSJason Wang 	unsigned int numdisabled;
2354008e97fSJason Wang 	struct list_head disabled;
2365dbbaf2dSPaul Moore 	void *security;
237b8732fb7SJason Wang 	u32 flow_count;
2385503fcecSJason Wang 	u32 rx_batched;
239608b9977SPaolo Abeni 	struct tun_pcpu_stats __percpu *pcpu_stats;
240761876c8SJason Wang 	struct bpf_prog __rcu *xdp_prog;
241cd5681d7SJason Wang 	struct tun_prog __rcu *steering_prog;
242*aff3d70aSJason Wang 	struct tun_prog __rcu *filter_prog;
243*aff3d70aSJason Wang };
244*aff3d70aSJason Wang 
245*aff3d70aSJason Wang struct veth {
246*aff3d70aSJason Wang 	__be16 h_vlan_proto;
247*aff3d70aSJason Wang 	__be16 h_vlan_TCI;
24814daa021SRusty Russell };
24914daa021SRusty Russell 
250fc72d1d5SJason Wang bool tun_is_xdp_buff(void *ptr)
251fc72d1d5SJason Wang {
252fc72d1d5SJason Wang 	return (unsigned long)ptr & TUN_XDP_FLAG;
253fc72d1d5SJason Wang }
254fc72d1d5SJason Wang EXPORT_SYMBOL(tun_is_xdp_buff);
255fc72d1d5SJason Wang 
256fc72d1d5SJason Wang void *tun_xdp_to_ptr(void *ptr)
257fc72d1d5SJason Wang {
258fc72d1d5SJason Wang 	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
259fc72d1d5SJason Wang }
260fc72d1d5SJason Wang EXPORT_SYMBOL(tun_xdp_to_ptr);
261fc72d1d5SJason Wang 
262fc72d1d5SJason Wang void *tun_ptr_to_xdp(void *ptr)
263fc72d1d5SJason Wang {
264fc72d1d5SJason Wang 	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
265fc72d1d5SJason Wang }
266fc72d1d5SJason Wang EXPORT_SYMBOL(tun_ptr_to_xdp);
267fc72d1d5SJason Wang 
26894317099SPetar Penkov static int tun_napi_receive(struct napi_struct *napi, int budget)
26994317099SPetar Penkov {
27094317099SPetar Penkov 	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
27194317099SPetar Penkov 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
27294317099SPetar Penkov 	struct sk_buff_head process_queue;
27394317099SPetar Penkov 	struct sk_buff *skb;
27494317099SPetar Penkov 	int received = 0;
27594317099SPetar Penkov 
27694317099SPetar Penkov 	__skb_queue_head_init(&process_queue);
27794317099SPetar Penkov 
27894317099SPetar Penkov 	spin_lock(&queue->lock);
27994317099SPetar Penkov 	skb_queue_splice_tail_init(queue, &process_queue);
28094317099SPetar Penkov 	spin_unlock(&queue->lock);
28194317099SPetar Penkov 
28294317099SPetar Penkov 	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
28394317099SPetar Penkov 		napi_gro_receive(napi, skb);
28494317099SPetar Penkov 		++received;
28594317099SPetar Penkov 	}
28694317099SPetar Penkov 
28794317099SPetar Penkov 	if (!skb_queue_empty(&process_queue)) {
28894317099SPetar Penkov 		spin_lock(&queue->lock);
28994317099SPetar Penkov 		skb_queue_splice(&process_queue, queue);
29094317099SPetar Penkov 		spin_unlock(&queue->lock);
29194317099SPetar Penkov 	}
29294317099SPetar Penkov 
29394317099SPetar Penkov 	return received;
29494317099SPetar Penkov }
29594317099SPetar Penkov 
29694317099SPetar Penkov static int tun_napi_poll(struct napi_struct *napi, int budget)
29794317099SPetar Penkov {
29894317099SPetar Penkov 	unsigned int received;
29994317099SPetar Penkov 
30094317099SPetar Penkov 	received = tun_napi_receive(napi, budget);
30194317099SPetar Penkov 
30294317099SPetar Penkov 	if (received < budget)
30394317099SPetar Penkov 		napi_complete_done(napi, received);
30494317099SPetar Penkov 
30594317099SPetar Penkov 	return received;
30694317099SPetar Penkov }
30794317099SPetar Penkov 
30894317099SPetar Penkov static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
30994317099SPetar Penkov 			  bool napi_en)
31094317099SPetar Penkov {
311aec72f33SEric Dumazet 	tfile->napi_enabled = napi_en;
31294317099SPetar Penkov 	if (napi_en) {
31394317099SPetar Penkov 		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
31494317099SPetar Penkov 			       NAPI_POLL_WEIGHT);
31594317099SPetar Penkov 		napi_enable(&tfile->napi);
31690e33d45SPetar Penkov 		mutex_init(&tfile->napi_mutex);
31794317099SPetar Penkov 	}
31894317099SPetar Penkov }
31994317099SPetar Penkov 
32094317099SPetar Penkov static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
32194317099SPetar Penkov {
322aec72f33SEric Dumazet 	if (tfile->napi_enabled)
32394317099SPetar Penkov 		napi_disable(&tfile->napi);
32494317099SPetar Penkov }
32594317099SPetar Penkov 
32694317099SPetar Penkov static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
32794317099SPetar Penkov {
328aec72f33SEric Dumazet 	if (tfile->napi_enabled)
32994317099SPetar Penkov 		netif_napi_del(&tfile->napi);
33094317099SPetar Penkov }
33194317099SPetar Penkov 
33290e33d45SPetar Penkov static bool tun_napi_frags_enabled(const struct tun_struct *tun)
33390e33d45SPetar Penkov {
33490e33d45SPetar Penkov 	return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
33590e33d45SPetar Penkov }
33690e33d45SPetar Penkov 
3378b8e658bSGreg Kurz #ifdef CONFIG_TUN_VNET_CROSS_LE
3388b8e658bSGreg Kurz static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
3398b8e658bSGreg Kurz {
3408b8e658bSGreg Kurz 	return tun->flags & TUN_VNET_BE ? false :
3418b8e658bSGreg Kurz 		virtio_legacy_is_little_endian();
3428b8e658bSGreg Kurz }
3438b8e658bSGreg Kurz 
3448b8e658bSGreg Kurz static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
3458b8e658bSGreg Kurz {
3468b8e658bSGreg Kurz 	int be = !!(tun->flags & TUN_VNET_BE);
3478b8e658bSGreg Kurz 
3488b8e658bSGreg Kurz 	if (put_user(be, argp))
3498b8e658bSGreg Kurz 		return -EFAULT;
3508b8e658bSGreg Kurz 
3518b8e658bSGreg Kurz 	return 0;
3528b8e658bSGreg Kurz }
3538b8e658bSGreg Kurz 
3548b8e658bSGreg Kurz static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
3558b8e658bSGreg Kurz {
3568b8e658bSGreg Kurz 	int be;
3578b8e658bSGreg Kurz 
3588b8e658bSGreg Kurz 	if (get_user(be, argp))
3598b8e658bSGreg Kurz 		return -EFAULT;
3608b8e658bSGreg Kurz 
3618b8e658bSGreg Kurz 	if (be)
3628b8e658bSGreg Kurz 		tun->flags |= TUN_VNET_BE;
3638b8e658bSGreg Kurz 	else
3648b8e658bSGreg Kurz 		tun->flags &= ~TUN_VNET_BE;
3658b8e658bSGreg Kurz 
3668b8e658bSGreg Kurz 	return 0;
3678b8e658bSGreg Kurz }
3688b8e658bSGreg Kurz #else
3698b8e658bSGreg Kurz static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
3708b8e658bSGreg Kurz {
3718b8e658bSGreg Kurz 	return virtio_legacy_is_little_endian();
3728b8e658bSGreg Kurz }
3738b8e658bSGreg Kurz 
3748b8e658bSGreg Kurz static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
3758b8e658bSGreg Kurz {
3768b8e658bSGreg Kurz 	return -EINVAL;
3778b8e658bSGreg Kurz }
3788b8e658bSGreg Kurz 
3798b8e658bSGreg Kurz static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
3808b8e658bSGreg Kurz {
3818b8e658bSGreg Kurz 	return -EINVAL;
3828b8e658bSGreg Kurz }
3838b8e658bSGreg Kurz #endif /* CONFIG_TUN_VNET_CROSS_LE */
3848b8e658bSGreg Kurz 
38525bd55bbSGreg Kurz static inline bool tun_is_little_endian(struct tun_struct *tun)
38625bd55bbSGreg Kurz {
3877d824109SGreg Kurz 	return tun->flags & TUN_VNET_LE ||
3888b8e658bSGreg Kurz 		tun_legacy_is_little_endian(tun);
38925bd55bbSGreg Kurz }
39025bd55bbSGreg Kurz 
39156f0dcc5SMichael S. Tsirkin static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
39256f0dcc5SMichael S. Tsirkin {
39325bd55bbSGreg Kurz 	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
39456f0dcc5SMichael S. Tsirkin }
39556f0dcc5SMichael S. Tsirkin 
39656f0dcc5SMichael S. Tsirkin static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
39756f0dcc5SMichael S. Tsirkin {
39825bd55bbSGreg Kurz 	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
39956f0dcc5SMichael S. Tsirkin }
40056f0dcc5SMichael S. Tsirkin 
40196442e42SJason Wang static inline u32 tun_hashfn(u32 rxhash)
40296442e42SJason Wang {
40396442e42SJason Wang 	return rxhash & 0x3ff;
40496442e42SJason Wang }
40596442e42SJason Wang 
40696442e42SJason Wang static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
40796442e42SJason Wang {
40896442e42SJason Wang 	struct tun_flow_entry *e;
40996442e42SJason Wang 
410b67bfe0dSSasha Levin 	hlist_for_each_entry_rcu(e, head, hash_link) {
41196442e42SJason Wang 		if (e->rxhash == rxhash)
41296442e42SJason Wang 			return e;
41396442e42SJason Wang 	}
41496442e42SJason Wang 	return NULL;
41596442e42SJason Wang }
41696442e42SJason Wang 
41796442e42SJason Wang static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
41896442e42SJason Wang 					      struct hlist_head *head,
41996442e42SJason Wang 					      u32 rxhash, u16 queue_index)
42096442e42SJason Wang {
4219fdc6befSEric Dumazet 	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
4229fdc6befSEric Dumazet 
42396442e42SJason Wang 	if (e) {
42496442e42SJason Wang 		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
42596442e42SJason Wang 			  rxhash, queue_index);
42696442e42SJason Wang 		e->updated = jiffies;
42796442e42SJason Wang 		e->rxhash = rxhash;
4289bc88939STom Herbert 		e->rps_rxhash = 0;
42996442e42SJason Wang 		e->queue_index = queue_index;
43096442e42SJason Wang 		e->tun = tun;
43196442e42SJason Wang 		hlist_add_head_rcu(&e->hash_link, head);
432b8732fb7SJason Wang 		++tun->flow_count;
43396442e42SJason Wang 	}
43496442e42SJason Wang 	return e;
43596442e42SJason Wang }
43696442e42SJason Wang 
43796442e42SJason Wang static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
43896442e42SJason Wang {
43996442e42SJason Wang 	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
44096442e42SJason Wang 		  e->rxhash, e->queue_index);
44196442e42SJason Wang 	hlist_del_rcu(&e->hash_link);
4429fdc6befSEric Dumazet 	kfree_rcu(e, rcu);
443b8732fb7SJason Wang 	--tun->flow_count;
44496442e42SJason Wang }
44596442e42SJason Wang 
44696442e42SJason Wang static void tun_flow_flush(struct tun_struct *tun)
44796442e42SJason Wang {
44896442e42SJason Wang 	int i;
44996442e42SJason Wang 
45096442e42SJason Wang 	spin_lock_bh(&tun->lock);
45196442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
45296442e42SJason Wang 		struct tun_flow_entry *e;
453b67bfe0dSSasha Levin 		struct hlist_node *n;
45496442e42SJason Wang 
455b67bfe0dSSasha Levin 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
45696442e42SJason Wang 			tun_flow_delete(tun, e);
45796442e42SJason Wang 	}
45896442e42SJason Wang 	spin_unlock_bh(&tun->lock);
45996442e42SJason Wang }
46096442e42SJason Wang 
46196442e42SJason Wang static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
46296442e42SJason Wang {
46396442e42SJason Wang 	int i;
46496442e42SJason Wang 
46596442e42SJason Wang 	spin_lock_bh(&tun->lock);
46696442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
46796442e42SJason Wang 		struct tun_flow_entry *e;
468b67bfe0dSSasha Levin 		struct hlist_node *n;
46996442e42SJason Wang 
470b67bfe0dSSasha Levin 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
47196442e42SJason Wang 			if (e->queue_index == queue_index)
47296442e42SJason Wang 				tun_flow_delete(tun, e);
47396442e42SJason Wang 		}
47496442e42SJason Wang 	}
47596442e42SJason Wang 	spin_unlock_bh(&tun->lock);
47696442e42SJason Wang }
47796442e42SJason Wang 
478e99e88a9SKees Cook static void tun_flow_cleanup(struct timer_list *t)
47996442e42SJason Wang {
480e99e88a9SKees Cook 	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
48196442e42SJason Wang 	unsigned long delay = tun->ageing_time;
48296442e42SJason Wang 	unsigned long next_timer = jiffies + delay;
48396442e42SJason Wang 	unsigned long count = 0;
48496442e42SJason Wang 	int i;
48596442e42SJason Wang 
48696442e42SJason Wang 	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
48796442e42SJason Wang 
4887dbfb4efSEric Dumazet 	spin_lock(&tun->lock);
48996442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
49096442e42SJason Wang 		struct tun_flow_entry *e;
491b67bfe0dSSasha Levin 		struct hlist_node *n;
49296442e42SJason Wang 
493b67bfe0dSSasha Levin 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
49496442e42SJason Wang 			unsigned long this_timer;
49581d98fa4SEric Dumazet 
49696442e42SJason Wang 			this_timer = e->updated + delay;
49781d98fa4SEric Dumazet 			if (time_before_eq(this_timer, jiffies)) {
49896442e42SJason Wang 				tun_flow_delete(tun, e);
49981d98fa4SEric Dumazet 				continue;
50081d98fa4SEric Dumazet 			}
50181d98fa4SEric Dumazet 			count++;
50281d98fa4SEric Dumazet 			if (time_before(this_timer, next_timer))
50396442e42SJason Wang 				next_timer = this_timer;
50496442e42SJason Wang 		}
50596442e42SJason Wang 	}
50696442e42SJason Wang 
50796442e42SJason Wang 	if (count)
50896442e42SJason Wang 		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
5097dbfb4efSEric Dumazet 	spin_unlock(&tun->lock);
51096442e42SJason Wang }
51196442e42SJason Wang 
51249974420SEric Dumazet static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
5139e85722dSJason Wang 			    struct tun_file *tfile)
51496442e42SJason Wang {
51596442e42SJason Wang 	struct hlist_head *head;
51696442e42SJason Wang 	struct tun_flow_entry *e;
51796442e42SJason Wang 	unsigned long delay = tun->ageing_time;
5189e85722dSJason Wang 	u16 queue_index = tfile->queue_index;
51996442e42SJason Wang 
52096442e42SJason Wang 	if (!rxhash)
52196442e42SJason Wang 		return;
52296442e42SJason Wang 	else
52396442e42SJason Wang 		head = &tun->flows[tun_hashfn(rxhash)];
52496442e42SJason Wang 
52596442e42SJason Wang 	rcu_read_lock();
52696442e42SJason Wang 
5279e85722dSJason Wang 	/* We may get a very small possibility of OOO during switching, not
5289e85722dSJason Wang 	 * worth to optimize.*/
5299e85722dSJason Wang 	if (tun->numqueues == 1 || tfile->detached)
53096442e42SJason Wang 		goto unlock;
53196442e42SJason Wang 
53296442e42SJason Wang 	e = tun_flow_find(head, rxhash);
53396442e42SJason Wang 	if (likely(e)) {
53496442e42SJason Wang 		/* TODO: keep queueing to old queue until it's empty? */
53596442e42SJason Wang 		e->queue_index = queue_index;
53696442e42SJason Wang 		e->updated = jiffies;
5379bc88939STom Herbert 		sock_rps_record_flow_hash(e->rps_rxhash);
53896442e42SJason Wang 	} else {
53996442e42SJason Wang 		spin_lock_bh(&tun->lock);
540b8732fb7SJason Wang 		if (!tun_flow_find(head, rxhash) &&
541b8732fb7SJason Wang 		    tun->flow_count < MAX_TAP_FLOWS)
54296442e42SJason Wang 			tun_flow_create(tun, head, rxhash, queue_index);
54396442e42SJason Wang 
54496442e42SJason Wang 		if (!timer_pending(&tun->flow_gc_timer))
54596442e42SJason Wang 			mod_timer(&tun->flow_gc_timer,
54696442e42SJason Wang 				  round_jiffies_up(jiffies + delay));
54796442e42SJason Wang 		spin_unlock_bh(&tun->lock);
54896442e42SJason Wang 	}
54996442e42SJason Wang 
55096442e42SJason Wang unlock:
55196442e42SJason Wang 	rcu_read_unlock();
55296442e42SJason Wang }
55396442e42SJason Wang 
5549bc88939STom Herbert /**
5559bc88939STom Herbert  * Save the hash received in the stack receive path and update the
5569bc88939STom Herbert  * flow_hash table accordingly.
5579bc88939STom Herbert  */
5589bc88939STom Herbert static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
5599bc88939STom Herbert {
560567e4b79SEric Dumazet 	if (unlikely(e->rps_rxhash != hash))
5619bc88939STom Herbert 		e->rps_rxhash = hash;
5629bc88939STom Herbert }
5639bc88939STom Herbert 
564c8d68e6bSJason Wang /* We try to identify a flow through its rxhash first. The reason that
56592d4ea6eSstephen hemminger  * we do not check rxq no. is because some cards(e.g 82599), chooses
566c8d68e6bSJason Wang  * the rxq based on the txq where the last packet of the flow comes. As
567c8d68e6bSJason Wang  * the userspace application move between processors, we may get a
568c8d68e6bSJason Wang  * different rxq no. here. If we could not get rxhash, then we would
569c8d68e6bSJason Wang  * hope the rxq no. may help here.
570c8d68e6bSJason Wang  */
57196f84061SJason Wang static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
572c8d68e6bSJason Wang {
57396442e42SJason Wang 	struct tun_flow_entry *e;
574c8d68e6bSJason Wang 	u32 txq = 0;
575c8d68e6bSJason Wang 	u32 numqueues = 0;
576c8d68e6bSJason Wang 
5776aa7de05SMark Rutland 	numqueues = READ_ONCE(tun->numqueues);
578c8d68e6bSJason Wang 
579feec084aSJason Wang 	txq = __skb_get_hash_symmetric(skb);
580c8d68e6bSJason Wang 	if (txq) {
58196442e42SJason Wang 		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
5829bc88939STom Herbert 		if (e) {
5839bc88939STom Herbert 			tun_flow_save_rps_rxhash(e, txq);
584fbe4d456SZhi Yong Wu 			txq = e->queue_index;
5859bc88939STom Herbert 		} else
586c8d68e6bSJason Wang 			/* use multiply and shift instead of expensive divide */
587c8d68e6bSJason Wang 			txq = ((u64)txq * numqueues) >> 32;
588c8d68e6bSJason Wang 	} else if (likely(skb_rx_queue_recorded(skb))) {
589c8d68e6bSJason Wang 		txq = skb_get_rx_queue(skb);
590c8d68e6bSJason Wang 		while (unlikely(txq >= numqueues))
591c8d68e6bSJason Wang 			txq -= numqueues;
592c8d68e6bSJason Wang 	}
593c8d68e6bSJason Wang 
594c8d68e6bSJason Wang 	return txq;
595c8d68e6bSJason Wang }
596c8d68e6bSJason Wang 
59796f84061SJason Wang static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
59896f84061SJason Wang {
599cd5681d7SJason Wang 	struct tun_prog *prog;
60096f84061SJason Wang 	u16 ret = 0;
60196f84061SJason Wang 
60296f84061SJason Wang 	prog = rcu_dereference(tun->steering_prog);
60396f84061SJason Wang 	if (prog)
60496f84061SJason Wang 		ret = bpf_prog_run_clear_cb(prog->prog, skb);
60596f84061SJason Wang 
60696f84061SJason Wang 	return ret % tun->numqueues;
60796f84061SJason Wang }
60896f84061SJason Wang 
60996f84061SJason Wang static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
61096f84061SJason Wang 			    void *accel_priv, select_queue_fallback_t fallback)
61196f84061SJason Wang {
61296f84061SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
61396f84061SJason Wang 	u16 ret;
61496f84061SJason Wang 
61596f84061SJason Wang 	rcu_read_lock();
61696f84061SJason Wang 	if (rcu_dereference(tun->steering_prog))
61796f84061SJason Wang 		ret = tun_ebpf_select_queue(tun, skb);
61896f84061SJason Wang 	else
61996f84061SJason Wang 		ret = tun_automq_select_queue(tun, skb);
62096f84061SJason Wang 	rcu_read_unlock();
62196f84061SJason Wang 
62296f84061SJason Wang 	return ret;
62396f84061SJason Wang }
62496f84061SJason Wang 
625cde8b15fSJason Wang static inline bool tun_not_capable(struct tun_struct *tun)
626cde8b15fSJason Wang {
627cde8b15fSJason Wang 	const struct cred *cred = current_cred();
628c260b772SEric W. Biederman 	struct net *net = dev_net(tun->dev);
629cde8b15fSJason Wang 
630cde8b15fSJason Wang 	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
631cde8b15fSJason Wang 		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
632c260b772SEric W. Biederman 		!ns_capable(net->user_ns, CAP_NET_ADMIN);
633cde8b15fSJason Wang }
634cde8b15fSJason Wang 
635c8d68e6bSJason Wang static void tun_set_real_num_queues(struct tun_struct *tun)
636c8d68e6bSJason Wang {
637c8d68e6bSJason Wang 	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
638c8d68e6bSJason Wang 	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
639c8d68e6bSJason Wang }
640c8d68e6bSJason Wang 
6414008e97fSJason Wang static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
6424008e97fSJason Wang {
6434008e97fSJason Wang 	tfile->detached = tun;
6444008e97fSJason Wang 	list_add_tail(&tfile->next, &tun->disabled);
6454008e97fSJason Wang 	++tun->numdisabled;
6464008e97fSJason Wang }
6474008e97fSJason Wang 
648d32649d1SJason Wang static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
6494008e97fSJason Wang {
6504008e97fSJason Wang 	struct tun_struct *tun = tfile->detached;
6514008e97fSJason Wang 
6524008e97fSJason Wang 	tfile->detached = NULL;
6534008e97fSJason Wang 	list_del_init(&tfile->next);
6544008e97fSJason Wang 	--tun->numdisabled;
6554008e97fSJason Wang 	return tun;
6564008e97fSJason Wang }
6574008e97fSJason Wang 
658fc72d1d5SJason Wang static void tun_ptr_free(void *ptr)
659fc72d1d5SJason Wang {
660fc72d1d5SJason Wang 	if (!ptr)
661fc72d1d5SJason Wang 		return;
662fc72d1d5SJason Wang 	if (tun_is_xdp_buff(ptr)) {
663fc72d1d5SJason Wang 		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
664fc72d1d5SJason Wang 
665fc72d1d5SJason Wang 		put_page(virt_to_head_page(xdp->data));
666fc72d1d5SJason Wang 	} else {
667fc72d1d5SJason Wang 		__skb_array_destroy_skb(ptr);
668fc72d1d5SJason Wang 	}
669fc72d1d5SJason Wang }
670fc72d1d5SJason Wang 
6714bfb0513SJason Wang static void tun_queue_purge(struct tun_file *tfile)
6724bfb0513SJason Wang {
673fc72d1d5SJason Wang 	void *ptr;
6741576d986SJason Wang 
675fc72d1d5SJason Wang 	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
676fc72d1d5SJason Wang 		tun_ptr_free(ptr);
6771576d986SJason Wang 
6785503fcecSJason Wang 	skb_queue_purge(&tfile->sk.sk_write_queue);
6794bfb0513SJason Wang 	skb_queue_purge(&tfile->sk.sk_error_queue);
6804bfb0513SJason Wang }
6814bfb0513SJason Wang 
682c8d68e6bSJason Wang static void __tun_detach(struct tun_file *tfile, bool clean)
683c8d68e6bSJason Wang {
684c8d68e6bSJason Wang 	struct tun_file *ntfile;
685c8d68e6bSJason Wang 	struct tun_struct *tun;
686c8d68e6bSJason Wang 
687b8deabd3SJason Wang 	tun = rtnl_dereference(tfile->tun);
688b8deabd3SJason Wang 
68994317099SPetar Penkov 	if (tun && clean) {
69094317099SPetar Penkov 		tun_napi_disable(tun, tfile);
69194317099SPetar Penkov 		tun_napi_del(tun, tfile);
69294317099SPetar Penkov 	}
69394317099SPetar Penkov 
6949e85722dSJason Wang 	if (tun && !tfile->detached) {
695c8d68e6bSJason Wang 		u16 index = tfile->queue_index;
696c8d68e6bSJason Wang 		BUG_ON(index >= tun->numqueues);
697c8d68e6bSJason Wang 
698c8d68e6bSJason Wang 		rcu_assign_pointer(tun->tfiles[index],
699c8d68e6bSJason Wang 				   tun->tfiles[tun->numqueues - 1]);
700b8deabd3SJason Wang 		ntfile = rtnl_dereference(tun->tfiles[index]);
701c8d68e6bSJason Wang 		ntfile->queue_index = index;
702c8d68e6bSJason Wang 
703c8d68e6bSJason Wang 		--tun->numqueues;
7049e85722dSJason Wang 		if (clean) {
705c956674bSMonam Agarwal 			RCU_INIT_POINTER(tfile->tun, NULL);
706c8d68e6bSJason Wang 			sock_put(&tfile->sk);
7079e85722dSJason Wang 		} else
7084008e97fSJason Wang 			tun_disable_queue(tun, tfile);
709c8d68e6bSJason Wang 
710c8d68e6bSJason Wang 		synchronize_net();
71196442e42SJason Wang 		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
712c8d68e6bSJason Wang 		/* Drop read queue */
7134bfb0513SJason Wang 		tun_queue_purge(tfile);
714c8d68e6bSJason Wang 		tun_set_real_num_queues(tun);
715dd38bd85SJason Wang 	} else if (tfile->detached && clean) {
7164008e97fSJason Wang 		tun = tun_enable_queue(tfile);
717dd38bd85SJason Wang 		sock_put(&tfile->sk);
718dd38bd85SJason Wang 	}
719c8d68e6bSJason Wang 
720c8d68e6bSJason Wang 	if (clean) {
721af668b3cSMichael S. Tsirkin 		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
722af668b3cSMichael S. Tsirkin 			netif_carrier_off(tun->dev);
723af668b3cSMichael S. Tsirkin 
72440630b82SMichael S. Tsirkin 			if (!(tun->flags & IFF_PERSIST) &&
725af668b3cSMichael S. Tsirkin 			    tun->dev->reg_state == NETREG_REGISTERED)
7264008e97fSJason Wang 				unregister_netdevice(tun->dev);
727af668b3cSMichael S. Tsirkin 		}
7288bf5c4eeSJesper Dangaard Brouer 		if (tun) {
729fc72d1d5SJason Wang 			ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
7308bf5c4eeSJesper Dangaard Brouer 			xdp_rxq_info_unreg(&tfile->xdp_rxq);
7318bf5c4eeSJesper Dangaard Brouer 		}
732140e807dSEric W. Biederman 		sock_put(&tfile->sk);
733c8d68e6bSJason Wang 	}
734c8d68e6bSJason Wang }
735c8d68e6bSJason Wang 
736c8d68e6bSJason Wang static void tun_detach(struct tun_file *tfile, bool clean)
737c8d68e6bSJason Wang {
738c8d68e6bSJason Wang 	rtnl_lock();
739c8d68e6bSJason Wang 	__tun_detach(tfile, clean);
740c8d68e6bSJason Wang 	rtnl_unlock();
741c8d68e6bSJason Wang }
742c8d68e6bSJason Wang 
743c8d68e6bSJason Wang static void tun_detach_all(struct net_device *dev)
744c8d68e6bSJason Wang {
745c8d68e6bSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
7464008e97fSJason Wang 	struct tun_file *tfile, *tmp;
747c8d68e6bSJason Wang 	int i, n = tun->numqueues;
748c8d68e6bSJason Wang 
749c8d68e6bSJason Wang 	for (i = 0; i < n; i++) {
750b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
751c8d68e6bSJason Wang 		BUG_ON(!tfile);
75294317099SPetar Penkov 		tun_napi_disable(tun, tfile);
753addf8fc4SJason Wang 		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
7549e641bdcSXi Wang 		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
755c956674bSMonam Agarwal 		RCU_INIT_POINTER(tfile->tun, NULL);
756c8d68e6bSJason Wang 		--tun->numqueues;
757c8d68e6bSJason Wang 	}
7589e85722dSJason Wang 	list_for_each_entry(tfile, &tun->disabled, next) {
759addf8fc4SJason Wang 		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
7609e641bdcSXi Wang 		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
761c956674bSMonam Agarwal 		RCU_INIT_POINTER(tfile->tun, NULL);
7629e85722dSJason Wang 	}
763c8d68e6bSJason Wang 	BUG_ON(tun->numqueues != 0);
764c8d68e6bSJason Wang 
765c8d68e6bSJason Wang 	synchronize_net();
766c8d68e6bSJason Wang 	for (i = 0; i < n; i++) {
767b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
76894317099SPetar Penkov 		tun_napi_del(tun, tfile);
769c8d68e6bSJason Wang 		/* Drop read queue */
7704bfb0513SJason Wang 		tun_queue_purge(tfile);
7718bf5c4eeSJesper Dangaard Brouer 		xdp_rxq_info_unreg(&tfile->xdp_rxq);
772c8d68e6bSJason Wang 		sock_put(&tfile->sk);
773c8d68e6bSJason Wang 	}
7744008e97fSJason Wang 	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
7754008e97fSJason Wang 		tun_enable_queue(tfile);
7764bfb0513SJason Wang 		tun_queue_purge(tfile);
7778bf5c4eeSJesper Dangaard Brouer 		xdp_rxq_info_unreg(&tfile->xdp_rxq);
7784008e97fSJason Wang 		sock_put(&tfile->sk);
7794008e97fSJason Wang 	}
7804008e97fSJason Wang 	BUG_ON(tun->numdisabled != 0);
781dd38bd85SJason Wang 
78240630b82SMichael S. Tsirkin 	if (tun->flags & IFF_PERSIST)
783dd38bd85SJason Wang 		module_put(THIS_MODULE);
784c8d68e6bSJason Wang }
785c8d68e6bSJason Wang 
78694317099SPetar Penkov static int tun_attach(struct tun_struct *tun, struct file *file,
78794317099SPetar Penkov 		      bool skip_filter, bool napi)
788a7385ba2SEric W. Biederman {
789631ab46bSEric W. Biederman 	struct tun_file *tfile = file->private_data;
7901576d986SJason Wang 	struct net_device *dev = tun->dev;
79138231b7aSEric W. Biederman 	int err;
792a7385ba2SEric W. Biederman 
7935dbbaf2dSPaul Moore 	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
7945dbbaf2dSPaul Moore 	if (err < 0)
7955dbbaf2dSPaul Moore 		goto out;
7965dbbaf2dSPaul Moore 
79738231b7aSEric W. Biederman 	err = -EINVAL;
7989e85722dSJason Wang 	if (rtnl_dereference(tfile->tun) && !tfile->detached)
79938231b7aSEric W. Biederman 		goto out;
80038231b7aSEric W. Biederman 
80138231b7aSEric W. Biederman 	err = -EBUSY;
80240630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
803c8d68e6bSJason Wang 		goto out;
804c8d68e6bSJason Wang 
805c8d68e6bSJason Wang 	err = -E2BIG;
8064008e97fSJason Wang 	if (!tfile->detached &&
8074008e97fSJason Wang 	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
80838231b7aSEric W. Biederman 		goto out;
80938231b7aSEric W. Biederman 
81038231b7aSEric W. Biederman 	err = 0;
81154f968d6SJason Wang 
81292d4ea6eSstephen hemminger 	/* Re-attach the filter to persist device */
813849c9b6fSPavel Emelyanov 	if (!skip_filter && (tun->filter_attached == true)) {
8148ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
8158ced425eSHannes Frederic Sowa 		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
8168ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
81754f968d6SJason Wang 		if (!err)
81854f968d6SJason Wang 			goto out;
81954f968d6SJason Wang 	}
8201576d986SJason Wang 
8211576d986SJason Wang 	if (!tfile->detached &&
8225990a305SJason Wang 	    ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
8231576d986SJason Wang 		err = -ENOMEM;
8241576d986SJason Wang 		goto out;
8251576d986SJason Wang 	}
8261576d986SJason Wang 
827c8d68e6bSJason Wang 	tfile->queue_index = tun->numqueues;
828addf8fc4SJason Wang 	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
8298bf5c4eeSJesper Dangaard Brouer 
8308bf5c4eeSJesper Dangaard Brouer 	if (tfile->detached) {
8318bf5c4eeSJesper Dangaard Brouer 		/* Re-attach detached tfile, updating XDP queue_index */
8328bf5c4eeSJesper Dangaard Brouer 		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
8338bf5c4eeSJesper Dangaard Brouer 
8348bf5c4eeSJesper Dangaard Brouer 		if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
8358bf5c4eeSJesper Dangaard Brouer 			tfile->xdp_rxq.queue_index = tfile->queue_index;
8368bf5c4eeSJesper Dangaard Brouer 	} else {
8378bf5c4eeSJesper Dangaard Brouer 		/* Setup XDP RX-queue info, for new tfile getting attached */
8388bf5c4eeSJesper Dangaard Brouer 		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
8398bf5c4eeSJesper Dangaard Brouer 				       tun->dev, tfile->queue_index);
8408bf5c4eeSJesper Dangaard Brouer 		if (err < 0)
8418bf5c4eeSJesper Dangaard Brouer 			goto out;
8428bf5c4eeSJesper Dangaard Brouer 		err = 0;
8438bf5c4eeSJesper Dangaard Brouer 	}
8448bf5c4eeSJesper Dangaard Brouer 
8456e914fc7SJason Wang 	rcu_assign_pointer(tfile->tun, tun);
846c8d68e6bSJason Wang 	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
847c8d68e6bSJason Wang 	tun->numqueues++;
848c8d68e6bSJason Wang 
84994317099SPetar Penkov 	if (tfile->detached) {
8504008e97fSJason Wang 		tun_enable_queue(tfile);
85194317099SPetar Penkov 	} else {
8524008e97fSJason Wang 		sock_hold(&tfile->sk);
85394317099SPetar Penkov 		tun_napi_init(tun, tfile, napi);
85494317099SPetar Penkov 	}
8554008e97fSJason Wang 
856c8d68e6bSJason Wang 	tun_set_real_num_queues(tun);
857c8d68e6bSJason Wang 
858c8d68e6bSJason Wang 	/* device is allowed to go away first, so no need to hold extra
859c8d68e6bSJason Wang 	 * refcnt.
860c8d68e6bSJason Wang 	 */
861a7385ba2SEric W. Biederman 
86238231b7aSEric W. Biederman out:
86338231b7aSEric W. Biederman 	return err;
864a7385ba2SEric W. Biederman }
865a7385ba2SEric W. Biederman 
8669484dc74Syuan linyu static struct tun_struct *tun_get(struct tun_file *tfile)
867631ab46bSEric W. Biederman {
8686e914fc7SJason Wang 	struct tun_struct *tun;
869c70f1829SEric W. Biederman 
8706e914fc7SJason Wang 	rcu_read_lock();
8716e914fc7SJason Wang 	tun = rcu_dereference(tfile->tun);
8726e914fc7SJason Wang 	if (tun)
8736e914fc7SJason Wang 		dev_hold(tun->dev);
8746e914fc7SJason Wang 	rcu_read_unlock();
875c70f1829SEric W. Biederman 
876c70f1829SEric W. Biederman 	return tun;
877631ab46bSEric W. Biederman }
878631ab46bSEric W. Biederman 
879631ab46bSEric W. Biederman static void tun_put(struct tun_struct *tun)
880631ab46bSEric W. Biederman {
8816e914fc7SJason Wang 	dev_put(tun->dev);
882631ab46bSEric W. Biederman }
883631ab46bSEric W. Biederman 
8846b8a66eeSJoe Perches /* TAP filtering */
885f271b2ccSMax Krasnyansky static void addr_hash_set(u32 *mask, const u8 *addr)
886f271b2ccSMax Krasnyansky {
887f271b2ccSMax Krasnyansky 	int n = ether_crc(ETH_ALEN, addr) >> 26;
888f271b2ccSMax Krasnyansky 	mask[n >> 5] |= (1 << (n & 31));
889f271b2ccSMax Krasnyansky }
890f271b2ccSMax Krasnyansky 
891f271b2ccSMax Krasnyansky static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
892f271b2ccSMax Krasnyansky {
893f271b2ccSMax Krasnyansky 	int n = ether_crc(ETH_ALEN, addr) >> 26;
894f271b2ccSMax Krasnyansky 	return mask[n >> 5] & (1 << (n & 31));
895f271b2ccSMax Krasnyansky }
896f271b2ccSMax Krasnyansky 
897f271b2ccSMax Krasnyansky static int update_filter(struct tap_filter *filter, void __user *arg)
898f271b2ccSMax Krasnyansky {
899f271b2ccSMax Krasnyansky 	struct { u8 u[ETH_ALEN]; } *addr;
900f271b2ccSMax Krasnyansky 	struct tun_filter uf;
901f271b2ccSMax Krasnyansky 	int err, alen, n, nexact;
902f271b2ccSMax Krasnyansky 
903f271b2ccSMax Krasnyansky 	if (copy_from_user(&uf, arg, sizeof(uf)))
904f271b2ccSMax Krasnyansky 		return -EFAULT;
905f271b2ccSMax Krasnyansky 
906f271b2ccSMax Krasnyansky 	if (!uf.count) {
907f271b2ccSMax Krasnyansky 		/* Disabled */
908f271b2ccSMax Krasnyansky 		filter->count = 0;
909f271b2ccSMax Krasnyansky 		return 0;
910f271b2ccSMax Krasnyansky 	}
911f271b2ccSMax Krasnyansky 
912f271b2ccSMax Krasnyansky 	alen = ETH_ALEN * uf.count;
91328e8190dSMarkus Elfring 	addr = memdup_user(arg + sizeof(uf), alen);
91428e8190dSMarkus Elfring 	if (IS_ERR(addr))
91528e8190dSMarkus Elfring 		return PTR_ERR(addr);
916f271b2ccSMax Krasnyansky 
917f271b2ccSMax Krasnyansky 	/* The filter is updated without holding any locks. Which is
918f271b2ccSMax Krasnyansky 	 * perfectly safe. We disable it first and in the worst
919f271b2ccSMax Krasnyansky 	 * case we'll accept a few undesired packets. */
920f271b2ccSMax Krasnyansky 	filter->count = 0;
921f271b2ccSMax Krasnyansky 	wmb();
922f271b2ccSMax Krasnyansky 
923f271b2ccSMax Krasnyansky 	/* Use first set of addresses as an exact filter */
924f271b2ccSMax Krasnyansky 	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
925f271b2ccSMax Krasnyansky 		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
926f271b2ccSMax Krasnyansky 
927f271b2ccSMax Krasnyansky 	nexact = n;
928f271b2ccSMax Krasnyansky 
929cfbf84fcSAlex Williamson 	/* Remaining multicast addresses are hashed,
930cfbf84fcSAlex Williamson 	 * unicast will leave the filter disabled. */
931f271b2ccSMax Krasnyansky 	memset(filter->mask, 0, sizeof(filter->mask));
932cfbf84fcSAlex Williamson 	for (; n < uf.count; n++) {
933cfbf84fcSAlex Williamson 		if (!is_multicast_ether_addr(addr[n].u)) {
934cfbf84fcSAlex Williamson 			err = 0; /* no filter */
9353b8d2a69SMarkus Elfring 			goto free_addr;
936cfbf84fcSAlex Williamson 		}
937f271b2ccSMax Krasnyansky 		addr_hash_set(filter->mask, addr[n].u);
938cfbf84fcSAlex Williamson 	}
939f271b2ccSMax Krasnyansky 
940f271b2ccSMax Krasnyansky 	/* For ALLMULTI just set the mask to all ones.
941f271b2ccSMax Krasnyansky 	 * This overrides the mask populated above. */
942f271b2ccSMax Krasnyansky 	if ((uf.flags & TUN_FLT_ALLMULTI))
943f271b2ccSMax Krasnyansky 		memset(filter->mask, ~0, sizeof(filter->mask));
944f271b2ccSMax Krasnyansky 
945f271b2ccSMax Krasnyansky 	/* Now enable the filter */
946f271b2ccSMax Krasnyansky 	wmb();
947f271b2ccSMax Krasnyansky 	filter->count = nexact;
948f271b2ccSMax Krasnyansky 
949f271b2ccSMax Krasnyansky 	/* Return the number of exact filters */
950f271b2ccSMax Krasnyansky 	err = nexact;
9513b8d2a69SMarkus Elfring free_addr:
952f271b2ccSMax Krasnyansky 	kfree(addr);
953f271b2ccSMax Krasnyansky 	return err;
954f271b2ccSMax Krasnyansky }
955f271b2ccSMax Krasnyansky 
956f271b2ccSMax Krasnyansky /* Returns: 0 - drop, !=0 - accept */
957f271b2ccSMax Krasnyansky static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
958f271b2ccSMax Krasnyansky {
959f271b2ccSMax Krasnyansky 	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
960f271b2ccSMax Krasnyansky 	 * at this point. */
961f271b2ccSMax Krasnyansky 	struct ethhdr *eh = (struct ethhdr *) skb->data;
962f271b2ccSMax Krasnyansky 	int i;
963f271b2ccSMax Krasnyansky 
964f271b2ccSMax Krasnyansky 	/* Exact match */
965f271b2ccSMax Krasnyansky 	for (i = 0; i < filter->count; i++)
9662e42e474SJoe Perches 		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
967f271b2ccSMax Krasnyansky 			return 1;
968f271b2ccSMax Krasnyansky 
969f271b2ccSMax Krasnyansky 	/* Inexact match (multicast only) */
970f271b2ccSMax Krasnyansky 	if (is_multicast_ether_addr(eh->h_dest))
971f271b2ccSMax Krasnyansky 		return addr_hash_test(filter->mask, eh->h_dest);
972f271b2ccSMax Krasnyansky 
973f271b2ccSMax Krasnyansky 	return 0;
974f271b2ccSMax Krasnyansky }
975f271b2ccSMax Krasnyansky 
976f271b2ccSMax Krasnyansky /*
977f271b2ccSMax Krasnyansky  * Checks whether the packet is accepted or not.
978f271b2ccSMax Krasnyansky  * Returns: 0 - drop, !=0 - accept
979f271b2ccSMax Krasnyansky  */
980f271b2ccSMax Krasnyansky static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
981f271b2ccSMax Krasnyansky {
982f271b2ccSMax Krasnyansky 	if (!filter->count)
983f271b2ccSMax Krasnyansky 		return 1;
984f271b2ccSMax Krasnyansky 
985f271b2ccSMax Krasnyansky 	return run_filter(filter, skb);
986f271b2ccSMax Krasnyansky }
987f271b2ccSMax Krasnyansky 
9881da177e4SLinus Torvalds /* Network device part of the driver */
9891da177e4SLinus Torvalds 
9907282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops;
9911da177e4SLinus Torvalds 
992c70f1829SEric W. Biederman /* Net device detach from fd. */
993c70f1829SEric W. Biederman static void tun_net_uninit(struct net_device *dev)
994c70f1829SEric W. Biederman {
995c8d68e6bSJason Wang 	tun_detach_all(dev);
996c70f1829SEric W. Biederman }
997c70f1829SEric W. Biederman 
9981da177e4SLinus Torvalds /* Net device open. */
9991da177e4SLinus Torvalds static int tun_net_open(struct net_device *dev)
10001da177e4SLinus Torvalds {
1001b20e2d54SHannes Frederic Sowa 	struct tun_struct *tun = netdev_priv(dev);
1002b20e2d54SHannes Frederic Sowa 	int i;
1003b20e2d54SHannes Frederic Sowa 
1004c8d68e6bSJason Wang 	netif_tx_start_all_queues(dev);
1005b20e2d54SHannes Frederic Sowa 
1006b20e2d54SHannes Frederic Sowa 	for (i = 0; i < tun->numqueues; i++) {
1007b20e2d54SHannes Frederic Sowa 		struct tun_file *tfile;
1008b20e2d54SHannes Frederic Sowa 
1009b20e2d54SHannes Frederic Sowa 		tfile = rtnl_dereference(tun->tfiles[i]);
1010b20e2d54SHannes Frederic Sowa 		tfile->socket.sk->sk_write_space(tfile->socket.sk);
1011b20e2d54SHannes Frederic Sowa 	}
1012b20e2d54SHannes Frederic Sowa 
10131da177e4SLinus Torvalds 	return 0;
10141da177e4SLinus Torvalds }
10151da177e4SLinus Torvalds 
10161da177e4SLinus Torvalds /* Net device close. */
10171da177e4SLinus Torvalds static int tun_net_close(struct net_device *dev)
10181da177e4SLinus Torvalds {
1019c8d68e6bSJason Wang 	netif_tx_stop_all_queues(dev);
10201da177e4SLinus Torvalds 	return 0;
10211da177e4SLinus Torvalds }
10221da177e4SLinus Torvalds 
10231da177e4SLinus Torvalds /* Net device start xmit */
102496f84061SJason Wang static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
10251da177e4SLinus Torvalds {
10263df97ba8SJason Wang #ifdef CONFIG_RPS
102796f84061SJason Wang 	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
10289bc88939STom Herbert 		/* Select queue was not called for the skbuff, so we extract the
10299bc88939STom Herbert 		 * RPS hash and save it into the flow_table here.
10309bc88939STom Herbert 		 */
10319bc88939STom Herbert 		__u32 rxhash;
10329bc88939STom Herbert 
1033feec084aSJason Wang 		rxhash = __skb_get_hash_symmetric(skb);
10349bc88939STom Herbert 		if (rxhash) {
10359bc88939STom Herbert 			struct tun_flow_entry *e;
10369bc88939STom Herbert 			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
10379bc88939STom Herbert 					rxhash);
10389bc88939STom Herbert 			if (e)
10399bc88939STom Herbert 				tun_flow_save_rps_rxhash(e, rxhash);
10409bc88939STom Herbert 		}
10419bc88939STom Herbert 	}
10423df97ba8SJason Wang #endif
104396f84061SJason Wang }
104496f84061SJason Wang 
1045*aff3d70aSJason Wang static unsigned int run_ebpf_filter(struct tun_struct *tun,
1046*aff3d70aSJason Wang 				    struct sk_buff *skb,
1047*aff3d70aSJason Wang 				    int len)
1048*aff3d70aSJason Wang {
1049*aff3d70aSJason Wang 	struct tun_prog *prog = rcu_dereference(tun->filter_prog);
1050*aff3d70aSJason Wang 
1051*aff3d70aSJason Wang 	if (prog)
1052*aff3d70aSJason Wang 		len = bpf_prog_run_clear_cb(prog->prog, skb);
1053*aff3d70aSJason Wang 
1054*aff3d70aSJason Wang 	return len;
1055*aff3d70aSJason Wang }
1056*aff3d70aSJason Wang 
105796f84061SJason Wang /* Net device start xmit */
105896f84061SJason Wang static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
105996f84061SJason Wang {
106096f84061SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
106196f84061SJason Wang 	int txq = skb->queue_mapping;
106296f84061SJason Wang 	struct tun_file *tfile;
1063*aff3d70aSJason Wang 	int len = skb->len;
106496f84061SJason Wang 
106596f84061SJason Wang 	rcu_read_lock();
106696f84061SJason Wang 	tfile = rcu_dereference(tun->tfiles[txq]);
106796f84061SJason Wang 
106896f84061SJason Wang 	/* Drop packet if interface is not attached */
1069cc166427SWillem de Bruijn 	if (txq >= tun->numqueues)
107096f84061SJason Wang 		goto drop;
107196f84061SJason Wang 
107296f84061SJason Wang 	if (!rcu_dereference(tun->steering_prog))
107396f84061SJason Wang 		tun_automq_xmit(tun, skb);
10749bc88939STom Herbert 
10756e914fc7SJason Wang 	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
10766e914fc7SJason Wang 
1077c8d68e6bSJason Wang 	BUG_ON(!tfile);
1078c8d68e6bSJason Wang 
1079f271b2ccSMax Krasnyansky 	/* Drop if the filter does not like it.
1080f271b2ccSMax Krasnyansky 	 * This is a noop if the filter is disabled.
1081f271b2ccSMax Krasnyansky 	 * Filter can be enabled only for the TAP devices. */
1082f271b2ccSMax Krasnyansky 	if (!check_filter(&tun->txflt, skb))
1083f271b2ccSMax Krasnyansky 		goto drop;
1084f271b2ccSMax Krasnyansky 
108554f968d6SJason Wang 	if (tfile->socket.sk->sk_filter &&
108654f968d6SJason Wang 	    sk_filter(tfile->socket.sk, skb))
108799405162SMichael S. Tsirkin 		goto drop;
108899405162SMichael S. Tsirkin 
1089*aff3d70aSJason Wang 	len = run_ebpf_filter(tun, skb, len);
1090*aff3d70aSJason Wang 
1091*aff3d70aSJason Wang 	/* Trim extra bytes since we may insert vlan proto & TCI
1092*aff3d70aSJason Wang 	 * in tun_put_user().
1093*aff3d70aSJason Wang 	 */
1094*aff3d70aSJason Wang 	len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0;
1095*aff3d70aSJason Wang 	if (len <= 0 || pskb_trim(skb, len))
1096*aff3d70aSJason Wang 		goto drop;
1097*aff3d70aSJason Wang 
10981f8b977aSWillem de Bruijn 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
10997bf66305SJason Wang 		goto drop;
11007bf66305SJason Wang 
11017b996243SSoheil Hassas Yeganeh 	skb_tx_timestamp(skb);
1102eda29772SRichard Cochran 
11030110d6f2SMichael S. Tsirkin 	/* Orphan the skb - required as we might hang on to it
11047bf66305SJason Wang 	 * for indefinite time.
11057bf66305SJason Wang 	 */
11060110d6f2SMichael S. Tsirkin 	skb_orphan(skb);
11070110d6f2SMichael S. Tsirkin 
1108f8af75f3SEric Dumazet 	nf_reset(skb);
1109f8af75f3SEric Dumazet 
11105990a305SJason Wang 	if (ptr_ring_produce(&tfile->tx_ring, skb))
11111576d986SJason Wang 		goto drop;
11121da177e4SLinus Torvalds 
11131da177e4SLinus Torvalds 	/* Notify and wake up reader process */
111454f968d6SJason Wang 	if (tfile->flags & TUN_FASYNC)
111554f968d6SJason Wang 		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
11169e641bdcSXi Wang 	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
11176e914fc7SJason Wang 
11186e914fc7SJason Wang 	rcu_read_unlock();
11196ed10654SPatrick McHardy 	return NETDEV_TX_OK;
11201da177e4SLinus Torvalds 
11211da177e4SLinus Torvalds drop:
1122608b9977SPaolo Abeni 	this_cpu_inc(tun->pcpu_stats->tx_dropped);
1123149d36f7SMichael S. Tsirkin 	skb_tx_error(skb);
11241da177e4SLinus Torvalds 	kfree_skb(skb);
11256e914fc7SJason Wang 	rcu_read_unlock();
1126baeababbSJason Wang 	return NET_XMIT_DROP;
11271da177e4SLinus Torvalds }
11281da177e4SLinus Torvalds 
1129f271b2ccSMax Krasnyansky static void tun_net_mclist(struct net_device *dev)
11301da177e4SLinus Torvalds {
1131f271b2ccSMax Krasnyansky 	/*
1132f271b2ccSMax Krasnyansky 	 * This callback is supposed to deal with mc filter in
1133f271b2ccSMax Krasnyansky 	 * _rx_ path and has nothing to do with the _tx_ path.
1134f271b2ccSMax Krasnyansky 	 * In rx path we always accept everything userspace gives us.
1135f271b2ccSMax Krasnyansky 	 */
11361da177e4SLinus Torvalds }
11371da177e4SLinus Torvalds 
1138c8f44affSMichał Mirosław static netdev_features_t tun_net_fix_features(struct net_device *dev,
1139c8f44affSMichał Mirosław 	netdev_features_t features)
114088255375SMichał Mirosław {
114188255375SMichał Mirosław 	struct tun_struct *tun = netdev_priv(dev);
114288255375SMichał Mirosław 
114388255375SMichał Mirosław 	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
114488255375SMichał Mirosław }
1145bebd097aSNeil Horman #ifdef CONFIG_NET_POLL_CONTROLLER
1146bebd097aSNeil Horman static void tun_poll_controller(struct net_device *dev)
1147bebd097aSNeil Horman {
1148bebd097aSNeil Horman 	/*
1149bebd097aSNeil Horman 	 * Tun only receives frames when:
1150bebd097aSNeil Horman 	 * 1) the char device endpoint gets data from user space
1151bebd097aSNeil Horman 	 * 2) the tun socket gets a sendmsg call from user space
115294317099SPetar Penkov 	 * If NAPI is not enabled, since both of those are synchronous
115394317099SPetar Penkov 	 * operations, we are guaranteed never to have pending data when we poll
115494317099SPetar Penkov 	 * for it so there is nothing to do here but return.
1155bebd097aSNeil Horman 	 * We need this though so netpoll recognizes us as an interface that
1156bebd097aSNeil Horman 	 * supports polling, which enables bridge devices in virt setups to
1157bebd097aSNeil Horman 	 * still use netconsole
115894317099SPetar Penkov 	 * If NAPI is enabled, however, we need to schedule polling for all
115990e33d45SPetar Penkov 	 * queues unless we are using napi_gro_frags(), which we call in
116090e33d45SPetar Penkov 	 * process context and not in NAPI context.
1161bebd097aSNeil Horman 	 */
116294317099SPetar Penkov 	struct tun_struct *tun = netdev_priv(dev);
116394317099SPetar Penkov 
116494317099SPetar Penkov 	if (tun->flags & IFF_NAPI) {
116594317099SPetar Penkov 		struct tun_file *tfile;
116694317099SPetar Penkov 		int i;
116794317099SPetar Penkov 
116890e33d45SPetar Penkov 		if (tun_napi_frags_enabled(tun))
116990e33d45SPetar Penkov 			return;
117090e33d45SPetar Penkov 
117194317099SPetar Penkov 		rcu_read_lock();
117294317099SPetar Penkov 		for (i = 0; i < tun->numqueues; i++) {
117394317099SPetar Penkov 			tfile = rcu_dereference(tun->tfiles[i]);
1174aec72f33SEric Dumazet 			if (tfile->napi_enabled)
117594317099SPetar Penkov 				napi_schedule(&tfile->napi);
117694317099SPetar Penkov 		}
117794317099SPetar Penkov 		rcu_read_unlock();
117894317099SPetar Penkov 	}
1179bebd097aSNeil Horman 	return;
1180bebd097aSNeil Horman }
1181bebd097aSNeil Horman #endif
1182eaea34b2SPaolo Abeni 
1183eaea34b2SPaolo Abeni static void tun_set_headroom(struct net_device *dev, int new_hr)
1184eaea34b2SPaolo Abeni {
1185eaea34b2SPaolo Abeni 	struct tun_struct *tun = netdev_priv(dev);
1186eaea34b2SPaolo Abeni 
1187eaea34b2SPaolo Abeni 	if (new_hr < NET_SKB_PAD)
1188eaea34b2SPaolo Abeni 		new_hr = NET_SKB_PAD;
1189eaea34b2SPaolo Abeni 
1190eaea34b2SPaolo Abeni 	tun->align = new_hr;
1191eaea34b2SPaolo Abeni }
1192eaea34b2SPaolo Abeni 
1193bc1f4470Sstephen hemminger static void
1194608b9977SPaolo Abeni tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1195608b9977SPaolo Abeni {
1196608b9977SPaolo Abeni 	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
1197608b9977SPaolo Abeni 	struct tun_struct *tun = netdev_priv(dev);
1198608b9977SPaolo Abeni 	struct tun_pcpu_stats *p;
1199608b9977SPaolo Abeni 	int i;
1200608b9977SPaolo Abeni 
1201608b9977SPaolo Abeni 	for_each_possible_cpu(i) {
1202608b9977SPaolo Abeni 		u64 rxpackets, rxbytes, txpackets, txbytes;
1203608b9977SPaolo Abeni 		unsigned int start;
1204608b9977SPaolo Abeni 
1205608b9977SPaolo Abeni 		p = per_cpu_ptr(tun->pcpu_stats, i);
1206608b9977SPaolo Abeni 		do {
1207608b9977SPaolo Abeni 			start = u64_stats_fetch_begin(&p->syncp);
1208608b9977SPaolo Abeni 			rxpackets	= p->rx_packets;
1209608b9977SPaolo Abeni 			rxbytes		= p->rx_bytes;
1210608b9977SPaolo Abeni 			txpackets	= p->tx_packets;
1211608b9977SPaolo Abeni 			txbytes		= p->tx_bytes;
1212608b9977SPaolo Abeni 		} while (u64_stats_fetch_retry(&p->syncp, start));
1213608b9977SPaolo Abeni 
1214608b9977SPaolo Abeni 		stats->rx_packets	+= rxpackets;
1215608b9977SPaolo Abeni 		stats->rx_bytes		+= rxbytes;
1216608b9977SPaolo Abeni 		stats->tx_packets	+= txpackets;
1217608b9977SPaolo Abeni 		stats->tx_bytes		+= txbytes;
1218608b9977SPaolo Abeni 
1219608b9977SPaolo Abeni 		/* u32 counters */
1220608b9977SPaolo Abeni 		rx_dropped	+= p->rx_dropped;
1221608b9977SPaolo Abeni 		rx_frame_errors	+= p->rx_frame_errors;
1222608b9977SPaolo Abeni 		tx_dropped	+= p->tx_dropped;
1223608b9977SPaolo Abeni 	}
1224608b9977SPaolo Abeni 	stats->rx_dropped  = rx_dropped;
1225608b9977SPaolo Abeni 	stats->rx_frame_errors = rx_frame_errors;
1226608b9977SPaolo Abeni 	stats->tx_dropped = tx_dropped;
1227608b9977SPaolo Abeni }
1228608b9977SPaolo Abeni 
1229761876c8SJason Wang static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1230761876c8SJason Wang 		       struct netlink_ext_ack *extack)
1231761876c8SJason Wang {
1232761876c8SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1233761876c8SJason Wang 	struct bpf_prog *old_prog;
1234761876c8SJason Wang 
1235761876c8SJason Wang 	old_prog = rtnl_dereference(tun->xdp_prog);
1236761876c8SJason Wang 	rcu_assign_pointer(tun->xdp_prog, prog);
1237761876c8SJason Wang 	if (old_prog)
1238761876c8SJason Wang 		bpf_prog_put(old_prog);
1239761876c8SJason Wang 
1240761876c8SJason Wang 	return 0;
1241761876c8SJason Wang }
1242761876c8SJason Wang 
1243761876c8SJason Wang static u32 tun_xdp_query(struct net_device *dev)
1244761876c8SJason Wang {
1245761876c8SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1246761876c8SJason Wang 	const struct bpf_prog *xdp_prog;
1247761876c8SJason Wang 
1248761876c8SJason Wang 	xdp_prog = rtnl_dereference(tun->xdp_prog);
1249761876c8SJason Wang 	if (xdp_prog)
1250761876c8SJason Wang 		return xdp_prog->aux->id;
1251761876c8SJason Wang 
1252761876c8SJason Wang 	return 0;
1253761876c8SJason Wang }
1254761876c8SJason Wang 
1255f4e63525SJakub Kicinski static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1256761876c8SJason Wang {
1257761876c8SJason Wang 	switch (xdp->command) {
1258761876c8SJason Wang 	case XDP_SETUP_PROG:
1259761876c8SJason Wang 		return tun_xdp_set(dev, xdp->prog, xdp->extack);
1260761876c8SJason Wang 	case XDP_QUERY_PROG:
1261761876c8SJason Wang 		xdp->prog_id = tun_xdp_query(dev);
1262761876c8SJason Wang 		xdp->prog_attached = !!xdp->prog_id;
1263761876c8SJason Wang 		return 0;
1264761876c8SJason Wang 	default:
1265761876c8SJason Wang 		return -EINVAL;
1266761876c8SJason Wang 	}
1267761876c8SJason Wang }
1268761876c8SJason Wang 
1269758e43b7SStephen Hemminger static const struct net_device_ops tun_netdev_ops = {
1270c70f1829SEric W. Biederman 	.ndo_uninit		= tun_net_uninit,
1271758e43b7SStephen Hemminger 	.ndo_open		= tun_net_open,
1272758e43b7SStephen Hemminger 	.ndo_stop		= tun_net_close,
127300829823SStephen Hemminger 	.ndo_start_xmit		= tun_net_xmit,
127488255375SMichał Mirosław 	.ndo_fix_features	= tun_net_fix_features,
1275c8d68e6bSJason Wang 	.ndo_select_queue	= tun_select_queue,
1276bebd097aSNeil Horman #ifdef CONFIG_NET_POLL_CONTROLLER
1277bebd097aSNeil Horman 	.ndo_poll_controller	= tun_poll_controller,
1278bebd097aSNeil Horman #endif
1279eaea34b2SPaolo Abeni 	.ndo_set_rx_headroom	= tun_set_headroom,
1280608b9977SPaolo Abeni 	.ndo_get_stats64	= tun_net_get_stats64,
1281758e43b7SStephen Hemminger };
1282758e43b7SStephen Hemminger 
1283fc72d1d5SJason Wang static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
1284fc72d1d5SJason Wang {
1285fc72d1d5SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1286fc72d1d5SJason Wang 	struct xdp_buff *buff = xdp->data_hard_start;
1287fc72d1d5SJason Wang 	int headroom = xdp->data - xdp->data_hard_start;
1288fc72d1d5SJason Wang 	struct tun_file *tfile;
1289fc72d1d5SJason Wang 	u32 numqueues;
1290fc72d1d5SJason Wang 	int ret = 0;
1291fc72d1d5SJason Wang 
1292fc72d1d5SJason Wang 	/* Assure headroom is available and buff is properly aligned */
1293fc72d1d5SJason Wang 	if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp)))
1294fc72d1d5SJason Wang 		return -ENOSPC;
1295fc72d1d5SJason Wang 
1296fc72d1d5SJason Wang 	*buff = *xdp;
1297fc72d1d5SJason Wang 
1298fc72d1d5SJason Wang 	rcu_read_lock();
1299fc72d1d5SJason Wang 
1300fc72d1d5SJason Wang 	numqueues = READ_ONCE(tun->numqueues);
1301fc72d1d5SJason Wang 	if (!numqueues) {
1302fc72d1d5SJason Wang 		ret = -ENOSPC;
1303fc72d1d5SJason Wang 		goto out;
1304fc72d1d5SJason Wang 	}
1305fc72d1d5SJason Wang 
1306fc72d1d5SJason Wang 	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1307fc72d1d5SJason Wang 					    numqueues]);
1308fc72d1d5SJason Wang 	/* Encode the XDP flag into lowest bit for consumer to differ
1309fc72d1d5SJason Wang 	 * XDP buffer from sk_buff.
1310fc72d1d5SJason Wang 	 */
1311fc72d1d5SJason Wang 	if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) {
1312fc72d1d5SJason Wang 		this_cpu_inc(tun->pcpu_stats->tx_dropped);
1313fc72d1d5SJason Wang 		ret = -ENOSPC;
1314fc72d1d5SJason Wang 	}
1315fc72d1d5SJason Wang 
1316fc72d1d5SJason Wang out:
1317fc72d1d5SJason Wang 	rcu_read_unlock();
1318fc72d1d5SJason Wang 	return ret;
1319fc72d1d5SJason Wang }
1320fc72d1d5SJason Wang 
1321fc72d1d5SJason Wang static void tun_xdp_flush(struct net_device *dev)
1322fc72d1d5SJason Wang {
1323fc72d1d5SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1324fc72d1d5SJason Wang 	struct tun_file *tfile;
1325fc72d1d5SJason Wang 	u32 numqueues;
1326fc72d1d5SJason Wang 
1327fc72d1d5SJason Wang 	rcu_read_lock();
1328fc72d1d5SJason Wang 
1329fc72d1d5SJason Wang 	numqueues = READ_ONCE(tun->numqueues);
1330fc72d1d5SJason Wang 	if (!numqueues)
1331fc72d1d5SJason Wang 		goto out;
1332fc72d1d5SJason Wang 
1333fc72d1d5SJason Wang 	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1334fc72d1d5SJason Wang 					    numqueues]);
1335fc72d1d5SJason Wang 	/* Notify and wake up reader process */
1336fc72d1d5SJason Wang 	if (tfile->flags & TUN_FASYNC)
1337fc72d1d5SJason Wang 		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1338fc72d1d5SJason Wang 	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1339fc72d1d5SJason Wang 
1340fc72d1d5SJason Wang out:
1341fc72d1d5SJason Wang 	rcu_read_unlock();
1342fc72d1d5SJason Wang }
1343fc72d1d5SJason Wang 
1344758e43b7SStephen Hemminger static const struct net_device_ops tap_netdev_ops = {
1345c70f1829SEric W. Biederman 	.ndo_uninit		= tun_net_uninit,
1346758e43b7SStephen Hemminger 	.ndo_open		= tun_net_open,
1347758e43b7SStephen Hemminger 	.ndo_stop		= tun_net_close,
134800829823SStephen Hemminger 	.ndo_start_xmit		= tun_net_xmit,
134988255375SMichał Mirosław 	.ndo_fix_features	= tun_net_fix_features,
1350afc4b13dSJiri Pirko 	.ndo_set_rx_mode	= tun_net_mclist,
1351758e43b7SStephen Hemminger 	.ndo_set_mac_address	= eth_mac_addr,
1352758e43b7SStephen Hemminger 	.ndo_validate_addr	= eth_validate_addr,
1353c8d68e6bSJason Wang 	.ndo_select_queue	= tun_select_queue,
1354bebd097aSNeil Horman #ifdef CONFIG_NET_POLL_CONTROLLER
1355bebd097aSNeil Horman 	.ndo_poll_controller	= tun_poll_controller,
1356bebd097aSNeil Horman #endif
13575e52796aSToshiaki Makita 	.ndo_features_check	= passthru_features_check,
1358eaea34b2SPaolo Abeni 	.ndo_set_rx_headroom	= tun_set_headroom,
1359608b9977SPaolo Abeni 	.ndo_get_stats64	= tun_net_get_stats64,
1360f4e63525SJakub Kicinski 	.ndo_bpf		= tun_xdp,
1361fc72d1d5SJason Wang 	.ndo_xdp_xmit		= tun_xdp_xmit,
1362fc72d1d5SJason Wang 	.ndo_xdp_flush		= tun_xdp_flush,
1363758e43b7SStephen Hemminger };
1364758e43b7SStephen Hemminger 
1365944a1376SPavel Emelyanov static void tun_flow_init(struct tun_struct *tun)
136696442e42SJason Wang {
136796442e42SJason Wang 	int i;
136896442e42SJason Wang 
136996442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
137096442e42SJason Wang 		INIT_HLIST_HEAD(&tun->flows[i]);
137196442e42SJason Wang 
137296442e42SJason Wang 	tun->ageing_time = TUN_FLOW_EXPIRE;
1373e99e88a9SKees Cook 	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1374e99e88a9SKees Cook 	mod_timer(&tun->flow_gc_timer,
1375e99e88a9SKees Cook 		  round_jiffies_up(jiffies + tun->ageing_time));
137696442e42SJason Wang }
137796442e42SJason Wang 
137896442e42SJason Wang static void tun_flow_uninit(struct tun_struct *tun)
137996442e42SJason Wang {
138096442e42SJason Wang 	del_timer_sync(&tun->flow_gc_timer);
138196442e42SJason Wang 	tun_flow_flush(tun);
138296442e42SJason Wang }
138396442e42SJason Wang 
138491572088SJarod Wilson #define MIN_MTU 68
138591572088SJarod Wilson #define MAX_MTU 65535
138691572088SJarod Wilson 
13871da177e4SLinus Torvalds /* Initialize net device. */
13881da177e4SLinus Torvalds static void tun_net_init(struct net_device *dev)
13891da177e4SLinus Torvalds {
13901da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
13911da177e4SLinus Torvalds 
13921da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
139340630b82SMichael S. Tsirkin 	case IFF_TUN:
1394758e43b7SStephen Hemminger 		dev->netdev_ops = &tun_netdev_ops;
1395758e43b7SStephen Hemminger 
13961da177e4SLinus Torvalds 		/* Point-to-Point TUN Device */
13971da177e4SLinus Torvalds 		dev->hard_header_len = 0;
13981da177e4SLinus Torvalds 		dev->addr_len = 0;
13991da177e4SLinus Torvalds 		dev->mtu = 1500;
14001da177e4SLinus Torvalds 
14011da177e4SLinus Torvalds 		/* Zero header length */
14021da177e4SLinus Torvalds 		dev->type = ARPHRD_NONE;
14031da177e4SLinus Torvalds 		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
14041da177e4SLinus Torvalds 		break;
14051da177e4SLinus Torvalds 
140640630b82SMichael S. Tsirkin 	case IFF_TAP:
14077a0a9608SKusanagi Kouichi 		dev->netdev_ops = &tap_netdev_ops;
14081da177e4SLinus Torvalds 		/* Ethernet TAP Device */
14091da177e4SLinus Torvalds 		ether_setup(dev);
1410550fd08cSNeil Horman 		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1411a676847bSstephen hemminger 		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
141236226a8dSBrian Braunstein 
1413f2cedb63SDanny Kukawka 		eth_hw_addr_random(dev);
141436226a8dSBrian Braunstein 
14151da177e4SLinus Torvalds 		break;
14161da177e4SLinus Torvalds 	}
141791572088SJarod Wilson 
141891572088SJarod Wilson 	dev->min_mtu = MIN_MTU;
141991572088SJarod Wilson 	dev->max_mtu = MAX_MTU - dev->hard_header_len;
14201da177e4SLinus Torvalds }
14211da177e4SLinus Torvalds 
14221da177e4SLinus Torvalds /* Character device part */
14231da177e4SLinus Torvalds 
14241da177e4SLinus Torvalds /* Poll */
14251da177e4SLinus Torvalds static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
14261da177e4SLinus Torvalds {
1427b2430de3SEric W. Biederman 	struct tun_file *tfile = file->private_data;
14289484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
14293c8a9c63SMariusz Kozlowski 	struct sock *sk;
143033dccbb0SHerbert Xu 	unsigned int mask = 0;
14311da177e4SLinus Torvalds 
14321da177e4SLinus Torvalds 	if (!tun)
1433eac9e902SEric W. Biederman 		return POLLERR;
14341da177e4SLinus Torvalds 
143554f968d6SJason Wang 	sk = tfile->socket.sk;
14363c8a9c63SMariusz Kozlowski 
14376b8a66eeSJoe Perches 	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
14381da177e4SLinus Torvalds 
14399e641bdcSXi Wang 	poll_wait(file, sk_sleep(sk), wait);
14401da177e4SLinus Torvalds 
14415990a305SJason Wang 	if (!ptr_ring_empty(&tfile->tx_ring))
14421da177e4SLinus Torvalds 		mask |= POLLIN | POLLRDNORM;
14431da177e4SLinus Torvalds 
1444b20e2d54SHannes Frederic Sowa 	if (tun->dev->flags & IFF_UP &&
1445b20e2d54SHannes Frederic Sowa 	    (sock_writeable(sk) ||
14469cd3e072SEric Dumazet 	     (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1447b20e2d54SHannes Frederic Sowa 	      sock_writeable(sk))))
144833dccbb0SHerbert Xu 		mask |= POLLOUT | POLLWRNORM;
144933dccbb0SHerbert Xu 
1450c70f1829SEric W. Biederman 	if (tun->dev->reg_state != NETREG_REGISTERED)
1451c70f1829SEric W. Biederman 		mask = POLLERR;
1452c70f1829SEric W. Biederman 
1453631ab46bSEric W. Biederman 	tun_put(tun);
14541da177e4SLinus Torvalds 	return mask;
14551da177e4SLinus Torvalds }
14561da177e4SLinus Torvalds 
145790e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
145890e33d45SPetar Penkov 					    size_t len,
145990e33d45SPetar Penkov 					    const struct iov_iter *it)
146090e33d45SPetar Penkov {
146190e33d45SPetar Penkov 	struct sk_buff *skb;
146290e33d45SPetar Penkov 	size_t linear;
146390e33d45SPetar Penkov 	int err;
146490e33d45SPetar Penkov 	int i;
146590e33d45SPetar Penkov 
146690e33d45SPetar Penkov 	if (it->nr_segs > MAX_SKB_FRAGS + 1)
146790e33d45SPetar Penkov 		return ERR_PTR(-ENOMEM);
146890e33d45SPetar Penkov 
146990e33d45SPetar Penkov 	local_bh_disable();
147090e33d45SPetar Penkov 	skb = napi_get_frags(&tfile->napi);
147190e33d45SPetar Penkov 	local_bh_enable();
147290e33d45SPetar Penkov 	if (!skb)
147390e33d45SPetar Penkov 		return ERR_PTR(-ENOMEM);
147490e33d45SPetar Penkov 
147590e33d45SPetar Penkov 	linear = iov_iter_single_seg_count(it);
147690e33d45SPetar Penkov 	err = __skb_grow(skb, linear);
147790e33d45SPetar Penkov 	if (err)
147890e33d45SPetar Penkov 		goto free;
147990e33d45SPetar Penkov 
148090e33d45SPetar Penkov 	skb->len = len;
148190e33d45SPetar Penkov 	skb->data_len = len - linear;
148290e33d45SPetar Penkov 	skb->truesize += skb->data_len;
148390e33d45SPetar Penkov 
148490e33d45SPetar Penkov 	for (i = 1; i < it->nr_segs; i++) {
148590e33d45SPetar Penkov 		size_t fragsz = it->iov[i].iov_len;
148690e33d45SPetar Penkov 		unsigned long offset;
148790e33d45SPetar Penkov 		struct page *page;
148890e33d45SPetar Penkov 		void *data;
148990e33d45SPetar Penkov 
149090e33d45SPetar Penkov 		if (fragsz == 0 || fragsz > PAGE_SIZE) {
149190e33d45SPetar Penkov 			err = -EINVAL;
149290e33d45SPetar Penkov 			goto free;
149390e33d45SPetar Penkov 		}
149490e33d45SPetar Penkov 
149590e33d45SPetar Penkov 		local_bh_disable();
149690e33d45SPetar Penkov 		data = napi_alloc_frag(fragsz);
149790e33d45SPetar Penkov 		local_bh_enable();
149890e33d45SPetar Penkov 		if (!data) {
149990e33d45SPetar Penkov 			err = -ENOMEM;
150090e33d45SPetar Penkov 			goto free;
150190e33d45SPetar Penkov 		}
150290e33d45SPetar Penkov 
150390e33d45SPetar Penkov 		page = virt_to_head_page(data);
150490e33d45SPetar Penkov 		offset = data - page_address(page);
150590e33d45SPetar Penkov 		skb_fill_page_desc(skb, i - 1, page, offset, fragsz);
150690e33d45SPetar Penkov 	}
150790e33d45SPetar Penkov 
150890e33d45SPetar Penkov 	return skb;
150990e33d45SPetar Penkov free:
151090e33d45SPetar Penkov 	/* frees skb and all frags allocated with napi_alloc_frag() */
151190e33d45SPetar Penkov 	napi_free_frags(&tfile->napi);
151290e33d45SPetar Penkov 	return ERR_PTR(err);
151390e33d45SPetar Penkov }
151490e33d45SPetar Penkov 
1515f42157cbSRusty Russell /* prepad is the amount to reserve at front.  len is length after that.
1516f42157cbSRusty Russell  * linear is a hint as to how much to copy (usually headers). */
151754f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
151833dccbb0SHerbert Xu 				     size_t prepad, size_t len,
151933dccbb0SHerbert Xu 				     size_t linear, int noblock)
1520f42157cbSRusty Russell {
152154f968d6SJason Wang 	struct sock *sk = tfile->socket.sk;
1522f42157cbSRusty Russell 	struct sk_buff *skb;
152333dccbb0SHerbert Xu 	int err;
1524f42157cbSRusty Russell 
1525f42157cbSRusty Russell 	/* Under a page?  Don't bother with paged skb. */
15260eca93bcSHerbert Xu 	if (prepad + len < PAGE_SIZE || !linear)
152733dccbb0SHerbert Xu 		linear = len;
1528f42157cbSRusty Russell 
152933dccbb0SHerbert Xu 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
153028d64271SEric Dumazet 				   &err, 0);
1531f42157cbSRusty Russell 	if (!skb)
153233dccbb0SHerbert Xu 		return ERR_PTR(err);
1533f42157cbSRusty Russell 
1534f42157cbSRusty Russell 	skb_reserve(skb, prepad);
1535f42157cbSRusty Russell 	skb_put(skb, linear);
153633dccbb0SHerbert Xu 	skb->data_len = len - linear;
153733dccbb0SHerbert Xu 	skb->len += len - linear;
1538f42157cbSRusty Russell 
1539f42157cbSRusty Russell 	return skb;
1540f42157cbSRusty Russell }
1541f42157cbSRusty Russell 
15425503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
15435503fcecSJason Wang 			   struct sk_buff *skb, int more)
15445503fcecSJason Wang {
15455503fcecSJason Wang 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
15465503fcecSJason Wang 	struct sk_buff_head process_queue;
15475503fcecSJason Wang 	u32 rx_batched = tun->rx_batched;
15485503fcecSJason Wang 	bool rcv = false;
15495503fcecSJason Wang 
15505503fcecSJason Wang 	if (!rx_batched || (!more && skb_queue_empty(queue))) {
15515503fcecSJason Wang 		local_bh_disable();
15525503fcecSJason Wang 		netif_receive_skb(skb);
15535503fcecSJason Wang 		local_bh_enable();
15545503fcecSJason Wang 		return;
15555503fcecSJason Wang 	}
15565503fcecSJason Wang 
15575503fcecSJason Wang 	spin_lock(&queue->lock);
15585503fcecSJason Wang 	if (!more || skb_queue_len(queue) == rx_batched) {
15595503fcecSJason Wang 		__skb_queue_head_init(&process_queue);
15605503fcecSJason Wang 		skb_queue_splice_tail_init(queue, &process_queue);
15615503fcecSJason Wang 		rcv = true;
15625503fcecSJason Wang 	} else {
15635503fcecSJason Wang 		__skb_queue_tail(queue, skb);
15645503fcecSJason Wang 	}
15655503fcecSJason Wang 	spin_unlock(&queue->lock);
15665503fcecSJason Wang 
15675503fcecSJason Wang 	if (rcv) {
15685503fcecSJason Wang 		struct sk_buff *nskb;
15695503fcecSJason Wang 
15705503fcecSJason Wang 		local_bh_disable();
15715503fcecSJason Wang 		while ((nskb = __skb_dequeue(&process_queue)))
15725503fcecSJason Wang 			netif_receive_skb(nskb);
15735503fcecSJason Wang 		netif_receive_skb(skb);
15745503fcecSJason Wang 		local_bh_enable();
15755503fcecSJason Wang 	}
15765503fcecSJason Wang }
15775503fcecSJason Wang 
157866ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
157966ccbc9cSJason Wang 			      int len, int noblock, bool zerocopy)
158066ccbc9cSJason Wang {
158166ccbc9cSJason Wang 	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
158266ccbc9cSJason Wang 		return false;
158366ccbc9cSJason Wang 
158466ccbc9cSJason Wang 	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
158566ccbc9cSJason Wang 		return false;
158666ccbc9cSJason Wang 
158766ccbc9cSJason Wang 	if (!noblock)
158866ccbc9cSJason Wang 		return false;
158966ccbc9cSJason Wang 
159066ccbc9cSJason Wang 	if (zerocopy)
159166ccbc9cSJason Wang 		return false;
159266ccbc9cSJason Wang 
159366ccbc9cSJason Wang 	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
159466ccbc9cSJason Wang 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
159566ccbc9cSJason Wang 		return false;
159666ccbc9cSJason Wang 
159766ccbc9cSJason Wang 	return true;
159866ccbc9cSJason Wang }
159966ccbc9cSJason Wang 
1600761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1601761876c8SJason Wang 				     struct tun_file *tfile,
160266ccbc9cSJason Wang 				     struct iov_iter *from,
1603761876c8SJason Wang 				     struct virtio_net_hdr *hdr,
16041cfe6e93SJason Wang 				     int len, int *skb_xdp)
160566ccbc9cSJason Wang {
16060bbd7dadSEric Dumazet 	struct page_frag *alloc_frag = &current->task_frag;
160766ccbc9cSJason Wang 	struct sk_buff *skb;
1608761876c8SJason Wang 	struct bpf_prog *xdp_prog;
16097df13219SJason Wang 	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1610761876c8SJason Wang 	unsigned int delta = 0;
161166ccbc9cSJason Wang 	char *buf;
161266ccbc9cSJason Wang 	size_t copied;
1613761876c8SJason Wang 	bool xdp_xmit = false;
16147df13219SJason Wang 	int err, pad = TUN_RX_PAD;
16157df13219SJason Wang 
16167df13219SJason Wang 	rcu_read_lock();
16177df13219SJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
16187df13219SJason Wang 	if (xdp_prog)
16197df13219SJason Wang 		pad += TUN_HEADROOM;
16207df13219SJason Wang 	buflen += SKB_DATA_ALIGN(len + pad);
16217df13219SJason Wang 	rcu_read_unlock();
162266ccbc9cSJason Wang 
162363b9ab65SJason Wang 	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
162466ccbc9cSJason Wang 	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
162566ccbc9cSJason Wang 		return ERR_PTR(-ENOMEM);
162666ccbc9cSJason Wang 
162766ccbc9cSJason Wang 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
162866ccbc9cSJason Wang 	copied = copy_page_from_iter(alloc_frag->page,
16297df13219SJason Wang 				     alloc_frag->offset + pad,
163066ccbc9cSJason Wang 				     len, from);
163166ccbc9cSJason Wang 	if (copied != len)
163266ccbc9cSJason Wang 		return ERR_PTR(-EFAULT);
163366ccbc9cSJason Wang 
16347df13219SJason Wang 	/* There's a small window that XDP may be set after the check
16357df13219SJason Wang 	 * of xdp_prog above, this should be rare and for simplicity
16367df13219SJason Wang 	 * we do XDP on skb in case the headroom is not enough.
16377df13219SJason Wang 	 */
16387df13219SJason Wang 	if (hdr->gso_type || !xdp_prog)
16391cfe6e93SJason Wang 		*skb_xdp = 1;
1640761876c8SJason Wang 	else
16411cfe6e93SJason Wang 		*skb_xdp = 0;
164266ccbc9cSJason Wang 
1643761876c8SJason Wang 	rcu_read_lock();
1644761876c8SJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
16451cfe6e93SJason Wang 	if (xdp_prog && !*skb_xdp) {
1646761876c8SJason Wang 		struct xdp_buff xdp;
1647761876c8SJason Wang 		void *orig_data;
1648761876c8SJason Wang 		u32 act;
1649761876c8SJason Wang 
1650761876c8SJason Wang 		xdp.data_hard_start = buf;
16517df13219SJason Wang 		xdp.data = buf + pad;
1652de8f3a83SDaniel Borkmann 		xdp_set_data_meta_invalid(&xdp);
1653761876c8SJason Wang 		xdp.data_end = xdp.data + len;
16548bf5c4eeSJesper Dangaard Brouer 		xdp.rxq = &tfile->xdp_rxq;
1655761876c8SJason Wang 		orig_data = xdp.data;
1656761876c8SJason Wang 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
1657761876c8SJason Wang 
1658761876c8SJason Wang 		switch (act) {
1659761876c8SJason Wang 		case XDP_REDIRECT:
1660761876c8SJason Wang 			get_page(alloc_frag->page);
1661761876c8SJason Wang 			alloc_frag->offset += buflen;
1662761876c8SJason Wang 			err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
1663761876c8SJason Wang 			if (err)
1664761876c8SJason Wang 				goto err_redirect;
1665654d5738SXin Long 			rcu_read_unlock();
1666761876c8SJason Wang 			return NULL;
1667761876c8SJason Wang 		case XDP_TX:
1668761876c8SJason Wang 			xdp_xmit = true;
1669761876c8SJason Wang 			/* fall through */
1670761876c8SJason Wang 		case XDP_PASS:
1671761876c8SJason Wang 			delta = orig_data - xdp.data;
1672761876c8SJason Wang 			break;
1673761876c8SJason Wang 		default:
1674761876c8SJason Wang 			bpf_warn_invalid_xdp_action(act);
1675761876c8SJason Wang 			/* fall through */
1676761876c8SJason Wang 		case XDP_ABORTED:
1677761876c8SJason Wang 			trace_xdp_exception(tun->dev, xdp_prog, act);
1678761876c8SJason Wang 			/* fall through */
1679761876c8SJason Wang 		case XDP_DROP:
1680761876c8SJason Wang 			goto err_xdp;
1681761876c8SJason Wang 		}
1682761876c8SJason Wang 	}
1683761876c8SJason Wang 
1684761876c8SJason Wang 	skb = build_skb(buf, buflen);
1685761876c8SJason Wang 	if (!skb) {
1686761876c8SJason Wang 		rcu_read_unlock();
1687761876c8SJason Wang 		return ERR_PTR(-ENOMEM);
1688761876c8SJason Wang 	}
1689761876c8SJason Wang 
16907df13219SJason Wang 	skb_reserve(skb, pad - delta);
1691761876c8SJason Wang 	skb_put(skb, len + delta);
169266ccbc9cSJason Wang 	get_page(alloc_frag->page);
169366ccbc9cSJason Wang 	alloc_frag->offset += buflen;
169466ccbc9cSJason Wang 
1695761876c8SJason Wang 	if (xdp_xmit) {
1696761876c8SJason Wang 		skb->dev = tun->dev;
1697761876c8SJason Wang 		generic_xdp_tx(skb, xdp_prog);
1698654d5738SXin Long 		rcu_read_unlock();
1699761876c8SJason Wang 		return NULL;
1700761876c8SJason Wang 	}
1701761876c8SJason Wang 
1702761876c8SJason Wang 	rcu_read_unlock();
1703761876c8SJason Wang 
170466ccbc9cSJason Wang 	return skb;
1705761876c8SJason Wang 
1706761876c8SJason Wang err_redirect:
1707761876c8SJason Wang 	put_page(alloc_frag->page);
1708761876c8SJason Wang err_xdp:
1709761876c8SJason Wang 	rcu_read_unlock();
1710761876c8SJason Wang 	this_cpu_inc(tun->pcpu_stats->rx_dropped);
1711761876c8SJason Wang 	return NULL;
171266ccbc9cSJason Wang }
171366ccbc9cSJason Wang 
17141da177e4SLinus Torvalds /* Get packet from user space buffer */
171554f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1716f5ff53b4SAl Viro 			    void *msg_control, struct iov_iter *from,
17175503fcecSJason Wang 			    int noblock, bool more)
17181da177e4SLinus Torvalds {
171909640e63SHarvey Harrison 	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
17201da177e4SLinus Torvalds 	struct sk_buff *skb;
1721f5ff53b4SAl Viro 	size_t total_len = iov_iter_count(from);
1722eaea34b2SPaolo Abeni 	size_t len = total_len, align = tun->align, linear;
1723f43798c2SRusty Russell 	struct virtio_net_hdr gso = { 0 };
1724608b9977SPaolo Abeni 	struct tun_pcpu_stats *stats;
172596f8d9ecSJason Wang 	int good_linear;
17260690899bSMichael S. Tsirkin 	int copylen;
17270690899bSMichael S. Tsirkin 	bool zerocopy = false;
17280690899bSMichael S. Tsirkin 	int err;
172996f84061SJason Wang 	u32 rxhash = 0;
17301cfe6e93SJason Wang 	int skb_xdp = 1;
173190e33d45SPetar Penkov 	bool frags = tun_napi_frags_enabled(tun);
17321da177e4SLinus Torvalds 
17331bd4978aSEric Dumazet 	if (!(tun->dev->flags & IFF_UP))
17341bd4978aSEric Dumazet 		return -EIO;
17351bd4978aSEric Dumazet 
173640630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_NO_PI)) {
173715718ea0SDan Carpenter 		if (len < sizeof(pi))
17381da177e4SLinus Torvalds 			return -EINVAL;
173915718ea0SDan Carpenter 		len -= sizeof(pi);
17401da177e4SLinus Torvalds 
1741cbbd26b8SAl Viro 		if (!copy_from_iter_full(&pi, sizeof(pi), from))
17421da177e4SLinus Torvalds 			return -EFAULT;
17431da177e4SLinus Torvalds 	}
17441da177e4SLinus Torvalds 
174540630b82SMichael S. Tsirkin 	if (tun->flags & IFF_VNET_HDR) {
1746e1edab87SWillem de Bruijn 		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1747e1edab87SWillem de Bruijn 
1748e1edab87SWillem de Bruijn 		if (len < vnet_hdr_sz)
1749f43798c2SRusty Russell 			return -EINVAL;
1750e1edab87SWillem de Bruijn 		len -= vnet_hdr_sz;
1751f43798c2SRusty Russell 
1752cbbd26b8SAl Viro 		if (!copy_from_iter_full(&gso, sizeof(gso), from))
1753f43798c2SRusty Russell 			return -EFAULT;
1754f43798c2SRusty Russell 
17554909122fSHerbert Xu 		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
175656f0dcc5SMichael S. Tsirkin 		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
175756f0dcc5SMichael S. Tsirkin 			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
17584909122fSHerbert Xu 
175956f0dcc5SMichael S. Tsirkin 		if (tun16_to_cpu(tun, gso.hdr_len) > len)
1760f43798c2SRusty Russell 			return -EINVAL;
1761e1edab87SWillem de Bruijn 		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1762f43798c2SRusty Russell 	}
1763f43798c2SRusty Russell 
176440630b82SMichael S. Tsirkin 	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1765a504b86eSstephen hemminger 		align += NET_IP_ALIGN;
17660eca93bcSHerbert Xu 		if (unlikely(len < ETH_HLEN ||
176756f0dcc5SMichael S. Tsirkin 			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1768e01bf1c8SRusty Russell 			return -EINVAL;
1769e01bf1c8SRusty Russell 	}
17701da177e4SLinus Torvalds 
177196f8d9ecSJason Wang 	good_linear = SKB_MAX_HEAD(align);
177296f8d9ecSJason Wang 
177388529176SJason Wang 	if (msg_control) {
1774f5ff53b4SAl Viro 		struct iov_iter i = *from;
1775f5ff53b4SAl Viro 
177688529176SJason Wang 		/* There are 256 bytes to be copied in skb, so there is
177788529176SJason Wang 		 * enough room for skb expand head in case it is used.
17780690899bSMichael S. Tsirkin 		 * The rest of the buffer is mapped from userspace.
17790690899bSMichael S. Tsirkin 		 */
178056f0dcc5SMichael S. Tsirkin 		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
178196f8d9ecSJason Wang 		if (copylen > good_linear)
178296f8d9ecSJason Wang 			copylen = good_linear;
17833dd5c330SJason Wang 		linear = copylen;
1784f5ff53b4SAl Viro 		iov_iter_advance(&i, copylen);
1785f5ff53b4SAl Viro 		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
178688529176SJason Wang 			zerocopy = true;
178788529176SJason Wang 	}
178888529176SJason Wang 
178990e33d45SPetar Penkov 	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
17901cfe6e93SJason Wang 		/* For the packet that is not easy to be processed
17911cfe6e93SJason Wang 		 * (e.g gso or jumbo packet), we will do it at after
17921cfe6e93SJason Wang 		 * skb was created with generic XDP routine.
17931cfe6e93SJason Wang 		 */
17941cfe6e93SJason Wang 		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
179566ccbc9cSJason Wang 		if (IS_ERR(skb)) {
179666ccbc9cSJason Wang 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
179766ccbc9cSJason Wang 			return PTR_ERR(skb);
179866ccbc9cSJason Wang 		}
1799761876c8SJason Wang 		if (!skb)
1800761876c8SJason Wang 			return total_len;
180166ccbc9cSJason Wang 	} else {
180288529176SJason Wang 		if (!zerocopy) {
18030690899bSMichael S. Tsirkin 			copylen = len;
180456f0dcc5SMichael S. Tsirkin 			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
180596f8d9ecSJason Wang 				linear = good_linear;
180696f8d9ecSJason Wang 			else
180756f0dcc5SMichael S. Tsirkin 				linear = tun16_to_cpu(tun, gso.hdr_len);
18083dd5c330SJason Wang 		}
18090690899bSMichael S. Tsirkin 
181090e33d45SPetar Penkov 		if (frags) {
181190e33d45SPetar Penkov 			mutex_lock(&tfile->napi_mutex);
181290e33d45SPetar Penkov 			skb = tun_napi_alloc_frags(tfile, copylen, from);
181390e33d45SPetar Penkov 			/* tun_napi_alloc_frags() enforces a layout for the skb.
181490e33d45SPetar Penkov 			 * If zerocopy is enabled, then this layout will be
181590e33d45SPetar Penkov 			 * overwritten by zerocopy_sg_from_iter().
181690e33d45SPetar Penkov 			 */
181790e33d45SPetar Penkov 			zerocopy = false;
181890e33d45SPetar Penkov 		} else {
181990e33d45SPetar Penkov 			skb = tun_alloc_skb(tfile, align, copylen, linear,
182090e33d45SPetar Penkov 					    noblock);
182190e33d45SPetar Penkov 		}
182290e33d45SPetar Penkov 
182333dccbb0SHerbert Xu 		if (IS_ERR(skb)) {
182433dccbb0SHerbert Xu 			if (PTR_ERR(skb) != -EAGAIN)
1825608b9977SPaolo Abeni 				this_cpu_inc(tun->pcpu_stats->rx_dropped);
182690e33d45SPetar Penkov 			if (frags)
182790e33d45SPetar Penkov 				mutex_unlock(&tfile->napi_mutex);
182833dccbb0SHerbert Xu 			return PTR_ERR(skb);
18291da177e4SLinus Torvalds 		}
18301da177e4SLinus Torvalds 
18310690899bSMichael S. Tsirkin 		if (zerocopy)
1832f5ff53b4SAl Viro 			err = zerocopy_sg_from_iter(skb, from);
1833af1cc7a2SJason Wang 		else
1834f5ff53b4SAl Viro 			err = skb_copy_datagram_from_iter(skb, 0, from, len);
18350690899bSMichael S. Tsirkin 
18360690899bSMichael S. Tsirkin 		if (err) {
1837608b9977SPaolo Abeni 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
18388f22757eSDave Jones 			kfree_skb(skb);
183990e33d45SPetar Penkov 			if (frags) {
184090e33d45SPetar Penkov 				tfile->napi.skb = NULL;
184190e33d45SPetar Penkov 				mutex_unlock(&tfile->napi_mutex);
184290e33d45SPetar Penkov 			}
184390e33d45SPetar Penkov 
18441da177e4SLinus Torvalds 			return -EFAULT;
18458f22757eSDave Jones 		}
184666ccbc9cSJason Wang 	}
18471da177e4SLinus Torvalds 
18483e9e40e7SJarno Rajahalme 	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1849df10db98SPaolo Abeni 		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1850df10db98SPaolo Abeni 		kfree_skb(skb);
185190e33d45SPetar Penkov 		if (frags) {
185290e33d45SPetar Penkov 			tfile->napi.skb = NULL;
185390e33d45SPetar Penkov 			mutex_unlock(&tfile->napi_mutex);
185490e33d45SPetar Penkov 		}
185590e33d45SPetar Penkov 
1856df10db98SPaolo Abeni 		return -EINVAL;
1857df10db98SPaolo Abeni 	}
1858df10db98SPaolo Abeni 
18591da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
186040630b82SMichael S. Tsirkin 	case IFF_TUN:
186140630b82SMichael S. Tsirkin 		if (tun->flags & IFF_NO_PI) {
18622580c4c1SAlexander Potapenko 			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
18632580c4c1SAlexander Potapenko 
18642580c4c1SAlexander Potapenko 			switch (ip_version) {
18652580c4c1SAlexander Potapenko 			case 4:
1866f09f7ee2SAng Way Chuang 				pi.proto = htons(ETH_P_IP);
1867f09f7ee2SAng Way Chuang 				break;
18682580c4c1SAlexander Potapenko 			case 6:
1869f09f7ee2SAng Way Chuang 				pi.proto = htons(ETH_P_IPV6);
1870f09f7ee2SAng Way Chuang 				break;
1871f09f7ee2SAng Way Chuang 			default:
1872608b9977SPaolo Abeni 				this_cpu_inc(tun->pcpu_stats->rx_dropped);
1873f09f7ee2SAng Way Chuang 				kfree_skb(skb);
1874f09f7ee2SAng Way Chuang 				return -EINVAL;
1875f09f7ee2SAng Way Chuang 			}
1876f09f7ee2SAng Way Chuang 		}
1877f09f7ee2SAng Way Chuang 
1878459a98edSArnaldo Carvalho de Melo 		skb_reset_mac_header(skb);
18791da177e4SLinus Torvalds 		skb->protocol = pi.proto;
18804c13eb66SArnaldo Carvalho de Melo 		skb->dev = tun->dev;
18811da177e4SLinus Torvalds 		break;
188240630b82SMichael S. Tsirkin 	case IFF_TAP:
188390e33d45SPetar Penkov 		if (!frags)
18841da177e4SLinus Torvalds 			skb->protocol = eth_type_trans(skb, tun->dev);
18851da177e4SLinus Torvalds 		break;
18866403eab1SJoe Perches 	}
18871da177e4SLinus Torvalds 
18880690899bSMichael S. Tsirkin 	/* copy skb_ubuf_info for callback when skb has no error */
18890690899bSMichael S. Tsirkin 	if (zerocopy) {
18900690899bSMichael S. Tsirkin 		skb_shinfo(skb)->destructor_arg = msg_control;
18910690899bSMichael S. Tsirkin 		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1892c9af6db4SPravin B Shelar 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1893af1cc7a2SJason Wang 	} else if (msg_control) {
1894af1cc7a2SJason Wang 		struct ubuf_info *uarg = msg_control;
1895af1cc7a2SJason Wang 		uarg->callback(uarg, false);
18960690899bSMichael S. Tsirkin 	}
18970690899bSMichael S. Tsirkin 
189872f65107SVlad Yasevich 	skb_reset_network_header(skb);
189940893fd0SJason Wang 	skb_probe_transport_header(skb, 0);
190038502af7SJason Wang 
19011cfe6e93SJason Wang 	if (skb_xdp) {
1902761876c8SJason Wang 		struct bpf_prog *xdp_prog;
1903761876c8SJason Wang 		int ret;
1904761876c8SJason Wang 
1905761876c8SJason Wang 		rcu_read_lock();
1906761876c8SJason Wang 		xdp_prog = rcu_dereference(tun->xdp_prog);
1907761876c8SJason Wang 		if (xdp_prog) {
1908761876c8SJason Wang 			ret = do_xdp_generic(xdp_prog, skb);
1909761876c8SJason Wang 			if (ret != XDP_PASS) {
1910761876c8SJason Wang 				rcu_read_unlock();
1911761876c8SJason Wang 				return total_len;
1912761876c8SJason Wang 			}
1913761876c8SJason Wang 		}
1914761876c8SJason Wang 		rcu_read_unlock();
1915761876c8SJason Wang 	}
1916761876c8SJason Wang 
191796f84061SJason Wang 	rcu_read_lock();
191896f84061SJason Wang 	if (!rcu_dereference(tun->steering_prog))
1919feec084aSJason Wang 		rxhash = __skb_get_hash_symmetric(skb);
192096f84061SJason Wang 	rcu_read_unlock();
192194317099SPetar Penkov 
192290e33d45SPetar Penkov 	if (frags) {
192390e33d45SPetar Penkov 		/* Exercise flow dissector code path. */
192490e33d45SPetar Penkov 		u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
192590e33d45SPetar Penkov 
1926010f245bSEric Dumazet 		if (unlikely(headlen > skb_headlen(skb))) {
192790e33d45SPetar Penkov 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
192890e33d45SPetar Penkov 			napi_free_frags(&tfile->napi);
192990e33d45SPetar Penkov 			mutex_unlock(&tfile->napi_mutex);
193090e33d45SPetar Penkov 			WARN_ON(1);
193190e33d45SPetar Penkov 			return -ENOMEM;
193290e33d45SPetar Penkov 		}
193390e33d45SPetar Penkov 
193490e33d45SPetar Penkov 		local_bh_disable();
193590e33d45SPetar Penkov 		napi_gro_frags(&tfile->napi);
193690e33d45SPetar Penkov 		local_bh_enable();
193790e33d45SPetar Penkov 		mutex_unlock(&tfile->napi_mutex);
1938aec72f33SEric Dumazet 	} else if (tfile->napi_enabled) {
193994317099SPetar Penkov 		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
194094317099SPetar Penkov 		int queue_len;
194194317099SPetar Penkov 
194294317099SPetar Penkov 		spin_lock_bh(&queue->lock);
194394317099SPetar Penkov 		__skb_queue_tail(queue, skb);
194494317099SPetar Penkov 		queue_len = skb_queue_len(queue);
194594317099SPetar Penkov 		spin_unlock(&queue->lock);
194694317099SPetar Penkov 
194794317099SPetar Penkov 		if (!more || queue_len > NAPI_POLL_WEIGHT)
194894317099SPetar Penkov 			napi_schedule(&tfile->napi);
194994317099SPetar Penkov 
195094317099SPetar Penkov 		local_bh_enable();
195194317099SPetar Penkov 	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
19525503fcecSJason Wang 		tun_rx_batched(tun, tfile, skb, more);
195394317099SPetar Penkov 	} else {
19541da177e4SLinus Torvalds 		netif_rx_ni(skb);
195594317099SPetar Penkov 	}
19561da177e4SLinus Torvalds 
1957608b9977SPaolo Abeni 	stats = get_cpu_ptr(tun->pcpu_stats);
1958608b9977SPaolo Abeni 	u64_stats_update_begin(&stats->syncp);
1959608b9977SPaolo Abeni 	stats->rx_packets++;
1960608b9977SPaolo Abeni 	stats->rx_bytes += len;
1961608b9977SPaolo Abeni 	u64_stats_update_end(&stats->syncp);
1962608b9977SPaolo Abeni 	put_cpu_ptr(stats);
19631da177e4SLinus Torvalds 
196496f84061SJason Wang 	if (rxhash)
19659e85722dSJason Wang 		tun_flow_update(tun, rxhash, tfile);
196696f84061SJason Wang 
19670690899bSMichael S. Tsirkin 	return total_len;
19681da177e4SLinus Torvalds }
19691da177e4SLinus Torvalds 
1970f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
19711da177e4SLinus Torvalds {
197233dccbb0SHerbert Xu 	struct file *file = iocb->ki_filp;
197354f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
19749484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
1975631ab46bSEric W. Biederman 	ssize_t result;
19761da177e4SLinus Torvalds 
19771da177e4SLinus Torvalds 	if (!tun)
19781da177e4SLinus Torvalds 		return -EBADFD;
19791da177e4SLinus Torvalds 
19805503fcecSJason Wang 	result = tun_get_user(tun, tfile, NULL, from,
19815503fcecSJason Wang 			      file->f_flags & O_NONBLOCK, false);
1982631ab46bSEric W. Biederman 
1983631ab46bSEric W. Biederman 	tun_put(tun);
1984631ab46bSEric W. Biederman 	return result;
19851da177e4SLinus Torvalds }
19861da177e4SLinus Torvalds 
1987fc72d1d5SJason Wang static ssize_t tun_put_user_xdp(struct tun_struct *tun,
1988fc72d1d5SJason Wang 				struct tun_file *tfile,
1989fc72d1d5SJason Wang 				struct xdp_buff *xdp,
1990fc72d1d5SJason Wang 				struct iov_iter *iter)
1991fc72d1d5SJason Wang {
1992fc72d1d5SJason Wang 	int vnet_hdr_sz = 0;
1993fc72d1d5SJason Wang 	size_t size = xdp->data_end - xdp->data;
1994fc72d1d5SJason Wang 	struct tun_pcpu_stats *stats;
1995fc72d1d5SJason Wang 	size_t ret;
1996fc72d1d5SJason Wang 
1997fc72d1d5SJason Wang 	if (tun->flags & IFF_VNET_HDR) {
1998fc72d1d5SJason Wang 		struct virtio_net_hdr gso = { 0 };
1999fc72d1d5SJason Wang 
2000fc72d1d5SJason Wang 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2001fc72d1d5SJason Wang 		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
2002fc72d1d5SJason Wang 			return -EINVAL;
2003fc72d1d5SJason Wang 		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
2004fc72d1d5SJason Wang 			     sizeof(gso)))
2005fc72d1d5SJason Wang 			return -EFAULT;
2006fc72d1d5SJason Wang 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2007fc72d1d5SJason Wang 	}
2008fc72d1d5SJason Wang 
2009fc72d1d5SJason Wang 	ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz;
2010fc72d1d5SJason Wang 
2011fc72d1d5SJason Wang 	stats = get_cpu_ptr(tun->pcpu_stats);
2012fc72d1d5SJason Wang 	u64_stats_update_begin(&stats->syncp);
2013fc72d1d5SJason Wang 	stats->tx_packets++;
2014fc72d1d5SJason Wang 	stats->tx_bytes += ret;
2015fc72d1d5SJason Wang 	u64_stats_update_end(&stats->syncp);
2016fc72d1d5SJason Wang 	put_cpu_ptr(tun->pcpu_stats);
2017fc72d1d5SJason Wang 
2018fc72d1d5SJason Wang 	return ret;
2019fc72d1d5SJason Wang }
2020fc72d1d5SJason Wang 
20211da177e4SLinus Torvalds /* Put packet to the user space buffer */
20226f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun,
202354f968d6SJason Wang 			    struct tun_file *tfile,
20241da177e4SLinus Torvalds 			    struct sk_buff *skb,
2025e0b46d0eSHerbert Xu 			    struct iov_iter *iter)
20261da177e4SLinus Torvalds {
20271da177e4SLinus Torvalds 	struct tun_pi pi = { 0, skb->protocol };
2028608b9977SPaolo Abeni 	struct tun_pcpu_stats *stats;
2029e0b46d0eSHerbert Xu 	ssize_t total;
20308c847d25SJason Wang 	int vlan_offset = 0;
2031a8f9bfdfSHerbert Xu 	int vlan_hlen = 0;
20322eb783c4SHerbert Xu 	int vnet_hdr_sz = 0;
2033a8f9bfdfSHerbert Xu 
2034df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
2035a8f9bfdfSHerbert Xu 		vlan_hlen = VLAN_HLEN;
20361da177e4SLinus Torvalds 
203740630b82SMichael S. Tsirkin 	if (tun->flags & IFF_VNET_HDR)
2038e1edab87SWillem de Bruijn 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
20391da177e4SLinus Torvalds 
2040e0b46d0eSHerbert Xu 	total = skb->len + vlan_hlen + vnet_hdr_sz;
2041e0b46d0eSHerbert Xu 
204240630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_NO_PI)) {
2043e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < sizeof(pi))
20441da177e4SLinus Torvalds 			return -EINVAL;
20451da177e4SLinus Torvalds 
2046e0b46d0eSHerbert Xu 		total += sizeof(pi);
2047e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < total) {
20481da177e4SLinus Torvalds 			/* Packet will be striped */
20491da177e4SLinus Torvalds 			pi.flags |= TUN_PKT_STRIP;
20501da177e4SLinus Torvalds 		}
20511da177e4SLinus Torvalds 
2052e0b46d0eSHerbert Xu 		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
20531da177e4SLinus Torvalds 			return -EFAULT;
20541da177e4SLinus Torvalds 	}
20551da177e4SLinus Torvalds 
20562eb783c4SHerbert Xu 	if (vnet_hdr_sz) {
20579403cd7cSJarno Rajahalme 		struct virtio_net_hdr gso;
205834166093SMike Rapoport 
2059e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < vnet_hdr_sz)
2060f43798c2SRusty Russell 			return -EINVAL;
2061f43798c2SRusty Russell 
20623e9e40e7SJarno Rajahalme 		if (virtio_net_hdr_from_skb(skb, &gso,
20636391a448SJason Wang 					    tun_is_little_endian(tun), true)) {
2064f43798c2SRusty Russell 			struct skb_shared_info *sinfo = skb_shinfo(skb);
20656b8a66eeSJoe Perches 			pr_err("unexpected GSO type: "
2066ef3db4a5SMichael S. Tsirkin 			       "0x%x, gso_size %d, hdr_len %d\n",
206756f0dcc5SMichael S. Tsirkin 			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
206856f0dcc5SMichael S. Tsirkin 			       tun16_to_cpu(tun, gso.hdr_len));
2069ef3db4a5SMichael S. Tsirkin 			print_hex_dump(KERN_ERR, "tun: ",
2070ef3db4a5SMichael S. Tsirkin 				       DUMP_PREFIX_NONE,
2071ef3db4a5SMichael S. Tsirkin 				       16, 1, skb->head,
207256f0dcc5SMichael S. Tsirkin 				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2073ef3db4a5SMichael S. Tsirkin 			WARN_ON_ONCE(1);
2074ef3db4a5SMichael S. Tsirkin 			return -EINVAL;
2075ef3db4a5SMichael S. Tsirkin 		}
2076f43798c2SRusty Russell 
2077e0b46d0eSHerbert Xu 		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2078f43798c2SRusty Russell 			return -EFAULT;
20798c847d25SJason Wang 
20808c847d25SJason Wang 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2081f43798c2SRusty Russell 	}
2082f43798c2SRusty Russell 
2083a8f9bfdfSHerbert Xu 	if (vlan_hlen) {
2084e0b46d0eSHerbert Xu 		int ret;
2085*aff3d70aSJason Wang 		struct veth veth;
20861da177e4SLinus Torvalds 
20876680ec68SJason Wang 		veth.h_vlan_proto = skb->vlan_proto;
2088df8a39deSJiri Pirko 		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
20891da177e4SLinus Torvalds 
20906680ec68SJason Wang 		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
20916680ec68SJason Wang 
2092e0b46d0eSHerbert Xu 		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2093e0b46d0eSHerbert Xu 		if (ret || !iov_iter_count(iter))
20946680ec68SJason Wang 			goto done;
20956680ec68SJason Wang 
2096e0b46d0eSHerbert Xu 		ret = copy_to_iter(&veth, sizeof(veth), iter);
2097e0b46d0eSHerbert Xu 		if (ret != sizeof(veth) || !iov_iter_count(iter))
20986680ec68SJason Wang 			goto done;
20996680ec68SJason Wang 	}
21006680ec68SJason Wang 
2101e0b46d0eSHerbert Xu 	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
21026680ec68SJason Wang 
21036680ec68SJason Wang done:
2104608b9977SPaolo Abeni 	/* caller is in process context, */
2105608b9977SPaolo Abeni 	stats = get_cpu_ptr(tun->pcpu_stats);
2106608b9977SPaolo Abeni 	u64_stats_update_begin(&stats->syncp);
2107608b9977SPaolo Abeni 	stats->tx_packets++;
2108608b9977SPaolo Abeni 	stats->tx_bytes += skb->len + vlan_hlen;
2109608b9977SPaolo Abeni 	u64_stats_update_end(&stats->syncp);
2110608b9977SPaolo Abeni 	put_cpu_ptr(tun->pcpu_stats);
21111da177e4SLinus Torvalds 
21121da177e4SLinus Torvalds 	return total;
21131da177e4SLinus Torvalds }
21141da177e4SLinus Torvalds 
2115fc72d1d5SJason Wang static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
21161576d986SJason Wang {
21171576d986SJason Wang 	DECLARE_WAITQUEUE(wait, current);
2118fc72d1d5SJason Wang 	void *ptr = NULL;
2119f48cc6b2SJason Wang 	int error = 0;
21201576d986SJason Wang 
2121fc72d1d5SJason Wang 	ptr = ptr_ring_consume(&tfile->tx_ring);
2122fc72d1d5SJason Wang 	if (ptr)
21231576d986SJason Wang 		goto out;
21241576d986SJason Wang 	if (noblock) {
2125f48cc6b2SJason Wang 		error = -EAGAIN;
21261576d986SJason Wang 		goto out;
21271576d986SJason Wang 	}
21281576d986SJason Wang 
21291576d986SJason Wang 	add_wait_queue(&tfile->wq.wait, &wait);
21301576d986SJason Wang 	current->state = TASK_INTERRUPTIBLE;
21311576d986SJason Wang 
21321576d986SJason Wang 	while (1) {
2133fc72d1d5SJason Wang 		ptr = ptr_ring_consume(&tfile->tx_ring);
2134fc72d1d5SJason Wang 		if (ptr)
21351576d986SJason Wang 			break;
21361576d986SJason Wang 		if (signal_pending(current)) {
2137f48cc6b2SJason Wang 			error = -ERESTARTSYS;
21381576d986SJason Wang 			break;
21391576d986SJason Wang 		}
21401576d986SJason Wang 		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2141f48cc6b2SJason Wang 			error = -EFAULT;
21421576d986SJason Wang 			break;
21431576d986SJason Wang 		}
21441576d986SJason Wang 
21451576d986SJason Wang 		schedule();
21461576d986SJason Wang 	}
21471576d986SJason Wang 
21481576d986SJason Wang 	current->state = TASK_RUNNING;
21491576d986SJason Wang 	remove_wait_queue(&tfile->wq.wait, &wait);
21501576d986SJason Wang 
21511576d986SJason Wang out:
2152f48cc6b2SJason Wang 	*err = error;
2153fc72d1d5SJason Wang 	return ptr;
21541576d986SJason Wang }
21551576d986SJason Wang 
215654f968d6SJason Wang static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
21579b067034SAl Viro 			   struct iov_iter *to,
2158fc72d1d5SJason Wang 			   int noblock, void *ptr)
21591da177e4SLinus Torvalds {
21609b067034SAl Viro 	ssize_t ret;
21611576d986SJason Wang 	int err;
21621da177e4SLinus Torvalds 
21633872baf6SRami Rosen 	tun_debug(KERN_INFO, tun, "tun_do_read\n");
21641da177e4SLinus Torvalds 
2165c33ee15bSWei Xu 	if (!iov_iter_count(to)) {
2166fc72d1d5SJason Wang 		tun_ptr_free(ptr);
21679b067034SAl Viro 		return 0;
2168c33ee15bSWei Xu 	}
21691da177e4SLinus Torvalds 
2170fc72d1d5SJason Wang 	if (!ptr) {
21711576d986SJason Wang 		/* Read frames from ring */
2172fc72d1d5SJason Wang 		ptr = tun_ring_recv(tfile, noblock, &err);
2173fc72d1d5SJason Wang 		if (!ptr)
2174957f094fSAlex Gartrell 			return err;
2175ac77cfd4SJason Wang 	}
2176e0b46d0eSHerbert Xu 
2177fc72d1d5SJason Wang 	if (tun_is_xdp_buff(ptr)) {
2178fc72d1d5SJason Wang 		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
2179fc72d1d5SJason Wang 
2180fc72d1d5SJason Wang 		ret = tun_put_user_xdp(tun, tfile, xdp, to);
2181fc72d1d5SJason Wang 		put_page(virt_to_head_page(xdp->data));
2182fc72d1d5SJason Wang 	} else {
2183fc72d1d5SJason Wang 		struct sk_buff *skb = ptr;
2184fc72d1d5SJason Wang 
21859b067034SAl Viro 		ret = tun_put_user(tun, tfile, skb, to);
2186f51a5e82SJason Wang 		if (unlikely(ret < 0))
21871da177e4SLinus Torvalds 			kfree_skb(skb);
2188f51a5e82SJason Wang 		else
2189f51a5e82SJason Wang 			consume_skb(skb);
2190fc72d1d5SJason Wang 	}
21911da177e4SLinus Torvalds 
219205c2828cSMichael S. Tsirkin 	return ret;
219305c2828cSMichael S. Tsirkin }
219405c2828cSMichael S. Tsirkin 
21959b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
219605c2828cSMichael S. Tsirkin {
219705c2828cSMichael S. Tsirkin 	struct file *file = iocb->ki_filp;
219805c2828cSMichael S. Tsirkin 	struct tun_file *tfile = file->private_data;
21999484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
22009b067034SAl Viro 	ssize_t len = iov_iter_count(to), ret;
220105c2828cSMichael S. Tsirkin 
220205c2828cSMichael S. Tsirkin 	if (!tun)
220305c2828cSMichael S. Tsirkin 		return -EBADFD;
2204ac77cfd4SJason Wang 	ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
220542404c09SDavid S. Miller 	ret = min_t(ssize_t, ret, len);
2206d0b7da8aSZhi Yong Wu 	if (ret > 0)
2207d0b7da8aSZhi Yong Wu 		iocb->ki_pos = ret;
2208631ab46bSEric W. Biederman 	tun_put(tun);
22091da177e4SLinus Torvalds 	return ret;
22101da177e4SLinus Torvalds }
22111da177e4SLinus Torvalds 
2212cd5681d7SJason Wang static void tun_prog_free(struct rcu_head *rcu)
221396f84061SJason Wang {
2214cd5681d7SJason Wang 	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
221596f84061SJason Wang 
221696f84061SJason Wang 	bpf_prog_destroy(prog->prog);
221796f84061SJason Wang 	kfree(prog);
221896f84061SJason Wang }
221996f84061SJason Wang 
2220cd5681d7SJason Wang static int __tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
222196f84061SJason Wang 			  struct bpf_prog *prog)
222296f84061SJason Wang {
2223cd5681d7SJason Wang 	struct tun_prog *old, *new = NULL;
222496f84061SJason Wang 
222596f84061SJason Wang 	if (prog) {
222696f84061SJason Wang 		new = kmalloc(sizeof(*new), GFP_KERNEL);
222796f84061SJason Wang 		if (!new)
222896f84061SJason Wang 			return -ENOMEM;
222996f84061SJason Wang 		new->prog = prog;
223096f84061SJason Wang 	}
223196f84061SJason Wang 
2232124da8f6SJason Wang 	spin_lock_bh(&tun->lock);
2233cd5681d7SJason Wang 	old = rcu_dereference_protected(*prog_p,
2234124da8f6SJason Wang 					lockdep_is_held(&tun->lock));
2235cd5681d7SJason Wang 	rcu_assign_pointer(*prog_p, new);
2236124da8f6SJason Wang 	spin_unlock_bh(&tun->lock);
223796f84061SJason Wang 
223896f84061SJason Wang 	if (old)
2239cd5681d7SJason Wang 		call_rcu(&old->rcu, tun_prog_free);
224096f84061SJason Wang 
224196f84061SJason Wang 	return 0;
224296f84061SJason Wang }
224396f84061SJason Wang 
224496442e42SJason Wang static void tun_free_netdev(struct net_device *dev)
224596442e42SJason Wang {
224696442e42SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
224796442e42SJason Wang 
22484008e97fSJason Wang 	BUG_ON(!(list_empty(&tun->disabled)));
2249608b9977SPaolo Abeni 	free_percpu(tun->pcpu_stats);
225096442e42SJason Wang 	tun_flow_uninit(tun);
22515dbbaf2dSPaul Moore 	security_tun_dev_free_security(tun->security);
2252cd5681d7SJason Wang 	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
2253*aff3d70aSJason Wang 	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
225496442e42SJason Wang }
225596442e42SJason Wang 
22561da177e4SLinus Torvalds static void tun_setup(struct net_device *dev)
22571da177e4SLinus Torvalds {
22581da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
22591da177e4SLinus Torvalds 
22600625c883SEric W. Biederman 	tun->owner = INVALID_UID;
22610625c883SEric W. Biederman 	tun->group = INVALID_GID;
22621da177e4SLinus Torvalds 
22631da177e4SLinus Torvalds 	dev->ethtool_ops = &tun_ethtool_ops;
2264cf124db5SDavid S. Miller 	dev->needs_free_netdev = true;
2265cf124db5SDavid S. Miller 	dev->priv_destructor = tun_free_netdev;
2266016adb72SJason Wang 	/* We prefer our own queue length */
2267016adb72SJason Wang 	dev->tx_queue_len = TUN_READQ_SIZE;
22681da177e4SLinus Torvalds }
22691da177e4SLinus Torvalds 
2270f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting tun or tap
2271f019a7a5SEric W. Biederman  * device with netlink.
2272f019a7a5SEric W. Biederman  */
2273a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2274a8b8a889SMatthias Schiffer 			struct netlink_ext_ack *extack)
2275f019a7a5SEric W. Biederman {
2276f019a7a5SEric W. Biederman 	return -EINVAL;
2277f019a7a5SEric W. Biederman }
2278f019a7a5SEric W. Biederman 
2279f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = {
2280f019a7a5SEric W. Biederman 	.kind		= DRV_NAME,
2281f019a7a5SEric W. Biederman 	.priv_size	= sizeof(struct tun_struct),
2282f019a7a5SEric W. Biederman 	.setup		= tun_setup,
2283f019a7a5SEric W. Biederman 	.validate	= tun_validate,
2284f019a7a5SEric W. Biederman };
2285f019a7a5SEric W. Biederman 
228633dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk)
228733dccbb0SHerbert Xu {
228854f968d6SJason Wang 	struct tun_file *tfile;
228943815482SEric Dumazet 	wait_queue_head_t *wqueue;
229033dccbb0SHerbert Xu 
229133dccbb0SHerbert Xu 	if (!sock_writeable(sk))
229233dccbb0SHerbert Xu 		return;
229333dccbb0SHerbert Xu 
22949cd3e072SEric Dumazet 	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
229533dccbb0SHerbert Xu 		return;
229633dccbb0SHerbert Xu 
229743815482SEric Dumazet 	wqueue = sk_sleep(sk);
229843815482SEric Dumazet 	if (wqueue && waitqueue_active(wqueue))
229943815482SEric Dumazet 		wake_up_interruptible_sync_poll(wqueue, POLLOUT |
230005c2828cSMichael S. Tsirkin 						POLLWRNORM | POLLWRBAND);
2301c722c625SHerbert Xu 
230254f968d6SJason Wang 	tfile = container_of(sk, struct tun_file, sk);
230354f968d6SJason Wang 	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
230433dccbb0SHerbert Xu }
230533dccbb0SHerbert Xu 
23061b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
230705c2828cSMichael S. Tsirkin {
230854f968d6SJason Wang 	int ret;
230954f968d6SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
23109484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
231154f968d6SJason Wang 
231254f968d6SJason Wang 	if (!tun)
231354f968d6SJason Wang 		return -EBADFD;
2314f5ff53b4SAl Viro 
2315c0371da6SAl Viro 	ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
23165503fcecSJason Wang 			   m->msg_flags & MSG_DONTWAIT,
23175503fcecSJason Wang 			   m->msg_flags & MSG_MORE);
231854f968d6SJason Wang 	tun_put(tun);
231954f968d6SJason Wang 	return ret;
232005c2828cSMichael S. Tsirkin }
232105c2828cSMichael S. Tsirkin 
23221b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
232305c2828cSMichael S. Tsirkin 		       int flags)
232405c2828cSMichael S. Tsirkin {
232554f968d6SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
23269484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
2327fc72d1d5SJason Wang 	void *ptr = m->msg_control;
232805c2828cSMichael S. Tsirkin 	int ret;
232954f968d6SJason Wang 
2330c33ee15bSWei Xu 	if (!tun) {
2331c33ee15bSWei Xu 		ret = -EBADFD;
2332fc72d1d5SJason Wang 		goto out_free;
2333c33ee15bSWei Xu 	}
233454f968d6SJason Wang 
2335eda29772SRichard Cochran 	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
23363811ae76SGao feng 		ret = -EINVAL;
2337c33ee15bSWei Xu 		goto out_put_tun;
23383811ae76SGao feng 	}
2339eda29772SRichard Cochran 	if (flags & MSG_ERRQUEUE) {
2340eda29772SRichard Cochran 		ret = sock_recv_errqueue(sock->sk, m, total_len,
2341eda29772SRichard Cochran 					 SOL_PACKET, TUN_TX_TIMESTAMP);
2342eda29772SRichard Cochran 		goto out;
2343eda29772SRichard Cochran 	}
2344fc72d1d5SJason Wang 	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
234587897931SAlex Gartrell 	if (ret > (ssize_t)total_len) {
234642404c09SDavid S. Miller 		m->msg_flags |= MSG_TRUNC;
234742404c09SDavid S. Miller 		ret = flags & MSG_TRUNC ? ret : total_len;
234842404c09SDavid S. Miller 	}
23493811ae76SGao feng out:
235054f968d6SJason Wang 	tun_put(tun);
235105c2828cSMichael S. Tsirkin 	return ret;
2352c33ee15bSWei Xu 
2353c33ee15bSWei Xu out_put_tun:
2354c33ee15bSWei Xu 	tun_put(tun);
2355fc72d1d5SJason Wang out_free:
2356fc72d1d5SJason Wang 	tun_ptr_free(ptr);
2357c33ee15bSWei Xu 	return ret;
235805c2828cSMichael S. Tsirkin }
235905c2828cSMichael S. Tsirkin 
2360fc72d1d5SJason Wang static int tun_ptr_peek_len(void *ptr)
2361fc72d1d5SJason Wang {
2362fc72d1d5SJason Wang 	if (likely(ptr)) {
2363fc72d1d5SJason Wang 		if (tun_is_xdp_buff(ptr)) {
2364fc72d1d5SJason Wang 			struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
2365fc72d1d5SJason Wang 
2366fc72d1d5SJason Wang 			return xdp->data_end - xdp->data;
2367fc72d1d5SJason Wang 		}
2368fc72d1d5SJason Wang 		return __skb_array_len_with_tag(ptr);
2369fc72d1d5SJason Wang 	} else {
2370fc72d1d5SJason Wang 		return 0;
2371fc72d1d5SJason Wang 	}
2372fc72d1d5SJason Wang }
2373fc72d1d5SJason Wang 
23741576d986SJason Wang static int tun_peek_len(struct socket *sock)
23751576d986SJason Wang {
23761576d986SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
23771576d986SJason Wang 	struct tun_struct *tun;
23781576d986SJason Wang 	int ret = 0;
23791576d986SJason Wang 
23809484dc74Syuan linyu 	tun = tun_get(tfile);
23811576d986SJason Wang 	if (!tun)
23821576d986SJason Wang 		return 0;
23831576d986SJason Wang 
2384fc72d1d5SJason Wang 	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
23851576d986SJason Wang 	tun_put(tun);
23861576d986SJason Wang 
23871576d986SJason Wang 	return ret;
23881576d986SJason Wang }
23891576d986SJason Wang 
239005c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */
239105c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = {
23921576d986SJason Wang 	.peek_len = tun_peek_len,
239305c2828cSMichael S. Tsirkin 	.sendmsg = tun_sendmsg,
239405c2828cSMichael S. Tsirkin 	.recvmsg = tun_recvmsg,
239505c2828cSMichael S. Tsirkin };
239605c2828cSMichael S. Tsirkin 
239733dccbb0SHerbert Xu static struct proto tun_proto = {
239833dccbb0SHerbert Xu 	.name		= "tun",
239933dccbb0SHerbert Xu 	.owner		= THIS_MODULE,
240054f968d6SJason Wang 	.obj_size	= sizeof(struct tun_file),
240133dccbb0SHerbert Xu };
2402f019a7a5SEric W. Biederman 
2403980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun)
2404980c9e8cSDavid Woodhouse {
2405031f5e03SMichael S. Tsirkin 	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2406980c9e8cSDavid Woodhouse }
2407980c9e8cSDavid Woodhouse 
2408980c9e8cSDavid Woodhouse static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
2409980c9e8cSDavid Woodhouse 			      char *buf)
2410980c9e8cSDavid Woodhouse {
2411980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2412980c9e8cSDavid Woodhouse 	return sprintf(buf, "0x%x\n", tun_flags(tun));
2413980c9e8cSDavid Woodhouse }
2414980c9e8cSDavid Woodhouse 
2415980c9e8cSDavid Woodhouse static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
2416980c9e8cSDavid Woodhouse 			      char *buf)
2417980c9e8cSDavid Woodhouse {
2418980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
24190625c883SEric W. Biederman 	return uid_valid(tun->owner)?
24200625c883SEric W. Biederman 		sprintf(buf, "%u\n",
24210625c883SEric W. Biederman 			from_kuid_munged(current_user_ns(), tun->owner)):
24220625c883SEric W. Biederman 		sprintf(buf, "-1\n");
2423980c9e8cSDavid Woodhouse }
2424980c9e8cSDavid Woodhouse 
2425980c9e8cSDavid Woodhouse static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
2426980c9e8cSDavid Woodhouse 			      char *buf)
2427980c9e8cSDavid Woodhouse {
2428980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
24290625c883SEric W. Biederman 	return gid_valid(tun->group) ?
24300625c883SEric W. Biederman 		sprintf(buf, "%u\n",
24310625c883SEric W. Biederman 			from_kgid_munged(current_user_ns(), tun->group)):
24320625c883SEric W. Biederman 		sprintf(buf, "-1\n");
2433980c9e8cSDavid Woodhouse }
2434980c9e8cSDavid Woodhouse 
2435980c9e8cSDavid Woodhouse static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
2436980c9e8cSDavid Woodhouse static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
2437980c9e8cSDavid Woodhouse static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
2438980c9e8cSDavid Woodhouse 
2439c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = {
2440c4d33e24STakashi Iwai 	&dev_attr_tun_flags.attr,
2441c4d33e24STakashi Iwai 	&dev_attr_owner.attr,
2442c4d33e24STakashi Iwai 	&dev_attr_group.attr,
2443c4d33e24STakashi Iwai 	NULL
2444c4d33e24STakashi Iwai };
2445c4d33e24STakashi Iwai 
2446c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = {
2447c4d33e24STakashi Iwai 	.attrs = tun_dev_attrs
2448c4d33e24STakashi Iwai };
2449c4d33e24STakashi Iwai 
2450d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
24511da177e4SLinus Torvalds {
24521da177e4SLinus Torvalds 	struct tun_struct *tun;
245354f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
24541da177e4SLinus Torvalds 	struct net_device *dev;
24551da177e4SLinus Torvalds 	int err;
24561da177e4SLinus Torvalds 
24577c0c3b1aSJason Wang 	if (tfile->detached)
24587c0c3b1aSJason Wang 		return -EINVAL;
24597c0c3b1aSJason Wang 
246090e33d45SPetar Penkov 	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
246190e33d45SPetar Penkov 		if (!capable(CAP_NET_ADMIN))
246290e33d45SPetar Penkov 			return -EPERM;
246390e33d45SPetar Penkov 
246490e33d45SPetar Penkov 		if (!(ifr->ifr_flags & IFF_NAPI) ||
246590e33d45SPetar Penkov 		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
246690e33d45SPetar Penkov 			return -EINVAL;
246790e33d45SPetar Penkov 	}
246890e33d45SPetar Penkov 
246974a3e5a7SEric W. Biederman 	dev = __dev_get_by_name(net, ifr->ifr_name);
247074a3e5a7SEric W. Biederman 	if (dev) {
2471f85ba780SDavid Woodhouse 		if (ifr->ifr_flags & IFF_TUN_EXCL)
2472f85ba780SDavid Woodhouse 			return -EBUSY;
247374a3e5a7SEric W. Biederman 		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
247474a3e5a7SEric W. Biederman 			tun = netdev_priv(dev);
247574a3e5a7SEric W. Biederman 		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
247674a3e5a7SEric W. Biederman 			tun = netdev_priv(dev);
247774a3e5a7SEric W. Biederman 		else
247874a3e5a7SEric W. Biederman 			return -EINVAL;
247974a3e5a7SEric W. Biederman 
24808e6d91aeSJason Wang 		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
248140630b82SMichael S. Tsirkin 		    !!(tun->flags & IFF_MULTI_QUEUE))
24828e6d91aeSJason Wang 			return -EINVAL;
24838e6d91aeSJason Wang 
2484cde8b15fSJason Wang 		if (tun_not_capable(tun))
24852b980dbdSPaul Moore 			return -EPERM;
24865dbbaf2dSPaul Moore 		err = security_tun_dev_open(tun->security);
24872b980dbdSPaul Moore 		if (err < 0)
24882b980dbdSPaul Moore 			return err;
24892b980dbdSPaul Moore 
249094317099SPetar Penkov 		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
249194317099SPetar Penkov 				 ifr->ifr_flags & IFF_NAPI);
2492a7385ba2SEric W. Biederman 		if (err < 0)
2493a7385ba2SEric W. Biederman 			return err;
24944008e97fSJason Wang 
249540630b82SMichael S. Tsirkin 		if (tun->flags & IFF_MULTI_QUEUE &&
2496e8dbad66SJason Wang 		    (tun->numqueues + tun->numdisabled > 1)) {
2497e8dbad66SJason Wang 			/* One or more queue has already been attached, no need
2498e8dbad66SJason Wang 			 * to initialize the device again.
2499e8dbad66SJason Wang 			 */
2500e8dbad66SJason Wang 			return 0;
2501e8dbad66SJason Wang 		}
250286a264abSDavid Howells 	}
25031da177e4SLinus Torvalds 	else {
25041da177e4SLinus Torvalds 		char *name;
25051da177e4SLinus Torvalds 		unsigned long flags = 0;
2506edfb6a14SJason Wang 		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2507edfb6a14SJason Wang 			     MAX_TAP_QUEUES : 1;
25081da177e4SLinus Torvalds 
2509c260b772SEric W. Biederman 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2510ca6bb5d7SDavid Woodhouse 			return -EPERM;
25112b980dbdSPaul Moore 		err = security_tun_dev_create();
25122b980dbdSPaul Moore 		if (err < 0)
25132b980dbdSPaul Moore 			return err;
2514ca6bb5d7SDavid Woodhouse 
25151da177e4SLinus Torvalds 		/* Set dev type */
25161da177e4SLinus Torvalds 		if (ifr->ifr_flags & IFF_TUN) {
25171da177e4SLinus Torvalds 			/* TUN device */
251840630b82SMichael S. Tsirkin 			flags |= IFF_TUN;
25191da177e4SLinus Torvalds 			name = "tun%d";
25201da177e4SLinus Torvalds 		} else if (ifr->ifr_flags & IFF_TAP) {
25211da177e4SLinus Torvalds 			/* TAP device */
252240630b82SMichael S. Tsirkin 			flags |= IFF_TAP;
25231da177e4SLinus Torvalds 			name = "tap%d";
25241da177e4SLinus Torvalds 		} else
252536989b90SKusanagi Kouichi 			return -EINVAL;
25261da177e4SLinus Torvalds 
25271da177e4SLinus Torvalds 		if (*ifr->ifr_name)
25281da177e4SLinus Torvalds 			name = ifr->ifr_name;
25291da177e4SLinus Torvalds 
2530c8d68e6bSJason Wang 		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2531c835a677STom Gundersen 				       NET_NAME_UNKNOWN, tun_setup, queues,
2532c835a677STom Gundersen 				       queues);
2533edfb6a14SJason Wang 
25341da177e4SLinus Torvalds 		if (!dev)
25351da177e4SLinus Torvalds 			return -ENOMEM;
25360ad646c8SCong Wang 		err = dev_get_valid_name(net, dev, name);
25375c25f65fSJulien Gomes 		if (err < 0)
25380ad646c8SCong Wang 			goto err_free_dev;
25391da177e4SLinus Torvalds 
2540fc54c658SPavel Emelyanov 		dev_net_set(dev, net);
2541f019a7a5SEric W. Biederman 		dev->rtnl_link_ops = &tun_link_ops;
2542fb7589a1SPavel Emelyanov 		dev->ifindex = tfile->ifindex;
2543c4d33e24STakashi Iwai 		dev->sysfs_groups[0] = &tun_attr_group;
2544758e43b7SStephen Hemminger 
25451da177e4SLinus Torvalds 		tun = netdev_priv(dev);
25461da177e4SLinus Torvalds 		tun->dev = dev;
25471da177e4SLinus Torvalds 		tun->flags = flags;
2548f271b2ccSMax Krasnyansky 		tun->txflt.count = 0;
2549d9d52b51SMichael S. Tsirkin 		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
25501da177e4SLinus Torvalds 
2551eaea34b2SPaolo Abeni 		tun->align = NET_SKB_PAD;
255254f968d6SJason Wang 		tun->filter_attached = false;
255354f968d6SJason Wang 		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
25545503fcecSJason Wang 		tun->rx_batched = 0;
255596f84061SJason Wang 		RCU_INIT_POINTER(tun->steering_prog, NULL);
255633dccbb0SHerbert Xu 
2557608b9977SPaolo Abeni 		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
2558608b9977SPaolo Abeni 		if (!tun->pcpu_stats) {
2559608b9977SPaolo Abeni 			err = -ENOMEM;
2560608b9977SPaolo Abeni 			goto err_free_dev;
2561608b9977SPaolo Abeni 		}
2562608b9977SPaolo Abeni 
256396442e42SJason Wang 		spin_lock_init(&tun->lock);
256496442e42SJason Wang 
25655dbbaf2dSPaul Moore 		err = security_tun_dev_alloc_security(&tun->security);
25665dbbaf2dSPaul Moore 		if (err < 0)
2567608b9977SPaolo Abeni 			goto err_free_stat;
25682b980dbdSPaul Moore 
25691da177e4SLinus Torvalds 		tun_net_init(dev);
2570944a1376SPavel Emelyanov 		tun_flow_init(tun);
257196442e42SJason Wang 
257288255375SMichał Mirosław 		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
25736680ec68SJason Wang 				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
25746680ec68SJason Wang 				   NETIF_F_HW_VLAN_STAG_TX;
25752a2bbf17SPaolo Abeni 		dev->features = dev->hw_features | NETIF_F_LLTX;
25766671b224SFernando Luis Vazquez Cao 		dev->vlan_features = dev->features &
25776671b224SFernando Luis Vazquez Cao 				     ~(NETIF_F_HW_VLAN_CTAG_TX |
25786671b224SFernando Luis Vazquez Cao 				       NETIF_F_HW_VLAN_STAG_TX);
257988255375SMichał Mirosław 
25804008e97fSJason Wang 		INIT_LIST_HEAD(&tun->disabled);
258194317099SPetar Penkov 		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
2582eb0fb363SJason Wang 		if (err < 0)
2583662ca437SJason Wang 			goto err_free_flow;
2584eb0fb363SJason Wang 
25851da177e4SLinus Torvalds 		err = register_netdevice(tun->dev);
25861da177e4SLinus Torvalds 		if (err < 0)
2587662ca437SJason Wang 			goto err_detach;
2588af668b3cSMichael S. Tsirkin 	}
2589980c9e8cSDavid Woodhouse 
2590eb0fb363SJason Wang 	netif_carrier_on(tun->dev);
25911da177e4SLinus Torvalds 
25926b8a66eeSJoe Perches 	tun_debug(KERN_INFO, tun, "tun_set_iff\n");
25931da177e4SLinus Torvalds 
2594031f5e03SMichael S. Tsirkin 	tun->flags = (tun->flags & ~TUN_FEATURES) |
2595031f5e03SMichael S. Tsirkin 		(ifr->ifr_flags & TUN_FEATURES);
2596c8d68e6bSJason Wang 
2597e35259a9SMax Krasnyansky 	/* Make sure persistent devices do not get stuck in
2598e35259a9SMax Krasnyansky 	 * xoff state.
2599e35259a9SMax Krasnyansky 	 */
2600e35259a9SMax Krasnyansky 	if (netif_running(tun->dev))
2601c8d68e6bSJason Wang 		netif_tx_wake_all_queues(tun->dev);
2602e35259a9SMax Krasnyansky 
26031da177e4SLinus Torvalds 	strcpy(ifr->ifr_name, tun->dev->name);
26041da177e4SLinus Torvalds 	return 0;
26051da177e4SLinus Torvalds 
2606662ca437SJason Wang err_detach:
2607662ca437SJason Wang 	tun_detach_all(dev);
2608ff244c6bSEric Dumazet 	/* register_netdevice() already called tun_free_netdev() */
2609ff244c6bSEric Dumazet 	goto err_free_dev;
2610ff244c6bSEric Dumazet 
2611662ca437SJason Wang err_free_flow:
2612662ca437SJason Wang 	tun_flow_uninit(tun);
2613662ca437SJason Wang 	security_tun_dev_free_security(tun->security);
2614608b9977SPaolo Abeni err_free_stat:
2615608b9977SPaolo Abeni 	free_percpu(tun->pcpu_stats);
26161da177e4SLinus Torvalds err_free_dev:
26171da177e4SLinus Torvalds 	free_netdev(dev);
26181da177e4SLinus Torvalds 	return err;
26191da177e4SLinus Torvalds }
26201da177e4SLinus Torvalds 
26219ce99cf6SRami Rosen static void tun_get_iff(struct net *net, struct tun_struct *tun,
2622876bfd4dSHerbert Xu 		       struct ifreq *ifr)
2623e3b99556SMark McLoughlin {
26246b8a66eeSJoe Perches 	tun_debug(KERN_INFO, tun, "tun_get_iff\n");
2625e3b99556SMark McLoughlin 
2626e3b99556SMark McLoughlin 	strcpy(ifr->ifr_name, tun->dev->name);
2627e3b99556SMark McLoughlin 
2628980c9e8cSDavid Woodhouse 	ifr->ifr_flags = tun_flags(tun);
2629e3b99556SMark McLoughlin 
2630e3b99556SMark McLoughlin }
2631e3b99556SMark McLoughlin 
26325228ddc9SRusty Russell /* This is like a cut-down ethtool ops, except done via tun fd so no
26335228ddc9SRusty Russell  * privs required. */
263488255375SMichał Mirosław static int set_offload(struct tun_struct *tun, unsigned long arg)
26355228ddc9SRusty Russell {
2636c8f44affSMichał Mirosław 	netdev_features_t features = 0;
26375228ddc9SRusty Russell 
26385228ddc9SRusty Russell 	if (arg & TUN_F_CSUM) {
263988255375SMichał Mirosław 		features |= NETIF_F_HW_CSUM;
26405228ddc9SRusty Russell 		arg &= ~TUN_F_CSUM;
26415228ddc9SRusty Russell 
26425228ddc9SRusty Russell 		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
26435228ddc9SRusty Russell 			if (arg & TUN_F_TSO_ECN) {
26445228ddc9SRusty Russell 				features |= NETIF_F_TSO_ECN;
26455228ddc9SRusty Russell 				arg &= ~TUN_F_TSO_ECN;
26465228ddc9SRusty Russell 			}
26475228ddc9SRusty Russell 			if (arg & TUN_F_TSO4)
26485228ddc9SRusty Russell 				features |= NETIF_F_TSO;
26495228ddc9SRusty Russell 			if (arg & TUN_F_TSO6)
26505228ddc9SRusty Russell 				features |= NETIF_F_TSO6;
26515228ddc9SRusty Russell 			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
26525228ddc9SRusty Russell 		}
26530c19f846SWillem de Bruijn 
26540c19f846SWillem de Bruijn 		arg &= ~TUN_F_UFO;
26555228ddc9SRusty Russell 	}
26565228ddc9SRusty Russell 
26575228ddc9SRusty Russell 	/* This gives the user a way to test for new features in future by
26585228ddc9SRusty Russell 	 * trying to set them. */
26595228ddc9SRusty Russell 	if (arg)
26605228ddc9SRusty Russell 		return -EINVAL;
26615228ddc9SRusty Russell 
266288255375SMichał Mirosław 	tun->set_features = features;
266309050957SYaroslav Isakov 	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
266409050957SYaroslav Isakov 	tun->dev->wanted_features |= features;
266588255375SMichał Mirosław 	netdev_update_features(tun->dev);
26665228ddc9SRusty Russell 
26675228ddc9SRusty Russell 	return 0;
26685228ddc9SRusty Russell }
26695228ddc9SRusty Russell 
2670c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n)
2671c8d68e6bSJason Wang {
2672c8d68e6bSJason Wang 	int i;
2673c8d68e6bSJason Wang 	struct tun_file *tfile;
2674c8d68e6bSJason Wang 
2675c8d68e6bSJason Wang 	for (i = 0; i < n; i++) {
2676b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
26778ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
26788ced425eSHannes Frederic Sowa 		sk_detach_filter(tfile->socket.sk);
26798ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
2680c8d68e6bSJason Wang 	}
2681c8d68e6bSJason Wang 
2682c8d68e6bSJason Wang 	tun->filter_attached = false;
2683c8d68e6bSJason Wang }
2684c8d68e6bSJason Wang 
2685c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun)
2686c8d68e6bSJason Wang {
2687c8d68e6bSJason Wang 	int i, ret = 0;
2688c8d68e6bSJason Wang 	struct tun_file *tfile;
2689c8d68e6bSJason Wang 
2690c8d68e6bSJason Wang 	for (i = 0; i < tun->numqueues; i++) {
2691b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
26928ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
26938ced425eSHannes Frederic Sowa 		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
26948ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
2695c8d68e6bSJason Wang 		if (ret) {
2696c8d68e6bSJason Wang 			tun_detach_filter(tun, i);
2697c8d68e6bSJason Wang 			return ret;
2698c8d68e6bSJason Wang 		}
2699c8d68e6bSJason Wang 	}
2700c8d68e6bSJason Wang 
2701c8d68e6bSJason Wang 	tun->filter_attached = true;
2702c8d68e6bSJason Wang 	return ret;
2703c8d68e6bSJason Wang }
2704c8d68e6bSJason Wang 
2705c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun)
2706c8d68e6bSJason Wang {
2707c8d68e6bSJason Wang 	struct tun_file *tfile;
2708c8d68e6bSJason Wang 	int i;
2709c8d68e6bSJason Wang 
2710c8d68e6bSJason Wang 	for (i = 0; i < tun->numqueues; i++) {
2711b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
2712c8d68e6bSJason Wang 		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2713c8d68e6bSJason Wang 	}
2714c8d68e6bSJason Wang }
2715c8d68e6bSJason Wang 
2716cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr)
2717cde8b15fSJason Wang {
2718cde8b15fSJason Wang 	struct tun_file *tfile = file->private_data;
2719cde8b15fSJason Wang 	struct tun_struct *tun;
2720cde8b15fSJason Wang 	int ret = 0;
2721cde8b15fSJason Wang 
2722cde8b15fSJason Wang 	rtnl_lock();
2723cde8b15fSJason Wang 
2724cde8b15fSJason Wang 	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
27254008e97fSJason Wang 		tun = tfile->detached;
27265dbbaf2dSPaul Moore 		if (!tun) {
2727cde8b15fSJason Wang 			ret = -EINVAL;
27285dbbaf2dSPaul Moore 			goto unlock;
27295dbbaf2dSPaul Moore 		}
27305dbbaf2dSPaul Moore 		ret = security_tun_dev_attach_queue(tun->security);
27315dbbaf2dSPaul Moore 		if (ret < 0)
27325dbbaf2dSPaul Moore 			goto unlock;
273394317099SPetar Penkov 		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
27344008e97fSJason Wang 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2735b8deabd3SJason Wang 		tun = rtnl_dereference(tfile->tun);
273640630b82SMichael S. Tsirkin 		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
27374008e97fSJason Wang 			ret = -EINVAL;
2738cde8b15fSJason Wang 		else
27394008e97fSJason Wang 			__tun_detach(tfile, false);
27404008e97fSJason Wang 	} else
2741cde8b15fSJason Wang 		ret = -EINVAL;
2742cde8b15fSJason Wang 
27435dbbaf2dSPaul Moore unlock:
2744cde8b15fSJason Wang 	rtnl_unlock();
2745cde8b15fSJason Wang 	return ret;
2746cde8b15fSJason Wang }
2747cde8b15fSJason Wang 
2748cd5681d7SJason Wang static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
2749cd5681d7SJason Wang 			void __user *data)
275096f84061SJason Wang {
275196f84061SJason Wang 	struct bpf_prog *prog;
275296f84061SJason Wang 	int fd;
275396f84061SJason Wang 
275496f84061SJason Wang 	if (copy_from_user(&fd, data, sizeof(fd)))
275596f84061SJason Wang 		return -EFAULT;
275696f84061SJason Wang 
275796f84061SJason Wang 	if (fd == -1) {
275896f84061SJason Wang 		prog = NULL;
275996f84061SJason Wang 	} else {
276096f84061SJason Wang 		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
276196f84061SJason Wang 		if (IS_ERR(prog))
276296f84061SJason Wang 			return PTR_ERR(prog);
276396f84061SJason Wang 	}
276496f84061SJason Wang 
2765cd5681d7SJason Wang 	return __tun_set_ebpf(tun, prog_p, prog);
276696f84061SJason Wang }
276796f84061SJason Wang 
276850857e2aSArnd Bergmann static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
276950857e2aSArnd Bergmann 			    unsigned long arg, int ifreq_len)
27701da177e4SLinus Torvalds {
277136b50babSEric W. Biederman 	struct tun_file *tfile = file->private_data;
2772631ab46bSEric W. Biederman 	struct tun_struct *tun;
27731da177e4SLinus Torvalds 	void __user* argp = (void __user*)arg;
27741da177e4SLinus Torvalds 	struct ifreq ifr;
27750625c883SEric W. Biederman 	kuid_t owner;
27760625c883SEric W. Biederman 	kgid_t group;
277733dccbb0SHerbert Xu 	int sndbuf;
2778d9d52b51SMichael S. Tsirkin 	int vnet_hdr_sz;
2779fb7589a1SPavel Emelyanov 	unsigned int ifindex;
27801cf8e410SMichael S. Tsirkin 	int le;
2781f271b2ccSMax Krasnyansky 	int ret;
27821da177e4SLinus Torvalds 
278320861f26SGao Feng 	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == SOCK_IOC_TYPE) {
278450857e2aSArnd Bergmann 		if (copy_from_user(&ifr, argp, ifreq_len))
27851da177e4SLinus Torvalds 			return -EFAULT;
27868bbb1813SDavid S. Miller 	} else {
2787a117dacdSMathias Krause 		memset(&ifr, 0, sizeof(ifr));
27888bbb1813SDavid S. Miller 	}
2789631ab46bSEric W. Biederman 	if (cmd == TUNGETFEATURES) {
2790631ab46bSEric W. Biederman 		/* Currently this just means: "what IFF flags are valid?".
2791631ab46bSEric W. Biederman 		 * This is needed because we never checked for invalid flags on
2792031f5e03SMichael S. Tsirkin 		 * TUNSETIFF.
2793031f5e03SMichael S. Tsirkin 		 */
2794031f5e03SMichael S. Tsirkin 		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
2795631ab46bSEric W. Biederman 				(unsigned int __user*)argp);
2796cde8b15fSJason Wang 	} else if (cmd == TUNSETQUEUE)
2797cde8b15fSJason Wang 		return tun_set_queue(file, &ifr);
2798631ab46bSEric W. Biederman 
2799c8d68e6bSJason Wang 	ret = 0;
2800876bfd4dSHerbert Xu 	rtnl_lock();
2801876bfd4dSHerbert Xu 
28029484dc74Syuan linyu 	tun = tun_get(tfile);
28030f16bc13SGao Feng 	if (cmd == TUNSETIFF) {
28040f16bc13SGao Feng 		ret = -EEXIST;
28050f16bc13SGao Feng 		if (tun)
28060f16bc13SGao Feng 			goto unlock;
28070f16bc13SGao Feng 
28081da177e4SLinus Torvalds 		ifr.ifr_name[IFNAMSIZ-1] = '\0';
28091da177e4SLinus Torvalds 
2810140e807dSEric W. Biederman 		ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr);
28111da177e4SLinus Torvalds 
2812876bfd4dSHerbert Xu 		if (ret)
2813876bfd4dSHerbert Xu 			goto unlock;
28141da177e4SLinus Torvalds 
281550857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
2816876bfd4dSHerbert Xu 			ret = -EFAULT;
2817876bfd4dSHerbert Xu 		goto unlock;
28181da177e4SLinus Torvalds 	}
2819fb7589a1SPavel Emelyanov 	if (cmd == TUNSETIFINDEX) {
2820fb7589a1SPavel Emelyanov 		ret = -EPERM;
2821fb7589a1SPavel Emelyanov 		if (tun)
2822fb7589a1SPavel Emelyanov 			goto unlock;
2823fb7589a1SPavel Emelyanov 
2824fb7589a1SPavel Emelyanov 		ret = -EFAULT;
2825fb7589a1SPavel Emelyanov 		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
2826fb7589a1SPavel Emelyanov 			goto unlock;
2827fb7589a1SPavel Emelyanov 
2828fb7589a1SPavel Emelyanov 		ret = 0;
2829fb7589a1SPavel Emelyanov 		tfile->ifindex = ifindex;
2830fb7589a1SPavel Emelyanov 		goto unlock;
2831fb7589a1SPavel Emelyanov 	}
28321da177e4SLinus Torvalds 
2833876bfd4dSHerbert Xu 	ret = -EBADFD;
28341da177e4SLinus Torvalds 	if (!tun)
2835876bfd4dSHerbert Xu 		goto unlock;
28361da177e4SLinus Torvalds 
28371e588338SJason Wang 	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
28381da177e4SLinus Torvalds 
2839631ab46bSEric W. Biederman 	ret = 0;
28401da177e4SLinus Torvalds 	switch (cmd) {
2841e3b99556SMark McLoughlin 	case TUNGETIFF:
28429ce99cf6SRami Rosen 		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
2843e3b99556SMark McLoughlin 
28443d407a80SPavel Emelyanov 		if (tfile->detached)
28453d407a80SPavel Emelyanov 			ifr.ifr_flags |= IFF_DETACH_QUEUE;
2846849c9b6fSPavel Emelyanov 		if (!tfile->socket.sk->sk_filter)
2847849c9b6fSPavel Emelyanov 			ifr.ifr_flags |= IFF_NOFILTER;
28483d407a80SPavel Emelyanov 
284950857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
2850631ab46bSEric W. Biederman 			ret = -EFAULT;
2851e3b99556SMark McLoughlin 		break;
2852e3b99556SMark McLoughlin 
28531da177e4SLinus Torvalds 	case TUNSETNOCSUM:
28541da177e4SLinus Torvalds 		/* Disable/Enable checksum */
28551da177e4SLinus Torvalds 
285688255375SMichał Mirosław 		/* [unimplemented] */
285788255375SMichał Mirosław 		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
28586b8a66eeSJoe Perches 			  arg ? "disabled" : "enabled");
28591da177e4SLinus Torvalds 		break;
28601da177e4SLinus Torvalds 
28611da177e4SLinus Torvalds 	case TUNSETPERSIST:
286254f968d6SJason Wang 		/* Disable/Enable persist mode. Keep an extra reference to the
286354f968d6SJason Wang 		 * module to prevent the module being unprobed.
286454f968d6SJason Wang 		 */
286540630b82SMichael S. Tsirkin 		if (arg && !(tun->flags & IFF_PERSIST)) {
286640630b82SMichael S. Tsirkin 			tun->flags |= IFF_PERSIST;
286754f968d6SJason Wang 			__module_get(THIS_MODULE);
2868dd38bd85SJason Wang 		}
286940630b82SMichael S. Tsirkin 		if (!arg && (tun->flags & IFF_PERSIST)) {
287040630b82SMichael S. Tsirkin 			tun->flags &= ~IFF_PERSIST;
287154f968d6SJason Wang 			module_put(THIS_MODULE);
287254f968d6SJason Wang 		}
28731da177e4SLinus Torvalds 
28746b8a66eeSJoe Perches 		tun_debug(KERN_INFO, tun, "persist %s\n",
28756b8a66eeSJoe Perches 			  arg ? "enabled" : "disabled");
28761da177e4SLinus Torvalds 		break;
28771da177e4SLinus Torvalds 
28781da177e4SLinus Torvalds 	case TUNSETOWNER:
28791da177e4SLinus Torvalds 		/* Set owner of the device */
28800625c883SEric W. Biederman 		owner = make_kuid(current_user_ns(), arg);
28810625c883SEric W. Biederman 		if (!uid_valid(owner)) {
28820625c883SEric W. Biederman 			ret = -EINVAL;
28830625c883SEric W. Biederman 			break;
28840625c883SEric W. Biederman 		}
28850625c883SEric W. Biederman 		tun->owner = owner;
28861e588338SJason Wang 		tun_debug(KERN_INFO, tun, "owner set to %u\n",
28870625c883SEric W. Biederman 			  from_kuid(&init_user_ns, tun->owner));
28881da177e4SLinus Torvalds 		break;
28891da177e4SLinus Torvalds 
28908c644623SGuido Guenther 	case TUNSETGROUP:
28918c644623SGuido Guenther 		/* Set group of the device */
28920625c883SEric W. Biederman 		group = make_kgid(current_user_ns(), arg);
28930625c883SEric W. Biederman 		if (!gid_valid(group)) {
28940625c883SEric W. Biederman 			ret = -EINVAL;
28950625c883SEric W. Biederman 			break;
28960625c883SEric W. Biederman 		}
28970625c883SEric W. Biederman 		tun->group = group;
28981e588338SJason Wang 		tun_debug(KERN_INFO, tun, "group set to %u\n",
28990625c883SEric W. Biederman 			  from_kgid(&init_user_ns, tun->group));
29008c644623SGuido Guenther 		break;
29018c644623SGuido Guenther 
2902ff4cc3acSMike Kershaw 	case TUNSETLINK:
2903ff4cc3acSMike Kershaw 		/* Only allow setting the type when the interface is down */
2904ff4cc3acSMike Kershaw 		if (tun->dev->flags & IFF_UP) {
29056b8a66eeSJoe Perches 			tun_debug(KERN_INFO, tun,
29066b8a66eeSJoe Perches 				  "Linktype set failed because interface is up\n");
290748abfe05SDavid S. Miller 			ret = -EBUSY;
2908ff4cc3acSMike Kershaw 		} else {
2909ff4cc3acSMike Kershaw 			tun->dev->type = (int) arg;
29106b8a66eeSJoe Perches 			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
29116b8a66eeSJoe Perches 				  tun->dev->type);
291248abfe05SDavid S. Miller 			ret = 0;
2913ff4cc3acSMike Kershaw 		}
2914631ab46bSEric W. Biederman 		break;
2915ff4cc3acSMike Kershaw 
29161da177e4SLinus Torvalds #ifdef TUN_DEBUG
29171da177e4SLinus Torvalds 	case TUNSETDEBUG:
29181da177e4SLinus Torvalds 		tun->debug = arg;
29191da177e4SLinus Torvalds 		break;
29201da177e4SLinus Torvalds #endif
29215228ddc9SRusty Russell 	case TUNSETOFFLOAD:
292288255375SMichał Mirosław 		ret = set_offload(tun, arg);
2923631ab46bSEric W. Biederman 		break;
29245228ddc9SRusty Russell 
2925f271b2ccSMax Krasnyansky 	case TUNSETTXFILTER:
2926f271b2ccSMax Krasnyansky 		/* Can be set only for TAPs */
2927631ab46bSEric W. Biederman 		ret = -EINVAL;
292840630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
2929631ab46bSEric W. Biederman 			break;
2930c0e5a8c2SHarvey Harrison 		ret = update_filter(&tun->txflt, (void __user *)arg);
2931631ab46bSEric W. Biederman 		break;
29321da177e4SLinus Torvalds 
29331da177e4SLinus Torvalds 	case SIOCGIFHWADDR:
2934b595076aSUwe Kleine-König 		/* Get hw address */
2935f271b2ccSMax Krasnyansky 		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
2936f271b2ccSMax Krasnyansky 		ifr.ifr_hwaddr.sa_family = tun->dev->type;
293750857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
2938631ab46bSEric W. Biederman 			ret = -EFAULT;
2939631ab46bSEric W. Biederman 		break;
29401da177e4SLinus Torvalds 
29411da177e4SLinus Torvalds 	case SIOCSIFHWADDR:
2942f271b2ccSMax Krasnyansky 		/* Set hw address */
29436b8a66eeSJoe Perches 		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
29446b8a66eeSJoe Perches 			  ifr.ifr_hwaddr.sa_data);
294540102371SKim B. Heino 
294640102371SKim B. Heino 		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
2947631ab46bSEric W. Biederman 		break;
294833dccbb0SHerbert Xu 
294933dccbb0SHerbert Xu 	case TUNGETSNDBUF:
295054f968d6SJason Wang 		sndbuf = tfile->socket.sk->sk_sndbuf;
295133dccbb0SHerbert Xu 		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
295233dccbb0SHerbert Xu 			ret = -EFAULT;
295333dccbb0SHerbert Xu 		break;
295433dccbb0SHerbert Xu 
295533dccbb0SHerbert Xu 	case TUNSETSNDBUF:
295633dccbb0SHerbert Xu 		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
295733dccbb0SHerbert Xu 			ret = -EFAULT;
295833dccbb0SHerbert Xu 			break;
295933dccbb0SHerbert Xu 		}
296093161922SCraig Gallek 		if (sndbuf <= 0) {
296193161922SCraig Gallek 			ret = -EINVAL;
296293161922SCraig Gallek 			break;
296393161922SCraig Gallek 		}
296433dccbb0SHerbert Xu 
2965c8d68e6bSJason Wang 		tun->sndbuf = sndbuf;
2966c8d68e6bSJason Wang 		tun_set_sndbuf(tun);
296733dccbb0SHerbert Xu 		break;
296833dccbb0SHerbert Xu 
2969d9d52b51SMichael S. Tsirkin 	case TUNGETVNETHDRSZ:
2970d9d52b51SMichael S. Tsirkin 		vnet_hdr_sz = tun->vnet_hdr_sz;
2971d9d52b51SMichael S. Tsirkin 		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
2972d9d52b51SMichael S. Tsirkin 			ret = -EFAULT;
2973d9d52b51SMichael S. Tsirkin 		break;
2974d9d52b51SMichael S. Tsirkin 
2975d9d52b51SMichael S. Tsirkin 	case TUNSETVNETHDRSZ:
2976d9d52b51SMichael S. Tsirkin 		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
2977d9d52b51SMichael S. Tsirkin 			ret = -EFAULT;
2978d9d52b51SMichael S. Tsirkin 			break;
2979d9d52b51SMichael S. Tsirkin 		}
2980d9d52b51SMichael S. Tsirkin 		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
2981d9d52b51SMichael S. Tsirkin 			ret = -EINVAL;
2982d9d52b51SMichael S. Tsirkin 			break;
2983d9d52b51SMichael S. Tsirkin 		}
2984d9d52b51SMichael S. Tsirkin 
2985d9d52b51SMichael S. Tsirkin 		tun->vnet_hdr_sz = vnet_hdr_sz;
2986d9d52b51SMichael S. Tsirkin 		break;
2987d9d52b51SMichael S. Tsirkin 
29881cf8e410SMichael S. Tsirkin 	case TUNGETVNETLE:
29891cf8e410SMichael S. Tsirkin 		le = !!(tun->flags & TUN_VNET_LE);
29901cf8e410SMichael S. Tsirkin 		if (put_user(le, (int __user *)argp))
29911cf8e410SMichael S. Tsirkin 			ret = -EFAULT;
29921cf8e410SMichael S. Tsirkin 		break;
29931cf8e410SMichael S. Tsirkin 
29941cf8e410SMichael S. Tsirkin 	case TUNSETVNETLE:
29951cf8e410SMichael S. Tsirkin 		if (get_user(le, (int __user *)argp)) {
29961cf8e410SMichael S. Tsirkin 			ret = -EFAULT;
29971cf8e410SMichael S. Tsirkin 			break;
29981cf8e410SMichael S. Tsirkin 		}
29991cf8e410SMichael S. Tsirkin 		if (le)
30001cf8e410SMichael S. Tsirkin 			tun->flags |= TUN_VNET_LE;
30011cf8e410SMichael S. Tsirkin 		else
30021cf8e410SMichael S. Tsirkin 			tun->flags &= ~TUN_VNET_LE;
30031cf8e410SMichael S. Tsirkin 		break;
30041cf8e410SMichael S. Tsirkin 
30058b8e658bSGreg Kurz 	case TUNGETVNETBE:
30068b8e658bSGreg Kurz 		ret = tun_get_vnet_be(tun, argp);
30078b8e658bSGreg Kurz 		break;
30088b8e658bSGreg Kurz 
30098b8e658bSGreg Kurz 	case TUNSETVNETBE:
30108b8e658bSGreg Kurz 		ret = tun_set_vnet_be(tun, argp);
30118b8e658bSGreg Kurz 		break;
30128b8e658bSGreg Kurz 
301399405162SMichael S. Tsirkin 	case TUNATTACHFILTER:
301499405162SMichael S. Tsirkin 		/* Can be set only for TAPs */
301599405162SMichael S. Tsirkin 		ret = -EINVAL;
301640630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
301799405162SMichael S. Tsirkin 			break;
301899405162SMichael S. Tsirkin 		ret = -EFAULT;
301954f968d6SJason Wang 		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
302099405162SMichael S. Tsirkin 			break;
302199405162SMichael S. Tsirkin 
3022c8d68e6bSJason Wang 		ret = tun_attach_filter(tun);
302399405162SMichael S. Tsirkin 		break;
302499405162SMichael S. Tsirkin 
302599405162SMichael S. Tsirkin 	case TUNDETACHFILTER:
302699405162SMichael S. Tsirkin 		/* Can be set only for TAPs */
302799405162SMichael S. Tsirkin 		ret = -EINVAL;
302840630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
302999405162SMichael S. Tsirkin 			break;
3030c8d68e6bSJason Wang 		ret = 0;
3031c8d68e6bSJason Wang 		tun_detach_filter(tun, tun->numqueues);
303299405162SMichael S. Tsirkin 		break;
303399405162SMichael S. Tsirkin 
303476975e9cSPavel Emelyanov 	case TUNGETFILTER:
303576975e9cSPavel Emelyanov 		ret = -EINVAL;
303640630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
303776975e9cSPavel Emelyanov 			break;
303876975e9cSPavel Emelyanov 		ret = -EFAULT;
303976975e9cSPavel Emelyanov 		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
304076975e9cSPavel Emelyanov 			break;
304176975e9cSPavel Emelyanov 		ret = 0;
304276975e9cSPavel Emelyanov 		break;
304376975e9cSPavel Emelyanov 
304496f84061SJason Wang 	case TUNSETSTEERINGEBPF:
3045cd5681d7SJason Wang 		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
304696f84061SJason Wang 		break;
304796f84061SJason Wang 
3048*aff3d70aSJason Wang 	case TUNSETFILTEREBPF:
3049*aff3d70aSJason Wang 		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3050*aff3d70aSJason Wang 		break;
3051*aff3d70aSJason Wang 
30521da177e4SLinus Torvalds 	default:
3053631ab46bSEric W. Biederman 		ret = -EINVAL;
3054631ab46bSEric W. Biederman 		break;
3055ee289b64SJoe Perches 	}
30561da177e4SLinus Torvalds 
3057876bfd4dSHerbert Xu unlock:
3058876bfd4dSHerbert Xu 	rtnl_unlock();
3059876bfd4dSHerbert Xu 	if (tun)
3060631ab46bSEric W. Biederman 		tun_put(tun);
3061631ab46bSEric W. Biederman 	return ret;
30621da177e4SLinus Torvalds }
30631da177e4SLinus Torvalds 
306450857e2aSArnd Bergmann static long tun_chr_ioctl(struct file *file,
306550857e2aSArnd Bergmann 			  unsigned int cmd, unsigned long arg)
306650857e2aSArnd Bergmann {
306750857e2aSArnd Bergmann 	return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
306850857e2aSArnd Bergmann }
306950857e2aSArnd Bergmann 
307050857e2aSArnd Bergmann #ifdef CONFIG_COMPAT
307150857e2aSArnd Bergmann static long tun_chr_compat_ioctl(struct file *file,
307250857e2aSArnd Bergmann 			 unsigned int cmd, unsigned long arg)
307350857e2aSArnd Bergmann {
307450857e2aSArnd Bergmann 	switch (cmd) {
307550857e2aSArnd Bergmann 	case TUNSETIFF:
307650857e2aSArnd Bergmann 	case TUNGETIFF:
307750857e2aSArnd Bergmann 	case TUNSETTXFILTER:
307850857e2aSArnd Bergmann 	case TUNGETSNDBUF:
307950857e2aSArnd Bergmann 	case TUNSETSNDBUF:
308050857e2aSArnd Bergmann 	case SIOCGIFHWADDR:
308150857e2aSArnd Bergmann 	case SIOCSIFHWADDR:
308250857e2aSArnd Bergmann 		arg = (unsigned long)compat_ptr(arg);
308350857e2aSArnd Bergmann 		break;
308450857e2aSArnd Bergmann 	default:
308550857e2aSArnd Bergmann 		arg = (compat_ulong_t)arg;
308650857e2aSArnd Bergmann 		break;
308750857e2aSArnd Bergmann 	}
308850857e2aSArnd Bergmann 
308950857e2aSArnd Bergmann 	/*
309050857e2aSArnd Bergmann 	 * compat_ifreq is shorter than ifreq, so we must not access beyond
309150857e2aSArnd Bergmann 	 * the end of that structure. All fields that are used in this
309250857e2aSArnd Bergmann 	 * driver are compatible though, we don't need to convert the
309350857e2aSArnd Bergmann 	 * contents.
309450857e2aSArnd Bergmann 	 */
309550857e2aSArnd Bergmann 	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
309650857e2aSArnd Bergmann }
309750857e2aSArnd Bergmann #endif /* CONFIG_COMPAT */
309850857e2aSArnd Bergmann 
30991da177e4SLinus Torvalds static int tun_chr_fasync(int fd, struct file *file, int on)
31001da177e4SLinus Torvalds {
310154f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
31021da177e4SLinus Torvalds 	int ret;
31031da177e4SLinus Torvalds 
310454f968d6SJason Wang 	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
31059d319522SJonathan Corbet 		goto out;
31061da177e4SLinus Torvalds 
31071da177e4SLinus Torvalds 	if (on) {
3108e0b93eddSJeff Layton 		__f_setown(file, task_pid(current), PIDTYPE_PID, 0);
310954f968d6SJason Wang 		tfile->flags |= TUN_FASYNC;
31101da177e4SLinus Torvalds 	} else
311154f968d6SJason Wang 		tfile->flags &= ~TUN_FASYNC;
31129d319522SJonathan Corbet 	ret = 0;
31139d319522SJonathan Corbet out:
31149d319522SJonathan Corbet 	return ret;
31151da177e4SLinus Torvalds }
31161da177e4SLinus Torvalds 
31171da177e4SLinus Torvalds static int tun_chr_open(struct inode *inode, struct file * file)
31181da177e4SLinus Torvalds {
3119140e807dSEric W. Biederman 	struct net *net = current->nsproxy->net_ns;
3120631ab46bSEric W. Biederman 	struct tun_file *tfile;
3121deed49fbSThomas Gleixner 
31226b8a66eeSJoe Perches 	DBG1(KERN_INFO, "tunX: tun_chr_open\n");
3123631ab46bSEric W. Biederman 
3124140e807dSEric W. Biederman 	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
312511aa9c28SEric W. Biederman 					    &tun_proto, 0);
3126631ab46bSEric W. Biederman 	if (!tfile)
3127631ab46bSEric W. Biederman 		return -ENOMEM;
3128c956674bSMonam Agarwal 	RCU_INIT_POINTER(tfile->tun, NULL);
312954f968d6SJason Wang 	tfile->flags = 0;
3130fb7589a1SPavel Emelyanov 	tfile->ifindex = 0;
313154f968d6SJason Wang 
313254f968d6SJason Wang 	init_waitqueue_head(&tfile->wq.wait);
31339e641bdcSXi Wang 	RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);
313454f968d6SJason Wang 
313554f968d6SJason Wang 	tfile->socket.file = file;
313654f968d6SJason Wang 	tfile->socket.ops = &tun_socket_ops;
313754f968d6SJason Wang 
313854f968d6SJason Wang 	sock_init_data(&tfile->socket, &tfile->sk);
313954f968d6SJason Wang 
314054f968d6SJason Wang 	tfile->sk.sk_write_space = tun_sock_write_space;
314154f968d6SJason Wang 	tfile->sk.sk_sndbuf = INT_MAX;
314254f968d6SJason Wang 
3143631ab46bSEric W. Biederman 	file->private_data = tfile;
31444008e97fSJason Wang 	INIT_LIST_HEAD(&tfile->next);
314554f968d6SJason Wang 
314619a6afb2SJason Wang 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
314719a6afb2SJason Wang 
31481da177e4SLinus Torvalds 	return 0;
31491da177e4SLinus Torvalds }
31501da177e4SLinus Torvalds 
31511da177e4SLinus Torvalds static int tun_chr_close(struct inode *inode, struct file *file)
31521da177e4SLinus Torvalds {
3153631ab46bSEric W. Biederman 	struct tun_file *tfile = file->private_data;
31541da177e4SLinus Torvalds 
3155c8d68e6bSJason Wang 	tun_detach(tfile, true);
31561da177e4SLinus Torvalds 
31571da177e4SLinus Torvalds 	return 0;
31581da177e4SLinus Torvalds }
31591da177e4SLinus Torvalds 
316093e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS
31619484dc74Syuan linyu static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
316293e14b6dSMasatake YAMATO {
31639484dc74Syuan linyu 	struct tun_file *tfile = file->private_data;
316493e14b6dSMasatake YAMATO 	struct tun_struct *tun;
316593e14b6dSMasatake YAMATO 	struct ifreq ifr;
316693e14b6dSMasatake YAMATO 
316793e14b6dSMasatake YAMATO 	memset(&ifr, 0, sizeof(ifr));
316893e14b6dSMasatake YAMATO 
316993e14b6dSMasatake YAMATO 	rtnl_lock();
31709484dc74Syuan linyu 	tun = tun_get(tfile);
317193e14b6dSMasatake YAMATO 	if (tun)
317293e14b6dSMasatake YAMATO 		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
317393e14b6dSMasatake YAMATO 	rtnl_unlock();
317493e14b6dSMasatake YAMATO 
317593e14b6dSMasatake YAMATO 	if (tun)
317693e14b6dSMasatake YAMATO 		tun_put(tun);
317793e14b6dSMasatake YAMATO 
3178a3816ab0SJoe Perches 	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
317993e14b6dSMasatake YAMATO }
318093e14b6dSMasatake YAMATO #endif
318193e14b6dSMasatake YAMATO 
3182d54b1fdbSArjan van de Ven static const struct file_operations tun_fops = {
31831da177e4SLinus Torvalds 	.owner	= THIS_MODULE,
31841da177e4SLinus Torvalds 	.llseek = no_llseek,
31859b067034SAl Viro 	.read_iter  = tun_chr_read_iter,
3186f5ff53b4SAl Viro 	.write_iter = tun_chr_write_iter,
31871da177e4SLinus Torvalds 	.poll	= tun_chr_poll,
3188876bfd4dSHerbert Xu 	.unlocked_ioctl	= tun_chr_ioctl,
318950857e2aSArnd Bergmann #ifdef CONFIG_COMPAT
319050857e2aSArnd Bergmann 	.compat_ioctl = tun_chr_compat_ioctl,
319150857e2aSArnd Bergmann #endif
31921da177e4SLinus Torvalds 	.open	= tun_chr_open,
31931da177e4SLinus Torvalds 	.release = tun_chr_close,
319493e14b6dSMasatake YAMATO 	.fasync = tun_chr_fasync,
319593e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS
319693e14b6dSMasatake YAMATO 	.show_fdinfo = tun_chr_show_fdinfo,
319793e14b6dSMasatake YAMATO #endif
31981da177e4SLinus Torvalds };
31991da177e4SLinus Torvalds 
32001da177e4SLinus Torvalds static struct miscdevice tun_miscdev = {
32011da177e4SLinus Torvalds 	.minor = TUN_MINOR,
32021da177e4SLinus Torvalds 	.name = "tun",
3203e454cea2SKay Sievers 	.nodename = "net/tun",
32041da177e4SLinus Torvalds 	.fops = &tun_fops,
32051da177e4SLinus Torvalds };
32061da177e4SLinus Torvalds 
32071da177e4SLinus Torvalds /* ethtool interface */
32081da177e4SLinus Torvalds 
320929ccc49dSPhilippe Reynes static int tun_get_link_ksettings(struct net_device *dev,
321029ccc49dSPhilippe Reynes 				  struct ethtool_link_ksettings *cmd)
32111da177e4SLinus Torvalds {
321229ccc49dSPhilippe Reynes 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
321329ccc49dSPhilippe Reynes 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
321429ccc49dSPhilippe Reynes 	cmd->base.speed		= SPEED_10;
321529ccc49dSPhilippe Reynes 	cmd->base.duplex	= DUPLEX_FULL;
321629ccc49dSPhilippe Reynes 	cmd->base.port		= PORT_TP;
321729ccc49dSPhilippe Reynes 	cmd->base.phy_address	= 0;
321829ccc49dSPhilippe Reynes 	cmd->base.autoneg	= AUTONEG_DISABLE;
32191da177e4SLinus Torvalds 	return 0;
32201da177e4SLinus Torvalds }
32211da177e4SLinus Torvalds 
32221da177e4SLinus Torvalds static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
32231da177e4SLinus Torvalds {
32241da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
32251da177e4SLinus Torvalds 
322633a5ba14SRick Jones 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
322733a5ba14SRick Jones 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
32281da177e4SLinus Torvalds 
32291da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
323040630b82SMichael S. Tsirkin 	case IFF_TUN:
323133a5ba14SRick Jones 		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
32321da177e4SLinus Torvalds 		break;
323340630b82SMichael S. Tsirkin 	case IFF_TAP:
323433a5ba14SRick Jones 		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
32351da177e4SLinus Torvalds 		break;
32361da177e4SLinus Torvalds 	}
32371da177e4SLinus Torvalds }
32381da177e4SLinus Torvalds 
32391da177e4SLinus Torvalds static u32 tun_get_msglevel(struct net_device *dev)
32401da177e4SLinus Torvalds {
32411da177e4SLinus Torvalds #ifdef TUN_DEBUG
32421da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
32431da177e4SLinus Torvalds 	return tun->debug;
32441da177e4SLinus Torvalds #else
32451da177e4SLinus Torvalds 	return -EOPNOTSUPP;
32461da177e4SLinus Torvalds #endif
32471da177e4SLinus Torvalds }
32481da177e4SLinus Torvalds 
32491da177e4SLinus Torvalds static void tun_set_msglevel(struct net_device *dev, u32 value)
32501da177e4SLinus Torvalds {
32511da177e4SLinus Torvalds #ifdef TUN_DEBUG
32521da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
32531da177e4SLinus Torvalds 	tun->debug = value;
32541da177e4SLinus Torvalds #endif
32551da177e4SLinus Torvalds }
32561da177e4SLinus Torvalds 
32575503fcecSJason Wang static int tun_get_coalesce(struct net_device *dev,
32585503fcecSJason Wang 			    struct ethtool_coalesce *ec)
32595503fcecSJason Wang {
32605503fcecSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
32615503fcecSJason Wang 
32625503fcecSJason Wang 	ec->rx_max_coalesced_frames = tun->rx_batched;
32635503fcecSJason Wang 
32645503fcecSJason Wang 	return 0;
32655503fcecSJason Wang }
32665503fcecSJason Wang 
32675503fcecSJason Wang static int tun_set_coalesce(struct net_device *dev,
32685503fcecSJason Wang 			    struct ethtool_coalesce *ec)
32695503fcecSJason Wang {
32705503fcecSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
32715503fcecSJason Wang 
32725503fcecSJason Wang 	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
32735503fcecSJason Wang 		tun->rx_batched = NAPI_POLL_WEIGHT;
32745503fcecSJason Wang 	else
32755503fcecSJason Wang 		tun->rx_batched = ec->rx_max_coalesced_frames;
32765503fcecSJason Wang 
32775503fcecSJason Wang 	return 0;
32785503fcecSJason Wang }
32795503fcecSJason Wang 
32807282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops = {
32811da177e4SLinus Torvalds 	.get_drvinfo	= tun_get_drvinfo,
32821da177e4SLinus Torvalds 	.get_msglevel	= tun_get_msglevel,
32831da177e4SLinus Torvalds 	.set_msglevel	= tun_set_msglevel,
3284bee31369SNolan Leake 	.get_link	= ethtool_op_get_link,
3285eda29772SRichard Cochran 	.get_ts_info	= ethtool_op_get_ts_info,
32865503fcecSJason Wang 	.get_coalesce   = tun_get_coalesce,
32875503fcecSJason Wang 	.set_coalesce   = tun_set_coalesce,
328829ccc49dSPhilippe Reynes 	.get_link_ksettings = tun_get_link_ksettings,
32891da177e4SLinus Torvalds };
32901da177e4SLinus Torvalds 
32911576d986SJason Wang static int tun_queue_resize(struct tun_struct *tun)
32921576d986SJason Wang {
32931576d986SJason Wang 	struct net_device *dev = tun->dev;
32941576d986SJason Wang 	struct tun_file *tfile;
32955990a305SJason Wang 	struct ptr_ring **rings;
32961576d986SJason Wang 	int n = tun->numqueues + tun->numdisabled;
32971576d986SJason Wang 	int ret, i;
32981576d986SJason Wang 
32995990a305SJason Wang 	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
33005990a305SJason Wang 	if (!rings)
33011576d986SJason Wang 		return -ENOMEM;
33021576d986SJason Wang 
33031576d986SJason Wang 	for (i = 0; i < tun->numqueues; i++) {
33041576d986SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
33055990a305SJason Wang 		rings[i] = &tfile->tx_ring;
33061576d986SJason Wang 	}
33071576d986SJason Wang 	list_for_each_entry(tfile, &tun->disabled, next)
33085990a305SJason Wang 		rings[i++] = &tfile->tx_ring;
33091576d986SJason Wang 
33105990a305SJason Wang 	ret = ptr_ring_resize_multiple(rings, n,
33115990a305SJason Wang 				       dev->tx_queue_len, GFP_KERNEL,
3312fc72d1d5SJason Wang 				       tun_ptr_free);
33131576d986SJason Wang 
33145990a305SJason Wang 	kfree(rings);
33151576d986SJason Wang 	return ret;
33161576d986SJason Wang }
33171576d986SJason Wang 
33181576d986SJason Wang static int tun_device_event(struct notifier_block *unused,
33191576d986SJason Wang 			    unsigned long event, void *ptr)
33201576d986SJason Wang {
33211576d986SJason Wang 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
33221576d986SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
33231576d986SJason Wang 
332486dfb4acSCraig Gallek 	if (dev->rtnl_link_ops != &tun_link_ops)
332586dfb4acSCraig Gallek 		return NOTIFY_DONE;
332686dfb4acSCraig Gallek 
33271576d986SJason Wang 	switch (event) {
33281576d986SJason Wang 	case NETDEV_CHANGE_TX_QUEUE_LEN:
33291576d986SJason Wang 		if (tun_queue_resize(tun))
33301576d986SJason Wang 			return NOTIFY_BAD;
33311576d986SJason Wang 		break;
33321576d986SJason Wang 	default:
33331576d986SJason Wang 		break;
33341576d986SJason Wang 	}
33351576d986SJason Wang 
33361576d986SJason Wang 	return NOTIFY_DONE;
33371576d986SJason Wang }
33381576d986SJason Wang 
33391576d986SJason Wang static struct notifier_block tun_notifier_block __read_mostly = {
33401576d986SJason Wang 	.notifier_call	= tun_device_event,
33411576d986SJason Wang };
334279d17604SPavel Emelyanov 
33431da177e4SLinus Torvalds static int __init tun_init(void)
33441da177e4SLinus Torvalds {
33451da177e4SLinus Torvalds 	int ret = 0;
33461da177e4SLinus Torvalds 
33476b8a66eeSJoe Perches 	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
33481da177e4SLinus Torvalds 
3349f019a7a5SEric W. Biederman 	ret = rtnl_link_register(&tun_link_ops);
335079d17604SPavel Emelyanov 	if (ret) {
33516b8a66eeSJoe Perches 		pr_err("Can't register link_ops\n");
3352f019a7a5SEric W. Biederman 		goto err_linkops;
335379d17604SPavel Emelyanov 	}
335479d17604SPavel Emelyanov 
33551da177e4SLinus Torvalds 	ret = misc_register(&tun_miscdev);
335679d17604SPavel Emelyanov 	if (ret) {
33576b8a66eeSJoe Perches 		pr_err("Can't register misc device %d\n", TUN_MINOR);
335879d17604SPavel Emelyanov 		goto err_misc;
335979d17604SPavel Emelyanov 	}
33601576d986SJason Wang 
33615edfbd3cSTonghao Zhang 	ret = register_netdevice_notifier(&tun_notifier_block);
33625edfbd3cSTonghao Zhang 	if (ret) {
33635edfbd3cSTonghao Zhang 		pr_err("Can't register netdevice notifier\n");
33645edfbd3cSTonghao Zhang 		goto err_notifier;
33655edfbd3cSTonghao Zhang 	}
33665edfbd3cSTonghao Zhang 
336779d17604SPavel Emelyanov 	return  0;
33685edfbd3cSTonghao Zhang 
33695edfbd3cSTonghao Zhang err_notifier:
33705edfbd3cSTonghao Zhang 	misc_deregister(&tun_miscdev);
337179d17604SPavel Emelyanov err_misc:
3372f019a7a5SEric W. Biederman 	rtnl_link_unregister(&tun_link_ops);
3373f019a7a5SEric W. Biederman err_linkops:
33741da177e4SLinus Torvalds 	return ret;
33751da177e4SLinus Torvalds }
33761da177e4SLinus Torvalds 
33771da177e4SLinus Torvalds static void tun_cleanup(void)
33781da177e4SLinus Torvalds {
33791da177e4SLinus Torvalds 	misc_deregister(&tun_miscdev);
3380f019a7a5SEric W. Biederman 	rtnl_link_unregister(&tun_link_ops);
33811576d986SJason Wang 	unregister_netdevice_notifier(&tun_notifier_block);
33821da177e4SLinus Torvalds }
33831da177e4SLinus Torvalds 
338405c2828cSMichael S. Tsirkin /* Get an underlying socket object from tun file.  Returns error unless file is
338505c2828cSMichael S. Tsirkin  * attached to a device.  The returned object works like a packet socket, it
338605c2828cSMichael S. Tsirkin  * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
338705c2828cSMichael S. Tsirkin  * holding a reference to the file for as long as the socket is in use. */
338805c2828cSMichael S. Tsirkin struct socket *tun_get_socket(struct file *file)
338905c2828cSMichael S. Tsirkin {
33906e914fc7SJason Wang 	struct tun_file *tfile;
339105c2828cSMichael S. Tsirkin 	if (file->f_op != &tun_fops)
339205c2828cSMichael S. Tsirkin 		return ERR_PTR(-EINVAL);
33936e914fc7SJason Wang 	tfile = file->private_data;
33946e914fc7SJason Wang 	if (!tfile)
339505c2828cSMichael S. Tsirkin 		return ERR_PTR(-EBADFD);
339654f968d6SJason Wang 	return &tfile->socket;
339705c2828cSMichael S. Tsirkin }
339805c2828cSMichael S. Tsirkin EXPORT_SYMBOL_GPL(tun_get_socket);
339905c2828cSMichael S. Tsirkin 
34005990a305SJason Wang struct ptr_ring *tun_get_tx_ring(struct file *file)
340183339c6bSJason Wang {
340283339c6bSJason Wang 	struct tun_file *tfile;
340383339c6bSJason Wang 
340483339c6bSJason Wang 	if (file->f_op != &tun_fops)
340583339c6bSJason Wang 		return ERR_PTR(-EINVAL);
340683339c6bSJason Wang 	tfile = file->private_data;
340783339c6bSJason Wang 	if (!tfile)
340883339c6bSJason Wang 		return ERR_PTR(-EBADFD);
34095990a305SJason Wang 	return &tfile->tx_ring;
341083339c6bSJason Wang }
34115990a305SJason Wang EXPORT_SYMBOL_GPL(tun_get_tx_ring);
341283339c6bSJason Wang 
34131da177e4SLinus Torvalds module_init(tun_init);
34141da177e4SLinus Torvalds module_exit(tun_cleanup);
34151da177e4SLinus Torvalds MODULE_DESCRIPTION(DRV_DESCRIPTION);
34161da177e4SLinus Torvalds MODULE_AUTHOR(DRV_COPYRIGHT);
34171da177e4SLinus Torvalds MODULE_LICENSE("GPL");
34181da177e4SLinus Torvalds MODULE_ALIAS_MISCDEV(TUN_MINOR);
3419578454ffSKay Sievers MODULE_ALIAS("devname:net/tun");
3420