xref: /openbmc/linux/drivers/net/tun.c (revision 735fc4054b3a25034445c6713d259da0f96f8131)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  *  TUN - Universal TUN/TAP device driver.
31da177e4SLinus Torvalds  *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  *  This program is free software; you can redistribute it and/or modify
61da177e4SLinus Torvalds  *  it under the terms of the GNU General Public License as published by
71da177e4SLinus Torvalds  *  the Free Software Foundation; either version 2 of the License, or
81da177e4SLinus Torvalds  *  (at your option) any later version.
91da177e4SLinus Torvalds  *
101da177e4SLinus Torvalds  *  This program is distributed in the hope that it will be useful,
111da177e4SLinus Torvalds  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
121da177e4SLinus Torvalds  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
131da177e4SLinus Torvalds  *  GNU General Public License for more details.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
161da177e4SLinus Torvalds  */
171da177e4SLinus Torvalds 
181da177e4SLinus Torvalds /*
191da177e4SLinus Torvalds  *  Changes:
201da177e4SLinus Torvalds  *
21ff4cc3acSMike Kershaw  *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
22ff4cc3acSMike Kershaw  *    Add TUNSETLINK ioctl to set the link encapsulation
23ff4cc3acSMike Kershaw  *
241da177e4SLinus Torvalds  *  Mark Smith <markzzzsmith@yahoo.com.au>
25344dc8edSJoe Perches  *    Use eth_random_addr() for tap MAC address.
261da177e4SLinus Torvalds  *
271da177e4SLinus Torvalds  *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
281da177e4SLinus Torvalds  *    Fixes in packet dropping, queue length setting and queue wakeup.
291da177e4SLinus Torvalds  *    Increased default tx queue length.
301da177e4SLinus Torvalds  *    Added ethtool API.
311da177e4SLinus Torvalds  *    Minor cleanups
321da177e4SLinus Torvalds  *
331da177e4SLinus Torvalds  *  Daniel Podlejski <underley@underley.eu.org>
341da177e4SLinus Torvalds  *    Modifications for 2.3.99-pre5 kernel.
351da177e4SLinus Torvalds  */
361da177e4SLinus Torvalds 
376b8a66eeSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
386b8a66eeSJoe Perches 
391da177e4SLinus Torvalds #define DRV_NAME	"tun"
401da177e4SLinus Torvalds #define DRV_VERSION	"1.6"
411da177e4SLinus Torvalds #define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
421da177e4SLinus Torvalds #define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
431da177e4SLinus Torvalds 
441da177e4SLinus Torvalds #include <linux/module.h>
451da177e4SLinus Torvalds #include <linux/errno.h>
461da177e4SLinus Torvalds #include <linux/kernel.h>
47174cd4b1SIngo Molnar #include <linux/sched/signal.h>
481da177e4SLinus Torvalds #include <linux/major.h>
491da177e4SLinus Torvalds #include <linux/slab.h>
501da177e4SLinus Torvalds #include <linux/poll.h>
511da177e4SLinus Torvalds #include <linux/fcntl.h>
521da177e4SLinus Torvalds #include <linux/init.h>
531da177e4SLinus Torvalds #include <linux/skbuff.h>
541da177e4SLinus Torvalds #include <linux/netdevice.h>
551da177e4SLinus Torvalds #include <linux/etherdevice.h>
561da177e4SLinus Torvalds #include <linux/miscdevice.h>
571da177e4SLinus Torvalds #include <linux/ethtool.h>
581da177e4SLinus Torvalds #include <linux/rtnetlink.h>
5950857e2aSArnd Bergmann #include <linux/compat.h>
601da177e4SLinus Torvalds #include <linux/if.h>
611da177e4SLinus Torvalds #include <linux/if_arp.h>
621da177e4SLinus Torvalds #include <linux/if_ether.h>
631da177e4SLinus Torvalds #include <linux/if_tun.h>
646680ec68SJason Wang #include <linux/if_vlan.h>
651da177e4SLinus Torvalds #include <linux/crc32.h>
66d647a591SPavel Emelyanov #include <linux/nsproxy.h>
67f43798c2SRusty Russell #include <linux/virtio_net.h>
6899405162SMichael S. Tsirkin #include <linux/rcupdate.h>
69881d966bSEric W. Biederman #include <net/net_namespace.h>
7079d17604SPavel Emelyanov #include <net/netns/generic.h>
71f019a7a5SEric W. Biederman #include <net/rtnetlink.h>
7233dccbb0SHerbert Xu #include <net/sock.h>
73*735fc405SJesper Dangaard Brouer #include <net/xdp.h>
7493e14b6dSMasatake YAMATO #include <linux/seq_file.h>
75e0b46d0eSHerbert Xu #include <linux/uio.h>
761576d986SJason Wang #include <linux/skb_array.h>
77761876c8SJason Wang #include <linux/bpf.h>
78761876c8SJason Wang #include <linux/bpf_trace.h>
7990e33d45SPetar Penkov #include <linux/mutex.h>
801da177e4SLinus Torvalds 
817c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
82f2780d6dSKirill Tkhai #include <linux/proc_fs.h>
831da177e4SLinus Torvalds 
8414daa021SRusty Russell /* Uncomment to enable debugging */
8514daa021SRusty Russell /* #define TUN_DEBUG 1 */
8614daa021SRusty Russell 
871da177e4SLinus Torvalds #ifdef TUN_DEBUG
881da177e4SLinus Torvalds static int debug;
8914daa021SRusty Russell 
906b8a66eeSJoe Perches #define tun_debug(level, tun, fmt, args...)			\
916b8a66eeSJoe Perches do {								\
926b8a66eeSJoe Perches 	if (tun->debug)						\
936b8a66eeSJoe Perches 		netdev_printk(level, tun->dev, fmt, ##args);	\
946b8a66eeSJoe Perches } while (0)
956b8a66eeSJoe Perches #define DBG1(level, fmt, args...)				\
966b8a66eeSJoe Perches do {								\
976b8a66eeSJoe Perches 	if (debug == 2)						\
986b8a66eeSJoe Perches 		printk(level fmt, ##args);			\
996b8a66eeSJoe Perches } while (0)
10014daa021SRusty Russell #else
1016b8a66eeSJoe Perches #define tun_debug(level, tun, fmt, args...)			\
1026b8a66eeSJoe Perches do {								\
1036b8a66eeSJoe Perches 	if (0)							\
1046b8a66eeSJoe Perches 		netdev_printk(level, tun->dev, fmt, ##args);	\
1056b8a66eeSJoe Perches } while (0)
1066b8a66eeSJoe Perches #define DBG1(level, fmt, args...)				\
1076b8a66eeSJoe Perches do {								\
1086b8a66eeSJoe Perches 	if (0)							\
1096b8a66eeSJoe Perches 		printk(level fmt, ##args);			\
1106b8a66eeSJoe Perches } while (0)
1111da177e4SLinus Torvalds #endif
1121da177e4SLinus Torvalds 
113761876c8SJason Wang #define TUN_HEADROOM 256
1147df13219SJason Wang #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
11566ccbc9cSJason Wang 
116031f5e03SMichael S. Tsirkin /* TUN device flags */
117031f5e03SMichael S. Tsirkin 
118031f5e03SMichael S. Tsirkin /* IFF_ATTACH_QUEUE is never stored in device flags,
119031f5e03SMichael S. Tsirkin  * overload it to mean fasync when stored there.
120031f5e03SMichael S. Tsirkin  */
121031f5e03SMichael S. Tsirkin #define TUN_FASYNC	IFF_ATTACH_QUEUE
1221cf8e410SMichael S. Tsirkin /* High bits in flags field are unused. */
1231cf8e410SMichael S. Tsirkin #define TUN_VNET_LE     0x80000000
1248b8e658bSGreg Kurz #define TUN_VNET_BE     0x40000000
125031f5e03SMichael S. Tsirkin 
126031f5e03SMichael S. Tsirkin #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
12790e33d45SPetar Penkov 		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
12890e33d45SPetar Penkov 
1290690899bSMichael S. Tsirkin #define GOODCOPY_LEN 128
1300690899bSMichael S. Tsirkin 
131f271b2ccSMax Krasnyansky #define FLT_EXACT_COUNT 8
132f271b2ccSMax Krasnyansky struct tap_filter {
133f271b2ccSMax Krasnyansky 	unsigned int    count;    /* Number of addrs. Zero means disabled */
134f271b2ccSMax Krasnyansky 	u32             mask[2];  /* Mask of the hashed addrs */
135f271b2ccSMax Krasnyansky 	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
136f271b2ccSMax Krasnyansky };
137f271b2ccSMax Krasnyansky 
138baf71c5cSPankaj Gupta /* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
139baf71c5cSPankaj Gupta  * to max number of VCPUs in guest. */
140baf71c5cSPankaj Gupta #define MAX_TAP_QUEUES 256
141b8732fb7SJason Wang #define MAX_TAP_FLOWS  4096
142c8d68e6bSJason Wang 
14396442e42SJason Wang #define TUN_FLOW_EXPIRE (3 * HZ)
14496442e42SJason Wang 
145608b9977SPaolo Abeni struct tun_pcpu_stats {
146608b9977SPaolo Abeni 	u64 rx_packets;
147608b9977SPaolo Abeni 	u64 rx_bytes;
148608b9977SPaolo Abeni 	u64 tx_packets;
149608b9977SPaolo Abeni 	u64 tx_bytes;
150608b9977SPaolo Abeni 	struct u64_stats_sync syncp;
151608b9977SPaolo Abeni 	u32 rx_dropped;
152608b9977SPaolo Abeni 	u32 tx_dropped;
153608b9977SPaolo Abeni 	u32 rx_frame_errors;
154608b9977SPaolo Abeni };
155608b9977SPaolo Abeni 
15654f968d6SJason Wang /* A tun_file connects an open character device to a tuntap netdevice. It
15792d4ea6eSstephen hemminger  * also contains all socket related structures (except sock_fprog and tap_filter)
15854f968d6SJason Wang  * to serve as one transmit queue for tuntap device. The sock_fprog and
15954f968d6SJason Wang  * tap_filter were kept in tun_struct since they were used for filtering for the
16036fe8c09SRami Rosen  * netdevice not for a specific queue (at least I didn't see the requirement for
16154f968d6SJason Wang  * this).
1626e914fc7SJason Wang  *
1636e914fc7SJason Wang  * RCU usage:
16436fe8c09SRami Rosen  * The tun_file and tun_struct are loosely coupled, the pointer from one to the
1656e914fc7SJason Wang  * other can only be read while rcu_read_lock or rtnl_lock is held.
16654f968d6SJason Wang  */
167631ab46bSEric W. Biederman struct tun_file {
16854f968d6SJason Wang 	struct sock sk;
16954f968d6SJason Wang 	struct socket socket;
17054f968d6SJason Wang 	struct socket_wq wq;
1716e914fc7SJason Wang 	struct tun_struct __rcu *tun;
17254f968d6SJason Wang 	struct fasync_struct *fasync;
17354f968d6SJason Wang 	/* only used for fasnyc */
17454f968d6SJason Wang 	unsigned int flags;
175fb7589a1SPavel Emelyanov 	union {
176c8d68e6bSJason Wang 		u16 queue_index;
177fb7589a1SPavel Emelyanov 		unsigned int ifindex;
178fb7589a1SPavel Emelyanov 	};
17994317099SPetar Penkov 	struct napi_struct napi;
180aec72f33SEric Dumazet 	bool napi_enabled;
18190e33d45SPetar Penkov 	struct mutex napi_mutex;	/* Protects access to the above napi */
1824008e97fSJason Wang 	struct list_head next;
1834008e97fSJason Wang 	struct tun_struct *detached;
1845990a305SJason Wang 	struct ptr_ring tx_ring;
1858bf5c4eeSJesper Dangaard Brouer 	struct xdp_rxq_info xdp_rxq;
186631ab46bSEric W. Biederman };
187631ab46bSEric W. Biederman 
18896442e42SJason Wang struct tun_flow_entry {
18996442e42SJason Wang 	struct hlist_node hash_link;
19096442e42SJason Wang 	struct rcu_head rcu;
19196442e42SJason Wang 	struct tun_struct *tun;
19296442e42SJason Wang 
19396442e42SJason Wang 	u32 rxhash;
1949bc88939STom Herbert 	u32 rps_rxhash;
19596442e42SJason Wang 	int queue_index;
19696442e42SJason Wang 	unsigned long updated;
19796442e42SJason Wang };
19896442e42SJason Wang 
19996442e42SJason Wang #define TUN_NUM_FLOW_ENTRIES 1024
20096442e42SJason Wang 
201cd5681d7SJason Wang struct tun_prog {
20296f84061SJason Wang 	struct rcu_head rcu;
20396f84061SJason Wang 	struct bpf_prog *prog;
20496f84061SJason Wang };
20596f84061SJason Wang 
20654f968d6SJason Wang /* Since the socket were moved to tun_file, to preserve the behavior of persist
20736fe8c09SRami Rosen  * device, socket filter, sndbuf and vnet header size were restore when the
20854f968d6SJason Wang  * file were attached to a persist device.
20954f968d6SJason Wang  */
21014daa021SRusty Russell struct tun_struct {
211c8d68e6bSJason Wang 	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
212c8d68e6bSJason Wang 	unsigned int            numqueues;
213f271b2ccSMax Krasnyansky 	unsigned int 		flags;
2140625c883SEric W. Biederman 	kuid_t			owner;
2150625c883SEric W. Biederman 	kgid_t			group;
21614daa021SRusty Russell 
21714daa021SRusty Russell 	struct net_device	*dev;
218c8f44affSMichał Mirosław 	netdev_features_t	set_features;
21988255375SMichał Mirosław #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
220d591a1f3SDavid S. Miller 			  NETIF_F_TSO6)
221d9d52b51SMichael S. Tsirkin 
222eaea34b2SPaolo Abeni 	int			align;
223d9d52b51SMichael S. Tsirkin 	int			vnet_hdr_sz;
22454f968d6SJason Wang 	int			sndbuf;
22554f968d6SJason Wang 	struct tap_filter	txflt;
22654f968d6SJason Wang 	struct sock_fprog	fprog;
22754f968d6SJason Wang 	/* protected by rtnl lock */
22854f968d6SJason Wang 	bool			filter_attached;
22914daa021SRusty Russell #ifdef TUN_DEBUG
23014daa021SRusty Russell 	int debug;
23114daa021SRusty Russell #endif
23296442e42SJason Wang 	spinlock_t lock;
23396442e42SJason Wang 	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
23496442e42SJason Wang 	struct timer_list flow_gc_timer;
23596442e42SJason Wang 	unsigned long ageing_time;
2364008e97fSJason Wang 	unsigned int numdisabled;
2374008e97fSJason Wang 	struct list_head disabled;
2385dbbaf2dSPaul Moore 	void *security;
239b8732fb7SJason Wang 	u32 flow_count;
2405503fcecSJason Wang 	u32 rx_batched;
241608b9977SPaolo Abeni 	struct tun_pcpu_stats __percpu *pcpu_stats;
242761876c8SJason Wang 	struct bpf_prog __rcu *xdp_prog;
243cd5681d7SJason Wang 	struct tun_prog __rcu *steering_prog;
244aff3d70aSJason Wang 	struct tun_prog __rcu *filter_prog;
24514daa021SRusty Russell };
24614daa021SRusty Russell 
247aff3d70aSJason Wang struct veth {
248aff3d70aSJason Wang 	__be16 h_vlan_proto;
249aff3d70aSJason Wang 	__be16 h_vlan_TCI;
2501da177e4SLinus Torvalds };
2511da177e4SLinus Torvalds 
2521ffcbc85SJesper Dangaard Brouer bool tun_is_xdp_frame(void *ptr)
253fc72d1d5SJason Wang {
254fc72d1d5SJason Wang 	return (unsigned long)ptr & TUN_XDP_FLAG;
255fc72d1d5SJason Wang }
2561ffcbc85SJesper Dangaard Brouer EXPORT_SYMBOL(tun_is_xdp_frame);
257fc72d1d5SJason Wang 
258fc72d1d5SJason Wang void *tun_xdp_to_ptr(void *ptr)
259fc72d1d5SJason Wang {
260fc72d1d5SJason Wang 	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
261fc72d1d5SJason Wang }
262fc72d1d5SJason Wang EXPORT_SYMBOL(tun_xdp_to_ptr);
263fc72d1d5SJason Wang 
264fc72d1d5SJason Wang void *tun_ptr_to_xdp(void *ptr)
265fc72d1d5SJason Wang {
266fc72d1d5SJason Wang 	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
267fc72d1d5SJason Wang }
268fc72d1d5SJason Wang EXPORT_SYMBOL(tun_ptr_to_xdp);
269fc72d1d5SJason Wang 
27094317099SPetar Penkov static int tun_napi_receive(struct napi_struct *napi, int budget)
27194317099SPetar Penkov {
27294317099SPetar Penkov 	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
27394317099SPetar Penkov 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
27494317099SPetar Penkov 	struct sk_buff_head process_queue;
27594317099SPetar Penkov 	struct sk_buff *skb;
27694317099SPetar Penkov 	int received = 0;
27794317099SPetar Penkov 
27894317099SPetar Penkov 	__skb_queue_head_init(&process_queue);
27994317099SPetar Penkov 
28094317099SPetar Penkov 	spin_lock(&queue->lock);
28194317099SPetar Penkov 	skb_queue_splice_tail_init(queue, &process_queue);
28294317099SPetar Penkov 	spin_unlock(&queue->lock);
28394317099SPetar Penkov 
28494317099SPetar Penkov 	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
28594317099SPetar Penkov 		napi_gro_receive(napi, skb);
28694317099SPetar Penkov 		++received;
28794317099SPetar Penkov 	}
28894317099SPetar Penkov 
28994317099SPetar Penkov 	if (!skb_queue_empty(&process_queue)) {
29094317099SPetar Penkov 		spin_lock(&queue->lock);
29194317099SPetar Penkov 		skb_queue_splice(&process_queue, queue);
29294317099SPetar Penkov 		spin_unlock(&queue->lock);
29394317099SPetar Penkov 	}
29494317099SPetar Penkov 
29594317099SPetar Penkov 	return received;
29694317099SPetar Penkov }
29794317099SPetar Penkov 
29894317099SPetar Penkov static int tun_napi_poll(struct napi_struct *napi, int budget)
29994317099SPetar Penkov {
30094317099SPetar Penkov 	unsigned int received;
30194317099SPetar Penkov 
30294317099SPetar Penkov 	received = tun_napi_receive(napi, budget);
30394317099SPetar Penkov 
30494317099SPetar Penkov 	if (received < budget)
30594317099SPetar Penkov 		napi_complete_done(napi, received);
30694317099SPetar Penkov 
30794317099SPetar Penkov 	return received;
30894317099SPetar Penkov }
30994317099SPetar Penkov 
31094317099SPetar Penkov static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
31194317099SPetar Penkov 			  bool napi_en)
31294317099SPetar Penkov {
313aec72f33SEric Dumazet 	tfile->napi_enabled = napi_en;
31494317099SPetar Penkov 	if (napi_en) {
31594317099SPetar Penkov 		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
31694317099SPetar Penkov 			       NAPI_POLL_WEIGHT);
31794317099SPetar Penkov 		napi_enable(&tfile->napi);
31890e33d45SPetar Penkov 		mutex_init(&tfile->napi_mutex);
31994317099SPetar Penkov 	}
32094317099SPetar Penkov }
32194317099SPetar Penkov 
32294317099SPetar Penkov static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
32394317099SPetar Penkov {
324aec72f33SEric Dumazet 	if (tfile->napi_enabled)
32594317099SPetar Penkov 		napi_disable(&tfile->napi);
32694317099SPetar Penkov }
32794317099SPetar Penkov 
32894317099SPetar Penkov static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
32994317099SPetar Penkov {
330aec72f33SEric Dumazet 	if (tfile->napi_enabled)
33194317099SPetar Penkov 		netif_napi_del(&tfile->napi);
33294317099SPetar Penkov }
33394317099SPetar Penkov 
33490e33d45SPetar Penkov static bool tun_napi_frags_enabled(const struct tun_struct *tun)
33590e33d45SPetar Penkov {
33690e33d45SPetar Penkov 	return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
33790e33d45SPetar Penkov }
33890e33d45SPetar Penkov 
3398b8e658bSGreg Kurz #ifdef CONFIG_TUN_VNET_CROSS_LE
3408b8e658bSGreg Kurz static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
3418b8e658bSGreg Kurz {
3428b8e658bSGreg Kurz 	return tun->flags & TUN_VNET_BE ? false :
3438b8e658bSGreg Kurz 		virtio_legacy_is_little_endian();
3448b8e658bSGreg Kurz }
3458b8e658bSGreg Kurz 
3468b8e658bSGreg Kurz static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
3478b8e658bSGreg Kurz {
3488b8e658bSGreg Kurz 	int be = !!(tun->flags & TUN_VNET_BE);
3498b8e658bSGreg Kurz 
3508b8e658bSGreg Kurz 	if (put_user(be, argp))
3518b8e658bSGreg Kurz 		return -EFAULT;
3528b8e658bSGreg Kurz 
3538b8e658bSGreg Kurz 	return 0;
3548b8e658bSGreg Kurz }
3558b8e658bSGreg Kurz 
3568b8e658bSGreg Kurz static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
3578b8e658bSGreg Kurz {
3588b8e658bSGreg Kurz 	int be;
3598b8e658bSGreg Kurz 
3608b8e658bSGreg Kurz 	if (get_user(be, argp))
3618b8e658bSGreg Kurz 		return -EFAULT;
3628b8e658bSGreg Kurz 
3638b8e658bSGreg Kurz 	if (be)
3648b8e658bSGreg Kurz 		tun->flags |= TUN_VNET_BE;
3658b8e658bSGreg Kurz 	else
3668b8e658bSGreg Kurz 		tun->flags &= ~TUN_VNET_BE;
3678b8e658bSGreg Kurz 
3688b8e658bSGreg Kurz 	return 0;
3698b8e658bSGreg Kurz }
3708b8e658bSGreg Kurz #else
3718b8e658bSGreg Kurz static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
3728b8e658bSGreg Kurz {
3738b8e658bSGreg Kurz 	return virtio_legacy_is_little_endian();
3748b8e658bSGreg Kurz }
3758b8e658bSGreg Kurz 
3768b8e658bSGreg Kurz static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
3778b8e658bSGreg Kurz {
3788b8e658bSGreg Kurz 	return -EINVAL;
3798b8e658bSGreg Kurz }
3808b8e658bSGreg Kurz 
3818b8e658bSGreg Kurz static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
3828b8e658bSGreg Kurz {
3838b8e658bSGreg Kurz 	return -EINVAL;
3848b8e658bSGreg Kurz }
3858b8e658bSGreg Kurz #endif /* CONFIG_TUN_VNET_CROSS_LE */
3868b8e658bSGreg Kurz 
38725bd55bbSGreg Kurz static inline bool tun_is_little_endian(struct tun_struct *tun)
38825bd55bbSGreg Kurz {
3897d824109SGreg Kurz 	return tun->flags & TUN_VNET_LE ||
3908b8e658bSGreg Kurz 		tun_legacy_is_little_endian(tun);
39125bd55bbSGreg Kurz }
39225bd55bbSGreg Kurz 
39356f0dcc5SMichael S. Tsirkin static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
39456f0dcc5SMichael S. Tsirkin {
39525bd55bbSGreg Kurz 	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
39656f0dcc5SMichael S. Tsirkin }
39756f0dcc5SMichael S. Tsirkin 
39856f0dcc5SMichael S. Tsirkin static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
39956f0dcc5SMichael S. Tsirkin {
40025bd55bbSGreg Kurz 	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
40156f0dcc5SMichael S. Tsirkin }
40256f0dcc5SMichael S. Tsirkin 
40396442e42SJason Wang static inline u32 tun_hashfn(u32 rxhash)
40496442e42SJason Wang {
40596442e42SJason Wang 	return rxhash & 0x3ff;
40696442e42SJason Wang }
40796442e42SJason Wang 
40896442e42SJason Wang static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
40996442e42SJason Wang {
41096442e42SJason Wang 	struct tun_flow_entry *e;
41196442e42SJason Wang 
412b67bfe0dSSasha Levin 	hlist_for_each_entry_rcu(e, head, hash_link) {
41396442e42SJason Wang 		if (e->rxhash == rxhash)
41496442e42SJason Wang 			return e;
41596442e42SJason Wang 	}
41696442e42SJason Wang 	return NULL;
41796442e42SJason Wang }
41896442e42SJason Wang 
41996442e42SJason Wang static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
42096442e42SJason Wang 					      struct hlist_head *head,
42196442e42SJason Wang 					      u32 rxhash, u16 queue_index)
42296442e42SJason Wang {
4239fdc6befSEric Dumazet 	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
4249fdc6befSEric Dumazet 
42596442e42SJason Wang 	if (e) {
42696442e42SJason Wang 		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
42796442e42SJason Wang 			  rxhash, queue_index);
42896442e42SJason Wang 		e->updated = jiffies;
42996442e42SJason Wang 		e->rxhash = rxhash;
4309bc88939STom Herbert 		e->rps_rxhash = 0;
43196442e42SJason Wang 		e->queue_index = queue_index;
43296442e42SJason Wang 		e->tun = tun;
43396442e42SJason Wang 		hlist_add_head_rcu(&e->hash_link, head);
434b8732fb7SJason Wang 		++tun->flow_count;
43596442e42SJason Wang 	}
43696442e42SJason Wang 	return e;
43796442e42SJason Wang }
43896442e42SJason Wang 
43996442e42SJason Wang static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
44096442e42SJason Wang {
44196442e42SJason Wang 	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
44296442e42SJason Wang 		  e->rxhash, e->queue_index);
44396442e42SJason Wang 	hlist_del_rcu(&e->hash_link);
4449fdc6befSEric Dumazet 	kfree_rcu(e, rcu);
445b8732fb7SJason Wang 	--tun->flow_count;
44696442e42SJason Wang }
44796442e42SJason Wang 
44896442e42SJason Wang static void tun_flow_flush(struct tun_struct *tun)
44996442e42SJason Wang {
45096442e42SJason Wang 	int i;
45196442e42SJason Wang 
45296442e42SJason Wang 	spin_lock_bh(&tun->lock);
45396442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
45496442e42SJason Wang 		struct tun_flow_entry *e;
455b67bfe0dSSasha Levin 		struct hlist_node *n;
45696442e42SJason Wang 
457b67bfe0dSSasha Levin 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
45896442e42SJason Wang 			tun_flow_delete(tun, e);
45996442e42SJason Wang 	}
46096442e42SJason Wang 	spin_unlock_bh(&tun->lock);
46196442e42SJason Wang }
46296442e42SJason Wang 
46396442e42SJason Wang static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
46496442e42SJason Wang {
46596442e42SJason Wang 	int i;
46696442e42SJason Wang 
46796442e42SJason Wang 	spin_lock_bh(&tun->lock);
46896442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
46996442e42SJason Wang 		struct tun_flow_entry *e;
470b67bfe0dSSasha Levin 		struct hlist_node *n;
47196442e42SJason Wang 
472b67bfe0dSSasha Levin 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
47396442e42SJason Wang 			if (e->queue_index == queue_index)
47496442e42SJason Wang 				tun_flow_delete(tun, e);
47596442e42SJason Wang 		}
47696442e42SJason Wang 	}
47796442e42SJason Wang 	spin_unlock_bh(&tun->lock);
47896442e42SJason Wang }
47996442e42SJason Wang 
480e99e88a9SKees Cook static void tun_flow_cleanup(struct timer_list *t)
48196442e42SJason Wang {
482e99e88a9SKees Cook 	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
48396442e42SJason Wang 	unsigned long delay = tun->ageing_time;
48496442e42SJason Wang 	unsigned long next_timer = jiffies + delay;
48596442e42SJason Wang 	unsigned long count = 0;
48696442e42SJason Wang 	int i;
48796442e42SJason Wang 
48896442e42SJason Wang 	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
48996442e42SJason Wang 
4907dbfb4efSEric Dumazet 	spin_lock(&tun->lock);
49196442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
49296442e42SJason Wang 		struct tun_flow_entry *e;
493b67bfe0dSSasha Levin 		struct hlist_node *n;
49496442e42SJason Wang 
495b67bfe0dSSasha Levin 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
49696442e42SJason Wang 			unsigned long this_timer;
49781d98fa4SEric Dumazet 
49896442e42SJason Wang 			this_timer = e->updated + delay;
49981d98fa4SEric Dumazet 			if (time_before_eq(this_timer, jiffies)) {
50096442e42SJason Wang 				tun_flow_delete(tun, e);
50181d98fa4SEric Dumazet 				continue;
50281d98fa4SEric Dumazet 			}
50381d98fa4SEric Dumazet 			count++;
50481d98fa4SEric Dumazet 			if (time_before(this_timer, next_timer))
50596442e42SJason Wang 				next_timer = this_timer;
50696442e42SJason Wang 		}
50796442e42SJason Wang 	}
50896442e42SJason Wang 
50996442e42SJason Wang 	if (count)
51096442e42SJason Wang 		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
5117dbfb4efSEric Dumazet 	spin_unlock(&tun->lock);
51296442e42SJason Wang }
51396442e42SJason Wang 
51449974420SEric Dumazet static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
5159e85722dSJason Wang 			    struct tun_file *tfile)
51696442e42SJason Wang {
51796442e42SJason Wang 	struct hlist_head *head;
51896442e42SJason Wang 	struct tun_flow_entry *e;
51996442e42SJason Wang 	unsigned long delay = tun->ageing_time;
5209e85722dSJason Wang 	u16 queue_index = tfile->queue_index;
52196442e42SJason Wang 
52296442e42SJason Wang 	if (!rxhash)
52396442e42SJason Wang 		return;
52496442e42SJason Wang 	else
52596442e42SJason Wang 		head = &tun->flows[tun_hashfn(rxhash)];
52696442e42SJason Wang 
52796442e42SJason Wang 	rcu_read_lock();
52896442e42SJason Wang 
52996442e42SJason Wang 	e = tun_flow_find(head, rxhash);
53096442e42SJason Wang 	if (likely(e)) {
53196442e42SJason Wang 		/* TODO: keep queueing to old queue until it's empty? */
53296442e42SJason Wang 		e->queue_index = queue_index;
53396442e42SJason Wang 		e->updated = jiffies;
5349bc88939STom Herbert 		sock_rps_record_flow_hash(e->rps_rxhash);
53596442e42SJason Wang 	} else {
53696442e42SJason Wang 		spin_lock_bh(&tun->lock);
537b8732fb7SJason Wang 		if (!tun_flow_find(head, rxhash) &&
538b8732fb7SJason Wang 		    tun->flow_count < MAX_TAP_FLOWS)
53996442e42SJason Wang 			tun_flow_create(tun, head, rxhash, queue_index);
54096442e42SJason Wang 
54196442e42SJason Wang 		if (!timer_pending(&tun->flow_gc_timer))
54296442e42SJason Wang 			mod_timer(&tun->flow_gc_timer,
54396442e42SJason Wang 				  round_jiffies_up(jiffies + delay));
54496442e42SJason Wang 		spin_unlock_bh(&tun->lock);
54596442e42SJason Wang 	}
54696442e42SJason Wang 
54796442e42SJason Wang 	rcu_read_unlock();
54896442e42SJason Wang }
54996442e42SJason Wang 
5509bc88939STom Herbert /**
5519bc88939STom Herbert  * Save the hash received in the stack receive path and update the
5529bc88939STom Herbert  * flow_hash table accordingly.
5539bc88939STom Herbert  */
5549bc88939STom Herbert static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
5559bc88939STom Herbert {
556567e4b79SEric Dumazet 	if (unlikely(e->rps_rxhash != hash))
5579bc88939STom Herbert 		e->rps_rxhash = hash;
5589bc88939STom Herbert }
5599bc88939STom Herbert 
560c8d68e6bSJason Wang /* We try to identify a flow through its rxhash first. The reason that
56192d4ea6eSstephen hemminger  * we do not check rxq no. is because some cards(e.g 82599), chooses
562c8d68e6bSJason Wang  * the rxq based on the txq where the last packet of the flow comes. As
563c8d68e6bSJason Wang  * the userspace application move between processors, we may get a
564c8d68e6bSJason Wang  * different rxq no. here. If we could not get rxhash, then we would
565c8d68e6bSJason Wang  * hope the rxq no. may help here.
566c8d68e6bSJason Wang  */
56796f84061SJason Wang static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
568c8d68e6bSJason Wang {
56996442e42SJason Wang 	struct tun_flow_entry *e;
570c8d68e6bSJason Wang 	u32 txq = 0;
571c8d68e6bSJason Wang 	u32 numqueues = 0;
572c8d68e6bSJason Wang 
5736aa7de05SMark Rutland 	numqueues = READ_ONCE(tun->numqueues);
574c8d68e6bSJason Wang 
575feec084aSJason Wang 	txq = __skb_get_hash_symmetric(skb);
576c8d68e6bSJason Wang 	if (txq) {
57796442e42SJason Wang 		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
5789bc88939STom Herbert 		if (e) {
5799bc88939STom Herbert 			tun_flow_save_rps_rxhash(e, txq);
580fbe4d456SZhi Yong Wu 			txq = e->queue_index;
5819bc88939STom Herbert 		} else
582c8d68e6bSJason Wang 			/* use multiply and shift instead of expensive divide */
583c8d68e6bSJason Wang 			txq = ((u64)txq * numqueues) >> 32;
584c8d68e6bSJason Wang 	} else if (likely(skb_rx_queue_recorded(skb))) {
585c8d68e6bSJason Wang 		txq = skb_get_rx_queue(skb);
586c8d68e6bSJason Wang 		while (unlikely(txq >= numqueues))
587c8d68e6bSJason Wang 			txq -= numqueues;
588c8d68e6bSJason Wang 	}
589c8d68e6bSJason Wang 
590c8d68e6bSJason Wang 	return txq;
591c8d68e6bSJason Wang }
592c8d68e6bSJason Wang 
59396f84061SJason Wang static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
59496f84061SJason Wang {
595cd5681d7SJason Wang 	struct tun_prog *prog;
59696f84061SJason Wang 	u16 ret = 0;
59796f84061SJason Wang 
59896f84061SJason Wang 	prog = rcu_dereference(tun->steering_prog);
59996f84061SJason Wang 	if (prog)
60096f84061SJason Wang 		ret = bpf_prog_run_clear_cb(prog->prog, skb);
60196f84061SJason Wang 
60296f84061SJason Wang 	return ret % tun->numqueues;
60396f84061SJason Wang }
60496f84061SJason Wang 
60596f84061SJason Wang static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
60696f84061SJason Wang 			    void *accel_priv, select_queue_fallback_t fallback)
60796f84061SJason Wang {
60896f84061SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
60996f84061SJason Wang 	u16 ret;
61096f84061SJason Wang 
61196f84061SJason Wang 	rcu_read_lock();
61296f84061SJason Wang 	if (rcu_dereference(tun->steering_prog))
61396f84061SJason Wang 		ret = tun_ebpf_select_queue(tun, skb);
61496f84061SJason Wang 	else
61596f84061SJason Wang 		ret = tun_automq_select_queue(tun, skb);
61696f84061SJason Wang 	rcu_read_unlock();
61796f84061SJason Wang 
61896f84061SJason Wang 	return ret;
61996f84061SJason Wang }
62096f84061SJason Wang 
621cde8b15fSJason Wang static inline bool tun_not_capable(struct tun_struct *tun)
622cde8b15fSJason Wang {
623cde8b15fSJason Wang 	const struct cred *cred = current_cred();
624c260b772SEric W. Biederman 	struct net *net = dev_net(tun->dev);
625cde8b15fSJason Wang 
626cde8b15fSJason Wang 	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
627cde8b15fSJason Wang 		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
628c260b772SEric W. Biederman 		!ns_capable(net->user_ns, CAP_NET_ADMIN);
629cde8b15fSJason Wang }
630cde8b15fSJason Wang 
631c8d68e6bSJason Wang static void tun_set_real_num_queues(struct tun_struct *tun)
632c8d68e6bSJason Wang {
633c8d68e6bSJason Wang 	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
634c8d68e6bSJason Wang 	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
635c8d68e6bSJason Wang }
636c8d68e6bSJason Wang 
6374008e97fSJason Wang static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
6384008e97fSJason Wang {
6394008e97fSJason Wang 	tfile->detached = tun;
6404008e97fSJason Wang 	list_add_tail(&tfile->next, &tun->disabled);
6414008e97fSJason Wang 	++tun->numdisabled;
6424008e97fSJason Wang }
6434008e97fSJason Wang 
644d32649d1SJason Wang static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
6454008e97fSJason Wang {
6464008e97fSJason Wang 	struct tun_struct *tun = tfile->detached;
6474008e97fSJason Wang 
6484008e97fSJason Wang 	tfile->detached = NULL;
6494008e97fSJason Wang 	list_del_init(&tfile->next);
6504008e97fSJason Wang 	--tun->numdisabled;
6514008e97fSJason Wang 	return tun;
6524008e97fSJason Wang }
6534008e97fSJason Wang 
6543a403076SJason Wang void tun_ptr_free(void *ptr)
655fc72d1d5SJason Wang {
656fc72d1d5SJason Wang 	if (!ptr)
657fc72d1d5SJason Wang 		return;
6581ffcbc85SJesper Dangaard Brouer 	if (tun_is_xdp_frame(ptr)) {
6591ffcbc85SJesper Dangaard Brouer 		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
660fc72d1d5SJason Wang 
66103993094SJesper Dangaard Brouer 		xdp_return_frame(xdpf);
662fc72d1d5SJason Wang 	} else {
663fc72d1d5SJason Wang 		__skb_array_destroy_skb(ptr);
664fc72d1d5SJason Wang 	}
665fc72d1d5SJason Wang }
6663a403076SJason Wang EXPORT_SYMBOL_GPL(tun_ptr_free);
667fc72d1d5SJason Wang 
6684bfb0513SJason Wang static void tun_queue_purge(struct tun_file *tfile)
6694bfb0513SJason Wang {
670fc72d1d5SJason Wang 	void *ptr;
6711576d986SJason Wang 
672fc72d1d5SJason Wang 	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
673fc72d1d5SJason Wang 		tun_ptr_free(ptr);
6741576d986SJason Wang 
6755503fcecSJason Wang 	skb_queue_purge(&tfile->sk.sk_write_queue);
6764bfb0513SJason Wang 	skb_queue_purge(&tfile->sk.sk_error_queue);
6774bfb0513SJason Wang }
6784bfb0513SJason Wang 
6798565d26bSDavid S. Miller static void tun_cleanup_tx_ring(struct tun_file *tfile)
6804df0bfc7SCong Wang {
6818565d26bSDavid S. Miller 	if (tfile->tx_ring.queue) {
6828565d26bSDavid S. Miller 		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
6838565d26bSDavid S. Miller 		xdp_rxq_info_unreg(&tfile->xdp_rxq);
6848565d26bSDavid S. Miller 		memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
6854df0bfc7SCong Wang 	}
6864df0bfc7SCong Wang }
6874df0bfc7SCong Wang 
688c8d68e6bSJason Wang static void __tun_detach(struct tun_file *tfile, bool clean)
689c8d68e6bSJason Wang {
690c8d68e6bSJason Wang 	struct tun_file *ntfile;
691c8d68e6bSJason Wang 	struct tun_struct *tun;
692c8d68e6bSJason Wang 
693b8deabd3SJason Wang 	tun = rtnl_dereference(tfile->tun);
694b8deabd3SJason Wang 
69594317099SPetar Penkov 	if (tun && clean) {
69694317099SPetar Penkov 		tun_napi_disable(tun, tfile);
69794317099SPetar Penkov 		tun_napi_del(tun, tfile);
69894317099SPetar Penkov 	}
69994317099SPetar Penkov 
7009e85722dSJason Wang 	if (tun && !tfile->detached) {
701c8d68e6bSJason Wang 		u16 index = tfile->queue_index;
702c8d68e6bSJason Wang 		BUG_ON(index >= tun->numqueues);
703c8d68e6bSJason Wang 
704c8d68e6bSJason Wang 		rcu_assign_pointer(tun->tfiles[index],
705c8d68e6bSJason Wang 				   tun->tfiles[tun->numqueues - 1]);
706b8deabd3SJason Wang 		ntfile = rtnl_dereference(tun->tfiles[index]);
707c8d68e6bSJason Wang 		ntfile->queue_index = index;
708c8d68e6bSJason Wang 
709c8d68e6bSJason Wang 		--tun->numqueues;
7109e85722dSJason Wang 		if (clean) {
711c956674bSMonam Agarwal 			RCU_INIT_POINTER(tfile->tun, NULL);
712c8d68e6bSJason Wang 			sock_put(&tfile->sk);
7139e85722dSJason Wang 		} else
7144008e97fSJason Wang 			tun_disable_queue(tun, tfile);
715c8d68e6bSJason Wang 
716c8d68e6bSJason Wang 		synchronize_net();
71796442e42SJason Wang 		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
718c8d68e6bSJason Wang 		/* Drop read queue */
7194bfb0513SJason Wang 		tun_queue_purge(tfile);
720c8d68e6bSJason Wang 		tun_set_real_num_queues(tun);
721dd38bd85SJason Wang 	} else if (tfile->detached && clean) {
7224008e97fSJason Wang 		tun = tun_enable_queue(tfile);
723dd38bd85SJason Wang 		sock_put(&tfile->sk);
724dd38bd85SJason Wang 	}
725c8d68e6bSJason Wang 
726c8d68e6bSJason Wang 	if (clean) {
727af668b3cSMichael S. Tsirkin 		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
728af668b3cSMichael S. Tsirkin 			netif_carrier_off(tun->dev);
729af668b3cSMichael S. Tsirkin 
73040630b82SMichael S. Tsirkin 			if (!(tun->flags & IFF_PERSIST) &&
731af668b3cSMichael S. Tsirkin 			    tun->dev->reg_state == NETREG_REGISTERED)
7324008e97fSJason Wang 				unregister_netdevice(tun->dev);
733af668b3cSMichael S. Tsirkin 		}
7348565d26bSDavid S. Miller 		tun_cleanup_tx_ring(tfile);
735140e807dSEric W. Biederman 		sock_put(&tfile->sk);
736c8d68e6bSJason Wang 	}
737c8d68e6bSJason Wang }
738c8d68e6bSJason Wang 
739c8d68e6bSJason Wang static void tun_detach(struct tun_file *tfile, bool clean)
740c8d68e6bSJason Wang {
74183c1f36fSSabrina Dubroca 	struct tun_struct *tun;
74283c1f36fSSabrina Dubroca 	struct net_device *dev;
74383c1f36fSSabrina Dubroca 
744c8d68e6bSJason Wang 	rtnl_lock();
74583c1f36fSSabrina Dubroca 	tun = rtnl_dereference(tfile->tun);
74683c1f36fSSabrina Dubroca 	dev = tun ? tun->dev : NULL;
747c8d68e6bSJason Wang 	__tun_detach(tfile, clean);
74883c1f36fSSabrina Dubroca 	if (dev)
74983c1f36fSSabrina Dubroca 		netdev_state_change(dev);
750c8d68e6bSJason Wang 	rtnl_unlock();
751c8d68e6bSJason Wang }
752c8d68e6bSJason Wang 
753c8d68e6bSJason Wang static void tun_detach_all(struct net_device *dev)
754c8d68e6bSJason Wang {
755c8d68e6bSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
7564008e97fSJason Wang 	struct tun_file *tfile, *tmp;
757c8d68e6bSJason Wang 	int i, n = tun->numqueues;
758c8d68e6bSJason Wang 
759c8d68e6bSJason Wang 	for (i = 0; i < n; i++) {
760b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
761c8d68e6bSJason Wang 		BUG_ON(!tfile);
76294317099SPetar Penkov 		tun_napi_disable(tun, tfile);
763addf8fc4SJason Wang 		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
7649e641bdcSXi Wang 		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
765c956674bSMonam Agarwal 		RCU_INIT_POINTER(tfile->tun, NULL);
766c8d68e6bSJason Wang 		--tun->numqueues;
767c8d68e6bSJason Wang 	}
7689e85722dSJason Wang 	list_for_each_entry(tfile, &tun->disabled, next) {
769addf8fc4SJason Wang 		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
7709e641bdcSXi Wang 		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
771c956674bSMonam Agarwal 		RCU_INIT_POINTER(tfile->tun, NULL);
7729e85722dSJason Wang 	}
773c8d68e6bSJason Wang 	BUG_ON(tun->numqueues != 0);
774c8d68e6bSJason Wang 
775c8d68e6bSJason Wang 	synchronize_net();
776c8d68e6bSJason Wang 	for (i = 0; i < n; i++) {
777b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
77894317099SPetar Penkov 		tun_napi_del(tun, tfile);
779c8d68e6bSJason Wang 		/* Drop read queue */
7804bfb0513SJason Wang 		tun_queue_purge(tfile);
781c8d68e6bSJason Wang 		sock_put(&tfile->sk);
7828565d26bSDavid S. Miller 		tun_cleanup_tx_ring(tfile);
783c8d68e6bSJason Wang 	}
7844008e97fSJason Wang 	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
7854008e97fSJason Wang 		tun_enable_queue(tfile);
7864bfb0513SJason Wang 		tun_queue_purge(tfile);
7874008e97fSJason Wang 		sock_put(&tfile->sk);
7888565d26bSDavid S. Miller 		tun_cleanup_tx_ring(tfile);
7894008e97fSJason Wang 	}
7904008e97fSJason Wang 	BUG_ON(tun->numdisabled != 0);
791dd38bd85SJason Wang 
79240630b82SMichael S. Tsirkin 	if (tun->flags & IFF_PERSIST)
793dd38bd85SJason Wang 		module_put(THIS_MODULE);
794c8d68e6bSJason Wang }
795c8d68e6bSJason Wang 
79694317099SPetar Penkov static int tun_attach(struct tun_struct *tun, struct file *file,
79794317099SPetar Penkov 		      bool skip_filter, bool napi)
798a7385ba2SEric W. Biederman {
799631ab46bSEric W. Biederman 	struct tun_file *tfile = file->private_data;
8001576d986SJason Wang 	struct net_device *dev = tun->dev;
80138231b7aSEric W. Biederman 	int err;
802a7385ba2SEric W. Biederman 
8035dbbaf2dSPaul Moore 	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
8045dbbaf2dSPaul Moore 	if (err < 0)
8055dbbaf2dSPaul Moore 		goto out;
8065dbbaf2dSPaul Moore 
80738231b7aSEric W. Biederman 	err = -EINVAL;
8089e85722dSJason Wang 	if (rtnl_dereference(tfile->tun) && !tfile->detached)
80938231b7aSEric W. Biederman 		goto out;
81038231b7aSEric W. Biederman 
81138231b7aSEric W. Biederman 	err = -EBUSY;
81240630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
813c8d68e6bSJason Wang 		goto out;
814c8d68e6bSJason Wang 
815c8d68e6bSJason Wang 	err = -E2BIG;
8164008e97fSJason Wang 	if (!tfile->detached &&
8174008e97fSJason Wang 	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
81838231b7aSEric W. Biederman 		goto out;
81938231b7aSEric W. Biederman 
82038231b7aSEric W. Biederman 	err = 0;
82154f968d6SJason Wang 
82292d4ea6eSstephen hemminger 	/* Re-attach the filter to persist device */
823849c9b6fSPavel Emelyanov 	if (!skip_filter && (tun->filter_attached == true)) {
8248ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
8258ced425eSHannes Frederic Sowa 		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
8268ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
82754f968d6SJason Wang 		if (!err)
82854f968d6SJason Wang 			goto out;
82954f968d6SJason Wang 	}
8301576d986SJason Wang 
8311576d986SJason Wang 	if (!tfile->detached &&
8325990a305SJason Wang 	    ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
8331576d986SJason Wang 		err = -ENOMEM;
8341576d986SJason Wang 		goto out;
8351576d986SJason Wang 	}
8361576d986SJason Wang 
837c8d68e6bSJason Wang 	tfile->queue_index = tun->numqueues;
838addf8fc4SJason Wang 	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
8398bf5c4eeSJesper Dangaard Brouer 
8408bf5c4eeSJesper Dangaard Brouer 	if (tfile->detached) {
8418bf5c4eeSJesper Dangaard Brouer 		/* Re-attach detached tfile, updating XDP queue_index */
8428bf5c4eeSJesper Dangaard Brouer 		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
8438bf5c4eeSJesper Dangaard Brouer 
8448bf5c4eeSJesper Dangaard Brouer 		if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
8458bf5c4eeSJesper Dangaard Brouer 			tfile->xdp_rxq.queue_index = tfile->queue_index;
8468bf5c4eeSJesper Dangaard Brouer 	} else {
8478bf5c4eeSJesper Dangaard Brouer 		/* Setup XDP RX-queue info, for new tfile getting attached */
8488bf5c4eeSJesper Dangaard Brouer 		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
8498bf5c4eeSJesper Dangaard Brouer 				       tun->dev, tfile->queue_index);
8508bf5c4eeSJesper Dangaard Brouer 		if (err < 0)
8518bf5c4eeSJesper Dangaard Brouer 			goto out;
8528d5d8852SJesper Dangaard Brouer 		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
8538d5d8852SJesper Dangaard Brouer 						 MEM_TYPE_PAGE_SHARED, NULL);
8548d5d8852SJesper Dangaard Brouer 		if (err < 0) {
8558d5d8852SJesper Dangaard Brouer 			xdp_rxq_info_unreg(&tfile->xdp_rxq);
8568d5d8852SJesper Dangaard Brouer 			goto out;
8578d5d8852SJesper Dangaard Brouer 		}
8588bf5c4eeSJesper Dangaard Brouer 		err = 0;
8598bf5c4eeSJesper Dangaard Brouer 	}
8608bf5c4eeSJesper Dangaard Brouer 
8616e914fc7SJason Wang 	rcu_assign_pointer(tfile->tun, tun);
862c8d68e6bSJason Wang 	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
863c8d68e6bSJason Wang 	tun->numqueues++;
864c8d68e6bSJason Wang 
86594317099SPetar Penkov 	if (tfile->detached) {
8664008e97fSJason Wang 		tun_enable_queue(tfile);
86794317099SPetar Penkov 	} else {
8684008e97fSJason Wang 		sock_hold(&tfile->sk);
86994317099SPetar Penkov 		tun_napi_init(tun, tfile, napi);
87094317099SPetar Penkov 	}
8714008e97fSJason Wang 
872c8d68e6bSJason Wang 	tun_set_real_num_queues(tun);
873c8d68e6bSJason Wang 
874c8d68e6bSJason Wang 	/* device is allowed to go away first, so no need to hold extra
875c8d68e6bSJason Wang 	 * refcnt.
876c8d68e6bSJason Wang 	 */
877a7385ba2SEric W. Biederman 
87838231b7aSEric W. Biederman out:
87938231b7aSEric W. Biederman 	return err;
880a7385ba2SEric W. Biederman }
881a7385ba2SEric W. Biederman 
8829484dc74Syuan linyu static struct tun_struct *tun_get(struct tun_file *tfile)
883631ab46bSEric W. Biederman {
8846e914fc7SJason Wang 	struct tun_struct *tun;
885c70f1829SEric W. Biederman 
8866e914fc7SJason Wang 	rcu_read_lock();
8876e914fc7SJason Wang 	tun = rcu_dereference(tfile->tun);
8886e914fc7SJason Wang 	if (tun)
8896e914fc7SJason Wang 		dev_hold(tun->dev);
8906e914fc7SJason Wang 	rcu_read_unlock();
891c70f1829SEric W. Biederman 
892c70f1829SEric W. Biederman 	return tun;
893631ab46bSEric W. Biederman }
894631ab46bSEric W. Biederman 
895631ab46bSEric W. Biederman static void tun_put(struct tun_struct *tun)
896631ab46bSEric W. Biederman {
8976e914fc7SJason Wang 	dev_put(tun->dev);
898631ab46bSEric W. Biederman }
899631ab46bSEric W. Biederman 
9006b8a66eeSJoe Perches /* TAP filtering */
901f271b2ccSMax Krasnyansky static void addr_hash_set(u32 *mask, const u8 *addr)
902f271b2ccSMax Krasnyansky {
903f271b2ccSMax Krasnyansky 	int n = ether_crc(ETH_ALEN, addr) >> 26;
904f271b2ccSMax Krasnyansky 	mask[n >> 5] |= (1 << (n & 31));
905f271b2ccSMax Krasnyansky }
906f271b2ccSMax Krasnyansky 
907f271b2ccSMax Krasnyansky static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
908f271b2ccSMax Krasnyansky {
909f271b2ccSMax Krasnyansky 	int n = ether_crc(ETH_ALEN, addr) >> 26;
910f271b2ccSMax Krasnyansky 	return mask[n >> 5] & (1 << (n & 31));
911f271b2ccSMax Krasnyansky }
912f271b2ccSMax Krasnyansky 
913f271b2ccSMax Krasnyansky static int update_filter(struct tap_filter *filter, void __user *arg)
914f271b2ccSMax Krasnyansky {
915f271b2ccSMax Krasnyansky 	struct { u8 u[ETH_ALEN]; } *addr;
916f271b2ccSMax Krasnyansky 	struct tun_filter uf;
917f271b2ccSMax Krasnyansky 	int err, alen, n, nexact;
918f271b2ccSMax Krasnyansky 
919f271b2ccSMax Krasnyansky 	if (copy_from_user(&uf, arg, sizeof(uf)))
920f271b2ccSMax Krasnyansky 		return -EFAULT;
921f271b2ccSMax Krasnyansky 
922f271b2ccSMax Krasnyansky 	if (!uf.count) {
923f271b2ccSMax Krasnyansky 		/* Disabled */
924f271b2ccSMax Krasnyansky 		filter->count = 0;
925f271b2ccSMax Krasnyansky 		return 0;
926f271b2ccSMax Krasnyansky 	}
927f271b2ccSMax Krasnyansky 
928f271b2ccSMax Krasnyansky 	alen = ETH_ALEN * uf.count;
92928e8190dSMarkus Elfring 	addr = memdup_user(arg + sizeof(uf), alen);
93028e8190dSMarkus Elfring 	if (IS_ERR(addr))
93128e8190dSMarkus Elfring 		return PTR_ERR(addr);
932f271b2ccSMax Krasnyansky 
933f271b2ccSMax Krasnyansky 	/* The filter is updated without holding any locks. Which is
934f271b2ccSMax Krasnyansky 	 * perfectly safe. We disable it first and in the worst
935f271b2ccSMax Krasnyansky 	 * case we'll accept a few undesired packets. */
936f271b2ccSMax Krasnyansky 	filter->count = 0;
937f271b2ccSMax Krasnyansky 	wmb();
938f271b2ccSMax Krasnyansky 
939f271b2ccSMax Krasnyansky 	/* Use first set of addresses as an exact filter */
940f271b2ccSMax Krasnyansky 	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
941f271b2ccSMax Krasnyansky 		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
942f271b2ccSMax Krasnyansky 
943f271b2ccSMax Krasnyansky 	nexact = n;
944f271b2ccSMax Krasnyansky 
945cfbf84fcSAlex Williamson 	/* Remaining multicast addresses are hashed,
946cfbf84fcSAlex Williamson 	 * unicast will leave the filter disabled. */
947f271b2ccSMax Krasnyansky 	memset(filter->mask, 0, sizeof(filter->mask));
948cfbf84fcSAlex Williamson 	for (; n < uf.count; n++) {
949cfbf84fcSAlex Williamson 		if (!is_multicast_ether_addr(addr[n].u)) {
950cfbf84fcSAlex Williamson 			err = 0; /* no filter */
9513b8d2a69SMarkus Elfring 			goto free_addr;
952cfbf84fcSAlex Williamson 		}
953f271b2ccSMax Krasnyansky 		addr_hash_set(filter->mask, addr[n].u);
954cfbf84fcSAlex Williamson 	}
955f271b2ccSMax Krasnyansky 
956f271b2ccSMax Krasnyansky 	/* For ALLMULTI just set the mask to all ones.
957f271b2ccSMax Krasnyansky 	 * This overrides the mask populated above. */
958f271b2ccSMax Krasnyansky 	if ((uf.flags & TUN_FLT_ALLMULTI))
959f271b2ccSMax Krasnyansky 		memset(filter->mask, ~0, sizeof(filter->mask));
960f271b2ccSMax Krasnyansky 
961f271b2ccSMax Krasnyansky 	/* Now enable the filter */
962f271b2ccSMax Krasnyansky 	wmb();
963f271b2ccSMax Krasnyansky 	filter->count = nexact;
964f271b2ccSMax Krasnyansky 
965f271b2ccSMax Krasnyansky 	/* Return the number of exact filters */
966f271b2ccSMax Krasnyansky 	err = nexact;
9673b8d2a69SMarkus Elfring free_addr:
968f271b2ccSMax Krasnyansky 	kfree(addr);
969f271b2ccSMax Krasnyansky 	return err;
970f271b2ccSMax Krasnyansky }
971f271b2ccSMax Krasnyansky 
972f271b2ccSMax Krasnyansky /* Returns: 0 - drop, !=0 - accept */
973f271b2ccSMax Krasnyansky static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
974f271b2ccSMax Krasnyansky {
975f271b2ccSMax Krasnyansky 	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
976f271b2ccSMax Krasnyansky 	 * at this point. */
977f271b2ccSMax Krasnyansky 	struct ethhdr *eh = (struct ethhdr *) skb->data;
978f271b2ccSMax Krasnyansky 	int i;
979f271b2ccSMax Krasnyansky 
980f271b2ccSMax Krasnyansky 	/* Exact match */
981f271b2ccSMax Krasnyansky 	for (i = 0; i < filter->count; i++)
9822e42e474SJoe Perches 		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
983f271b2ccSMax Krasnyansky 			return 1;
984f271b2ccSMax Krasnyansky 
985f271b2ccSMax Krasnyansky 	/* Inexact match (multicast only) */
986f271b2ccSMax Krasnyansky 	if (is_multicast_ether_addr(eh->h_dest))
987f271b2ccSMax Krasnyansky 		return addr_hash_test(filter->mask, eh->h_dest);
988f271b2ccSMax Krasnyansky 
989f271b2ccSMax Krasnyansky 	return 0;
990f271b2ccSMax Krasnyansky }
991f271b2ccSMax Krasnyansky 
992f271b2ccSMax Krasnyansky /*
993f271b2ccSMax Krasnyansky  * Checks whether the packet is accepted or not.
994f271b2ccSMax Krasnyansky  * Returns: 0 - drop, !=0 - accept
995f271b2ccSMax Krasnyansky  */
996f271b2ccSMax Krasnyansky static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
997f271b2ccSMax Krasnyansky {
998f271b2ccSMax Krasnyansky 	if (!filter->count)
999f271b2ccSMax Krasnyansky 		return 1;
1000f271b2ccSMax Krasnyansky 
1001f271b2ccSMax Krasnyansky 	return run_filter(filter, skb);
1002f271b2ccSMax Krasnyansky }
1003f271b2ccSMax Krasnyansky 
10041da177e4SLinus Torvalds /* Network device part of the driver */
10051da177e4SLinus Torvalds 
10061da177e4SLinus Torvalds static const struct ethtool_ops tun_ethtool_ops;
10071da177e4SLinus Torvalds 
1008c70f1829SEric W. Biederman /* Net device detach from fd. */
1009c70f1829SEric W. Biederman static void tun_net_uninit(struct net_device *dev)
1010c70f1829SEric W. Biederman {
1011c8d68e6bSJason Wang 	tun_detach_all(dev);
1012c70f1829SEric W. Biederman }
1013c70f1829SEric W. Biederman 
10141da177e4SLinus Torvalds /* Net device open. */
10151da177e4SLinus Torvalds static int tun_net_open(struct net_device *dev)
10161da177e4SLinus Torvalds {
1017b20e2d54SHannes Frederic Sowa 	struct tun_struct *tun = netdev_priv(dev);
1018b20e2d54SHannes Frederic Sowa 	int i;
1019b20e2d54SHannes Frederic Sowa 
1020c8d68e6bSJason Wang 	netif_tx_start_all_queues(dev);
1021b20e2d54SHannes Frederic Sowa 
1022b20e2d54SHannes Frederic Sowa 	for (i = 0; i < tun->numqueues; i++) {
1023b20e2d54SHannes Frederic Sowa 		struct tun_file *tfile;
1024b20e2d54SHannes Frederic Sowa 
1025b20e2d54SHannes Frederic Sowa 		tfile = rtnl_dereference(tun->tfiles[i]);
1026b20e2d54SHannes Frederic Sowa 		tfile->socket.sk->sk_write_space(tfile->socket.sk);
1027b20e2d54SHannes Frederic Sowa 	}
1028b20e2d54SHannes Frederic Sowa 
10291da177e4SLinus Torvalds 	return 0;
10301da177e4SLinus Torvalds }
10311da177e4SLinus Torvalds 
10321da177e4SLinus Torvalds /* Net device close. */
10331da177e4SLinus Torvalds static int tun_net_close(struct net_device *dev)
10341da177e4SLinus Torvalds {
1035c8d68e6bSJason Wang 	netif_tx_stop_all_queues(dev);
10361da177e4SLinus Torvalds 	return 0;
10371da177e4SLinus Torvalds }
10381da177e4SLinus Torvalds 
10391da177e4SLinus Torvalds /* Net device start xmit */
104096f84061SJason Wang static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
10411da177e4SLinus Torvalds {
10423df97ba8SJason Wang #ifdef CONFIG_RPS
104396f84061SJason Wang 	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
10449bc88939STom Herbert 		/* Select queue was not called for the skbuff, so we extract the
10459bc88939STom Herbert 		 * RPS hash and save it into the flow_table here.
10469bc88939STom Herbert 		 */
10479bc88939STom Herbert 		__u32 rxhash;
10489bc88939STom Herbert 
1049feec084aSJason Wang 		rxhash = __skb_get_hash_symmetric(skb);
10509bc88939STom Herbert 		if (rxhash) {
10519bc88939STom Herbert 			struct tun_flow_entry *e;
10529bc88939STom Herbert 			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
10539bc88939STom Herbert 					rxhash);
10549bc88939STom Herbert 			if (e)
10559bc88939STom Herbert 				tun_flow_save_rps_rxhash(e, rxhash);
10569bc88939STom Herbert 		}
10579bc88939STom Herbert 	}
10583df97ba8SJason Wang #endif
105996f84061SJason Wang }
106096f84061SJason Wang 
1061aff3d70aSJason Wang static unsigned int run_ebpf_filter(struct tun_struct *tun,
1062aff3d70aSJason Wang 				    struct sk_buff *skb,
1063aff3d70aSJason Wang 				    int len)
1064aff3d70aSJason Wang {
1065aff3d70aSJason Wang 	struct tun_prog *prog = rcu_dereference(tun->filter_prog);
1066aff3d70aSJason Wang 
1067aff3d70aSJason Wang 	if (prog)
1068aff3d70aSJason Wang 		len = bpf_prog_run_clear_cb(prog->prog, skb);
1069aff3d70aSJason Wang 
1070aff3d70aSJason Wang 	return len;
1071aff3d70aSJason Wang }
1072aff3d70aSJason Wang 
107396f84061SJason Wang /* Net device start xmit */
107496f84061SJason Wang static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
107596f84061SJason Wang {
107696f84061SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
107796f84061SJason Wang 	int txq = skb->queue_mapping;
107896f84061SJason Wang 	struct tun_file *tfile;
1079aff3d70aSJason Wang 	int len = skb->len;
108096f84061SJason Wang 
108196f84061SJason Wang 	rcu_read_lock();
108296f84061SJason Wang 	tfile = rcu_dereference(tun->tfiles[txq]);
108396f84061SJason Wang 
108496f84061SJason Wang 	/* Drop packet if interface is not attached */
1085cc166427SWillem de Bruijn 	if (txq >= tun->numqueues)
108696f84061SJason Wang 		goto drop;
108796f84061SJason Wang 
108896f84061SJason Wang 	if (!rcu_dereference(tun->steering_prog))
108996f84061SJason Wang 		tun_automq_xmit(tun, skb);
10909bc88939STom Herbert 
10916e914fc7SJason Wang 	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
10926e914fc7SJason Wang 
1093c8d68e6bSJason Wang 	BUG_ON(!tfile);
1094c8d68e6bSJason Wang 
1095f271b2ccSMax Krasnyansky 	/* Drop if the filter does not like it.
1096f271b2ccSMax Krasnyansky 	 * This is a noop if the filter is disabled.
1097f271b2ccSMax Krasnyansky 	 * Filter can be enabled only for the TAP devices. */
1098f271b2ccSMax Krasnyansky 	if (!check_filter(&tun->txflt, skb))
1099f271b2ccSMax Krasnyansky 		goto drop;
1100f271b2ccSMax Krasnyansky 
110154f968d6SJason Wang 	if (tfile->socket.sk->sk_filter &&
110254f968d6SJason Wang 	    sk_filter(tfile->socket.sk, skb))
110399405162SMichael S. Tsirkin 		goto drop;
110499405162SMichael S. Tsirkin 
1105aff3d70aSJason Wang 	len = run_ebpf_filter(tun, skb, len);
110681c89507SBjørn Mork 	if (len == 0 || pskb_trim(skb, len))
1107aff3d70aSJason Wang 		goto drop;
1108aff3d70aSJason Wang 
11091f8b977aSWillem de Bruijn 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
11107bf66305SJason Wang 		goto drop;
11117bf66305SJason Wang 
11127b996243SSoheil Hassas Yeganeh 	skb_tx_timestamp(skb);
1113eda29772SRichard Cochran 
11140110d6f2SMichael S. Tsirkin 	/* Orphan the skb - required as we might hang on to it
11157bf66305SJason Wang 	 * for indefinite time.
11167bf66305SJason Wang 	 */
11170110d6f2SMichael S. Tsirkin 	skb_orphan(skb);
11180110d6f2SMichael S. Tsirkin 
1119f8af75f3SEric Dumazet 	nf_reset(skb);
1120f8af75f3SEric Dumazet 
11215990a305SJason Wang 	if (ptr_ring_produce(&tfile->tx_ring, skb))
11221576d986SJason Wang 		goto drop;
11231da177e4SLinus Torvalds 
11241da177e4SLinus Torvalds 	/* Notify and wake up reader process */
112554f968d6SJason Wang 	if (tfile->flags & TUN_FASYNC)
112654f968d6SJason Wang 		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
11279e641bdcSXi Wang 	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
11286e914fc7SJason Wang 
11296e914fc7SJason Wang 	rcu_read_unlock();
11306ed10654SPatrick McHardy 	return NETDEV_TX_OK;
11311da177e4SLinus Torvalds 
11321da177e4SLinus Torvalds drop:
1133608b9977SPaolo Abeni 	this_cpu_inc(tun->pcpu_stats->tx_dropped);
1134149d36f7SMichael S. Tsirkin 	skb_tx_error(skb);
11351da177e4SLinus Torvalds 	kfree_skb(skb);
11366e914fc7SJason Wang 	rcu_read_unlock();
1137baeababbSJason Wang 	return NET_XMIT_DROP;
11381da177e4SLinus Torvalds }
11391da177e4SLinus Torvalds 
1140f271b2ccSMax Krasnyansky static void tun_net_mclist(struct net_device *dev)
11411da177e4SLinus Torvalds {
1142f271b2ccSMax Krasnyansky 	/*
1143f271b2ccSMax Krasnyansky 	 * This callback is supposed to deal with mc filter in
1144f271b2ccSMax Krasnyansky 	 * _rx_ path and has nothing to do with the _tx_ path.
1145f271b2ccSMax Krasnyansky 	 * In rx path we always accept everything userspace gives us.
1146f271b2ccSMax Krasnyansky 	 */
11471da177e4SLinus Torvalds }
11481da177e4SLinus Torvalds 
1149c8f44affSMichał Mirosław static netdev_features_t tun_net_fix_features(struct net_device *dev,
1150c8f44affSMichał Mirosław 	netdev_features_t features)
115188255375SMichał Mirosław {
115288255375SMichał Mirosław 	struct tun_struct *tun = netdev_priv(dev);
115388255375SMichał Mirosław 
115488255375SMichał Mirosław 	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
115588255375SMichał Mirosław }
1156bebd097aSNeil Horman #ifdef CONFIG_NET_POLL_CONTROLLER
1157bebd097aSNeil Horman static void tun_poll_controller(struct net_device *dev)
1158bebd097aSNeil Horman {
1159bebd097aSNeil Horman 	/*
1160bebd097aSNeil Horman 	 * Tun only receives frames when:
1161bebd097aSNeil Horman 	 * 1) the char device endpoint gets data from user space
1162bebd097aSNeil Horman 	 * 2) the tun socket gets a sendmsg call from user space
116394317099SPetar Penkov 	 * If NAPI is not enabled, since both of those are synchronous
116494317099SPetar Penkov 	 * operations, we are guaranteed never to have pending data when we poll
116594317099SPetar Penkov 	 * for it so there is nothing to do here but return.
1166bebd097aSNeil Horman 	 * We need this though so netpoll recognizes us as an interface that
1167bebd097aSNeil Horman 	 * supports polling, which enables bridge devices in virt setups to
1168bebd097aSNeil Horman 	 * still use netconsole
116994317099SPetar Penkov 	 * If NAPI is enabled, however, we need to schedule polling for all
117090e33d45SPetar Penkov 	 * queues unless we are using napi_gro_frags(), which we call in
117190e33d45SPetar Penkov 	 * process context and not in NAPI context.
1172bebd097aSNeil Horman 	 */
117394317099SPetar Penkov 	struct tun_struct *tun = netdev_priv(dev);
117494317099SPetar Penkov 
117594317099SPetar Penkov 	if (tun->flags & IFF_NAPI) {
117694317099SPetar Penkov 		struct tun_file *tfile;
117794317099SPetar Penkov 		int i;
117894317099SPetar Penkov 
117990e33d45SPetar Penkov 		if (tun_napi_frags_enabled(tun))
118090e33d45SPetar Penkov 			return;
118190e33d45SPetar Penkov 
118294317099SPetar Penkov 		rcu_read_lock();
118394317099SPetar Penkov 		for (i = 0; i < tun->numqueues; i++) {
118494317099SPetar Penkov 			tfile = rcu_dereference(tun->tfiles[i]);
1185aec72f33SEric Dumazet 			if (tfile->napi_enabled)
118694317099SPetar Penkov 				napi_schedule(&tfile->napi);
118794317099SPetar Penkov 		}
118894317099SPetar Penkov 		rcu_read_unlock();
118994317099SPetar Penkov 	}
1190bebd097aSNeil Horman 	return;
1191bebd097aSNeil Horman }
1192bebd097aSNeil Horman #endif
1193eaea34b2SPaolo Abeni 
1194eaea34b2SPaolo Abeni static void tun_set_headroom(struct net_device *dev, int new_hr)
1195eaea34b2SPaolo Abeni {
1196eaea34b2SPaolo Abeni 	struct tun_struct *tun = netdev_priv(dev);
1197eaea34b2SPaolo Abeni 
1198eaea34b2SPaolo Abeni 	if (new_hr < NET_SKB_PAD)
1199eaea34b2SPaolo Abeni 		new_hr = NET_SKB_PAD;
1200eaea34b2SPaolo Abeni 
1201eaea34b2SPaolo Abeni 	tun->align = new_hr;
1202eaea34b2SPaolo Abeni }
1203eaea34b2SPaolo Abeni 
1204bc1f4470Sstephen hemminger static void
1205608b9977SPaolo Abeni tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1206608b9977SPaolo Abeni {
1207608b9977SPaolo Abeni 	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
1208608b9977SPaolo Abeni 	struct tun_struct *tun = netdev_priv(dev);
1209608b9977SPaolo Abeni 	struct tun_pcpu_stats *p;
1210608b9977SPaolo Abeni 	int i;
1211608b9977SPaolo Abeni 
1212608b9977SPaolo Abeni 	for_each_possible_cpu(i) {
1213608b9977SPaolo Abeni 		u64 rxpackets, rxbytes, txpackets, txbytes;
1214608b9977SPaolo Abeni 		unsigned int start;
1215608b9977SPaolo Abeni 
1216608b9977SPaolo Abeni 		p = per_cpu_ptr(tun->pcpu_stats, i);
1217608b9977SPaolo Abeni 		do {
1218608b9977SPaolo Abeni 			start = u64_stats_fetch_begin(&p->syncp);
1219608b9977SPaolo Abeni 			rxpackets	= p->rx_packets;
1220608b9977SPaolo Abeni 			rxbytes		= p->rx_bytes;
1221608b9977SPaolo Abeni 			txpackets	= p->tx_packets;
1222608b9977SPaolo Abeni 			txbytes		= p->tx_bytes;
1223608b9977SPaolo Abeni 		} while (u64_stats_fetch_retry(&p->syncp, start));
1224608b9977SPaolo Abeni 
1225608b9977SPaolo Abeni 		stats->rx_packets	+= rxpackets;
1226608b9977SPaolo Abeni 		stats->rx_bytes		+= rxbytes;
1227608b9977SPaolo Abeni 		stats->tx_packets	+= txpackets;
1228608b9977SPaolo Abeni 		stats->tx_bytes		+= txbytes;
1229608b9977SPaolo Abeni 
1230608b9977SPaolo Abeni 		/* u32 counters */
1231608b9977SPaolo Abeni 		rx_dropped	+= p->rx_dropped;
1232608b9977SPaolo Abeni 		rx_frame_errors	+= p->rx_frame_errors;
1233608b9977SPaolo Abeni 		tx_dropped	+= p->tx_dropped;
1234608b9977SPaolo Abeni 	}
1235608b9977SPaolo Abeni 	stats->rx_dropped  = rx_dropped;
1236608b9977SPaolo Abeni 	stats->rx_frame_errors = rx_frame_errors;
1237608b9977SPaolo Abeni 	stats->tx_dropped = tx_dropped;
1238608b9977SPaolo Abeni }
1239608b9977SPaolo Abeni 
1240761876c8SJason Wang static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1241761876c8SJason Wang 		       struct netlink_ext_ack *extack)
1242761876c8SJason Wang {
1243761876c8SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1244761876c8SJason Wang 	struct bpf_prog *old_prog;
1245761876c8SJason Wang 
1246761876c8SJason Wang 	old_prog = rtnl_dereference(tun->xdp_prog);
1247761876c8SJason Wang 	rcu_assign_pointer(tun->xdp_prog, prog);
1248761876c8SJason Wang 	if (old_prog)
1249761876c8SJason Wang 		bpf_prog_put(old_prog);
1250761876c8SJason Wang 
1251761876c8SJason Wang 	return 0;
1252761876c8SJason Wang }
1253761876c8SJason Wang 
1254761876c8SJason Wang static u32 tun_xdp_query(struct net_device *dev)
1255761876c8SJason Wang {
1256761876c8SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1257761876c8SJason Wang 	const struct bpf_prog *xdp_prog;
1258761876c8SJason Wang 
1259761876c8SJason Wang 	xdp_prog = rtnl_dereference(tun->xdp_prog);
1260761876c8SJason Wang 	if (xdp_prog)
1261761876c8SJason Wang 		return xdp_prog->aux->id;
1262761876c8SJason Wang 
1263761876c8SJason Wang 	return 0;
1264761876c8SJason Wang }
1265761876c8SJason Wang 
1266f4e63525SJakub Kicinski static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1267761876c8SJason Wang {
1268761876c8SJason Wang 	switch (xdp->command) {
1269761876c8SJason Wang 	case XDP_SETUP_PROG:
1270761876c8SJason Wang 		return tun_xdp_set(dev, xdp->prog, xdp->extack);
1271761876c8SJason Wang 	case XDP_QUERY_PROG:
1272761876c8SJason Wang 		xdp->prog_id = tun_xdp_query(dev);
1273761876c8SJason Wang 		xdp->prog_attached = !!xdp->prog_id;
1274761876c8SJason Wang 		return 0;
1275761876c8SJason Wang 	default:
1276761876c8SJason Wang 		return -EINVAL;
1277761876c8SJason Wang 	}
1278761876c8SJason Wang }
1279761876c8SJason Wang 
1280758e43b7SStephen Hemminger static const struct net_device_ops tun_netdev_ops = {
1281c70f1829SEric W. Biederman 	.ndo_uninit		= tun_net_uninit,
1282758e43b7SStephen Hemminger 	.ndo_open		= tun_net_open,
1283758e43b7SStephen Hemminger 	.ndo_stop		= tun_net_close,
128400829823SStephen Hemminger 	.ndo_start_xmit		= tun_net_xmit,
128588255375SMichał Mirosław 	.ndo_fix_features	= tun_net_fix_features,
1286c8d68e6bSJason Wang 	.ndo_select_queue	= tun_select_queue,
1287bebd097aSNeil Horman #ifdef CONFIG_NET_POLL_CONTROLLER
1288bebd097aSNeil Horman 	.ndo_poll_controller	= tun_poll_controller,
1289bebd097aSNeil Horman #endif
1290eaea34b2SPaolo Abeni 	.ndo_set_rx_headroom	= tun_set_headroom,
1291608b9977SPaolo Abeni 	.ndo_get_stats64	= tun_net_get_stats64,
1292758e43b7SStephen Hemminger };
1293758e43b7SStephen Hemminger 
1294*735fc405SJesper Dangaard Brouer static int tun_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames)
1295fc72d1d5SJason Wang {
1296fc72d1d5SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1297fc72d1d5SJason Wang 	struct tun_file *tfile;
1298fc72d1d5SJason Wang 	u32 numqueues;
1299*735fc405SJesper Dangaard Brouer 	int drops = 0;
1300*735fc405SJesper Dangaard Brouer 	int cnt = n;
1301*735fc405SJesper Dangaard Brouer 	int i;
1302fc72d1d5SJason Wang 
1303fc72d1d5SJason Wang 	rcu_read_lock();
1304fc72d1d5SJason Wang 
1305fc72d1d5SJason Wang 	numqueues = READ_ONCE(tun->numqueues);
1306fc72d1d5SJason Wang 	if (!numqueues) {
1307*735fc405SJesper Dangaard Brouer 		rcu_read_unlock();
1308*735fc405SJesper Dangaard Brouer 		return -ENXIO; /* Caller will free/return all frames */
1309fc72d1d5SJason Wang 	}
1310fc72d1d5SJason Wang 
1311fc72d1d5SJason Wang 	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1312fc72d1d5SJason Wang 					    numqueues]);
1313*735fc405SJesper Dangaard Brouer 
1314*735fc405SJesper Dangaard Brouer 	spin_lock(&tfile->tx_ring.producer_lock);
1315*735fc405SJesper Dangaard Brouer 	for (i = 0; i < n; i++) {
1316*735fc405SJesper Dangaard Brouer 		struct xdp_frame *xdp = frames[i];
1317fc72d1d5SJason Wang 		/* Encode the XDP flag into lowest bit for consumer to differ
1318fc72d1d5SJason Wang 		 * XDP buffer from sk_buff.
1319fc72d1d5SJason Wang 		 */
1320*735fc405SJesper Dangaard Brouer 		void *frame = tun_xdp_to_ptr(xdp);
1321fc72d1d5SJason Wang 
1322*735fc405SJesper Dangaard Brouer 		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
1323*735fc405SJesper Dangaard Brouer 			this_cpu_inc(tun->pcpu_stats->tx_dropped);
1324*735fc405SJesper Dangaard Brouer 			xdp_return_frame_rx_napi(xdp);
1325*735fc405SJesper Dangaard Brouer 			drops++;
1326*735fc405SJesper Dangaard Brouer 		}
1327*735fc405SJesper Dangaard Brouer 	}
1328*735fc405SJesper Dangaard Brouer 	spin_unlock(&tfile->tx_ring.producer_lock);
1329*735fc405SJesper Dangaard Brouer 
1330fc72d1d5SJason Wang 	rcu_read_unlock();
1331*735fc405SJesper Dangaard Brouer 	return cnt - drops;
1332fc72d1d5SJason Wang }
1333fc72d1d5SJason Wang 
133444fa2dbdSJesper Dangaard Brouer static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
133544fa2dbdSJesper Dangaard Brouer {
133644fa2dbdSJesper Dangaard Brouer 	struct xdp_frame *frame = convert_to_xdp_frame(xdp);
133744fa2dbdSJesper Dangaard Brouer 
133844fa2dbdSJesper Dangaard Brouer 	if (unlikely(!frame))
133944fa2dbdSJesper Dangaard Brouer 		return -EOVERFLOW;
134044fa2dbdSJesper Dangaard Brouer 
1341*735fc405SJesper Dangaard Brouer 	return tun_xdp_xmit(dev, 1, &frame);
134244fa2dbdSJesper Dangaard Brouer }
134344fa2dbdSJesper Dangaard Brouer 
1344fc72d1d5SJason Wang static void tun_xdp_flush(struct net_device *dev)
1345fc72d1d5SJason Wang {
1346fc72d1d5SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1347fc72d1d5SJason Wang 	struct tun_file *tfile;
1348fc72d1d5SJason Wang 	u32 numqueues;
1349fc72d1d5SJason Wang 
1350fc72d1d5SJason Wang 	rcu_read_lock();
1351fc72d1d5SJason Wang 
1352fc72d1d5SJason Wang 	numqueues = READ_ONCE(tun->numqueues);
1353fc72d1d5SJason Wang 	if (!numqueues)
1354fc72d1d5SJason Wang 		goto out;
1355fc72d1d5SJason Wang 
1356fc72d1d5SJason Wang 	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1357fc72d1d5SJason Wang 					    numqueues]);
1358fc72d1d5SJason Wang 	/* Notify and wake up reader process */
1359fc72d1d5SJason Wang 	if (tfile->flags & TUN_FASYNC)
1360fc72d1d5SJason Wang 		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1361fc72d1d5SJason Wang 	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1362fc72d1d5SJason Wang 
1363fc72d1d5SJason Wang out:
1364fc72d1d5SJason Wang 	rcu_read_unlock();
1365fc72d1d5SJason Wang }
1366fc72d1d5SJason Wang 
1367758e43b7SStephen Hemminger static const struct net_device_ops tap_netdev_ops = {
1368c70f1829SEric W. Biederman 	.ndo_uninit		= tun_net_uninit,
1369758e43b7SStephen Hemminger 	.ndo_open		= tun_net_open,
1370758e43b7SStephen Hemminger 	.ndo_stop		= tun_net_close,
137100829823SStephen Hemminger 	.ndo_start_xmit		= tun_net_xmit,
137288255375SMichał Mirosław 	.ndo_fix_features	= tun_net_fix_features,
1373afc4b13dSJiri Pirko 	.ndo_set_rx_mode	= tun_net_mclist,
1374758e43b7SStephen Hemminger 	.ndo_set_mac_address	= eth_mac_addr,
1375758e43b7SStephen Hemminger 	.ndo_validate_addr	= eth_validate_addr,
1376c8d68e6bSJason Wang 	.ndo_select_queue	= tun_select_queue,
1377bebd097aSNeil Horman #ifdef CONFIG_NET_POLL_CONTROLLER
1378bebd097aSNeil Horman 	.ndo_poll_controller	= tun_poll_controller,
1379bebd097aSNeil Horman #endif
13805e52796aSToshiaki Makita 	.ndo_features_check	= passthru_features_check,
1381eaea34b2SPaolo Abeni 	.ndo_set_rx_headroom	= tun_set_headroom,
1382608b9977SPaolo Abeni 	.ndo_get_stats64	= tun_net_get_stats64,
1383f4e63525SJakub Kicinski 	.ndo_bpf		= tun_xdp,
1384fc72d1d5SJason Wang 	.ndo_xdp_xmit		= tun_xdp_xmit,
1385fc72d1d5SJason Wang 	.ndo_xdp_flush		= tun_xdp_flush,
1386758e43b7SStephen Hemminger };
1387758e43b7SStephen Hemminger 
1388944a1376SPavel Emelyanov static void tun_flow_init(struct tun_struct *tun)
138996442e42SJason Wang {
139096442e42SJason Wang 	int i;
139196442e42SJason Wang 
139296442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
139396442e42SJason Wang 		INIT_HLIST_HEAD(&tun->flows[i]);
139496442e42SJason Wang 
139596442e42SJason Wang 	tun->ageing_time = TUN_FLOW_EXPIRE;
1396e99e88a9SKees Cook 	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1397e99e88a9SKees Cook 	mod_timer(&tun->flow_gc_timer,
1398e99e88a9SKees Cook 		  round_jiffies_up(jiffies + tun->ageing_time));
139996442e42SJason Wang }
140096442e42SJason Wang 
140196442e42SJason Wang static void tun_flow_uninit(struct tun_struct *tun)
140296442e42SJason Wang {
140396442e42SJason Wang 	del_timer_sync(&tun->flow_gc_timer);
140496442e42SJason Wang 	tun_flow_flush(tun);
140596442e42SJason Wang }
140696442e42SJason Wang 
140791572088SJarod Wilson #define MIN_MTU 68
140891572088SJarod Wilson #define MAX_MTU 65535
140991572088SJarod Wilson 
14101da177e4SLinus Torvalds /* Initialize net device. */
14111da177e4SLinus Torvalds static void tun_net_init(struct net_device *dev)
14121da177e4SLinus Torvalds {
14131da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
14141da177e4SLinus Torvalds 
14151da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
141640630b82SMichael S. Tsirkin 	case IFF_TUN:
1417758e43b7SStephen Hemminger 		dev->netdev_ops = &tun_netdev_ops;
1418758e43b7SStephen Hemminger 
14191da177e4SLinus Torvalds 		/* Point-to-Point TUN Device */
14201da177e4SLinus Torvalds 		dev->hard_header_len = 0;
14211da177e4SLinus Torvalds 		dev->addr_len = 0;
14221da177e4SLinus Torvalds 		dev->mtu = 1500;
14231da177e4SLinus Torvalds 
14241da177e4SLinus Torvalds 		/* Zero header length */
14251da177e4SLinus Torvalds 		dev->type = ARPHRD_NONE;
14261da177e4SLinus Torvalds 		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
14271da177e4SLinus Torvalds 		break;
14281da177e4SLinus Torvalds 
142940630b82SMichael S. Tsirkin 	case IFF_TAP:
14307a0a9608SKusanagi Kouichi 		dev->netdev_ops = &tap_netdev_ops;
14311da177e4SLinus Torvalds 		/* Ethernet TAP Device */
14321da177e4SLinus Torvalds 		ether_setup(dev);
1433550fd08cSNeil Horman 		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1434a676847bSstephen hemminger 		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
143536226a8dSBrian Braunstein 
1436f2cedb63SDanny Kukawka 		eth_hw_addr_random(dev);
143736226a8dSBrian Braunstein 
14381da177e4SLinus Torvalds 		break;
14391da177e4SLinus Torvalds 	}
144091572088SJarod Wilson 
144191572088SJarod Wilson 	dev->min_mtu = MIN_MTU;
144291572088SJarod Wilson 	dev->max_mtu = MAX_MTU - dev->hard_header_len;
14431da177e4SLinus Torvalds }
14441da177e4SLinus Torvalds 
14451da177e4SLinus Torvalds /* Character device part */
14461da177e4SLinus Torvalds 
14471da177e4SLinus Torvalds /* Poll */
1448afc9a42bSAl Viro static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
14491da177e4SLinus Torvalds {
1450b2430de3SEric W. Biederman 	struct tun_file *tfile = file->private_data;
14519484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
14523c8a9c63SMariusz Kozlowski 	struct sock *sk;
1453afc9a42bSAl Viro 	__poll_t mask = 0;
14541da177e4SLinus Torvalds 
14551da177e4SLinus Torvalds 	if (!tun)
1456a9a08845SLinus Torvalds 		return EPOLLERR;
14571da177e4SLinus Torvalds 
145854f968d6SJason Wang 	sk = tfile->socket.sk;
14593c8a9c63SMariusz Kozlowski 
14606b8a66eeSJoe Perches 	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
14611da177e4SLinus Torvalds 
14629e641bdcSXi Wang 	poll_wait(file, sk_sleep(sk), wait);
14631da177e4SLinus Torvalds 
14645990a305SJason Wang 	if (!ptr_ring_empty(&tfile->tx_ring))
1465a9a08845SLinus Torvalds 		mask |= EPOLLIN | EPOLLRDNORM;
14661da177e4SLinus Torvalds 
1467b20e2d54SHannes Frederic Sowa 	if (tun->dev->flags & IFF_UP &&
1468b20e2d54SHannes Frederic Sowa 	    (sock_writeable(sk) ||
14699cd3e072SEric Dumazet 	     (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1470b20e2d54SHannes Frederic Sowa 	      sock_writeable(sk))))
1471a9a08845SLinus Torvalds 		mask |= EPOLLOUT | EPOLLWRNORM;
147233dccbb0SHerbert Xu 
1473c70f1829SEric W. Biederman 	if (tun->dev->reg_state != NETREG_REGISTERED)
1474a9a08845SLinus Torvalds 		mask = EPOLLERR;
1475c70f1829SEric W. Biederman 
1476631ab46bSEric W. Biederman 	tun_put(tun);
14771da177e4SLinus Torvalds 	return mask;
14781da177e4SLinus Torvalds }
14791da177e4SLinus Torvalds 
148090e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
148190e33d45SPetar Penkov 					    size_t len,
148290e33d45SPetar Penkov 					    const struct iov_iter *it)
148390e33d45SPetar Penkov {
148490e33d45SPetar Penkov 	struct sk_buff *skb;
148590e33d45SPetar Penkov 	size_t linear;
148690e33d45SPetar Penkov 	int err;
148790e33d45SPetar Penkov 	int i;
148890e33d45SPetar Penkov 
148990e33d45SPetar Penkov 	if (it->nr_segs > MAX_SKB_FRAGS + 1)
149090e33d45SPetar Penkov 		return ERR_PTR(-ENOMEM);
149190e33d45SPetar Penkov 
149290e33d45SPetar Penkov 	local_bh_disable();
149390e33d45SPetar Penkov 	skb = napi_get_frags(&tfile->napi);
149490e33d45SPetar Penkov 	local_bh_enable();
149590e33d45SPetar Penkov 	if (!skb)
149690e33d45SPetar Penkov 		return ERR_PTR(-ENOMEM);
149790e33d45SPetar Penkov 
149890e33d45SPetar Penkov 	linear = iov_iter_single_seg_count(it);
149990e33d45SPetar Penkov 	err = __skb_grow(skb, linear);
150090e33d45SPetar Penkov 	if (err)
150190e33d45SPetar Penkov 		goto free;
150290e33d45SPetar Penkov 
150390e33d45SPetar Penkov 	skb->len = len;
150490e33d45SPetar Penkov 	skb->data_len = len - linear;
150590e33d45SPetar Penkov 	skb->truesize += skb->data_len;
150690e33d45SPetar Penkov 
150790e33d45SPetar Penkov 	for (i = 1; i < it->nr_segs; i++) {
150843a08e0fSEric Dumazet 		struct page_frag *pfrag = &current->task_frag;
150990e33d45SPetar Penkov 		size_t fragsz = it->iov[i].iov_len;
151090e33d45SPetar Penkov 
151190e33d45SPetar Penkov 		if (fragsz == 0 || fragsz > PAGE_SIZE) {
151290e33d45SPetar Penkov 			err = -EINVAL;
151390e33d45SPetar Penkov 			goto free;
151490e33d45SPetar Penkov 		}
151590e33d45SPetar Penkov 
151643a08e0fSEric Dumazet 		if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
151790e33d45SPetar Penkov 			err = -ENOMEM;
151890e33d45SPetar Penkov 			goto free;
151990e33d45SPetar Penkov 		}
152090e33d45SPetar Penkov 
152143a08e0fSEric Dumazet 		skb_fill_page_desc(skb, i - 1, pfrag->page,
152243a08e0fSEric Dumazet 				   pfrag->offset, fragsz);
152343a08e0fSEric Dumazet 		page_ref_inc(pfrag->page);
152443a08e0fSEric Dumazet 		pfrag->offset += fragsz;
152590e33d45SPetar Penkov 	}
152690e33d45SPetar Penkov 
152790e33d45SPetar Penkov 	return skb;
152890e33d45SPetar Penkov free:
152990e33d45SPetar Penkov 	/* frees skb and all frags allocated with napi_alloc_frag() */
153090e33d45SPetar Penkov 	napi_free_frags(&tfile->napi);
153190e33d45SPetar Penkov 	return ERR_PTR(err);
153290e33d45SPetar Penkov }
153390e33d45SPetar Penkov 
1534f42157cbSRusty Russell /* prepad is the amount to reserve at front.  len is length after that.
1535f42157cbSRusty Russell  * linear is a hint as to how much to copy (usually headers). */
153654f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
153733dccbb0SHerbert Xu 				     size_t prepad, size_t len,
153833dccbb0SHerbert Xu 				     size_t linear, int noblock)
1539f42157cbSRusty Russell {
154054f968d6SJason Wang 	struct sock *sk = tfile->socket.sk;
1541f42157cbSRusty Russell 	struct sk_buff *skb;
154233dccbb0SHerbert Xu 	int err;
1543f42157cbSRusty Russell 
1544f42157cbSRusty Russell 	/* Under a page?  Don't bother with paged skb. */
15450eca93bcSHerbert Xu 	if (prepad + len < PAGE_SIZE || !linear)
154633dccbb0SHerbert Xu 		linear = len;
1547f42157cbSRusty Russell 
154833dccbb0SHerbert Xu 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
154928d64271SEric Dumazet 				   &err, 0);
1550f42157cbSRusty Russell 	if (!skb)
155133dccbb0SHerbert Xu 		return ERR_PTR(err);
1552f42157cbSRusty Russell 
1553f42157cbSRusty Russell 	skb_reserve(skb, prepad);
1554f42157cbSRusty Russell 	skb_put(skb, linear);
155533dccbb0SHerbert Xu 	skb->data_len = len - linear;
155633dccbb0SHerbert Xu 	skb->len += len - linear;
1557f42157cbSRusty Russell 
1558f42157cbSRusty Russell 	return skb;
1559f42157cbSRusty Russell }
1560f42157cbSRusty Russell 
15615503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
15625503fcecSJason Wang 			   struct sk_buff *skb, int more)
15635503fcecSJason Wang {
15645503fcecSJason Wang 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
15655503fcecSJason Wang 	struct sk_buff_head process_queue;
15665503fcecSJason Wang 	u32 rx_batched = tun->rx_batched;
15675503fcecSJason Wang 	bool rcv = false;
15685503fcecSJason Wang 
15695503fcecSJason Wang 	if (!rx_batched || (!more && skb_queue_empty(queue))) {
15705503fcecSJason Wang 		local_bh_disable();
15715503fcecSJason Wang 		netif_receive_skb(skb);
15725503fcecSJason Wang 		local_bh_enable();
15735503fcecSJason Wang 		return;
15745503fcecSJason Wang 	}
15755503fcecSJason Wang 
15765503fcecSJason Wang 	spin_lock(&queue->lock);
15775503fcecSJason Wang 	if (!more || skb_queue_len(queue) == rx_batched) {
15785503fcecSJason Wang 		__skb_queue_head_init(&process_queue);
15795503fcecSJason Wang 		skb_queue_splice_tail_init(queue, &process_queue);
15805503fcecSJason Wang 		rcv = true;
15815503fcecSJason Wang 	} else {
15825503fcecSJason Wang 		__skb_queue_tail(queue, skb);
15835503fcecSJason Wang 	}
15845503fcecSJason Wang 	spin_unlock(&queue->lock);
15855503fcecSJason Wang 
15865503fcecSJason Wang 	if (rcv) {
15875503fcecSJason Wang 		struct sk_buff *nskb;
15885503fcecSJason Wang 
15895503fcecSJason Wang 		local_bh_disable();
15905503fcecSJason Wang 		while ((nskb = __skb_dequeue(&process_queue)))
15915503fcecSJason Wang 			netif_receive_skb(nskb);
15925503fcecSJason Wang 		netif_receive_skb(skb);
15935503fcecSJason Wang 		local_bh_enable();
15945503fcecSJason Wang 	}
15955503fcecSJason Wang }
15965503fcecSJason Wang 
159766ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
159866ccbc9cSJason Wang 			      int len, int noblock, bool zerocopy)
159966ccbc9cSJason Wang {
160066ccbc9cSJason Wang 	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
160166ccbc9cSJason Wang 		return false;
160266ccbc9cSJason Wang 
160366ccbc9cSJason Wang 	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
160466ccbc9cSJason Wang 		return false;
160566ccbc9cSJason Wang 
160666ccbc9cSJason Wang 	if (!noblock)
160766ccbc9cSJason Wang 		return false;
160866ccbc9cSJason Wang 
160966ccbc9cSJason Wang 	if (zerocopy)
161066ccbc9cSJason Wang 		return false;
161166ccbc9cSJason Wang 
161266ccbc9cSJason Wang 	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
161366ccbc9cSJason Wang 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
161466ccbc9cSJason Wang 		return false;
161566ccbc9cSJason Wang 
161666ccbc9cSJason Wang 	return true;
161766ccbc9cSJason Wang }
161866ccbc9cSJason Wang 
1619761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1620761876c8SJason Wang 				     struct tun_file *tfile,
162166ccbc9cSJason Wang 				     struct iov_iter *from,
1622761876c8SJason Wang 				     struct virtio_net_hdr *hdr,
16231cfe6e93SJason Wang 				     int len, int *skb_xdp)
162466ccbc9cSJason Wang {
16250bbd7dadSEric Dumazet 	struct page_frag *alloc_frag = &current->task_frag;
162666ccbc9cSJason Wang 	struct sk_buff *skb;
1627761876c8SJason Wang 	struct bpf_prog *xdp_prog;
16287df13219SJason Wang 	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1629761876c8SJason Wang 	unsigned int delta = 0;
163066ccbc9cSJason Wang 	char *buf;
163166ccbc9cSJason Wang 	size_t copied;
16327df13219SJason Wang 	int err, pad = TUN_RX_PAD;
16337df13219SJason Wang 
16347df13219SJason Wang 	rcu_read_lock();
16357df13219SJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
16367df13219SJason Wang 	if (xdp_prog)
16377df13219SJason Wang 		pad += TUN_HEADROOM;
16387df13219SJason Wang 	buflen += SKB_DATA_ALIGN(len + pad);
16397df13219SJason Wang 	rcu_read_unlock();
164066ccbc9cSJason Wang 
164163b9ab65SJason Wang 	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
164266ccbc9cSJason Wang 	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
164366ccbc9cSJason Wang 		return ERR_PTR(-ENOMEM);
164466ccbc9cSJason Wang 
164566ccbc9cSJason Wang 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
164666ccbc9cSJason Wang 	copied = copy_page_from_iter(alloc_frag->page,
16477df13219SJason Wang 				     alloc_frag->offset + pad,
164866ccbc9cSJason Wang 				     len, from);
164966ccbc9cSJason Wang 	if (copied != len)
165066ccbc9cSJason Wang 		return ERR_PTR(-EFAULT);
165166ccbc9cSJason Wang 
16527df13219SJason Wang 	/* There's a small window that XDP may be set after the check
16537df13219SJason Wang 	 * of xdp_prog above, this should be rare and for simplicity
16547df13219SJason Wang 	 * we do XDP on skb in case the headroom is not enough.
16557df13219SJason Wang 	 */
16567df13219SJason Wang 	if (hdr->gso_type || !xdp_prog)
16571cfe6e93SJason Wang 		*skb_xdp = 1;
1658761876c8SJason Wang 	else
16591cfe6e93SJason Wang 		*skb_xdp = 0;
166066ccbc9cSJason Wang 
166123e43f07SJason Wang 	preempt_disable();
1662761876c8SJason Wang 	rcu_read_lock();
1663761876c8SJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
16641cfe6e93SJason Wang 	if (xdp_prog && !*skb_xdp) {
1665761876c8SJason Wang 		struct xdp_buff xdp;
1666761876c8SJason Wang 		void *orig_data;
1667761876c8SJason Wang 		u32 act;
1668761876c8SJason Wang 
1669761876c8SJason Wang 		xdp.data_hard_start = buf;
16707df13219SJason Wang 		xdp.data = buf + pad;
1671de8f3a83SDaniel Borkmann 		xdp_set_data_meta_invalid(&xdp);
1672761876c8SJason Wang 		xdp.data_end = xdp.data + len;
16738bf5c4eeSJesper Dangaard Brouer 		xdp.rxq = &tfile->xdp_rxq;
1674761876c8SJason Wang 		orig_data = xdp.data;
1675761876c8SJason Wang 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
1676761876c8SJason Wang 
1677761876c8SJason Wang 		switch (act) {
1678761876c8SJason Wang 		case XDP_REDIRECT:
1679761876c8SJason Wang 			get_page(alloc_frag->page);
1680761876c8SJason Wang 			alloc_frag->offset += buflen;
1681761876c8SJason Wang 			err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
16821bb4f2e8SJason Wang 			xdp_do_flush_map();
1683761876c8SJason Wang 			if (err)
1684761876c8SJason Wang 				goto err_redirect;
1685654d5738SXin Long 			rcu_read_unlock();
168623e43f07SJason Wang 			preempt_enable();
1687761876c8SJason Wang 			return NULL;
1688761876c8SJason Wang 		case XDP_TX:
168959655a5bSJason Wang 			get_page(alloc_frag->page);
169059655a5bSJason Wang 			alloc_frag->offset += buflen;
169144fa2dbdSJesper Dangaard Brouer 			if (tun_xdp_tx(tun->dev, &xdp))
169259655a5bSJason Wang 				goto err_redirect;
169359655a5bSJason Wang 			tun_xdp_flush(tun->dev);
169459655a5bSJason Wang 			rcu_read_unlock();
169559655a5bSJason Wang 			preempt_enable();
169659655a5bSJason Wang 			return NULL;
1697761876c8SJason Wang 		case XDP_PASS:
1698761876c8SJason Wang 			delta = orig_data - xdp.data;
16998fb58f1eSNikita V. Shirokov 			len = xdp.data_end - xdp.data;
1700761876c8SJason Wang 			break;
1701761876c8SJason Wang 		default:
1702761876c8SJason Wang 			bpf_warn_invalid_xdp_action(act);
1703761876c8SJason Wang 			/* fall through */
1704761876c8SJason Wang 		case XDP_ABORTED:
1705761876c8SJason Wang 			trace_xdp_exception(tun->dev, xdp_prog, act);
1706761876c8SJason Wang 			/* fall through */
1707761876c8SJason Wang 		case XDP_DROP:
1708761876c8SJason Wang 			goto err_xdp;
1709761876c8SJason Wang 		}
1710761876c8SJason Wang 	}
1711761876c8SJason Wang 
1712761876c8SJason Wang 	skb = build_skb(buf, buflen);
1713761876c8SJason Wang 	if (!skb) {
1714761876c8SJason Wang 		rcu_read_unlock();
171523e43f07SJason Wang 		preempt_enable();
1716761876c8SJason Wang 		return ERR_PTR(-ENOMEM);
1717761876c8SJason Wang 	}
1718761876c8SJason Wang 
17197df13219SJason Wang 	skb_reserve(skb, pad - delta);
17208fb58f1eSNikita V. Shirokov 	skb_put(skb, len);
172166ccbc9cSJason Wang 	get_page(alloc_frag->page);
172266ccbc9cSJason Wang 	alloc_frag->offset += buflen;
172366ccbc9cSJason Wang 
1724761876c8SJason Wang 	rcu_read_unlock();
172523e43f07SJason Wang 	preempt_enable();
1726761876c8SJason Wang 
172766ccbc9cSJason Wang 	return skb;
1728761876c8SJason Wang 
1729761876c8SJason Wang err_redirect:
1730761876c8SJason Wang 	put_page(alloc_frag->page);
1731761876c8SJason Wang err_xdp:
1732761876c8SJason Wang 	rcu_read_unlock();
173323e43f07SJason Wang 	preempt_enable();
1734761876c8SJason Wang 	this_cpu_inc(tun->pcpu_stats->rx_dropped);
1735761876c8SJason Wang 	return NULL;
173666ccbc9cSJason Wang }
173766ccbc9cSJason Wang 
17381da177e4SLinus Torvalds /* Get packet from user space buffer */
173954f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1740f5ff53b4SAl Viro 			    void *msg_control, struct iov_iter *from,
17415503fcecSJason Wang 			    int noblock, bool more)
17421da177e4SLinus Torvalds {
174309640e63SHarvey Harrison 	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
17441da177e4SLinus Torvalds 	struct sk_buff *skb;
1745f5ff53b4SAl Viro 	size_t total_len = iov_iter_count(from);
1746eaea34b2SPaolo Abeni 	size_t len = total_len, align = tun->align, linear;
1747f43798c2SRusty Russell 	struct virtio_net_hdr gso = { 0 };
1748608b9977SPaolo Abeni 	struct tun_pcpu_stats *stats;
174996f8d9ecSJason Wang 	int good_linear;
17500690899bSMichael S. Tsirkin 	int copylen;
17510690899bSMichael S. Tsirkin 	bool zerocopy = false;
17520690899bSMichael S. Tsirkin 	int err;
175396f84061SJason Wang 	u32 rxhash = 0;
17541cfe6e93SJason Wang 	int skb_xdp = 1;
175590e33d45SPetar Penkov 	bool frags = tun_napi_frags_enabled(tun);
17561da177e4SLinus Torvalds 
17571bd4978aSEric Dumazet 	if (!(tun->dev->flags & IFF_UP))
17581bd4978aSEric Dumazet 		return -EIO;
17591bd4978aSEric Dumazet 
176040630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_NO_PI)) {
176115718ea0SDan Carpenter 		if (len < sizeof(pi))
17621da177e4SLinus Torvalds 			return -EINVAL;
176315718ea0SDan Carpenter 		len -= sizeof(pi);
17641da177e4SLinus Torvalds 
1765cbbd26b8SAl Viro 		if (!copy_from_iter_full(&pi, sizeof(pi), from))
17661da177e4SLinus Torvalds 			return -EFAULT;
17671da177e4SLinus Torvalds 	}
17681da177e4SLinus Torvalds 
176940630b82SMichael S. Tsirkin 	if (tun->flags & IFF_VNET_HDR) {
1770e1edab87SWillem de Bruijn 		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1771e1edab87SWillem de Bruijn 
1772e1edab87SWillem de Bruijn 		if (len < vnet_hdr_sz)
1773f43798c2SRusty Russell 			return -EINVAL;
1774e1edab87SWillem de Bruijn 		len -= vnet_hdr_sz;
1775f43798c2SRusty Russell 
1776cbbd26b8SAl Viro 		if (!copy_from_iter_full(&gso, sizeof(gso), from))
1777f43798c2SRusty Russell 			return -EFAULT;
1778f43798c2SRusty Russell 
17794909122fSHerbert Xu 		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
178056f0dcc5SMichael S. Tsirkin 		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
178156f0dcc5SMichael S. Tsirkin 			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
17824909122fSHerbert Xu 
178356f0dcc5SMichael S. Tsirkin 		if (tun16_to_cpu(tun, gso.hdr_len) > len)
1784f43798c2SRusty Russell 			return -EINVAL;
1785e1edab87SWillem de Bruijn 		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1786f43798c2SRusty Russell 	}
1787f43798c2SRusty Russell 
178840630b82SMichael S. Tsirkin 	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1789a504b86eSstephen hemminger 		align += NET_IP_ALIGN;
17900eca93bcSHerbert Xu 		if (unlikely(len < ETH_HLEN ||
179156f0dcc5SMichael S. Tsirkin 			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1792e01bf1c8SRusty Russell 			return -EINVAL;
1793e01bf1c8SRusty Russell 	}
17941da177e4SLinus Torvalds 
179596f8d9ecSJason Wang 	good_linear = SKB_MAX_HEAD(align);
179696f8d9ecSJason Wang 
179788529176SJason Wang 	if (msg_control) {
1798f5ff53b4SAl Viro 		struct iov_iter i = *from;
1799f5ff53b4SAl Viro 
180088529176SJason Wang 		/* There are 256 bytes to be copied in skb, so there is
180188529176SJason Wang 		 * enough room for skb expand head in case it is used.
18020690899bSMichael S. Tsirkin 		 * The rest of the buffer is mapped from userspace.
18030690899bSMichael S. Tsirkin 		 */
180456f0dcc5SMichael S. Tsirkin 		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
180596f8d9ecSJason Wang 		if (copylen > good_linear)
180696f8d9ecSJason Wang 			copylen = good_linear;
18073dd5c330SJason Wang 		linear = copylen;
1808f5ff53b4SAl Viro 		iov_iter_advance(&i, copylen);
1809f5ff53b4SAl Viro 		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
181088529176SJason Wang 			zerocopy = true;
181188529176SJason Wang 	}
181288529176SJason Wang 
181390e33d45SPetar Penkov 	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
18141cfe6e93SJason Wang 		/* For the packet that is not easy to be processed
18151cfe6e93SJason Wang 		 * (e.g gso or jumbo packet), we will do it at after
18161cfe6e93SJason Wang 		 * skb was created with generic XDP routine.
18171cfe6e93SJason Wang 		 */
18181cfe6e93SJason Wang 		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
181966ccbc9cSJason Wang 		if (IS_ERR(skb)) {
182066ccbc9cSJason Wang 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
182166ccbc9cSJason Wang 			return PTR_ERR(skb);
182266ccbc9cSJason Wang 		}
1823761876c8SJason Wang 		if (!skb)
1824761876c8SJason Wang 			return total_len;
182566ccbc9cSJason Wang 	} else {
182688529176SJason Wang 		if (!zerocopy) {
18270690899bSMichael S. Tsirkin 			copylen = len;
182856f0dcc5SMichael S. Tsirkin 			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
182996f8d9ecSJason Wang 				linear = good_linear;
183096f8d9ecSJason Wang 			else
183156f0dcc5SMichael S. Tsirkin 				linear = tun16_to_cpu(tun, gso.hdr_len);
18323dd5c330SJason Wang 		}
18330690899bSMichael S. Tsirkin 
183490e33d45SPetar Penkov 		if (frags) {
183590e33d45SPetar Penkov 			mutex_lock(&tfile->napi_mutex);
183690e33d45SPetar Penkov 			skb = tun_napi_alloc_frags(tfile, copylen, from);
183790e33d45SPetar Penkov 			/* tun_napi_alloc_frags() enforces a layout for the skb.
183890e33d45SPetar Penkov 			 * If zerocopy is enabled, then this layout will be
183990e33d45SPetar Penkov 			 * overwritten by zerocopy_sg_from_iter().
184090e33d45SPetar Penkov 			 */
184190e33d45SPetar Penkov 			zerocopy = false;
184290e33d45SPetar Penkov 		} else {
184390e33d45SPetar Penkov 			skb = tun_alloc_skb(tfile, align, copylen, linear,
184490e33d45SPetar Penkov 					    noblock);
184590e33d45SPetar Penkov 		}
184690e33d45SPetar Penkov 
184733dccbb0SHerbert Xu 		if (IS_ERR(skb)) {
184833dccbb0SHerbert Xu 			if (PTR_ERR(skb) != -EAGAIN)
1849608b9977SPaolo Abeni 				this_cpu_inc(tun->pcpu_stats->rx_dropped);
185090e33d45SPetar Penkov 			if (frags)
185190e33d45SPetar Penkov 				mutex_unlock(&tfile->napi_mutex);
185233dccbb0SHerbert Xu 			return PTR_ERR(skb);
18531da177e4SLinus Torvalds 		}
18541da177e4SLinus Torvalds 
18550690899bSMichael S. Tsirkin 		if (zerocopy)
1856f5ff53b4SAl Viro 			err = zerocopy_sg_from_iter(skb, from);
1857af1cc7a2SJason Wang 		else
1858f5ff53b4SAl Viro 			err = skb_copy_datagram_from_iter(skb, 0, from, len);
18590690899bSMichael S. Tsirkin 
18600690899bSMichael S. Tsirkin 		if (err) {
1861608b9977SPaolo Abeni 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
18628f22757eSDave Jones 			kfree_skb(skb);
186390e33d45SPetar Penkov 			if (frags) {
186490e33d45SPetar Penkov 				tfile->napi.skb = NULL;
186590e33d45SPetar Penkov 				mutex_unlock(&tfile->napi_mutex);
186690e33d45SPetar Penkov 			}
186790e33d45SPetar Penkov 
18681da177e4SLinus Torvalds 			return -EFAULT;
18698f22757eSDave Jones 		}
187066ccbc9cSJason Wang 	}
18711da177e4SLinus Torvalds 
18723e9e40e7SJarno Rajahalme 	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1873df10db98SPaolo Abeni 		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1874df10db98SPaolo Abeni 		kfree_skb(skb);
187590e33d45SPetar Penkov 		if (frags) {
187690e33d45SPetar Penkov 			tfile->napi.skb = NULL;
187790e33d45SPetar Penkov 			mutex_unlock(&tfile->napi_mutex);
187890e33d45SPetar Penkov 		}
187990e33d45SPetar Penkov 
1880df10db98SPaolo Abeni 		return -EINVAL;
1881df10db98SPaolo Abeni 	}
1882df10db98SPaolo Abeni 
18831da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
188440630b82SMichael S. Tsirkin 	case IFF_TUN:
188540630b82SMichael S. Tsirkin 		if (tun->flags & IFF_NO_PI) {
18862580c4c1SAlexander Potapenko 			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
18872580c4c1SAlexander Potapenko 
18882580c4c1SAlexander Potapenko 			switch (ip_version) {
18892580c4c1SAlexander Potapenko 			case 4:
1890f09f7ee2SAng Way Chuang 				pi.proto = htons(ETH_P_IP);
1891f09f7ee2SAng Way Chuang 				break;
18922580c4c1SAlexander Potapenko 			case 6:
1893f09f7ee2SAng Way Chuang 				pi.proto = htons(ETH_P_IPV6);
1894f09f7ee2SAng Way Chuang 				break;
1895f09f7ee2SAng Way Chuang 			default:
1896608b9977SPaolo Abeni 				this_cpu_inc(tun->pcpu_stats->rx_dropped);
1897f09f7ee2SAng Way Chuang 				kfree_skb(skb);
1898f09f7ee2SAng Way Chuang 				return -EINVAL;
1899f09f7ee2SAng Way Chuang 			}
1900f09f7ee2SAng Way Chuang 		}
1901f09f7ee2SAng Way Chuang 
1902459a98edSArnaldo Carvalho de Melo 		skb_reset_mac_header(skb);
19031da177e4SLinus Torvalds 		skb->protocol = pi.proto;
19044c13eb66SArnaldo Carvalho de Melo 		skb->dev = tun->dev;
19051da177e4SLinus Torvalds 		break;
190640630b82SMichael S. Tsirkin 	case IFF_TAP:
190790e33d45SPetar Penkov 		if (!frags)
19081da177e4SLinus Torvalds 			skb->protocol = eth_type_trans(skb, tun->dev);
19091da177e4SLinus Torvalds 		break;
19106403eab1SJoe Perches 	}
19111da177e4SLinus Torvalds 
19120690899bSMichael S. Tsirkin 	/* copy skb_ubuf_info for callback when skb has no error */
19130690899bSMichael S. Tsirkin 	if (zerocopy) {
19140690899bSMichael S. Tsirkin 		skb_shinfo(skb)->destructor_arg = msg_control;
19150690899bSMichael S. Tsirkin 		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1916c9af6db4SPravin B Shelar 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1917af1cc7a2SJason Wang 	} else if (msg_control) {
1918af1cc7a2SJason Wang 		struct ubuf_info *uarg = msg_control;
1919af1cc7a2SJason Wang 		uarg->callback(uarg, false);
19200690899bSMichael S. Tsirkin 	}
19210690899bSMichael S. Tsirkin 
192272f65107SVlad Yasevich 	skb_reset_network_header(skb);
192340893fd0SJason Wang 	skb_probe_transport_header(skb, 0);
192438502af7SJason Wang 
19251cfe6e93SJason Wang 	if (skb_xdp) {
1926761876c8SJason Wang 		struct bpf_prog *xdp_prog;
1927761876c8SJason Wang 		int ret;
1928761876c8SJason Wang 
1929761876c8SJason Wang 		rcu_read_lock();
1930761876c8SJason Wang 		xdp_prog = rcu_dereference(tun->xdp_prog);
1931761876c8SJason Wang 		if (xdp_prog) {
1932761876c8SJason Wang 			ret = do_xdp_generic(xdp_prog, skb);
1933761876c8SJason Wang 			if (ret != XDP_PASS) {
1934761876c8SJason Wang 				rcu_read_unlock();
1935761876c8SJason Wang 				return total_len;
1936761876c8SJason Wang 			}
1937761876c8SJason Wang 		}
1938761876c8SJason Wang 		rcu_read_unlock();
1939761876c8SJason Wang 	}
1940761876c8SJason Wang 
1941cf1a1e07SPaolo Abeni 	/* Compute the costly rx hash only if needed for flow updates.
1942cf1a1e07SPaolo Abeni 	 * We may get a very small possibility of OOO during switching, not
1943cf1a1e07SPaolo Abeni 	 * worth to optimize.
1944cf1a1e07SPaolo Abeni 	 */
1945cf1a1e07SPaolo Abeni 	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1946cf1a1e07SPaolo Abeni 	    !tfile->detached)
1947feec084aSJason Wang 		rxhash = __skb_get_hash_symmetric(skb);
194894317099SPetar Penkov 
194990e33d45SPetar Penkov 	if (frags) {
195090e33d45SPetar Penkov 		/* Exercise flow dissector code path. */
195190e33d45SPetar Penkov 		u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
195290e33d45SPetar Penkov 
1953010f245bSEric Dumazet 		if (unlikely(headlen > skb_headlen(skb))) {
195490e33d45SPetar Penkov 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
195590e33d45SPetar Penkov 			napi_free_frags(&tfile->napi);
195690e33d45SPetar Penkov 			mutex_unlock(&tfile->napi_mutex);
195790e33d45SPetar Penkov 			WARN_ON(1);
195890e33d45SPetar Penkov 			return -ENOMEM;
195990e33d45SPetar Penkov 		}
196090e33d45SPetar Penkov 
196190e33d45SPetar Penkov 		local_bh_disable();
196290e33d45SPetar Penkov 		napi_gro_frags(&tfile->napi);
196390e33d45SPetar Penkov 		local_bh_enable();
196490e33d45SPetar Penkov 		mutex_unlock(&tfile->napi_mutex);
1965aec72f33SEric Dumazet 	} else if (tfile->napi_enabled) {
196694317099SPetar Penkov 		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
196794317099SPetar Penkov 		int queue_len;
196894317099SPetar Penkov 
196994317099SPetar Penkov 		spin_lock_bh(&queue->lock);
197094317099SPetar Penkov 		__skb_queue_tail(queue, skb);
197194317099SPetar Penkov 		queue_len = skb_queue_len(queue);
197294317099SPetar Penkov 		spin_unlock(&queue->lock);
197394317099SPetar Penkov 
197494317099SPetar Penkov 		if (!more || queue_len > NAPI_POLL_WEIGHT)
197594317099SPetar Penkov 			napi_schedule(&tfile->napi);
197694317099SPetar Penkov 
197794317099SPetar Penkov 		local_bh_enable();
197894317099SPetar Penkov 	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
19795503fcecSJason Wang 		tun_rx_batched(tun, tfile, skb, more);
198094317099SPetar Penkov 	} else {
19811da177e4SLinus Torvalds 		netif_rx_ni(skb);
198294317099SPetar Penkov 	}
19831da177e4SLinus Torvalds 
1984608b9977SPaolo Abeni 	stats = get_cpu_ptr(tun->pcpu_stats);
1985608b9977SPaolo Abeni 	u64_stats_update_begin(&stats->syncp);
1986608b9977SPaolo Abeni 	stats->rx_packets++;
1987608b9977SPaolo Abeni 	stats->rx_bytes += len;
1988608b9977SPaolo Abeni 	u64_stats_update_end(&stats->syncp);
1989608b9977SPaolo Abeni 	put_cpu_ptr(stats);
19901da177e4SLinus Torvalds 
199196f84061SJason Wang 	if (rxhash)
19929e85722dSJason Wang 		tun_flow_update(tun, rxhash, tfile);
199396f84061SJason Wang 
19940690899bSMichael S. Tsirkin 	return total_len;
19951da177e4SLinus Torvalds }
19961da177e4SLinus Torvalds 
1997f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
19981da177e4SLinus Torvalds {
199933dccbb0SHerbert Xu 	struct file *file = iocb->ki_filp;
200054f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
20019484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
2002631ab46bSEric W. Biederman 	ssize_t result;
20031da177e4SLinus Torvalds 
20041da177e4SLinus Torvalds 	if (!tun)
20051da177e4SLinus Torvalds 		return -EBADFD;
20061da177e4SLinus Torvalds 
20075503fcecSJason Wang 	result = tun_get_user(tun, tfile, NULL, from,
20085503fcecSJason Wang 			      file->f_flags & O_NONBLOCK, false);
2009631ab46bSEric W. Biederman 
2010631ab46bSEric W. Biederman 	tun_put(tun);
2011631ab46bSEric W. Biederman 	return result;
20121da177e4SLinus Torvalds }
20131da177e4SLinus Torvalds 
2014fc72d1d5SJason Wang static ssize_t tun_put_user_xdp(struct tun_struct *tun,
2015fc72d1d5SJason Wang 				struct tun_file *tfile,
20161ffcbc85SJesper Dangaard Brouer 				struct xdp_frame *xdp_frame,
2017fc72d1d5SJason Wang 				struct iov_iter *iter)
2018fc72d1d5SJason Wang {
2019fc72d1d5SJason Wang 	int vnet_hdr_sz = 0;
20201ffcbc85SJesper Dangaard Brouer 	size_t size = xdp_frame->len;
2021fc72d1d5SJason Wang 	struct tun_pcpu_stats *stats;
2022fc72d1d5SJason Wang 	size_t ret;
2023fc72d1d5SJason Wang 
2024fc72d1d5SJason Wang 	if (tun->flags & IFF_VNET_HDR) {
2025fc72d1d5SJason Wang 		struct virtio_net_hdr gso = { 0 };
2026fc72d1d5SJason Wang 
2027fc72d1d5SJason Wang 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2028fc72d1d5SJason Wang 		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
2029fc72d1d5SJason Wang 			return -EINVAL;
2030fc72d1d5SJason Wang 		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
2031fc72d1d5SJason Wang 			     sizeof(gso)))
2032fc72d1d5SJason Wang 			return -EFAULT;
2033fc72d1d5SJason Wang 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2034fc72d1d5SJason Wang 	}
2035fc72d1d5SJason Wang 
20361ffcbc85SJesper Dangaard Brouer 	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
2037fc72d1d5SJason Wang 
2038fc72d1d5SJason Wang 	stats = get_cpu_ptr(tun->pcpu_stats);
2039fc72d1d5SJason Wang 	u64_stats_update_begin(&stats->syncp);
2040fc72d1d5SJason Wang 	stats->tx_packets++;
2041fc72d1d5SJason Wang 	stats->tx_bytes += ret;
2042fc72d1d5SJason Wang 	u64_stats_update_end(&stats->syncp);
2043fc72d1d5SJason Wang 	put_cpu_ptr(tun->pcpu_stats);
2044fc72d1d5SJason Wang 
2045fc72d1d5SJason Wang 	return ret;
2046fc72d1d5SJason Wang }
2047fc72d1d5SJason Wang 
20481da177e4SLinus Torvalds /* Put packet to the user space buffer */
20496f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun,
205054f968d6SJason Wang 			    struct tun_file *tfile,
20511da177e4SLinus Torvalds 			    struct sk_buff *skb,
2052e0b46d0eSHerbert Xu 			    struct iov_iter *iter)
20531da177e4SLinus Torvalds {
20541da177e4SLinus Torvalds 	struct tun_pi pi = { 0, skb->protocol };
2055608b9977SPaolo Abeni 	struct tun_pcpu_stats *stats;
2056e0b46d0eSHerbert Xu 	ssize_t total;
20578c847d25SJason Wang 	int vlan_offset = 0;
2058a8f9bfdfSHerbert Xu 	int vlan_hlen = 0;
20592eb783c4SHerbert Xu 	int vnet_hdr_sz = 0;
2060a8f9bfdfSHerbert Xu 
2061df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
2062a8f9bfdfSHerbert Xu 		vlan_hlen = VLAN_HLEN;
20631da177e4SLinus Torvalds 
206440630b82SMichael S. Tsirkin 	if (tun->flags & IFF_VNET_HDR)
2065e1edab87SWillem de Bruijn 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
20661da177e4SLinus Torvalds 
2067e0b46d0eSHerbert Xu 	total = skb->len + vlan_hlen + vnet_hdr_sz;
2068e0b46d0eSHerbert Xu 
206940630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_NO_PI)) {
2070e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < sizeof(pi))
20711da177e4SLinus Torvalds 			return -EINVAL;
20721da177e4SLinus Torvalds 
2073e0b46d0eSHerbert Xu 		total += sizeof(pi);
2074e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < total) {
20751da177e4SLinus Torvalds 			/* Packet will be striped */
20761da177e4SLinus Torvalds 			pi.flags |= TUN_PKT_STRIP;
20771da177e4SLinus Torvalds 		}
20781da177e4SLinus Torvalds 
2079e0b46d0eSHerbert Xu 		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
20801da177e4SLinus Torvalds 			return -EFAULT;
20811da177e4SLinus Torvalds 	}
20821da177e4SLinus Torvalds 
20832eb783c4SHerbert Xu 	if (vnet_hdr_sz) {
20849403cd7cSJarno Rajahalme 		struct virtio_net_hdr gso;
208534166093SMike Rapoport 
2086e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < vnet_hdr_sz)
2087f43798c2SRusty Russell 			return -EINVAL;
2088f43798c2SRusty Russell 
20893e9e40e7SJarno Rajahalme 		if (virtio_net_hdr_from_skb(skb, &gso,
20906391a448SJason Wang 					    tun_is_little_endian(tun), true)) {
2091f43798c2SRusty Russell 			struct skb_shared_info *sinfo = skb_shinfo(skb);
20926b8a66eeSJoe Perches 			pr_err("unexpected GSO type: "
2093ef3db4a5SMichael S. Tsirkin 			       "0x%x, gso_size %d, hdr_len %d\n",
209456f0dcc5SMichael S. Tsirkin 			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
209556f0dcc5SMichael S. Tsirkin 			       tun16_to_cpu(tun, gso.hdr_len));
2096ef3db4a5SMichael S. Tsirkin 			print_hex_dump(KERN_ERR, "tun: ",
2097ef3db4a5SMichael S. Tsirkin 				       DUMP_PREFIX_NONE,
2098ef3db4a5SMichael S. Tsirkin 				       16, 1, skb->head,
209956f0dcc5SMichael S. Tsirkin 				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2100ef3db4a5SMichael S. Tsirkin 			WARN_ON_ONCE(1);
2101ef3db4a5SMichael S. Tsirkin 			return -EINVAL;
2102ef3db4a5SMichael S. Tsirkin 		}
2103f43798c2SRusty Russell 
2104e0b46d0eSHerbert Xu 		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2105f43798c2SRusty Russell 			return -EFAULT;
21068c847d25SJason Wang 
21078c847d25SJason Wang 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2108f43798c2SRusty Russell 	}
2109f43798c2SRusty Russell 
2110a8f9bfdfSHerbert Xu 	if (vlan_hlen) {
2111e0b46d0eSHerbert Xu 		int ret;
2112aff3d70aSJason Wang 		struct veth veth;
21131da177e4SLinus Torvalds 
21146680ec68SJason Wang 		veth.h_vlan_proto = skb->vlan_proto;
2115df8a39deSJiri Pirko 		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
21161da177e4SLinus Torvalds 
21176680ec68SJason Wang 		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
21186680ec68SJason Wang 
2119e0b46d0eSHerbert Xu 		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2120e0b46d0eSHerbert Xu 		if (ret || !iov_iter_count(iter))
21216680ec68SJason Wang 			goto done;
21226680ec68SJason Wang 
2123e0b46d0eSHerbert Xu 		ret = copy_to_iter(&veth, sizeof(veth), iter);
2124e0b46d0eSHerbert Xu 		if (ret != sizeof(veth) || !iov_iter_count(iter))
21256680ec68SJason Wang 			goto done;
21266680ec68SJason Wang 	}
21276680ec68SJason Wang 
2128e0b46d0eSHerbert Xu 	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
21296680ec68SJason Wang 
21306680ec68SJason Wang done:
2131608b9977SPaolo Abeni 	/* caller is in process context, */
2132608b9977SPaolo Abeni 	stats = get_cpu_ptr(tun->pcpu_stats);
2133608b9977SPaolo Abeni 	u64_stats_update_begin(&stats->syncp);
2134608b9977SPaolo Abeni 	stats->tx_packets++;
2135608b9977SPaolo Abeni 	stats->tx_bytes += skb->len + vlan_hlen;
2136608b9977SPaolo Abeni 	u64_stats_update_end(&stats->syncp);
2137608b9977SPaolo Abeni 	put_cpu_ptr(tun->pcpu_stats);
21381da177e4SLinus Torvalds 
21391da177e4SLinus Torvalds 	return total;
21401da177e4SLinus Torvalds }
21411da177e4SLinus Torvalds 
2142fc72d1d5SJason Wang static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
21431576d986SJason Wang {
21441576d986SJason Wang 	DECLARE_WAITQUEUE(wait, current);
2145fc72d1d5SJason Wang 	void *ptr = NULL;
2146f48cc6b2SJason Wang 	int error = 0;
21471576d986SJason Wang 
2148fc72d1d5SJason Wang 	ptr = ptr_ring_consume(&tfile->tx_ring);
2149fc72d1d5SJason Wang 	if (ptr)
21501576d986SJason Wang 		goto out;
21511576d986SJason Wang 	if (noblock) {
2152f48cc6b2SJason Wang 		error = -EAGAIN;
21531576d986SJason Wang 		goto out;
21541576d986SJason Wang 	}
21551576d986SJason Wang 
21561576d986SJason Wang 	add_wait_queue(&tfile->wq.wait, &wait);
21571576d986SJason Wang 	current->state = TASK_INTERRUPTIBLE;
21581576d986SJason Wang 
21591576d986SJason Wang 	while (1) {
2160fc72d1d5SJason Wang 		ptr = ptr_ring_consume(&tfile->tx_ring);
2161fc72d1d5SJason Wang 		if (ptr)
21621576d986SJason Wang 			break;
21631576d986SJason Wang 		if (signal_pending(current)) {
2164f48cc6b2SJason Wang 			error = -ERESTARTSYS;
21651576d986SJason Wang 			break;
21661576d986SJason Wang 		}
21671576d986SJason Wang 		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2168f48cc6b2SJason Wang 			error = -EFAULT;
21691576d986SJason Wang 			break;
21701576d986SJason Wang 		}
21711576d986SJason Wang 
21721576d986SJason Wang 		schedule();
21731576d986SJason Wang 	}
21741576d986SJason Wang 
21751576d986SJason Wang 	current->state = TASK_RUNNING;
21761576d986SJason Wang 	remove_wait_queue(&tfile->wq.wait, &wait);
21771576d986SJason Wang 
21781576d986SJason Wang out:
2179f48cc6b2SJason Wang 	*err = error;
2180fc72d1d5SJason Wang 	return ptr;
21811576d986SJason Wang }
21821576d986SJason Wang 
218354f968d6SJason Wang static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
21849b067034SAl Viro 			   struct iov_iter *to,
2185fc72d1d5SJason Wang 			   int noblock, void *ptr)
21861da177e4SLinus Torvalds {
21879b067034SAl Viro 	ssize_t ret;
21881576d986SJason Wang 	int err;
21891da177e4SLinus Torvalds 
21903872baf6SRami Rosen 	tun_debug(KERN_INFO, tun, "tun_do_read\n");
21911da177e4SLinus Torvalds 
2192c33ee15bSWei Xu 	if (!iov_iter_count(to)) {
2193fc72d1d5SJason Wang 		tun_ptr_free(ptr);
21949b067034SAl Viro 		return 0;
2195c33ee15bSWei Xu 	}
21961da177e4SLinus Torvalds 
2197fc72d1d5SJason Wang 	if (!ptr) {
21981576d986SJason Wang 		/* Read frames from ring */
2199fc72d1d5SJason Wang 		ptr = tun_ring_recv(tfile, noblock, &err);
2200fc72d1d5SJason Wang 		if (!ptr)
2201957f094fSAlex Gartrell 			return err;
2202ac77cfd4SJason Wang 	}
2203e0b46d0eSHerbert Xu 
22041ffcbc85SJesper Dangaard Brouer 	if (tun_is_xdp_frame(ptr)) {
22051ffcbc85SJesper Dangaard Brouer 		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2206fc72d1d5SJason Wang 
22071ffcbc85SJesper Dangaard Brouer 		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
220803993094SJesper Dangaard Brouer 		xdp_return_frame(xdpf);
2209fc72d1d5SJason Wang 	} else {
2210fc72d1d5SJason Wang 		struct sk_buff *skb = ptr;
2211fc72d1d5SJason Wang 
22129b067034SAl Viro 		ret = tun_put_user(tun, tfile, skb, to);
2213f51a5e82SJason Wang 		if (unlikely(ret < 0))
22141da177e4SLinus Torvalds 			kfree_skb(skb);
2215f51a5e82SJason Wang 		else
2216f51a5e82SJason Wang 			consume_skb(skb);
2217fc72d1d5SJason Wang 	}
22181da177e4SLinus Torvalds 
221905c2828cSMichael S. Tsirkin 	return ret;
222005c2828cSMichael S. Tsirkin }
222105c2828cSMichael S. Tsirkin 
22229b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
222305c2828cSMichael S. Tsirkin {
222405c2828cSMichael S. Tsirkin 	struct file *file = iocb->ki_filp;
222505c2828cSMichael S. Tsirkin 	struct tun_file *tfile = file->private_data;
22269484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
22279b067034SAl Viro 	ssize_t len = iov_iter_count(to), ret;
222805c2828cSMichael S. Tsirkin 
222905c2828cSMichael S. Tsirkin 	if (!tun)
223005c2828cSMichael S. Tsirkin 		return -EBADFD;
2231ac77cfd4SJason Wang 	ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
223242404c09SDavid S. Miller 	ret = min_t(ssize_t, ret, len);
2233d0b7da8aSZhi Yong Wu 	if (ret > 0)
2234d0b7da8aSZhi Yong Wu 		iocb->ki_pos = ret;
2235631ab46bSEric W. Biederman 	tun_put(tun);
22361da177e4SLinus Torvalds 	return ret;
22371da177e4SLinus Torvalds }
22381da177e4SLinus Torvalds 
2239cd5681d7SJason Wang static void tun_prog_free(struct rcu_head *rcu)
224096f84061SJason Wang {
2241cd5681d7SJason Wang 	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
224296f84061SJason Wang 
224396f84061SJason Wang 	bpf_prog_destroy(prog->prog);
224496f84061SJason Wang 	kfree(prog);
224596f84061SJason Wang }
224696f84061SJason Wang 
22479d6474e4SJason Wang static int __tun_set_ebpf(struct tun_struct *tun,
22489d6474e4SJason Wang 			  struct tun_prog __rcu **prog_p,
224996f84061SJason Wang 			  struct bpf_prog *prog)
225096f84061SJason Wang {
2251cd5681d7SJason Wang 	struct tun_prog *old, *new = NULL;
225296f84061SJason Wang 
225396f84061SJason Wang 	if (prog) {
225496f84061SJason Wang 		new = kmalloc(sizeof(*new), GFP_KERNEL);
225596f84061SJason Wang 		if (!new)
225696f84061SJason Wang 			return -ENOMEM;
225796f84061SJason Wang 		new->prog = prog;
225896f84061SJason Wang 	}
225996f84061SJason Wang 
2260124da8f6SJason Wang 	spin_lock_bh(&tun->lock);
2261cd5681d7SJason Wang 	old = rcu_dereference_protected(*prog_p,
2262124da8f6SJason Wang 					lockdep_is_held(&tun->lock));
2263cd5681d7SJason Wang 	rcu_assign_pointer(*prog_p, new);
2264124da8f6SJason Wang 	spin_unlock_bh(&tun->lock);
226596f84061SJason Wang 
226696f84061SJason Wang 	if (old)
2267cd5681d7SJason Wang 		call_rcu(&old->rcu, tun_prog_free);
226896f84061SJason Wang 
226996f84061SJason Wang 	return 0;
227096f84061SJason Wang }
227196f84061SJason Wang 
227296442e42SJason Wang static void tun_free_netdev(struct net_device *dev)
227396442e42SJason Wang {
227496442e42SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
227596442e42SJason Wang 
22764008e97fSJason Wang 	BUG_ON(!(list_empty(&tun->disabled)));
2277608b9977SPaolo Abeni 	free_percpu(tun->pcpu_stats);
227896442e42SJason Wang 	tun_flow_uninit(tun);
22795dbbaf2dSPaul Moore 	security_tun_dev_free_security(tun->security);
2280cd5681d7SJason Wang 	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
2281aff3d70aSJason Wang 	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
228296442e42SJason Wang }
228396442e42SJason Wang 
22841da177e4SLinus Torvalds static void tun_setup(struct net_device *dev)
22851da177e4SLinus Torvalds {
22861da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
22871da177e4SLinus Torvalds 
22880625c883SEric W. Biederman 	tun->owner = INVALID_UID;
22890625c883SEric W. Biederman 	tun->group = INVALID_GID;
22901da177e4SLinus Torvalds 
22911da177e4SLinus Torvalds 	dev->ethtool_ops = &tun_ethtool_ops;
2292cf124db5SDavid S. Miller 	dev->needs_free_netdev = true;
2293cf124db5SDavid S. Miller 	dev->priv_destructor = tun_free_netdev;
2294016adb72SJason Wang 	/* We prefer our own queue length */
2295016adb72SJason Wang 	dev->tx_queue_len = TUN_READQ_SIZE;
22961da177e4SLinus Torvalds }
22971da177e4SLinus Torvalds 
2298f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting tun or tap
2299f019a7a5SEric W. Biederman  * device with netlink.
2300f019a7a5SEric W. Biederman  */
2301a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2302a8b8a889SMatthias Schiffer 			struct netlink_ext_ack *extack)
2303f019a7a5SEric W. Biederman {
2304f019a7a5SEric W. Biederman 	return -EINVAL;
2305f019a7a5SEric W. Biederman }
2306f019a7a5SEric W. Biederman 
23071ec010e7SSabrina Dubroca static size_t tun_get_size(const struct net_device *dev)
23081ec010e7SSabrina Dubroca {
23091ec010e7SSabrina Dubroca 	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
23101ec010e7SSabrina Dubroca 	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
23111ec010e7SSabrina Dubroca 
23121ec010e7SSabrina Dubroca 	return nla_total_size(sizeof(uid_t)) + /* OWNER */
23131ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(gid_t)) + /* GROUP */
23141ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* TYPE */
23151ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* PI */
23161ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* VNET_HDR */
23171ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* PERSIST */
23181ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
23191ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
23201ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
23211ec010e7SSabrina Dubroca 	       0;
23221ec010e7SSabrina Dubroca }
23231ec010e7SSabrina Dubroca 
23241ec010e7SSabrina Dubroca static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
23251ec010e7SSabrina Dubroca {
23261ec010e7SSabrina Dubroca 	struct tun_struct *tun = netdev_priv(dev);
23271ec010e7SSabrina Dubroca 
23281ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
23291ec010e7SSabrina Dubroca 		goto nla_put_failure;
23301ec010e7SSabrina Dubroca 	if (uid_valid(tun->owner) &&
23311ec010e7SSabrina Dubroca 	    nla_put_u32(skb, IFLA_TUN_OWNER,
23321ec010e7SSabrina Dubroca 			from_kuid_munged(current_user_ns(), tun->owner)))
23331ec010e7SSabrina Dubroca 		goto nla_put_failure;
23341ec010e7SSabrina Dubroca 	if (gid_valid(tun->group) &&
23351ec010e7SSabrina Dubroca 	    nla_put_u32(skb, IFLA_TUN_GROUP,
23361ec010e7SSabrina Dubroca 			from_kgid_munged(current_user_ns(), tun->group)))
23371ec010e7SSabrina Dubroca 		goto nla_put_failure;
23381ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
23391ec010e7SSabrina Dubroca 		goto nla_put_failure;
23401ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
23411ec010e7SSabrina Dubroca 		goto nla_put_failure;
23421ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
23431ec010e7SSabrina Dubroca 		goto nla_put_failure;
23441ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
23451ec010e7SSabrina Dubroca 		       !!(tun->flags & IFF_MULTI_QUEUE)))
23461ec010e7SSabrina Dubroca 		goto nla_put_failure;
23471ec010e7SSabrina Dubroca 	if (tun->flags & IFF_MULTI_QUEUE) {
23481ec010e7SSabrina Dubroca 		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
23491ec010e7SSabrina Dubroca 			goto nla_put_failure;
23501ec010e7SSabrina Dubroca 		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
23511ec010e7SSabrina Dubroca 				tun->numdisabled))
23521ec010e7SSabrina Dubroca 			goto nla_put_failure;
23531ec010e7SSabrina Dubroca 	}
23541ec010e7SSabrina Dubroca 
23551ec010e7SSabrina Dubroca 	return 0;
23561ec010e7SSabrina Dubroca 
23571ec010e7SSabrina Dubroca nla_put_failure:
23581ec010e7SSabrina Dubroca 	return -EMSGSIZE;
23591ec010e7SSabrina Dubroca }
23601ec010e7SSabrina Dubroca 
2361f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = {
2362f019a7a5SEric W. Biederman 	.kind		= DRV_NAME,
2363f019a7a5SEric W. Biederman 	.priv_size	= sizeof(struct tun_struct),
2364f019a7a5SEric W. Biederman 	.setup		= tun_setup,
2365f019a7a5SEric W. Biederman 	.validate	= tun_validate,
23661ec010e7SSabrina Dubroca 	.get_size       = tun_get_size,
23671ec010e7SSabrina Dubroca 	.fill_info      = tun_fill_info,
2368f019a7a5SEric W. Biederman };
2369f019a7a5SEric W. Biederman 
237033dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk)
237133dccbb0SHerbert Xu {
237254f968d6SJason Wang 	struct tun_file *tfile;
237343815482SEric Dumazet 	wait_queue_head_t *wqueue;
237433dccbb0SHerbert Xu 
237533dccbb0SHerbert Xu 	if (!sock_writeable(sk))
237633dccbb0SHerbert Xu 		return;
237733dccbb0SHerbert Xu 
23789cd3e072SEric Dumazet 	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
237933dccbb0SHerbert Xu 		return;
238033dccbb0SHerbert Xu 
238143815482SEric Dumazet 	wqueue = sk_sleep(sk);
238243815482SEric Dumazet 	if (wqueue && waitqueue_active(wqueue))
2383a9a08845SLinus Torvalds 		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2384a9a08845SLinus Torvalds 						EPOLLWRNORM | EPOLLWRBAND);
2385c722c625SHerbert Xu 
238654f968d6SJason Wang 	tfile = container_of(sk, struct tun_file, sk);
238754f968d6SJason Wang 	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
238833dccbb0SHerbert Xu }
238933dccbb0SHerbert Xu 
23901b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
239105c2828cSMichael S. Tsirkin {
239254f968d6SJason Wang 	int ret;
239354f968d6SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
23949484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
239554f968d6SJason Wang 
239654f968d6SJason Wang 	if (!tun)
239754f968d6SJason Wang 		return -EBADFD;
2398f5ff53b4SAl Viro 
2399c0371da6SAl Viro 	ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
24005503fcecSJason Wang 			   m->msg_flags & MSG_DONTWAIT,
24015503fcecSJason Wang 			   m->msg_flags & MSG_MORE);
240254f968d6SJason Wang 	tun_put(tun);
240354f968d6SJason Wang 	return ret;
240405c2828cSMichael S. Tsirkin }
240505c2828cSMichael S. Tsirkin 
24061b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
240705c2828cSMichael S. Tsirkin 		       int flags)
240805c2828cSMichael S. Tsirkin {
240954f968d6SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
24109484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
2411fc72d1d5SJason Wang 	void *ptr = m->msg_control;
241205c2828cSMichael S. Tsirkin 	int ret;
241354f968d6SJason Wang 
2414c33ee15bSWei Xu 	if (!tun) {
2415c33ee15bSWei Xu 		ret = -EBADFD;
2416fc72d1d5SJason Wang 		goto out_free;
2417c33ee15bSWei Xu 	}
241854f968d6SJason Wang 
2419eda29772SRichard Cochran 	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
24203811ae76SGao feng 		ret = -EINVAL;
2421c33ee15bSWei Xu 		goto out_put_tun;
24223811ae76SGao feng 	}
2423eda29772SRichard Cochran 	if (flags & MSG_ERRQUEUE) {
2424eda29772SRichard Cochran 		ret = sock_recv_errqueue(sock->sk, m, total_len,
2425eda29772SRichard Cochran 					 SOL_PACKET, TUN_TX_TIMESTAMP);
2426eda29772SRichard Cochran 		goto out;
2427eda29772SRichard Cochran 	}
2428fc72d1d5SJason Wang 	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
242987897931SAlex Gartrell 	if (ret > (ssize_t)total_len) {
243042404c09SDavid S. Miller 		m->msg_flags |= MSG_TRUNC;
243142404c09SDavid S. Miller 		ret = flags & MSG_TRUNC ? ret : total_len;
243242404c09SDavid S. Miller 	}
24333811ae76SGao feng out:
243454f968d6SJason Wang 	tun_put(tun);
243505c2828cSMichael S. Tsirkin 	return ret;
2436c33ee15bSWei Xu 
2437c33ee15bSWei Xu out_put_tun:
2438c33ee15bSWei Xu 	tun_put(tun);
2439fc72d1d5SJason Wang out_free:
2440fc72d1d5SJason Wang 	tun_ptr_free(ptr);
2441c33ee15bSWei Xu 	return ret;
244205c2828cSMichael S. Tsirkin }
244305c2828cSMichael S. Tsirkin 
2444fc72d1d5SJason Wang static int tun_ptr_peek_len(void *ptr)
2445fc72d1d5SJason Wang {
2446fc72d1d5SJason Wang 	if (likely(ptr)) {
24471ffcbc85SJesper Dangaard Brouer 		if (tun_is_xdp_frame(ptr)) {
24481ffcbc85SJesper Dangaard Brouer 			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2449fc72d1d5SJason Wang 
24501ffcbc85SJesper Dangaard Brouer 			return xdpf->len;
2451fc72d1d5SJason Wang 		}
2452fc72d1d5SJason Wang 		return __skb_array_len_with_tag(ptr);
2453fc72d1d5SJason Wang 	} else {
2454fc72d1d5SJason Wang 		return 0;
2455fc72d1d5SJason Wang 	}
2456fc72d1d5SJason Wang }
2457fc72d1d5SJason Wang 
24581576d986SJason Wang static int tun_peek_len(struct socket *sock)
24591576d986SJason Wang {
24601576d986SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
24611576d986SJason Wang 	struct tun_struct *tun;
24621576d986SJason Wang 	int ret = 0;
24631576d986SJason Wang 
24649484dc74Syuan linyu 	tun = tun_get(tfile);
24651576d986SJason Wang 	if (!tun)
24661576d986SJason Wang 		return 0;
24671576d986SJason Wang 
2468fc72d1d5SJason Wang 	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
24691576d986SJason Wang 	tun_put(tun);
24701576d986SJason Wang 
24711576d986SJason Wang 	return ret;
24721576d986SJason Wang }
24731576d986SJason Wang 
247405c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */
247505c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = {
24761576d986SJason Wang 	.peek_len = tun_peek_len,
247705c2828cSMichael S. Tsirkin 	.sendmsg = tun_sendmsg,
247805c2828cSMichael S. Tsirkin 	.recvmsg = tun_recvmsg,
247905c2828cSMichael S. Tsirkin };
248005c2828cSMichael S. Tsirkin 
248133dccbb0SHerbert Xu static struct proto tun_proto = {
248233dccbb0SHerbert Xu 	.name		= "tun",
248333dccbb0SHerbert Xu 	.owner		= THIS_MODULE,
248454f968d6SJason Wang 	.obj_size	= sizeof(struct tun_file),
248533dccbb0SHerbert Xu };
2486f019a7a5SEric W. Biederman 
2487980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun)
2488980c9e8cSDavid Woodhouse {
2489031f5e03SMichael S. Tsirkin 	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2490980c9e8cSDavid Woodhouse }
2491980c9e8cSDavid Woodhouse 
2492980c9e8cSDavid Woodhouse static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
2493980c9e8cSDavid Woodhouse 			      char *buf)
2494980c9e8cSDavid Woodhouse {
2495980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2496980c9e8cSDavid Woodhouse 	return sprintf(buf, "0x%x\n", tun_flags(tun));
2497980c9e8cSDavid Woodhouse }
2498980c9e8cSDavid Woodhouse 
2499980c9e8cSDavid Woodhouse static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
2500980c9e8cSDavid Woodhouse 			      char *buf)
2501980c9e8cSDavid Woodhouse {
2502980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
25030625c883SEric W. Biederman 	return uid_valid(tun->owner)?
25040625c883SEric W. Biederman 		sprintf(buf, "%u\n",
25050625c883SEric W. Biederman 			from_kuid_munged(current_user_ns(), tun->owner)):
25060625c883SEric W. Biederman 		sprintf(buf, "-1\n");
2507980c9e8cSDavid Woodhouse }
2508980c9e8cSDavid Woodhouse 
2509980c9e8cSDavid Woodhouse static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
2510980c9e8cSDavid Woodhouse 			      char *buf)
2511980c9e8cSDavid Woodhouse {
2512980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
25130625c883SEric W. Biederman 	return gid_valid(tun->group) ?
25140625c883SEric W. Biederman 		sprintf(buf, "%u\n",
25150625c883SEric W. Biederman 			from_kgid_munged(current_user_ns(), tun->group)):
25160625c883SEric W. Biederman 		sprintf(buf, "-1\n");
2517980c9e8cSDavid Woodhouse }
2518980c9e8cSDavid Woodhouse 
2519980c9e8cSDavid Woodhouse static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
2520980c9e8cSDavid Woodhouse static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
2521980c9e8cSDavid Woodhouse static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
2522980c9e8cSDavid Woodhouse 
2523c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = {
2524c4d33e24STakashi Iwai 	&dev_attr_tun_flags.attr,
2525c4d33e24STakashi Iwai 	&dev_attr_owner.attr,
2526c4d33e24STakashi Iwai 	&dev_attr_group.attr,
2527c4d33e24STakashi Iwai 	NULL
2528c4d33e24STakashi Iwai };
2529c4d33e24STakashi Iwai 
2530c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = {
2531c4d33e24STakashi Iwai 	.attrs = tun_dev_attrs
2532c4d33e24STakashi Iwai };
2533c4d33e24STakashi Iwai 
2534d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
25351da177e4SLinus Torvalds {
25361da177e4SLinus Torvalds 	struct tun_struct *tun;
253754f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
25381da177e4SLinus Torvalds 	struct net_device *dev;
25391da177e4SLinus Torvalds 	int err;
25401da177e4SLinus Torvalds 
25417c0c3b1aSJason Wang 	if (tfile->detached)
25427c0c3b1aSJason Wang 		return -EINVAL;
25437c0c3b1aSJason Wang 
254490e33d45SPetar Penkov 	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
254590e33d45SPetar Penkov 		if (!capable(CAP_NET_ADMIN))
254690e33d45SPetar Penkov 			return -EPERM;
254790e33d45SPetar Penkov 
254890e33d45SPetar Penkov 		if (!(ifr->ifr_flags & IFF_NAPI) ||
254990e33d45SPetar Penkov 		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
255090e33d45SPetar Penkov 			return -EINVAL;
255190e33d45SPetar Penkov 	}
255290e33d45SPetar Penkov 
255374a3e5a7SEric W. Biederman 	dev = __dev_get_by_name(net, ifr->ifr_name);
255474a3e5a7SEric W. Biederman 	if (dev) {
2555f85ba780SDavid Woodhouse 		if (ifr->ifr_flags & IFF_TUN_EXCL)
2556f85ba780SDavid Woodhouse 			return -EBUSY;
255774a3e5a7SEric W. Biederman 		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
255874a3e5a7SEric W. Biederman 			tun = netdev_priv(dev);
255974a3e5a7SEric W. Biederman 		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
256074a3e5a7SEric W. Biederman 			tun = netdev_priv(dev);
256174a3e5a7SEric W. Biederman 		else
256274a3e5a7SEric W. Biederman 			return -EINVAL;
256374a3e5a7SEric W. Biederman 
25648e6d91aeSJason Wang 		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
256540630b82SMichael S. Tsirkin 		    !!(tun->flags & IFF_MULTI_QUEUE))
25668e6d91aeSJason Wang 			return -EINVAL;
25678e6d91aeSJason Wang 
2568cde8b15fSJason Wang 		if (tun_not_capable(tun))
25692b980dbdSPaul Moore 			return -EPERM;
25705dbbaf2dSPaul Moore 		err = security_tun_dev_open(tun->security);
25712b980dbdSPaul Moore 		if (err < 0)
25722b980dbdSPaul Moore 			return err;
25732b980dbdSPaul Moore 
257494317099SPetar Penkov 		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
257594317099SPetar Penkov 				 ifr->ifr_flags & IFF_NAPI);
2576a7385ba2SEric W. Biederman 		if (err < 0)
2577a7385ba2SEric W. Biederman 			return err;
25784008e97fSJason Wang 
257940630b82SMichael S. Tsirkin 		if (tun->flags & IFF_MULTI_QUEUE &&
2580e8dbad66SJason Wang 		    (tun->numqueues + tun->numdisabled > 1)) {
2581e8dbad66SJason Wang 			/* One or more queue has already been attached, no need
2582e8dbad66SJason Wang 			 * to initialize the device again.
2583e8dbad66SJason Wang 			 */
258483c1f36fSSabrina Dubroca 			netdev_state_change(dev);
2585e8dbad66SJason Wang 			return 0;
2586e8dbad66SJason Wang 		}
25879fffc5c6SSabrina Dubroca 
25889fffc5c6SSabrina Dubroca 		tun->flags = (tun->flags & ~TUN_FEATURES) |
25899fffc5c6SSabrina Dubroca 			      (ifr->ifr_flags & TUN_FEATURES);
259083c1f36fSSabrina Dubroca 
259183c1f36fSSabrina Dubroca 		netdev_state_change(dev);
259283c1f36fSSabrina Dubroca 	} else {
25931da177e4SLinus Torvalds 		char *name;
25941da177e4SLinus Torvalds 		unsigned long flags = 0;
2595edfb6a14SJason Wang 		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2596edfb6a14SJason Wang 			     MAX_TAP_QUEUES : 1;
25971da177e4SLinus Torvalds 
2598c260b772SEric W. Biederman 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2599ca6bb5d7SDavid Woodhouse 			return -EPERM;
26002b980dbdSPaul Moore 		err = security_tun_dev_create();
26012b980dbdSPaul Moore 		if (err < 0)
26022b980dbdSPaul Moore 			return err;
2603ca6bb5d7SDavid Woodhouse 
26041da177e4SLinus Torvalds 		/* Set dev type */
26051da177e4SLinus Torvalds 		if (ifr->ifr_flags & IFF_TUN) {
26061da177e4SLinus Torvalds 			/* TUN device */
260740630b82SMichael S. Tsirkin 			flags |= IFF_TUN;
26081da177e4SLinus Torvalds 			name = "tun%d";
26091da177e4SLinus Torvalds 		} else if (ifr->ifr_flags & IFF_TAP) {
26101da177e4SLinus Torvalds 			/* TAP device */
261140630b82SMichael S. Tsirkin 			flags |= IFF_TAP;
26121da177e4SLinus Torvalds 			name = "tap%d";
26131da177e4SLinus Torvalds 		} else
261436989b90SKusanagi Kouichi 			return -EINVAL;
26151da177e4SLinus Torvalds 
26161da177e4SLinus Torvalds 		if (*ifr->ifr_name)
26171da177e4SLinus Torvalds 			name = ifr->ifr_name;
26181da177e4SLinus Torvalds 
2619c8d68e6bSJason Wang 		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2620c835a677STom Gundersen 				       NET_NAME_UNKNOWN, tun_setup, queues,
2621c835a677STom Gundersen 				       queues);
2622edfb6a14SJason Wang 
26231da177e4SLinus Torvalds 		if (!dev)
26241da177e4SLinus Torvalds 			return -ENOMEM;
26250ad646c8SCong Wang 		err = dev_get_valid_name(net, dev, name);
26265c25f65fSJulien Gomes 		if (err < 0)
26270ad646c8SCong Wang 			goto err_free_dev;
26281da177e4SLinus Torvalds 
2629fc54c658SPavel Emelyanov 		dev_net_set(dev, net);
2630f019a7a5SEric W. Biederman 		dev->rtnl_link_ops = &tun_link_ops;
2631fb7589a1SPavel Emelyanov 		dev->ifindex = tfile->ifindex;
2632c4d33e24STakashi Iwai 		dev->sysfs_groups[0] = &tun_attr_group;
2633758e43b7SStephen Hemminger 
26341da177e4SLinus Torvalds 		tun = netdev_priv(dev);
26351da177e4SLinus Torvalds 		tun->dev = dev;
26361da177e4SLinus Torvalds 		tun->flags = flags;
2637f271b2ccSMax Krasnyansky 		tun->txflt.count = 0;
2638d9d52b51SMichael S. Tsirkin 		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
26391da177e4SLinus Torvalds 
2640eaea34b2SPaolo Abeni 		tun->align = NET_SKB_PAD;
264154f968d6SJason Wang 		tun->filter_attached = false;
264254f968d6SJason Wang 		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
26435503fcecSJason Wang 		tun->rx_batched = 0;
264496f84061SJason Wang 		RCU_INIT_POINTER(tun->steering_prog, NULL);
264533dccbb0SHerbert Xu 
2646608b9977SPaolo Abeni 		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
2647608b9977SPaolo Abeni 		if (!tun->pcpu_stats) {
2648608b9977SPaolo Abeni 			err = -ENOMEM;
2649608b9977SPaolo Abeni 			goto err_free_dev;
2650608b9977SPaolo Abeni 		}
2651608b9977SPaolo Abeni 
265296442e42SJason Wang 		spin_lock_init(&tun->lock);
265396442e42SJason Wang 
26545dbbaf2dSPaul Moore 		err = security_tun_dev_alloc_security(&tun->security);
26555dbbaf2dSPaul Moore 		if (err < 0)
2656608b9977SPaolo Abeni 			goto err_free_stat;
26572b980dbdSPaul Moore 
26581da177e4SLinus Torvalds 		tun_net_init(dev);
2659944a1376SPavel Emelyanov 		tun_flow_init(tun);
266096442e42SJason Wang 
266188255375SMichał Mirosław 		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
26626680ec68SJason Wang 				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
26636680ec68SJason Wang 				   NETIF_F_HW_VLAN_STAG_TX;
26642a2bbf17SPaolo Abeni 		dev->features = dev->hw_features | NETIF_F_LLTX;
26656671b224SFernando Luis Vazquez Cao 		dev->vlan_features = dev->features &
26666671b224SFernando Luis Vazquez Cao 				     ~(NETIF_F_HW_VLAN_CTAG_TX |
26676671b224SFernando Luis Vazquez Cao 				       NETIF_F_HW_VLAN_STAG_TX);
266888255375SMichał Mirosław 
26699fffc5c6SSabrina Dubroca 		tun->flags = (tun->flags & ~TUN_FEATURES) |
26709fffc5c6SSabrina Dubroca 			      (ifr->ifr_flags & TUN_FEATURES);
26719fffc5c6SSabrina Dubroca 
26724008e97fSJason Wang 		INIT_LIST_HEAD(&tun->disabled);
267394317099SPetar Penkov 		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
2674eb0fb363SJason Wang 		if (err < 0)
2675662ca437SJason Wang 			goto err_free_flow;
2676eb0fb363SJason Wang 
26771da177e4SLinus Torvalds 		err = register_netdevice(tun->dev);
26781da177e4SLinus Torvalds 		if (err < 0)
2679662ca437SJason Wang 			goto err_detach;
2680af668b3cSMichael S. Tsirkin 	}
2681980c9e8cSDavid Woodhouse 
2682eb0fb363SJason Wang 	netif_carrier_on(tun->dev);
26831da177e4SLinus Torvalds 
26846b8a66eeSJoe Perches 	tun_debug(KERN_INFO, tun, "tun_set_iff\n");
26851da177e4SLinus Torvalds 
2686e35259a9SMax Krasnyansky 	/* Make sure persistent devices do not get stuck in
2687e35259a9SMax Krasnyansky 	 * xoff state.
2688e35259a9SMax Krasnyansky 	 */
2689e35259a9SMax Krasnyansky 	if (netif_running(tun->dev))
2690c8d68e6bSJason Wang 		netif_tx_wake_all_queues(tun->dev);
2691e35259a9SMax Krasnyansky 
26921da177e4SLinus Torvalds 	strcpy(ifr->ifr_name, tun->dev->name);
26931da177e4SLinus Torvalds 	return 0;
26941da177e4SLinus Torvalds 
2695662ca437SJason Wang err_detach:
2696662ca437SJason Wang 	tun_detach_all(dev);
2697ff244c6bSEric Dumazet 	/* register_netdevice() already called tun_free_netdev() */
2698ff244c6bSEric Dumazet 	goto err_free_dev;
2699ff244c6bSEric Dumazet 
2700662ca437SJason Wang err_free_flow:
2701662ca437SJason Wang 	tun_flow_uninit(tun);
2702662ca437SJason Wang 	security_tun_dev_free_security(tun->security);
2703608b9977SPaolo Abeni err_free_stat:
2704608b9977SPaolo Abeni 	free_percpu(tun->pcpu_stats);
27051da177e4SLinus Torvalds err_free_dev:
27061da177e4SLinus Torvalds 	free_netdev(dev);
27071da177e4SLinus Torvalds 	return err;
27081da177e4SLinus Torvalds }
27091da177e4SLinus Torvalds 
27109ce99cf6SRami Rosen static void tun_get_iff(struct net *net, struct tun_struct *tun,
2711876bfd4dSHerbert Xu 		       struct ifreq *ifr)
2712e3b99556SMark McLoughlin {
27136b8a66eeSJoe Perches 	tun_debug(KERN_INFO, tun, "tun_get_iff\n");
2714e3b99556SMark McLoughlin 
2715e3b99556SMark McLoughlin 	strcpy(ifr->ifr_name, tun->dev->name);
2716e3b99556SMark McLoughlin 
2717980c9e8cSDavid Woodhouse 	ifr->ifr_flags = tun_flags(tun);
2718e3b99556SMark McLoughlin 
2719e3b99556SMark McLoughlin }
2720e3b99556SMark McLoughlin 
27215228ddc9SRusty Russell /* This is like a cut-down ethtool ops, except done via tun fd so no
27225228ddc9SRusty Russell  * privs required. */
272388255375SMichał Mirosław static int set_offload(struct tun_struct *tun, unsigned long arg)
27245228ddc9SRusty Russell {
2725c8f44affSMichał Mirosław 	netdev_features_t features = 0;
27265228ddc9SRusty Russell 
27275228ddc9SRusty Russell 	if (arg & TUN_F_CSUM) {
272888255375SMichał Mirosław 		features |= NETIF_F_HW_CSUM;
27295228ddc9SRusty Russell 		arg &= ~TUN_F_CSUM;
27305228ddc9SRusty Russell 
27315228ddc9SRusty Russell 		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
27325228ddc9SRusty Russell 			if (arg & TUN_F_TSO_ECN) {
27335228ddc9SRusty Russell 				features |= NETIF_F_TSO_ECN;
27345228ddc9SRusty Russell 				arg &= ~TUN_F_TSO_ECN;
27355228ddc9SRusty Russell 			}
27365228ddc9SRusty Russell 			if (arg & TUN_F_TSO4)
27375228ddc9SRusty Russell 				features |= NETIF_F_TSO;
27385228ddc9SRusty Russell 			if (arg & TUN_F_TSO6)
27395228ddc9SRusty Russell 				features |= NETIF_F_TSO6;
27405228ddc9SRusty Russell 			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
27415228ddc9SRusty Russell 		}
27420c19f846SWillem de Bruijn 
27430c19f846SWillem de Bruijn 		arg &= ~TUN_F_UFO;
27445228ddc9SRusty Russell 	}
27455228ddc9SRusty Russell 
27465228ddc9SRusty Russell 	/* This gives the user a way to test for new features in future by
27475228ddc9SRusty Russell 	 * trying to set them. */
27485228ddc9SRusty Russell 	if (arg)
27495228ddc9SRusty Russell 		return -EINVAL;
27505228ddc9SRusty Russell 
275188255375SMichał Mirosław 	tun->set_features = features;
275209050957SYaroslav Isakov 	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
275309050957SYaroslav Isakov 	tun->dev->wanted_features |= features;
275488255375SMichał Mirosław 	netdev_update_features(tun->dev);
27555228ddc9SRusty Russell 
27565228ddc9SRusty Russell 	return 0;
27575228ddc9SRusty Russell }
27585228ddc9SRusty Russell 
2759c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n)
2760c8d68e6bSJason Wang {
2761c8d68e6bSJason Wang 	int i;
2762c8d68e6bSJason Wang 	struct tun_file *tfile;
2763c8d68e6bSJason Wang 
2764c8d68e6bSJason Wang 	for (i = 0; i < n; i++) {
2765b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
27668ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
27678ced425eSHannes Frederic Sowa 		sk_detach_filter(tfile->socket.sk);
27688ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
2769c8d68e6bSJason Wang 	}
2770c8d68e6bSJason Wang 
2771c8d68e6bSJason Wang 	tun->filter_attached = false;
2772c8d68e6bSJason Wang }
2773c8d68e6bSJason Wang 
2774c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun)
2775c8d68e6bSJason Wang {
2776c8d68e6bSJason Wang 	int i, ret = 0;
2777c8d68e6bSJason Wang 	struct tun_file *tfile;
2778c8d68e6bSJason Wang 
2779c8d68e6bSJason Wang 	for (i = 0; i < tun->numqueues; i++) {
2780b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
27818ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
27828ced425eSHannes Frederic Sowa 		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
27838ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
2784c8d68e6bSJason Wang 		if (ret) {
2785c8d68e6bSJason Wang 			tun_detach_filter(tun, i);
2786c8d68e6bSJason Wang 			return ret;
2787c8d68e6bSJason Wang 		}
2788c8d68e6bSJason Wang 	}
2789c8d68e6bSJason Wang 
2790c8d68e6bSJason Wang 	tun->filter_attached = true;
2791c8d68e6bSJason Wang 	return ret;
2792c8d68e6bSJason Wang }
2793c8d68e6bSJason Wang 
2794c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun)
2795c8d68e6bSJason Wang {
2796c8d68e6bSJason Wang 	struct tun_file *tfile;
2797c8d68e6bSJason Wang 	int i;
2798c8d68e6bSJason Wang 
2799c8d68e6bSJason Wang 	for (i = 0; i < tun->numqueues; i++) {
2800b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
2801c8d68e6bSJason Wang 		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2802c8d68e6bSJason Wang 	}
2803c8d68e6bSJason Wang }
2804c8d68e6bSJason Wang 
2805cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr)
2806cde8b15fSJason Wang {
2807cde8b15fSJason Wang 	struct tun_file *tfile = file->private_data;
2808cde8b15fSJason Wang 	struct tun_struct *tun;
2809cde8b15fSJason Wang 	int ret = 0;
2810cde8b15fSJason Wang 
2811cde8b15fSJason Wang 	rtnl_lock();
2812cde8b15fSJason Wang 
2813cde8b15fSJason Wang 	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
28144008e97fSJason Wang 		tun = tfile->detached;
28155dbbaf2dSPaul Moore 		if (!tun) {
2816cde8b15fSJason Wang 			ret = -EINVAL;
28175dbbaf2dSPaul Moore 			goto unlock;
28185dbbaf2dSPaul Moore 		}
28195dbbaf2dSPaul Moore 		ret = security_tun_dev_attach_queue(tun->security);
28205dbbaf2dSPaul Moore 		if (ret < 0)
28215dbbaf2dSPaul Moore 			goto unlock;
282294317099SPetar Penkov 		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
28234008e97fSJason Wang 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2824b8deabd3SJason Wang 		tun = rtnl_dereference(tfile->tun);
282540630b82SMichael S. Tsirkin 		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
28264008e97fSJason Wang 			ret = -EINVAL;
2827cde8b15fSJason Wang 		else
28284008e97fSJason Wang 			__tun_detach(tfile, false);
28294008e97fSJason Wang 	} else
2830cde8b15fSJason Wang 		ret = -EINVAL;
2831cde8b15fSJason Wang 
283283c1f36fSSabrina Dubroca 	if (ret >= 0)
283383c1f36fSSabrina Dubroca 		netdev_state_change(tun->dev);
283483c1f36fSSabrina Dubroca 
28355dbbaf2dSPaul Moore unlock:
2836cde8b15fSJason Wang 	rtnl_unlock();
2837cde8b15fSJason Wang 	return ret;
2838cde8b15fSJason Wang }
2839cde8b15fSJason Wang 
2840cd5681d7SJason Wang static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
2841cd5681d7SJason Wang 			void __user *data)
284296f84061SJason Wang {
284396f84061SJason Wang 	struct bpf_prog *prog;
284496f84061SJason Wang 	int fd;
284596f84061SJason Wang 
284696f84061SJason Wang 	if (copy_from_user(&fd, data, sizeof(fd)))
284796f84061SJason Wang 		return -EFAULT;
284896f84061SJason Wang 
284996f84061SJason Wang 	if (fd == -1) {
285096f84061SJason Wang 		prog = NULL;
285196f84061SJason Wang 	} else {
285296f84061SJason Wang 		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
285396f84061SJason Wang 		if (IS_ERR(prog))
285496f84061SJason Wang 			return PTR_ERR(prog);
285596f84061SJason Wang 	}
285696f84061SJason Wang 
2857cd5681d7SJason Wang 	return __tun_set_ebpf(tun, prog_p, prog);
285896f84061SJason Wang }
285996f84061SJason Wang 
286050857e2aSArnd Bergmann static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
286150857e2aSArnd Bergmann 			    unsigned long arg, int ifreq_len)
28621da177e4SLinus Torvalds {
286336b50babSEric W. Biederman 	struct tun_file *tfile = file->private_data;
2864f663706aSKirill Tkhai 	struct net *net = sock_net(&tfile->sk);
2865631ab46bSEric W. Biederman 	struct tun_struct *tun;
28661da177e4SLinus Torvalds 	void __user* argp = (void __user*)arg;
28671da177e4SLinus Torvalds 	struct ifreq ifr;
28680625c883SEric W. Biederman 	kuid_t owner;
28690625c883SEric W. Biederman 	kgid_t group;
287033dccbb0SHerbert Xu 	int sndbuf;
2871d9d52b51SMichael S. Tsirkin 	int vnet_hdr_sz;
2872fb7589a1SPavel Emelyanov 	unsigned int ifindex;
28731cf8e410SMichael S. Tsirkin 	int le;
2874f271b2ccSMax Krasnyansky 	int ret;
287583c1f36fSSabrina Dubroca 	bool do_notify = false;
28761da177e4SLinus Torvalds 
2877f2780d6dSKirill Tkhai 	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
2878f2780d6dSKirill Tkhai 	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
287950857e2aSArnd Bergmann 		if (copy_from_user(&ifr, argp, ifreq_len))
28801da177e4SLinus Torvalds 			return -EFAULT;
28818bbb1813SDavid S. Miller 	} else {
2882a117dacdSMathias Krause 		memset(&ifr, 0, sizeof(ifr));
28838bbb1813SDavid S. Miller 	}
2884631ab46bSEric W. Biederman 	if (cmd == TUNGETFEATURES) {
2885631ab46bSEric W. Biederman 		/* Currently this just means: "what IFF flags are valid?".
2886631ab46bSEric W. Biederman 		 * This is needed because we never checked for invalid flags on
2887031f5e03SMichael S. Tsirkin 		 * TUNSETIFF.
2888031f5e03SMichael S. Tsirkin 		 */
2889031f5e03SMichael S. Tsirkin 		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
2890631ab46bSEric W. Biederman 				(unsigned int __user*)argp);
2891f663706aSKirill Tkhai 	} else if (cmd == TUNSETQUEUE) {
2892cde8b15fSJason Wang 		return tun_set_queue(file, &ifr);
2893f663706aSKirill Tkhai 	} else if (cmd == SIOCGSKNS) {
2894f663706aSKirill Tkhai 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2895f663706aSKirill Tkhai 			return -EPERM;
2896f663706aSKirill Tkhai 		return open_related_ns(&net->ns, get_net_ns);
2897f663706aSKirill Tkhai 	}
2898631ab46bSEric W. Biederman 
2899c8d68e6bSJason Wang 	ret = 0;
2900876bfd4dSHerbert Xu 	rtnl_lock();
2901876bfd4dSHerbert Xu 
29029484dc74Syuan linyu 	tun = tun_get(tfile);
29030f16bc13SGao Feng 	if (cmd == TUNSETIFF) {
29040f16bc13SGao Feng 		ret = -EEXIST;
29050f16bc13SGao Feng 		if (tun)
29060f16bc13SGao Feng 			goto unlock;
29070f16bc13SGao Feng 
29081da177e4SLinus Torvalds 		ifr.ifr_name[IFNAMSIZ-1] = '\0';
29091da177e4SLinus Torvalds 
2910f2780d6dSKirill Tkhai 		ret = tun_set_iff(net, file, &ifr);
29111da177e4SLinus Torvalds 
2912876bfd4dSHerbert Xu 		if (ret)
2913876bfd4dSHerbert Xu 			goto unlock;
29141da177e4SLinus Torvalds 
291550857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
2916876bfd4dSHerbert Xu 			ret = -EFAULT;
2917876bfd4dSHerbert Xu 		goto unlock;
29181da177e4SLinus Torvalds 	}
2919fb7589a1SPavel Emelyanov 	if (cmd == TUNSETIFINDEX) {
2920fb7589a1SPavel Emelyanov 		ret = -EPERM;
2921fb7589a1SPavel Emelyanov 		if (tun)
2922fb7589a1SPavel Emelyanov 			goto unlock;
2923fb7589a1SPavel Emelyanov 
2924fb7589a1SPavel Emelyanov 		ret = -EFAULT;
2925fb7589a1SPavel Emelyanov 		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
2926fb7589a1SPavel Emelyanov 			goto unlock;
2927fb7589a1SPavel Emelyanov 
2928fb7589a1SPavel Emelyanov 		ret = 0;
2929fb7589a1SPavel Emelyanov 		tfile->ifindex = ifindex;
2930fb7589a1SPavel Emelyanov 		goto unlock;
2931fb7589a1SPavel Emelyanov 	}
29321da177e4SLinus Torvalds 
2933876bfd4dSHerbert Xu 	ret = -EBADFD;
29341da177e4SLinus Torvalds 	if (!tun)
2935876bfd4dSHerbert Xu 		goto unlock;
29361da177e4SLinus Torvalds 
29371e588338SJason Wang 	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
29381da177e4SLinus Torvalds 
2939631ab46bSEric W. Biederman 	ret = 0;
29401da177e4SLinus Torvalds 	switch (cmd) {
2941e3b99556SMark McLoughlin 	case TUNGETIFF:
29429ce99cf6SRami Rosen 		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
2943e3b99556SMark McLoughlin 
29443d407a80SPavel Emelyanov 		if (tfile->detached)
29453d407a80SPavel Emelyanov 			ifr.ifr_flags |= IFF_DETACH_QUEUE;
2946849c9b6fSPavel Emelyanov 		if (!tfile->socket.sk->sk_filter)
2947849c9b6fSPavel Emelyanov 			ifr.ifr_flags |= IFF_NOFILTER;
29483d407a80SPavel Emelyanov 
294950857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
2950631ab46bSEric W. Biederman 			ret = -EFAULT;
2951e3b99556SMark McLoughlin 		break;
2952e3b99556SMark McLoughlin 
29531da177e4SLinus Torvalds 	case TUNSETNOCSUM:
29541da177e4SLinus Torvalds 		/* Disable/Enable checksum */
29551da177e4SLinus Torvalds 
295688255375SMichał Mirosław 		/* [unimplemented] */
295788255375SMichał Mirosław 		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
29586b8a66eeSJoe Perches 			  arg ? "disabled" : "enabled");
29591da177e4SLinus Torvalds 		break;
29601da177e4SLinus Torvalds 
29611da177e4SLinus Torvalds 	case TUNSETPERSIST:
296254f968d6SJason Wang 		/* Disable/Enable persist mode. Keep an extra reference to the
296354f968d6SJason Wang 		 * module to prevent the module being unprobed.
296454f968d6SJason Wang 		 */
296540630b82SMichael S. Tsirkin 		if (arg && !(tun->flags & IFF_PERSIST)) {
296640630b82SMichael S. Tsirkin 			tun->flags |= IFF_PERSIST;
296754f968d6SJason Wang 			__module_get(THIS_MODULE);
296883c1f36fSSabrina Dubroca 			do_notify = true;
2969dd38bd85SJason Wang 		}
297040630b82SMichael S. Tsirkin 		if (!arg && (tun->flags & IFF_PERSIST)) {
297140630b82SMichael S. Tsirkin 			tun->flags &= ~IFF_PERSIST;
297254f968d6SJason Wang 			module_put(THIS_MODULE);
297383c1f36fSSabrina Dubroca 			do_notify = true;
297454f968d6SJason Wang 		}
29751da177e4SLinus Torvalds 
29766b8a66eeSJoe Perches 		tun_debug(KERN_INFO, tun, "persist %s\n",
29776b8a66eeSJoe Perches 			  arg ? "enabled" : "disabled");
29781da177e4SLinus Torvalds 		break;
29791da177e4SLinus Torvalds 
29801da177e4SLinus Torvalds 	case TUNSETOWNER:
29811da177e4SLinus Torvalds 		/* Set owner of the device */
29820625c883SEric W. Biederman 		owner = make_kuid(current_user_ns(), arg);
29830625c883SEric W. Biederman 		if (!uid_valid(owner)) {
29840625c883SEric W. Biederman 			ret = -EINVAL;
29850625c883SEric W. Biederman 			break;
29860625c883SEric W. Biederman 		}
29870625c883SEric W. Biederman 		tun->owner = owner;
298883c1f36fSSabrina Dubroca 		do_notify = true;
29891e588338SJason Wang 		tun_debug(KERN_INFO, tun, "owner set to %u\n",
29900625c883SEric W. Biederman 			  from_kuid(&init_user_ns, tun->owner));
29911da177e4SLinus Torvalds 		break;
29921da177e4SLinus Torvalds 
29938c644623SGuido Guenther 	case TUNSETGROUP:
29948c644623SGuido Guenther 		/* Set group of the device */
29950625c883SEric W. Biederman 		group = make_kgid(current_user_ns(), arg);
29960625c883SEric W. Biederman 		if (!gid_valid(group)) {
29970625c883SEric W. Biederman 			ret = -EINVAL;
29980625c883SEric W. Biederman 			break;
29990625c883SEric W. Biederman 		}
30000625c883SEric W. Biederman 		tun->group = group;
300183c1f36fSSabrina Dubroca 		do_notify = true;
30021e588338SJason Wang 		tun_debug(KERN_INFO, tun, "group set to %u\n",
30030625c883SEric W. Biederman 			  from_kgid(&init_user_ns, tun->group));
30048c644623SGuido Guenther 		break;
30058c644623SGuido Guenther 
3006ff4cc3acSMike Kershaw 	case TUNSETLINK:
3007ff4cc3acSMike Kershaw 		/* Only allow setting the type when the interface is down */
3008ff4cc3acSMike Kershaw 		if (tun->dev->flags & IFF_UP) {
30096b8a66eeSJoe Perches 			tun_debug(KERN_INFO, tun,
30106b8a66eeSJoe Perches 				  "Linktype set failed because interface is up\n");
301148abfe05SDavid S. Miller 			ret = -EBUSY;
3012ff4cc3acSMike Kershaw 		} else {
3013ff4cc3acSMike Kershaw 			tun->dev->type = (int) arg;
30146b8a66eeSJoe Perches 			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
30156b8a66eeSJoe Perches 				  tun->dev->type);
301648abfe05SDavid S. Miller 			ret = 0;
3017ff4cc3acSMike Kershaw 		}
3018631ab46bSEric W. Biederman 		break;
3019ff4cc3acSMike Kershaw 
30201da177e4SLinus Torvalds #ifdef TUN_DEBUG
30211da177e4SLinus Torvalds 	case TUNSETDEBUG:
30221da177e4SLinus Torvalds 		tun->debug = arg;
30231da177e4SLinus Torvalds 		break;
30241da177e4SLinus Torvalds #endif
30255228ddc9SRusty Russell 	case TUNSETOFFLOAD:
302688255375SMichał Mirosław 		ret = set_offload(tun, arg);
3027631ab46bSEric W. Biederman 		break;
30285228ddc9SRusty Russell 
3029f271b2ccSMax Krasnyansky 	case TUNSETTXFILTER:
3030f271b2ccSMax Krasnyansky 		/* Can be set only for TAPs */
3031631ab46bSEric W. Biederman 		ret = -EINVAL;
303240630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3033631ab46bSEric W. Biederman 			break;
3034c0e5a8c2SHarvey Harrison 		ret = update_filter(&tun->txflt, (void __user *)arg);
3035631ab46bSEric W. Biederman 		break;
30361da177e4SLinus Torvalds 
30371da177e4SLinus Torvalds 	case SIOCGIFHWADDR:
3038b595076aSUwe Kleine-König 		/* Get hw address */
3039f271b2ccSMax Krasnyansky 		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
3040f271b2ccSMax Krasnyansky 		ifr.ifr_hwaddr.sa_family = tun->dev->type;
304150857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
3042631ab46bSEric W. Biederman 			ret = -EFAULT;
3043631ab46bSEric W. Biederman 		break;
30441da177e4SLinus Torvalds 
30451da177e4SLinus Torvalds 	case SIOCSIFHWADDR:
3046f271b2ccSMax Krasnyansky 		/* Set hw address */
30476b8a66eeSJoe Perches 		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
30486b8a66eeSJoe Perches 			  ifr.ifr_hwaddr.sa_data);
304940102371SKim B. Heino 
305040102371SKim B. Heino 		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
3051631ab46bSEric W. Biederman 		break;
305233dccbb0SHerbert Xu 
305333dccbb0SHerbert Xu 	case TUNGETSNDBUF:
305454f968d6SJason Wang 		sndbuf = tfile->socket.sk->sk_sndbuf;
305533dccbb0SHerbert Xu 		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
305633dccbb0SHerbert Xu 			ret = -EFAULT;
305733dccbb0SHerbert Xu 		break;
305833dccbb0SHerbert Xu 
305933dccbb0SHerbert Xu 	case TUNSETSNDBUF:
306033dccbb0SHerbert Xu 		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
306133dccbb0SHerbert Xu 			ret = -EFAULT;
306233dccbb0SHerbert Xu 			break;
306333dccbb0SHerbert Xu 		}
306493161922SCraig Gallek 		if (sndbuf <= 0) {
306593161922SCraig Gallek 			ret = -EINVAL;
306693161922SCraig Gallek 			break;
306793161922SCraig Gallek 		}
306833dccbb0SHerbert Xu 
3069c8d68e6bSJason Wang 		tun->sndbuf = sndbuf;
3070c8d68e6bSJason Wang 		tun_set_sndbuf(tun);
307133dccbb0SHerbert Xu 		break;
307233dccbb0SHerbert Xu 
3073d9d52b51SMichael S. Tsirkin 	case TUNGETVNETHDRSZ:
3074d9d52b51SMichael S. Tsirkin 		vnet_hdr_sz = tun->vnet_hdr_sz;
3075d9d52b51SMichael S. Tsirkin 		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
3076d9d52b51SMichael S. Tsirkin 			ret = -EFAULT;
3077d9d52b51SMichael S. Tsirkin 		break;
3078d9d52b51SMichael S. Tsirkin 
3079d9d52b51SMichael S. Tsirkin 	case TUNSETVNETHDRSZ:
3080d9d52b51SMichael S. Tsirkin 		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
3081d9d52b51SMichael S. Tsirkin 			ret = -EFAULT;
3082d9d52b51SMichael S. Tsirkin 			break;
3083d9d52b51SMichael S. Tsirkin 		}
3084d9d52b51SMichael S. Tsirkin 		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
3085d9d52b51SMichael S. Tsirkin 			ret = -EINVAL;
3086d9d52b51SMichael S. Tsirkin 			break;
3087d9d52b51SMichael S. Tsirkin 		}
3088d9d52b51SMichael S. Tsirkin 
3089d9d52b51SMichael S. Tsirkin 		tun->vnet_hdr_sz = vnet_hdr_sz;
3090d9d52b51SMichael S. Tsirkin 		break;
3091d9d52b51SMichael S. Tsirkin 
30921cf8e410SMichael S. Tsirkin 	case TUNGETVNETLE:
30931cf8e410SMichael S. Tsirkin 		le = !!(tun->flags & TUN_VNET_LE);
30941cf8e410SMichael S. Tsirkin 		if (put_user(le, (int __user *)argp))
30951cf8e410SMichael S. Tsirkin 			ret = -EFAULT;
30961cf8e410SMichael S. Tsirkin 		break;
30971cf8e410SMichael S. Tsirkin 
30981cf8e410SMichael S. Tsirkin 	case TUNSETVNETLE:
30991cf8e410SMichael S. Tsirkin 		if (get_user(le, (int __user *)argp)) {
31001cf8e410SMichael S. Tsirkin 			ret = -EFAULT;
31011cf8e410SMichael S. Tsirkin 			break;
31021cf8e410SMichael S. Tsirkin 		}
31031cf8e410SMichael S. Tsirkin 		if (le)
31041cf8e410SMichael S. Tsirkin 			tun->flags |= TUN_VNET_LE;
31051cf8e410SMichael S. Tsirkin 		else
31061cf8e410SMichael S. Tsirkin 			tun->flags &= ~TUN_VNET_LE;
31071cf8e410SMichael S. Tsirkin 		break;
31081cf8e410SMichael S. Tsirkin 
31098b8e658bSGreg Kurz 	case TUNGETVNETBE:
31108b8e658bSGreg Kurz 		ret = tun_get_vnet_be(tun, argp);
31118b8e658bSGreg Kurz 		break;
31128b8e658bSGreg Kurz 
31138b8e658bSGreg Kurz 	case TUNSETVNETBE:
31148b8e658bSGreg Kurz 		ret = tun_set_vnet_be(tun, argp);
31158b8e658bSGreg Kurz 		break;
31168b8e658bSGreg Kurz 
311799405162SMichael S. Tsirkin 	case TUNATTACHFILTER:
311899405162SMichael S. Tsirkin 		/* Can be set only for TAPs */
311999405162SMichael S. Tsirkin 		ret = -EINVAL;
312040630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
312199405162SMichael S. Tsirkin 			break;
312299405162SMichael S. Tsirkin 		ret = -EFAULT;
312354f968d6SJason Wang 		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
312499405162SMichael S. Tsirkin 			break;
312599405162SMichael S. Tsirkin 
3126c8d68e6bSJason Wang 		ret = tun_attach_filter(tun);
312799405162SMichael S. Tsirkin 		break;
312899405162SMichael S. Tsirkin 
312999405162SMichael S. Tsirkin 	case TUNDETACHFILTER:
313099405162SMichael S. Tsirkin 		/* Can be set only for TAPs */
313199405162SMichael S. Tsirkin 		ret = -EINVAL;
313240630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
313399405162SMichael S. Tsirkin 			break;
3134c8d68e6bSJason Wang 		ret = 0;
3135c8d68e6bSJason Wang 		tun_detach_filter(tun, tun->numqueues);
313699405162SMichael S. Tsirkin 		break;
313799405162SMichael S. Tsirkin 
313876975e9cSPavel Emelyanov 	case TUNGETFILTER:
313976975e9cSPavel Emelyanov 		ret = -EINVAL;
314040630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
314176975e9cSPavel Emelyanov 			break;
314276975e9cSPavel Emelyanov 		ret = -EFAULT;
314376975e9cSPavel Emelyanov 		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
314476975e9cSPavel Emelyanov 			break;
314576975e9cSPavel Emelyanov 		ret = 0;
314676975e9cSPavel Emelyanov 		break;
314776975e9cSPavel Emelyanov 
314896f84061SJason Wang 	case TUNSETSTEERINGEBPF:
3149cd5681d7SJason Wang 		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
315096f84061SJason Wang 		break;
315196f84061SJason Wang 
3152aff3d70aSJason Wang 	case TUNSETFILTEREBPF:
3153aff3d70aSJason Wang 		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3154aff3d70aSJason Wang 		break;
3155aff3d70aSJason Wang 
31561da177e4SLinus Torvalds 	default:
3157631ab46bSEric W. Biederman 		ret = -EINVAL;
3158631ab46bSEric W. Biederman 		break;
3159ee289b64SJoe Perches 	}
31601da177e4SLinus Torvalds 
316183c1f36fSSabrina Dubroca 	if (do_notify)
316283c1f36fSSabrina Dubroca 		netdev_state_change(tun->dev);
316383c1f36fSSabrina Dubroca 
3164876bfd4dSHerbert Xu unlock:
3165876bfd4dSHerbert Xu 	rtnl_unlock();
3166876bfd4dSHerbert Xu 	if (tun)
3167631ab46bSEric W. Biederman 		tun_put(tun);
3168631ab46bSEric W. Biederman 	return ret;
31691da177e4SLinus Torvalds }
31701da177e4SLinus Torvalds 
317150857e2aSArnd Bergmann static long tun_chr_ioctl(struct file *file,
317250857e2aSArnd Bergmann 			  unsigned int cmd, unsigned long arg)
317350857e2aSArnd Bergmann {
317450857e2aSArnd Bergmann 	return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
317550857e2aSArnd Bergmann }
317650857e2aSArnd Bergmann 
317750857e2aSArnd Bergmann #ifdef CONFIG_COMPAT
317850857e2aSArnd Bergmann static long tun_chr_compat_ioctl(struct file *file,
317950857e2aSArnd Bergmann 			 unsigned int cmd, unsigned long arg)
318050857e2aSArnd Bergmann {
318150857e2aSArnd Bergmann 	switch (cmd) {
318250857e2aSArnd Bergmann 	case TUNSETIFF:
318350857e2aSArnd Bergmann 	case TUNGETIFF:
318450857e2aSArnd Bergmann 	case TUNSETTXFILTER:
318550857e2aSArnd Bergmann 	case TUNGETSNDBUF:
318650857e2aSArnd Bergmann 	case TUNSETSNDBUF:
318750857e2aSArnd Bergmann 	case SIOCGIFHWADDR:
318850857e2aSArnd Bergmann 	case SIOCSIFHWADDR:
318950857e2aSArnd Bergmann 		arg = (unsigned long)compat_ptr(arg);
319050857e2aSArnd Bergmann 		break;
319150857e2aSArnd Bergmann 	default:
319250857e2aSArnd Bergmann 		arg = (compat_ulong_t)arg;
319350857e2aSArnd Bergmann 		break;
319450857e2aSArnd Bergmann 	}
319550857e2aSArnd Bergmann 
319650857e2aSArnd Bergmann 	/*
319750857e2aSArnd Bergmann 	 * compat_ifreq is shorter than ifreq, so we must not access beyond
319850857e2aSArnd Bergmann 	 * the end of that structure. All fields that are used in this
319950857e2aSArnd Bergmann 	 * driver are compatible though, we don't need to convert the
320050857e2aSArnd Bergmann 	 * contents.
320150857e2aSArnd Bergmann 	 */
320250857e2aSArnd Bergmann 	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
320350857e2aSArnd Bergmann }
320450857e2aSArnd Bergmann #endif /* CONFIG_COMPAT */
320550857e2aSArnd Bergmann 
32061da177e4SLinus Torvalds static int tun_chr_fasync(int fd, struct file *file, int on)
32071da177e4SLinus Torvalds {
320854f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
32091da177e4SLinus Torvalds 	int ret;
32101da177e4SLinus Torvalds 
321154f968d6SJason Wang 	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
32129d319522SJonathan Corbet 		goto out;
32131da177e4SLinus Torvalds 
32141da177e4SLinus Torvalds 	if (on) {
3215e0b93eddSJeff Layton 		__f_setown(file, task_pid(current), PIDTYPE_PID, 0);
321654f968d6SJason Wang 		tfile->flags |= TUN_FASYNC;
32171da177e4SLinus Torvalds 	} else
321854f968d6SJason Wang 		tfile->flags &= ~TUN_FASYNC;
32199d319522SJonathan Corbet 	ret = 0;
32209d319522SJonathan Corbet out:
32219d319522SJonathan Corbet 	return ret;
32221da177e4SLinus Torvalds }
32231da177e4SLinus Torvalds 
32241da177e4SLinus Torvalds static int tun_chr_open(struct inode *inode, struct file * file)
32251da177e4SLinus Torvalds {
3226140e807dSEric W. Biederman 	struct net *net = current->nsproxy->net_ns;
3227631ab46bSEric W. Biederman 	struct tun_file *tfile;
3228deed49fbSThomas Gleixner 
32296b8a66eeSJoe Perches 	DBG1(KERN_INFO, "tunX: tun_chr_open\n");
3230631ab46bSEric W. Biederman 
3231140e807dSEric W. Biederman 	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
323211aa9c28SEric W. Biederman 					    &tun_proto, 0);
3233631ab46bSEric W. Biederman 	if (!tfile)
3234631ab46bSEric W. Biederman 		return -ENOMEM;
3235c956674bSMonam Agarwal 	RCU_INIT_POINTER(tfile->tun, NULL);
323654f968d6SJason Wang 	tfile->flags = 0;
3237fb7589a1SPavel Emelyanov 	tfile->ifindex = 0;
323854f968d6SJason Wang 
323954f968d6SJason Wang 	init_waitqueue_head(&tfile->wq.wait);
32409e641bdcSXi Wang 	RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);
324154f968d6SJason Wang 
324254f968d6SJason Wang 	tfile->socket.file = file;
324354f968d6SJason Wang 	tfile->socket.ops = &tun_socket_ops;
324454f968d6SJason Wang 
324554f968d6SJason Wang 	sock_init_data(&tfile->socket, &tfile->sk);
324654f968d6SJason Wang 
324754f968d6SJason Wang 	tfile->sk.sk_write_space = tun_sock_write_space;
324854f968d6SJason Wang 	tfile->sk.sk_sndbuf = INT_MAX;
324954f968d6SJason Wang 
3250631ab46bSEric W. Biederman 	file->private_data = tfile;
32514008e97fSJason Wang 	INIT_LIST_HEAD(&tfile->next);
325254f968d6SJason Wang 
325319a6afb2SJason Wang 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
325419a6afb2SJason Wang 
32558565d26bSDavid S. Miller 	memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
32564df0bfc7SCong Wang 
32571da177e4SLinus Torvalds 	return 0;
32581da177e4SLinus Torvalds }
32591da177e4SLinus Torvalds 
32601da177e4SLinus Torvalds static int tun_chr_close(struct inode *inode, struct file *file)
32611da177e4SLinus Torvalds {
3262631ab46bSEric W. Biederman 	struct tun_file *tfile = file->private_data;
32631da177e4SLinus Torvalds 
3264c8d68e6bSJason Wang 	tun_detach(tfile, true);
32651da177e4SLinus Torvalds 
32661da177e4SLinus Torvalds 	return 0;
32671da177e4SLinus Torvalds }
32681da177e4SLinus Torvalds 
326993e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS
32709484dc74Syuan linyu static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
327193e14b6dSMasatake YAMATO {
32729484dc74Syuan linyu 	struct tun_file *tfile = file->private_data;
327393e14b6dSMasatake YAMATO 	struct tun_struct *tun;
327493e14b6dSMasatake YAMATO 	struct ifreq ifr;
327593e14b6dSMasatake YAMATO 
327693e14b6dSMasatake YAMATO 	memset(&ifr, 0, sizeof(ifr));
327793e14b6dSMasatake YAMATO 
327893e14b6dSMasatake YAMATO 	rtnl_lock();
32799484dc74Syuan linyu 	tun = tun_get(tfile);
328093e14b6dSMasatake YAMATO 	if (tun)
328193e14b6dSMasatake YAMATO 		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
328293e14b6dSMasatake YAMATO 	rtnl_unlock();
328393e14b6dSMasatake YAMATO 
328493e14b6dSMasatake YAMATO 	if (tun)
328593e14b6dSMasatake YAMATO 		tun_put(tun);
328693e14b6dSMasatake YAMATO 
3287a3816ab0SJoe Perches 	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
328893e14b6dSMasatake YAMATO }
328993e14b6dSMasatake YAMATO #endif
329093e14b6dSMasatake YAMATO 
3291d54b1fdbSArjan van de Ven static const struct file_operations tun_fops = {
32921da177e4SLinus Torvalds 	.owner	= THIS_MODULE,
32931da177e4SLinus Torvalds 	.llseek = no_llseek,
32949b067034SAl Viro 	.read_iter  = tun_chr_read_iter,
3295f5ff53b4SAl Viro 	.write_iter = tun_chr_write_iter,
32961da177e4SLinus Torvalds 	.poll	= tun_chr_poll,
3297876bfd4dSHerbert Xu 	.unlocked_ioctl	= tun_chr_ioctl,
329850857e2aSArnd Bergmann #ifdef CONFIG_COMPAT
329950857e2aSArnd Bergmann 	.compat_ioctl = tun_chr_compat_ioctl,
330050857e2aSArnd Bergmann #endif
33011da177e4SLinus Torvalds 	.open	= tun_chr_open,
33021da177e4SLinus Torvalds 	.release = tun_chr_close,
330393e14b6dSMasatake YAMATO 	.fasync = tun_chr_fasync,
330493e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS
330593e14b6dSMasatake YAMATO 	.show_fdinfo = tun_chr_show_fdinfo,
330693e14b6dSMasatake YAMATO #endif
33071da177e4SLinus Torvalds };
33081da177e4SLinus Torvalds 
33091da177e4SLinus Torvalds static struct miscdevice tun_miscdev = {
33101da177e4SLinus Torvalds 	.minor = TUN_MINOR,
33111da177e4SLinus Torvalds 	.name = "tun",
3312e454cea2SKay Sievers 	.nodename = "net/tun",
33131da177e4SLinus Torvalds 	.fops = &tun_fops,
33141da177e4SLinus Torvalds };
33151da177e4SLinus Torvalds 
33161da177e4SLinus Torvalds /* ethtool interface */
33171da177e4SLinus Torvalds 
331829ccc49dSPhilippe Reynes static int tun_get_link_ksettings(struct net_device *dev,
331929ccc49dSPhilippe Reynes 				  struct ethtool_link_ksettings *cmd)
33201da177e4SLinus Torvalds {
332129ccc49dSPhilippe Reynes 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
332229ccc49dSPhilippe Reynes 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
332329ccc49dSPhilippe Reynes 	cmd->base.speed		= SPEED_10;
332429ccc49dSPhilippe Reynes 	cmd->base.duplex	= DUPLEX_FULL;
332529ccc49dSPhilippe Reynes 	cmd->base.port		= PORT_TP;
332629ccc49dSPhilippe Reynes 	cmd->base.phy_address	= 0;
332729ccc49dSPhilippe Reynes 	cmd->base.autoneg	= AUTONEG_DISABLE;
33281da177e4SLinus Torvalds 	return 0;
33291da177e4SLinus Torvalds }
33301da177e4SLinus Torvalds 
33311da177e4SLinus Torvalds static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
33321da177e4SLinus Torvalds {
33331da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
33341da177e4SLinus Torvalds 
333533a5ba14SRick Jones 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
333633a5ba14SRick Jones 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
33371da177e4SLinus Torvalds 
33381da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
333940630b82SMichael S. Tsirkin 	case IFF_TUN:
334033a5ba14SRick Jones 		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
33411da177e4SLinus Torvalds 		break;
334240630b82SMichael S. Tsirkin 	case IFF_TAP:
334333a5ba14SRick Jones 		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
33441da177e4SLinus Torvalds 		break;
33451da177e4SLinus Torvalds 	}
33461da177e4SLinus Torvalds }
33471da177e4SLinus Torvalds 
33481da177e4SLinus Torvalds static u32 tun_get_msglevel(struct net_device *dev)
33491da177e4SLinus Torvalds {
33501da177e4SLinus Torvalds #ifdef TUN_DEBUG
33511da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
33521da177e4SLinus Torvalds 	return tun->debug;
33531da177e4SLinus Torvalds #else
33541da177e4SLinus Torvalds 	return -EOPNOTSUPP;
33551da177e4SLinus Torvalds #endif
33561da177e4SLinus Torvalds }
33571da177e4SLinus Torvalds 
33581da177e4SLinus Torvalds static void tun_set_msglevel(struct net_device *dev, u32 value)
33591da177e4SLinus Torvalds {
33601da177e4SLinus Torvalds #ifdef TUN_DEBUG
33611da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
33621da177e4SLinus Torvalds 	tun->debug = value;
33631da177e4SLinus Torvalds #endif
33641da177e4SLinus Torvalds }
33651da177e4SLinus Torvalds 
33665503fcecSJason Wang static int tun_get_coalesce(struct net_device *dev,
33675503fcecSJason Wang 			    struct ethtool_coalesce *ec)
33685503fcecSJason Wang {
33695503fcecSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
33705503fcecSJason Wang 
33715503fcecSJason Wang 	ec->rx_max_coalesced_frames = tun->rx_batched;
33725503fcecSJason Wang 
33735503fcecSJason Wang 	return 0;
33745503fcecSJason Wang }
33755503fcecSJason Wang 
33765503fcecSJason Wang static int tun_set_coalesce(struct net_device *dev,
33775503fcecSJason Wang 			    struct ethtool_coalesce *ec)
33785503fcecSJason Wang {
33795503fcecSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
33805503fcecSJason Wang 
33815503fcecSJason Wang 	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
33825503fcecSJason Wang 		tun->rx_batched = NAPI_POLL_WEIGHT;
33835503fcecSJason Wang 	else
33845503fcecSJason Wang 		tun->rx_batched = ec->rx_max_coalesced_frames;
33855503fcecSJason Wang 
33865503fcecSJason Wang 	return 0;
33875503fcecSJason Wang }
33885503fcecSJason Wang 
33897282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops = {
33901da177e4SLinus Torvalds 	.get_drvinfo	= tun_get_drvinfo,
33911da177e4SLinus Torvalds 	.get_msglevel	= tun_get_msglevel,
33921da177e4SLinus Torvalds 	.set_msglevel	= tun_set_msglevel,
3393bee31369SNolan Leake 	.get_link	= ethtool_op_get_link,
3394eda29772SRichard Cochran 	.get_ts_info	= ethtool_op_get_ts_info,
33955503fcecSJason Wang 	.get_coalesce   = tun_get_coalesce,
33965503fcecSJason Wang 	.set_coalesce   = tun_set_coalesce,
339729ccc49dSPhilippe Reynes 	.get_link_ksettings = tun_get_link_ksettings,
33981da177e4SLinus Torvalds };
33991da177e4SLinus Torvalds 
34001576d986SJason Wang static int tun_queue_resize(struct tun_struct *tun)
34011576d986SJason Wang {
34021576d986SJason Wang 	struct net_device *dev = tun->dev;
34031576d986SJason Wang 	struct tun_file *tfile;
34045990a305SJason Wang 	struct ptr_ring **rings;
34051576d986SJason Wang 	int n = tun->numqueues + tun->numdisabled;
34061576d986SJason Wang 	int ret, i;
34071576d986SJason Wang 
34085990a305SJason Wang 	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
34095990a305SJason Wang 	if (!rings)
34101576d986SJason Wang 		return -ENOMEM;
34111576d986SJason Wang 
34121576d986SJason Wang 	for (i = 0; i < tun->numqueues; i++) {
34131576d986SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
34145990a305SJason Wang 		rings[i] = &tfile->tx_ring;
34151576d986SJason Wang 	}
34161576d986SJason Wang 	list_for_each_entry(tfile, &tun->disabled, next)
34175990a305SJason Wang 		rings[i++] = &tfile->tx_ring;
34181576d986SJason Wang 
34195990a305SJason Wang 	ret = ptr_ring_resize_multiple(rings, n,
34205990a305SJason Wang 				       dev->tx_queue_len, GFP_KERNEL,
3421fc72d1d5SJason Wang 				       tun_ptr_free);
34221576d986SJason Wang 
34235990a305SJason Wang 	kfree(rings);
34241576d986SJason Wang 	return ret;
34251576d986SJason Wang }
34261576d986SJason Wang 
34271576d986SJason Wang static int tun_device_event(struct notifier_block *unused,
34281576d986SJason Wang 			    unsigned long event, void *ptr)
34291576d986SJason Wang {
34301576d986SJason Wang 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
34311576d986SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
34321576d986SJason Wang 
343386dfb4acSCraig Gallek 	if (dev->rtnl_link_ops != &tun_link_ops)
343486dfb4acSCraig Gallek 		return NOTIFY_DONE;
343586dfb4acSCraig Gallek 
34361576d986SJason Wang 	switch (event) {
34371576d986SJason Wang 	case NETDEV_CHANGE_TX_QUEUE_LEN:
34381576d986SJason Wang 		if (tun_queue_resize(tun))
34391576d986SJason Wang 			return NOTIFY_BAD;
34401576d986SJason Wang 		break;
34411576d986SJason Wang 	default:
34421576d986SJason Wang 		break;
34431576d986SJason Wang 	}
34441576d986SJason Wang 
34451576d986SJason Wang 	return NOTIFY_DONE;
34461576d986SJason Wang }
34471576d986SJason Wang 
34481576d986SJason Wang static struct notifier_block tun_notifier_block __read_mostly = {
34491576d986SJason Wang 	.notifier_call	= tun_device_event,
34501576d986SJason Wang };
345179d17604SPavel Emelyanov 
34521da177e4SLinus Torvalds static int __init tun_init(void)
34531da177e4SLinus Torvalds {
34541da177e4SLinus Torvalds 	int ret = 0;
34551da177e4SLinus Torvalds 
34566b8a66eeSJoe Perches 	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
34571da177e4SLinus Torvalds 
3458f019a7a5SEric W. Biederman 	ret = rtnl_link_register(&tun_link_ops);
345979d17604SPavel Emelyanov 	if (ret) {
34606b8a66eeSJoe Perches 		pr_err("Can't register link_ops\n");
3461f019a7a5SEric W. Biederman 		goto err_linkops;
346279d17604SPavel Emelyanov 	}
346379d17604SPavel Emelyanov 
34641da177e4SLinus Torvalds 	ret = misc_register(&tun_miscdev);
346579d17604SPavel Emelyanov 	if (ret) {
34666b8a66eeSJoe Perches 		pr_err("Can't register misc device %d\n", TUN_MINOR);
346779d17604SPavel Emelyanov 		goto err_misc;
346879d17604SPavel Emelyanov 	}
34691576d986SJason Wang 
34705edfbd3cSTonghao Zhang 	ret = register_netdevice_notifier(&tun_notifier_block);
34715edfbd3cSTonghao Zhang 	if (ret) {
34725edfbd3cSTonghao Zhang 		pr_err("Can't register netdevice notifier\n");
34735edfbd3cSTonghao Zhang 		goto err_notifier;
34745edfbd3cSTonghao Zhang 	}
34755edfbd3cSTonghao Zhang 
347679d17604SPavel Emelyanov 	return  0;
34775edfbd3cSTonghao Zhang 
34785edfbd3cSTonghao Zhang err_notifier:
34795edfbd3cSTonghao Zhang 	misc_deregister(&tun_miscdev);
348079d17604SPavel Emelyanov err_misc:
3481f019a7a5SEric W. Biederman 	rtnl_link_unregister(&tun_link_ops);
3482f019a7a5SEric W. Biederman err_linkops:
34831da177e4SLinus Torvalds 	return ret;
34841da177e4SLinus Torvalds }
34851da177e4SLinus Torvalds 
34861da177e4SLinus Torvalds static void tun_cleanup(void)
34871da177e4SLinus Torvalds {
34881da177e4SLinus Torvalds 	misc_deregister(&tun_miscdev);
3489f019a7a5SEric W. Biederman 	rtnl_link_unregister(&tun_link_ops);
34901576d986SJason Wang 	unregister_netdevice_notifier(&tun_notifier_block);
34911da177e4SLinus Torvalds }
34921da177e4SLinus Torvalds 
349305c2828cSMichael S. Tsirkin /* Get an underlying socket object from tun file.  Returns error unless file is
349405c2828cSMichael S. Tsirkin  * attached to a device.  The returned object works like a packet socket, it
349505c2828cSMichael S. Tsirkin  * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
349605c2828cSMichael S. Tsirkin  * holding a reference to the file for as long as the socket is in use. */
349705c2828cSMichael S. Tsirkin struct socket *tun_get_socket(struct file *file)
349805c2828cSMichael S. Tsirkin {
34996e914fc7SJason Wang 	struct tun_file *tfile;
350005c2828cSMichael S. Tsirkin 	if (file->f_op != &tun_fops)
350105c2828cSMichael S. Tsirkin 		return ERR_PTR(-EINVAL);
35026e914fc7SJason Wang 	tfile = file->private_data;
35036e914fc7SJason Wang 	if (!tfile)
350405c2828cSMichael S. Tsirkin 		return ERR_PTR(-EBADFD);
350554f968d6SJason Wang 	return &tfile->socket;
350605c2828cSMichael S. Tsirkin }
350705c2828cSMichael S. Tsirkin EXPORT_SYMBOL_GPL(tun_get_socket);
350805c2828cSMichael S. Tsirkin 
35095990a305SJason Wang struct ptr_ring *tun_get_tx_ring(struct file *file)
351083339c6bSJason Wang {
351183339c6bSJason Wang 	struct tun_file *tfile;
351283339c6bSJason Wang 
351383339c6bSJason Wang 	if (file->f_op != &tun_fops)
351483339c6bSJason Wang 		return ERR_PTR(-EINVAL);
351583339c6bSJason Wang 	tfile = file->private_data;
351683339c6bSJason Wang 	if (!tfile)
351783339c6bSJason Wang 		return ERR_PTR(-EBADFD);
35185990a305SJason Wang 	return &tfile->tx_ring;
351983339c6bSJason Wang }
35205990a305SJason Wang EXPORT_SYMBOL_GPL(tun_get_tx_ring);
352183339c6bSJason Wang 
35221da177e4SLinus Torvalds module_init(tun_init);
35231da177e4SLinus Torvalds module_exit(tun_cleanup);
35241da177e4SLinus Torvalds MODULE_DESCRIPTION(DRV_DESCRIPTION);
35251da177e4SLinus Torvalds MODULE_AUTHOR(DRV_COPYRIGHT);
35261da177e4SLinus Torvalds MODULE_LICENSE("GPL");
35271da177e4SLinus Torvalds MODULE_ALIAS_MISCDEV(TUN_MINOR);
3528578454ffSKay Sievers MODULE_ALIAS("devname:net/tun");
3529