// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (tun->debug)						\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (debug == 2)						\
		printk(level fmt, ##args);			\
} while (0)
#else
#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (0)							\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (0)							\
		printk(level fmt, ##args);			\
} while (0)
#endif

#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
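
/* TUN_FASYNC lives in tfile->flags rather than tun->flags: tun_net_xmit()
 * below checks (tfile->flags & TUN_FASYNC) before kill_fasync().  Reusing
 * the IFF_ATTACH_QUEUE bit is safe because that flag only ever travels in
 * ioctl arguments and is never kept in the stored flag words.
 */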
/* High bits in flags field are unused. */
#define TUN_VNET_LE     0x80000000
#define TUN_VNET_BE     0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to the max number of VCPUs in a guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};
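
/* The u64 byte/packet counters above are read with the
 * u64_stats_fetch_begin()/u64_stats_fetch_retry() loop (see
 * tun_net_get_stats64() below); syncp only expands to a real seqcount on
 * 32-bit SMP kernels.  The u32 drop/error counters are bumped and read
 * without syncp protection, so a rare racy read there is tolerated.
 */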

/* A tun_file connects an open character device to a tuntap netdevice.  It
 * also contains all socket-related structures (except sock_fprog and
 * tap_filter) and serves as one transmit queue for the tuntap device.  The
 * sock_fprog and tap_filter are kept in tun_struct since they filter for
 * the netdevice as a whole, not for a specific queue (at least I didn't
 * see a requirement for that).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled; the pointer from one to
 * the other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_page {
	struct page *page;
	int count;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated ____cacheline_aligned_in_smp;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
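
/* TUN_NUM_FLOW_ENTRIES must stay a power of two so that tun_hashfn() can
 * reduce a 32-bit rxhash with a mask instead of a modulo: rxhash &
 * (1024 - 1) picks one of the 1024 hlist buckets in tun->flows[].
 */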

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved into tun_file, the socket filter, sndbuf and
 * vnet header size are restored when a file is attached to a persistent
 * device, preserving the device's previous behavior.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int            numqueues;
	unsigned int 		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

bool tun_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}
EXPORT_SYMBOL(tun_is_xdp_frame);

void *tun_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_xdp_to_ptr);

void *tun_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_ptr_to_xdp);
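
/* The tx_ring (struct ptr_ring) carries two kinds of pointers: plain
 * sk_buffs and xdp_frames.  The helpers above tag xdp_frame pointers by
 * setting TUN_XDP_FLAG in the low pointer bit, which is otherwise clear
 * because the objects are word-aligned, so a consumer can demultiplex
 * cheaply, e.g. (illustrative only):
 *
 *	void *ptr = ptr_ring_consume(&tfile->tx_ring);
 *	if (tun_is_xdp_frame(ptr))
 *		xdp_return_frame(tun_ptr_to_xdp(ptr));
 *	else
 *		kfree_skb(ptr);
 *
 * tun_ptr_free() below implements exactly this pattern.
 */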
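/* tun_napi_receive() drains the skbs that the write path parked on
 * sk_write_queue: it splices the whole queue into a private list under
 * the queue lock, feeds up to @budget packets to GRO with the lock
 * dropped, then re-queues any leftovers for the next poll.
 */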
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
				  NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}
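
/* Byte order of the virtio_net_hdr fields: TUN_VNET_LE (set via the
 * TUNSETVNETLE ioctl, handled elsewhere in this file) forces
 * little-endian explicitly; otherwise legacy virtio rules apply, where
 * tun_legacy_is_little_endian() may honour TUN_VNET_BE on kernels built
 * with CONFIG_TUN_VNET_CROSS_LE.  So on a little-endian host with no
 * flags set, tun16_to_cpu() below is effectively a no-op.
 */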

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}

static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		if (READ_ONCE(e->queue_index) != queue_index)
			WRITE_ONCE(e->queue_index, queue_index);
		if (e->updated != jiffies)
			e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

/* Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash.  The reason we do not
 * check the rxq number is that some cards (e.g. the 82599) choose the
 * rxq based on the txq where the last packet of the flow came in.  As
 * the userspace application moves between processors, we may get a
 * different rxq number here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
	if (e) {
		tun_flow_save_rps_rxhash(e, txq);
		txq = e->queue_index;
	} else {
		/* use multiply and shift instead of expensive divide */
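		/* txq holds a full-range 32-bit hash here, so the
		 * fixed-point product (hash * numqueues) >> 32 lands in
		 * [0, numqueues).  E.g. numqueues == 4 and hash 0xc0000000
		 * give ((u64)0xc0000000 * 4) >> 32 == 3.
		 */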
		txq = ((u64)txq * numqueues) >> 32;
	}

	return txq;
}

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u32 numqueues;
	u16 ret = 0;

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues)
		return 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}

static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;
		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
				   NULL);

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags,
		      bool publish_tun)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to the persistent device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (!err)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	if (rtnl_dereference(tun->xdp_prog))
		sock_set_flag(&tfile->sk, SOCK_XDP);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

	/* Publish tfile->tun and tun->tfiles only after we've fully
	 * initialized tfile; otherwise we risk using half-initialized
	 * object.
	 */
	if (publish_tun)
		rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;
	tun_set_real_num_queues(tun);
out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}
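
/* The two u32 words of tap_filter.mask form a 64-bit hash filter: the
 * top six bits of the Ethernet CRC of an address select one of 64 mask
 * bits, e.g. ether_crc() >> 26 == 37 sets/tests bit 5 of mask[1].  Like
 * any Bloom-style filter it can yield false positives (spurious
 * accepts), never false negatives (wrong drops).
 */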

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks, which is
	 * perfectly safe: we disable it first, and in the worst case
	 * we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* The remaining multicast addresses are hashed; a unicast
	 * address will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_header() is
	 * incorrect at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		struct tun_flow_entry *e;
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
		if (e)
			tun_flow_save_rps_rxhash(e, rxhash);
	}
#endif
}

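/* run_ebpf_filter() below returns how many bytes of the skb to keep: the
 * program attached via the filter ioctl (TUNSETFILTEREBPF) may return 0
 * to drop the packet, or a shorter length to trim it.  tun_net_xmit()
 * acts on the result with pskb_trim(); with no program attached the
 * length passes through unchanged.
 */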
static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (!tfile)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * The filter can be enabled only for TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0 || pskb_trim(skb, len))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required since we might hang on to it
	 * for an indefinite time.
	 */
	skb_orphan(skb);

	nf_reset_ct(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with the mc filter in the
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In the rx path we always accept everything userspace gives us.
	 */
11351da177e4SLinus Torvalds }
11361da177e4SLinus Torvalds 
1137c8f44affSMichał Mirosław static netdev_features_t tun_net_fix_features(struct net_device *dev,
1138c8f44affSMichał Mirosław 	netdev_features_t features)
113988255375SMichał Mirosław {
114088255375SMichał Mirosław 	struct tun_struct *tun = netdev_priv(dev);
114188255375SMichał Mirosław 
114288255375SMichał Mirosław 	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
114388255375SMichał Mirosław }
1144eaea34b2SPaolo Abeni 
1145eaea34b2SPaolo Abeni static void tun_set_headroom(struct net_device *dev, int new_hr)
1146eaea34b2SPaolo Abeni {
1147eaea34b2SPaolo Abeni 	struct tun_struct *tun = netdev_priv(dev);
1148eaea34b2SPaolo Abeni 
1149eaea34b2SPaolo Abeni 	if (new_hr < NET_SKB_PAD)
1150eaea34b2SPaolo Abeni 		new_hr = NET_SKB_PAD;
1151eaea34b2SPaolo Abeni 
1152eaea34b2SPaolo Abeni 	tun->align = new_hr;
1153eaea34b2SPaolo Abeni }
1154eaea34b2SPaolo Abeni 
1155bc1f4470Sstephen hemminger static void
1156608b9977SPaolo Abeni tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1157608b9977SPaolo Abeni {
1158608b9977SPaolo Abeni 	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
1159608b9977SPaolo Abeni 	struct tun_struct *tun = netdev_priv(dev);
1160608b9977SPaolo Abeni 	struct tun_pcpu_stats *p;
1161608b9977SPaolo Abeni 	int i;
1162608b9977SPaolo Abeni 
1163608b9977SPaolo Abeni 	for_each_possible_cpu(i) {
1164608b9977SPaolo Abeni 		u64 rxpackets, rxbytes, txpackets, txbytes;
1165608b9977SPaolo Abeni 		unsigned int start;
1166608b9977SPaolo Abeni 
1167608b9977SPaolo Abeni 		p = per_cpu_ptr(tun->pcpu_stats, i);
1168608b9977SPaolo Abeni 		do {
1169608b9977SPaolo Abeni 			start = u64_stats_fetch_begin(&p->syncp);
11705260dd3eSEric Dumazet 			rxpackets	= u64_stats_read(&p->rx_packets);
11715260dd3eSEric Dumazet 			rxbytes		= u64_stats_read(&p->rx_bytes);
11725260dd3eSEric Dumazet 			txpackets	= u64_stats_read(&p->tx_packets);
11735260dd3eSEric Dumazet 			txbytes		= u64_stats_read(&p->tx_bytes);
1174608b9977SPaolo Abeni 		} while (u64_stats_fetch_retry(&p->syncp, start));
1175608b9977SPaolo Abeni 
1176608b9977SPaolo Abeni 		stats->rx_packets	+= rxpackets;
1177608b9977SPaolo Abeni 		stats->rx_bytes		+= rxbytes;
1178608b9977SPaolo Abeni 		stats->tx_packets	+= txpackets;
1179608b9977SPaolo Abeni 		stats->tx_bytes		+= txbytes;
1180608b9977SPaolo Abeni 
1181608b9977SPaolo Abeni 		/* u32 counters */
1182608b9977SPaolo Abeni 		rx_dropped	+= p->rx_dropped;
1183608b9977SPaolo Abeni 		rx_frame_errors	+= p->rx_frame_errors;
1184608b9977SPaolo Abeni 		tx_dropped	+= p->tx_dropped;
1185608b9977SPaolo Abeni 	}
1186608b9977SPaolo Abeni 	stats->rx_dropped  = rx_dropped;
1187608b9977SPaolo Abeni 	stats->rx_frame_errors = rx_frame_errors;
1188608b9977SPaolo Abeni 	stats->tx_dropped = tx_dropped;
1189608b9977SPaolo Abeni }
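
/* Editor's note: the fetch/retry loop above is the reader half of the
 * u64_stats seqcount pattern; the writer half appears later in this file
 * (tun_get_user()/tun_put_user()) and looks like:
 *
 *	stats = get_cpu_ptr(tun->pcpu_stats);
 *	u64_stats_update_begin(&stats->syncp);
 *	u64_stats_inc(&stats->rx_packets);
 *	u64_stats_add(&stats->rx_bytes, len);
 *	u64_stats_update_end(&stats->syncp);
 *	put_cpu_ptr(stats);
 *
 * On 64-bit kernels the syncp compiles away; on 32-bit it is a real
 * seqcount, and the do/while re-reads any snapshot torn by a concurrent
 * writer.  The u32 counters (rx_dropped etc.) are read outside the
 * seqcount because a 32-bit load is atomic anyway.
 */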
1190608b9977SPaolo Abeni 
1191761876c8SJason Wang static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1192761876c8SJason Wang 		       struct netlink_ext_ack *extack)
1193761876c8SJason Wang {
1194761876c8SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1195e4a2a304SJason Wang 	struct tun_file *tfile;
1196761876c8SJason Wang 	struct bpf_prog *old_prog;
1197e4a2a304SJason Wang 	int i;
1198761876c8SJason Wang 
1199761876c8SJason Wang 	old_prog = rtnl_dereference(tun->xdp_prog);
1200761876c8SJason Wang 	rcu_assign_pointer(tun->xdp_prog, prog);
1201761876c8SJason Wang 	if (old_prog)
1202761876c8SJason Wang 		bpf_prog_put(old_prog);
1203761876c8SJason Wang 
1204e4a2a304SJason Wang 	for (i = 0; i < tun->numqueues; i++) {
1205e4a2a304SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
1206e4a2a304SJason Wang 		if (prog)
1207e4a2a304SJason Wang 			sock_set_flag(&tfile->sk, SOCK_XDP);
1208e4a2a304SJason Wang 		else
1209e4a2a304SJason Wang 			sock_reset_flag(&tfile->sk, SOCK_XDP);
1210e4a2a304SJason Wang 	}
1211e4a2a304SJason Wang 	list_for_each_entry(tfile, &tun->disabled, next) {
1212e4a2a304SJason Wang 		if (prog)
1213e4a2a304SJason Wang 			sock_set_flag(&tfile->sk, SOCK_XDP);
1214e4a2a304SJason Wang 		else
1215e4a2a304SJason Wang 			sock_reset_flag(&tfile->sk, SOCK_XDP);
1216e4a2a304SJason Wang 	}
1217e4a2a304SJason Wang 
1218761876c8SJason Wang 	return 0;
1219761876c8SJason Wang }
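
/* Editor's note: SOCK_XDP is mirrored onto every queue's socket,
 * including currently detached ones, so that consumers of the socket
 * (vhost_net being the in-tree user) can cheaply tell that an XDP
 * program may be attached and reserve XDP headroom when building
 * buffers.  The flag appears to be advisory only; the program pointer
 * itself is still read under RCU at use time.
 */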
1220761876c8SJason Wang 
1221761876c8SJason Wang static u32 tun_xdp_query(struct net_device *dev)
1222761876c8SJason Wang {
1223761876c8SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1224761876c8SJason Wang 	const struct bpf_prog *xdp_prog;
1225761876c8SJason Wang 
1226761876c8SJason Wang 	xdp_prog = rtnl_dereference(tun->xdp_prog);
1227761876c8SJason Wang 	if (xdp_prog)
1228761876c8SJason Wang 		return xdp_prog->aux->id;
1229761876c8SJason Wang 
1230761876c8SJason Wang 	return 0;
1231761876c8SJason Wang }
1232761876c8SJason Wang 
1233f4e63525SJakub Kicinski static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1234761876c8SJason Wang {
1235761876c8SJason Wang 	switch (xdp->command) {
1236761876c8SJason Wang 	case XDP_SETUP_PROG:
1237761876c8SJason Wang 		return tun_xdp_set(dev, xdp->prog, xdp->extack);
1238761876c8SJason Wang 	case XDP_QUERY_PROG:
1239761876c8SJason Wang 		xdp->prog_id = tun_xdp_query(dev);
1240761876c8SJason Wang 		return 0;
1241761876c8SJason Wang 	default:
1242761876c8SJason Wang 		return -EINVAL;
1243761876c8SJason Wang 	}
1244761876c8SJason Wang }
1245761876c8SJason Wang 
124626d31925SNicolas Dichtel static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
124726d31925SNicolas Dichtel {
124826d31925SNicolas Dichtel 	if (new_carrier) {
124926d31925SNicolas Dichtel 		struct tun_struct *tun = netdev_priv(dev);
125026d31925SNicolas Dichtel 
125126d31925SNicolas Dichtel 		if (!tun->numqueues)
125226d31925SNicolas Dichtel 			return -EPERM;
125326d31925SNicolas Dichtel 
125426d31925SNicolas Dichtel 		netif_carrier_on(dev);
125526d31925SNicolas Dichtel 	} else {
125626d31925SNicolas Dichtel 		netif_carrier_off(dev);
125726d31925SNicolas Dichtel 	}
125826d31925SNicolas Dichtel 	return 0;
125926d31925SNicolas Dichtel }
126026d31925SNicolas Dichtel 
1261758e43b7SStephen Hemminger static const struct net_device_ops tun_netdev_ops = {
1262c70f1829SEric W. Biederman 	.ndo_uninit		= tun_net_uninit,
1263758e43b7SStephen Hemminger 	.ndo_open		= tun_net_open,
1264758e43b7SStephen Hemminger 	.ndo_stop		= tun_net_close,
126500829823SStephen Hemminger 	.ndo_start_xmit		= tun_net_xmit,
126688255375SMichał Mirosław 	.ndo_fix_features	= tun_net_fix_features,
1267c8d68e6bSJason Wang 	.ndo_select_queue	= tun_select_queue,
1268eaea34b2SPaolo Abeni 	.ndo_set_rx_headroom	= tun_set_headroom,
1269608b9977SPaolo Abeni 	.ndo_get_stats64	= tun_net_get_stats64,
127026d31925SNicolas Dichtel 	.ndo_change_carrier	= tun_net_change_carrier,
1271758e43b7SStephen Hemminger };
1272758e43b7SStephen Hemminger 
12730c9d917bSJesper Dangaard Brouer static void __tun_xdp_flush_tfile(struct tun_file *tfile)
12740c9d917bSJesper Dangaard Brouer {
12750c9d917bSJesper Dangaard Brouer 	/* Notify and wake up reader process */
12760c9d917bSJesper Dangaard Brouer 	if (tfile->flags & TUN_FASYNC)
12770c9d917bSJesper Dangaard Brouer 		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
12780c9d917bSJesper Dangaard Brouer 	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
12790c9d917bSJesper Dangaard Brouer }
12800c9d917bSJesper Dangaard Brouer 
128142b33468SJesper Dangaard Brouer static int tun_xdp_xmit(struct net_device *dev, int n,
128242b33468SJesper Dangaard Brouer 			struct xdp_frame **frames, u32 flags)
1283fc72d1d5SJason Wang {
1284fc72d1d5SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1285fc72d1d5SJason Wang 	struct tun_file *tfile;
1286fc72d1d5SJason Wang 	u32 numqueues;
1287735fc405SJesper Dangaard Brouer 	int drops = 0;
1288735fc405SJesper Dangaard Brouer 	int cnt = n;
1289735fc405SJesper Dangaard Brouer 	int i;
1290fc72d1d5SJason Wang 
12910c9d917bSJesper Dangaard Brouer 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
129242b33468SJesper Dangaard Brouer 		return -EINVAL;
129342b33468SJesper Dangaard Brouer 
1294fc72d1d5SJason Wang 	rcu_read_lock();
1295fc72d1d5SJason Wang 
12969871a9e4SJason Wang resample:
1297fc72d1d5SJason Wang 	numqueues = READ_ONCE(tun->numqueues);
1298fc72d1d5SJason Wang 	if (!numqueues) {
1299735fc405SJesper Dangaard Brouer 		rcu_read_unlock();
1300735fc405SJesper Dangaard Brouer 		return -ENXIO; /* Caller will free/return all frames */
1301fc72d1d5SJason Wang 	}
1302fc72d1d5SJason Wang 
1303fc72d1d5SJason Wang 	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1304fc72d1d5SJason Wang 					    numqueues]);
13059871a9e4SJason Wang 	if (unlikely(!tfile))
13069871a9e4SJason Wang 		goto resample;
1307735fc405SJesper Dangaard Brouer 
1308735fc405SJesper Dangaard Brouer 	spin_lock(&tfile->tx_ring.producer_lock);
1309735fc405SJesper Dangaard Brouer 	for (i = 0; i < n; i++) {
1310735fc405SJesper Dangaard Brouer 		struct xdp_frame *xdp = frames[i];
1311fc72d1d5SJason Wang 		/* Encode the XDP flag into the lowest bit so the consumer
1312fc72d1d5SJason Wang 		 * can distinguish an XDP frame from an sk_buff.
1313fc72d1d5SJason Wang 		 */
1314735fc405SJesper Dangaard Brouer 		void *frame = tun_xdp_to_ptr(xdp);
1315fc72d1d5SJason Wang 
1316735fc405SJesper Dangaard Brouer 		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
1317735fc405SJesper Dangaard Brouer 			this_cpu_inc(tun->pcpu_stats->tx_dropped);
1318735fc405SJesper Dangaard Brouer 			xdp_return_frame_rx_napi(xdp);
1319735fc405SJesper Dangaard Brouer 			drops++;
1320735fc405SJesper Dangaard Brouer 		}
1321735fc405SJesper Dangaard Brouer 	}
1322735fc405SJesper Dangaard Brouer 	spin_unlock(&tfile->tx_ring.producer_lock);
1323735fc405SJesper Dangaard Brouer 
13240c9d917bSJesper Dangaard Brouer 	if (flags & XDP_XMIT_FLUSH)
13250c9d917bSJesper Dangaard Brouer 		__tun_xdp_flush_tfile(tfile);
13260c9d917bSJesper Dangaard Brouer 
1327fc72d1d5SJason Wang 	rcu_read_unlock();
1328735fc405SJesper Dangaard Brouer 	return cnt - drops;
1329fc72d1d5SJason Wang }
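
/* Editor's note: sk_buffs and xdp_frames travel through the same
 * tx_ring, so the producer tags XDP frames in the pointer's low bit.
 * The helpers are defined earlier in this file; roughly:
 *
 *	#define TUN_XDP_FLAG 0x1UL
 *
 *	void *tun_xdp_to_ptr(void *ptr)
 *	{
 *		return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
 *	}
 *
 *	bool tun_is_xdp_frame(void *ptr)
 *	{
 *		return (unsigned long)ptr & TUN_XDP_FLAG;
 *	}
 *
 * This works because slab pointers are at least word aligned, so bit 0
 * of a valid pointer is always zero; tun_do_read() below untags with
 * tun_ptr_to_xdp() before use.
 */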
1330fc72d1d5SJason Wang 
133144fa2dbdSJesper Dangaard Brouer static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
133244fa2dbdSJesper Dangaard Brouer {
133344fa2dbdSJesper Dangaard Brouer 	struct xdp_frame *frame = convert_to_xdp_frame(xdp);
133444fa2dbdSJesper Dangaard Brouer 
133544fa2dbdSJesper Dangaard Brouer 	if (unlikely(!frame))
133644fa2dbdSJesper Dangaard Brouer 		return -EOVERFLOW;
133744fa2dbdSJesper Dangaard Brouer 
133842421a56SJesper Dangaard Brouer 	return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
1339fc72d1d5SJason Wang }
1340fc72d1d5SJason Wang 
1341758e43b7SStephen Hemminger static const struct net_device_ops tap_netdev_ops = {
1342c70f1829SEric W. Biederman 	.ndo_uninit		= tun_net_uninit,
1343758e43b7SStephen Hemminger 	.ndo_open		= tun_net_open,
1344758e43b7SStephen Hemminger 	.ndo_stop		= tun_net_close,
134500829823SStephen Hemminger 	.ndo_start_xmit		= tun_net_xmit,
134688255375SMichał Mirosław 	.ndo_fix_features	= tun_net_fix_features,
1347afc4b13dSJiri Pirko 	.ndo_set_rx_mode	= tun_net_mclist,
1348758e43b7SStephen Hemminger 	.ndo_set_mac_address	= eth_mac_addr,
1349758e43b7SStephen Hemminger 	.ndo_validate_addr	= eth_validate_addr,
1350c8d68e6bSJason Wang 	.ndo_select_queue	= tun_select_queue,
13515e52796aSToshiaki Makita 	.ndo_features_check	= passthru_features_check,
1352eaea34b2SPaolo Abeni 	.ndo_set_rx_headroom	= tun_set_headroom,
1353608b9977SPaolo Abeni 	.ndo_get_stats64	= tun_net_get_stats64,
1354f4e63525SJakub Kicinski 	.ndo_bpf		= tun_xdp,
1355fc72d1d5SJason Wang 	.ndo_xdp_xmit		= tun_xdp_xmit,
135626d31925SNicolas Dichtel 	.ndo_change_carrier	= tun_net_change_carrier,
1357758e43b7SStephen Hemminger };
1358758e43b7SStephen Hemminger 
1359944a1376SPavel Emelyanov static void tun_flow_init(struct tun_struct *tun)
136096442e42SJason Wang {
136196442e42SJason Wang 	int i;
136296442e42SJason Wang 
136396442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
136496442e42SJason Wang 		INIT_HLIST_HEAD(&tun->flows[i]);
136596442e42SJason Wang 
136696442e42SJason Wang 	tun->ageing_time = TUN_FLOW_EXPIRE;
1367e99e88a9SKees Cook 	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1368e99e88a9SKees Cook 	mod_timer(&tun->flow_gc_timer,
1369e99e88a9SKees Cook 		  round_jiffies_up(jiffies + tun->ageing_time));
137096442e42SJason Wang }
137196442e42SJason Wang 
137296442e42SJason Wang static void tun_flow_uninit(struct tun_struct *tun)
137396442e42SJason Wang {
137496442e42SJason Wang 	del_timer_sync(&tun->flow_gc_timer);
137596442e42SJason Wang 	tun_flow_flush(tun);
137696442e42SJason Wang }
137796442e42SJason Wang 
137891572088SJarod Wilson #define MIN_MTU 68
137991572088SJarod Wilson #define MAX_MTU 65535
138091572088SJarod Wilson 
13811da177e4SLinus Torvalds /* Initialize net device. */
13821da177e4SLinus Torvalds static void tun_net_init(struct net_device *dev)
13831da177e4SLinus Torvalds {
13841da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
13851da177e4SLinus Torvalds 
13861da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
138740630b82SMichael S. Tsirkin 	case IFF_TUN:
1388758e43b7SStephen Hemminger 		dev->netdev_ops = &tun_netdev_ops;
1389758e43b7SStephen Hemminger 
13901da177e4SLinus Torvalds 		/* Point-to-Point TUN Device */
13911da177e4SLinus Torvalds 		dev->hard_header_len = 0;
13921da177e4SLinus Torvalds 		dev->addr_len = 0;
13931da177e4SLinus Torvalds 		dev->mtu = 1500;
13941da177e4SLinus Torvalds 
13951da177e4SLinus Torvalds 		/* Zero header length */
13961da177e4SLinus Torvalds 		dev->type = ARPHRD_NONE;
13971da177e4SLinus Torvalds 		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
13981da177e4SLinus Torvalds 		break;
13991da177e4SLinus Torvalds 
140040630b82SMichael S. Tsirkin 	case IFF_TAP:
14017a0a9608SKusanagi Kouichi 		dev->netdev_ops = &tap_netdev_ops;
14021da177e4SLinus Torvalds 		/* Ethernet TAP Device */
14031da177e4SLinus Torvalds 		ether_setup(dev);
1404550fd08cSNeil Horman 		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1405a676847bSstephen hemminger 		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
140636226a8dSBrian Braunstein 
1407f2cedb63SDanny Kukawka 		eth_hw_addr_random(dev);
140836226a8dSBrian Braunstein 
14091da177e4SLinus Torvalds 		break;
14101da177e4SLinus Torvalds 	}
141191572088SJarod Wilson 
141291572088SJarod Wilson 	dev->min_mtu = MIN_MTU;
141391572088SJarod Wilson 	dev->max_mtu = MAX_MTU - dev->hard_header_len;
14141da177e4SLinus Torvalds }
14151da177e4SLinus Torvalds 
14162f3ab622SJason Wang static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
14172f3ab622SJason Wang {
14182f3ab622SJason Wang 	struct sock *sk = tfile->socket.sk;
14192f3ab622SJason Wang 
14202f3ab622SJason Wang 	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
14212f3ab622SJason Wang }
14222f3ab622SJason Wang 
14231da177e4SLinus Torvalds /* Character device part */
14241da177e4SLinus Torvalds 
14251da177e4SLinus Torvalds /* Poll */
1426afc9a42bSAl Viro static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
14271da177e4SLinus Torvalds {
1428b2430de3SEric W. Biederman 	struct tun_file *tfile = file->private_data;
14299484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
14303c8a9c63SMariusz Kozlowski 	struct sock *sk;
1431afc9a42bSAl Viro 	__poll_t mask = 0;
14321da177e4SLinus Torvalds 
14331da177e4SLinus Torvalds 	if (!tun)
1434a9a08845SLinus Torvalds 		return EPOLLERR;
14351da177e4SLinus Torvalds 
143654f968d6SJason Wang 	sk = tfile->socket.sk;
14373c8a9c63SMariusz Kozlowski 
14386b8a66eeSJoe Perches 	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
14391da177e4SLinus Torvalds 
14409e641bdcSXi Wang 	poll_wait(file, sk_sleep(sk), wait);
14411da177e4SLinus Torvalds 
14425990a305SJason Wang 	if (!ptr_ring_empty(&tfile->tx_ring))
1443a9a08845SLinus Torvalds 		mask |= EPOLLIN | EPOLLRDNORM;
14441da177e4SLinus Torvalds 
14452f3ab622SJason Wang 	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if the socket is not
14462f3ab622SJason Wang 	 * writable, to guarantee that EPOLLOUT is raised either here or
14472f3ab622SJason Wang 	 * by tun_sock_write_space(). That way a process can still get a
14482f3ab622SJason Wang 	 * notification after it writes to a down device and meets -EIO.
14492f3ab622SJason Wang 	 */
14502f3ab622SJason Wang 	if (tun_sock_writeable(tun, tfile) ||
14519cd3e072SEric Dumazet 	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
14522f3ab622SJason Wang 	     tun_sock_writeable(tun, tfile)))
1453a9a08845SLinus Torvalds 		mask |= EPOLLOUT | EPOLLWRNORM;
145433dccbb0SHerbert Xu 
1455c70f1829SEric W. Biederman 	if (tun->dev->reg_state != NETREG_REGISTERED)
1456a9a08845SLinus Torvalds 		mask = EPOLLERR;
1457c70f1829SEric W. Biederman 
1458631ab46bSEric W. Biederman 	tun_put(tun);
14591da177e4SLinus Torvalds 	return mask;
14601da177e4SLinus Torvalds }
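
/* Editor's note: the writability test above is the "check, publish
 * interest, re-check" idiom:
 *
 *	if (tun_sock_writeable(...))			// fast path
 *		report EPOLLOUT;
 *	else if (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE) &&
 *		 tun_sock_writeable(...))		// close the race
 *		report EPOLLOUT;
 *
 * Without the second check, a write-space wakeup firing between the
 * first check and the bit being set could be lost, leaving a poller
 * waiting for an EPOLLOUT that never comes.
 */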
14611da177e4SLinus Torvalds 
146290e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
146390e33d45SPetar Penkov 					    size_t len,
146490e33d45SPetar Penkov 					    const struct iov_iter *it)
146590e33d45SPetar Penkov {
146690e33d45SPetar Penkov 	struct sk_buff *skb;
146790e33d45SPetar Penkov 	size_t linear;
146890e33d45SPetar Penkov 	int err;
146990e33d45SPetar Penkov 	int i;
147090e33d45SPetar Penkov 
147190e33d45SPetar Penkov 	if (it->nr_segs > MAX_SKB_FRAGS + 1)
147290e33d45SPetar Penkov 		return ERR_PTR(-ENOMEM);
147390e33d45SPetar Penkov 
147490e33d45SPetar Penkov 	local_bh_disable();
147590e33d45SPetar Penkov 	skb = napi_get_frags(&tfile->napi);
147690e33d45SPetar Penkov 	local_bh_enable();
147790e33d45SPetar Penkov 	if (!skb)
147890e33d45SPetar Penkov 		return ERR_PTR(-ENOMEM);
147990e33d45SPetar Penkov 
148090e33d45SPetar Penkov 	linear = iov_iter_single_seg_count(it);
148190e33d45SPetar Penkov 	err = __skb_grow(skb, linear);
148290e33d45SPetar Penkov 	if (err)
148390e33d45SPetar Penkov 		goto free;
148490e33d45SPetar Penkov 
148590e33d45SPetar Penkov 	skb->len = len;
148690e33d45SPetar Penkov 	skb->data_len = len - linear;
148790e33d45SPetar Penkov 	skb->truesize += skb->data_len;
148890e33d45SPetar Penkov 
148990e33d45SPetar Penkov 	for (i = 1; i < it->nr_segs; i++) {
149090e33d45SPetar Penkov 		size_t fragsz = it->iov[i].iov_len;
1491aa6daacaSEric Dumazet 		struct page *page;
1492aa6daacaSEric Dumazet 		void *frag;
149390e33d45SPetar Penkov 
149490e33d45SPetar Penkov 		if (fragsz == 0 || fragsz > PAGE_SIZE) {
149590e33d45SPetar Penkov 			err = -EINVAL;
149690e33d45SPetar Penkov 			goto free;
149790e33d45SPetar Penkov 		}
1498aa6daacaSEric Dumazet 		frag = netdev_alloc_frag(fragsz);
1499aa6daacaSEric Dumazet 		if (!frag) {
150090e33d45SPetar Penkov 			err = -ENOMEM;
150190e33d45SPetar Penkov 			goto free;
150290e33d45SPetar Penkov 		}
1503aa6daacaSEric Dumazet 		page = virt_to_head_page(frag);
1504aa6daacaSEric Dumazet 		skb_fill_page_desc(skb, i - 1, page,
1505aa6daacaSEric Dumazet 				   frag - page_address(page), fragsz);
150690e33d45SPetar Penkov 	}
150790e33d45SPetar Penkov 
150890e33d45SPetar Penkov 	return skb;
150990e33d45SPetar Penkov free:
151090e33d45SPetar Penkov 	/* frees skb and all frags allocated with napi_alloc_frag() */
151190e33d45SPetar Penkov 	napi_free_frags(&tfile->napi);
151290e33d45SPetar Penkov 	return ERR_PTR(err);
151390e33d45SPetar Penkov }
151490e33d45SPetar Penkov 
1515f42157cbSRusty Russell /* prepad is the amount to reserve at the front.  len is the length after that.
1516f42157cbSRusty Russell  * linear is a hint as to how much to copy (usually headers). */
151754f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
151833dccbb0SHerbert Xu 				     size_t prepad, size_t len,
151933dccbb0SHerbert Xu 				     size_t linear, int noblock)
1520f42157cbSRusty Russell {
152154f968d6SJason Wang 	struct sock *sk = tfile->socket.sk;
1522f42157cbSRusty Russell 	struct sk_buff *skb;
152333dccbb0SHerbert Xu 	int err;
1524f42157cbSRusty Russell 
1525f42157cbSRusty Russell 	/* Under a page?  Don't bother with paged skb. */
15260eca93bcSHerbert Xu 	if (prepad + len < PAGE_SIZE || !linear)
152733dccbb0SHerbert Xu 		linear = len;
1528f42157cbSRusty Russell 
152933dccbb0SHerbert Xu 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
153028d64271SEric Dumazet 				   &err, 0);
1531f42157cbSRusty Russell 	if (!skb)
153233dccbb0SHerbert Xu 		return ERR_PTR(err);
1533f42157cbSRusty Russell 
1534f42157cbSRusty Russell 	skb_reserve(skb, prepad);
1535f42157cbSRusty Russell 	skb_put(skb, linear);
153633dccbb0SHerbert Xu 	skb->data_len = len - linear;
153733dccbb0SHerbert Xu 	skb->len += len - linear;
1538f42157cbSRusty Russell 
1539f42157cbSRusty Russell 	return skb;
1540f42157cbSRusty Russell }
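
/* Editor's note: sock_alloc_send_pskb() splits the allocation into a
 * linear head of (prepad + linear) bytes plus (len - linear) bytes of
 * page frags, charging the whole skb against the socket's sndbuf and
 * honouring noblock when that budget is exhausted.  For example, with
 * len = 4000 and linear = 128 (numbers for illustration only), the skb
 * gets a 128-byte linear area for headers and the rest in frags.
 */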
1541f42157cbSRusty Russell 
15425503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
15435503fcecSJason Wang 			   struct sk_buff *skb, int more)
15445503fcecSJason Wang {
15455503fcecSJason Wang 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
15465503fcecSJason Wang 	struct sk_buff_head process_queue;
15475503fcecSJason Wang 	u32 rx_batched = tun->rx_batched;
15485503fcecSJason Wang 	bool rcv = false;
15495503fcecSJason Wang 
15505503fcecSJason Wang 	if (!rx_batched || (!more && skb_queue_empty(queue))) {
15515503fcecSJason Wang 		local_bh_disable();
15528ebebcbaSMatthew Cover 		skb_record_rx_queue(skb, tfile->queue_index);
15535503fcecSJason Wang 		netif_receive_skb(skb);
15545503fcecSJason Wang 		local_bh_enable();
15555503fcecSJason Wang 		return;
15565503fcecSJason Wang 	}
15575503fcecSJason Wang 
15585503fcecSJason Wang 	spin_lock(&queue->lock);
15595503fcecSJason Wang 	if (!more || skb_queue_len(queue) == rx_batched) {
15605503fcecSJason Wang 		__skb_queue_head_init(&process_queue);
15615503fcecSJason Wang 		skb_queue_splice_tail_init(queue, &process_queue);
15625503fcecSJason Wang 		rcv = true;
15635503fcecSJason Wang 	} else {
15645503fcecSJason Wang 		__skb_queue_tail(queue, skb);
15655503fcecSJason Wang 	}
15665503fcecSJason Wang 	spin_unlock(&queue->lock);
15675503fcecSJason Wang 
15685503fcecSJason Wang 	if (rcv) {
15695503fcecSJason Wang 		struct sk_buff *nskb;
15705503fcecSJason Wang 
15715503fcecSJason Wang 		local_bh_disable();
15728ebebcbaSMatthew Cover 		while ((nskb = __skb_dequeue(&process_queue))) {
15738ebebcbaSMatthew Cover 			skb_record_rx_queue(nskb, tfile->queue_index);
15745503fcecSJason Wang 			netif_receive_skb(nskb);
15758ebebcbaSMatthew Cover 		}
15768ebebcbaSMatthew Cover 		skb_record_rx_queue(skb, tfile->queue_index);
15775503fcecSJason Wang 		netif_receive_skb(skb);
15785503fcecSJason Wang 		local_bh_enable();
15795503fcecSJason Wang 	}
15805503fcecSJason Wang }
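
/* Editor's note: this batching trades a little latency for lock
 * traffic.  While the sender keeps signalling "more" (MSG_MORE on the
 * sendmsg path), skbs are parked on sk_write_queue; the queue is
 * flushed into the stack when the batch limit (tun->rx_batched) is
 * reached or the sender stops indicating more data.  With rx_batched
 * set to 0 this degenerates to one netif_receive_skb() per packet.
 */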
15815503fcecSJason Wang 
158266ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
158366ccbc9cSJason Wang 			      int len, int noblock, bool zerocopy)
158466ccbc9cSJason Wang {
158566ccbc9cSJason Wang 	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
158666ccbc9cSJason Wang 		return false;
158766ccbc9cSJason Wang 
158866ccbc9cSJason Wang 	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
158966ccbc9cSJason Wang 		return false;
159066ccbc9cSJason Wang 
159166ccbc9cSJason Wang 	if (!noblock)
159266ccbc9cSJason Wang 		return false;
159366ccbc9cSJason Wang 
159466ccbc9cSJason Wang 	if (zerocopy)
159566ccbc9cSJason Wang 		return false;
159666ccbc9cSJason Wang 
159766ccbc9cSJason Wang 	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
159866ccbc9cSJason Wang 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
159966ccbc9cSJason Wang 		return false;
160066ccbc9cSJason Wang 
160166ccbc9cSJason Wang 	return true;
160266ccbc9cSJason Wang }
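
/* Editor's note (interpretation): each condition guards the build_skb()
 * fast path below.  Only TAP devices qualify, as the XDP path assumes
 * ethernet framing; sndbuf must be left at INT_MAX because this path
 * does no socket-buffer accounting; the write must be non-blocking
 * because the path never waits for buffer space; zerocopy is excluded
 * since the payload gets copied into a page frag regardless; and the
 * packet plus skb_shared_info must fit in one page for build_skb().
 */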
160366ccbc9cSJason Wang 
16044b663366SAlexis Bauvin static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
16054b663366SAlexis Bauvin 				       struct page_frag *alloc_frag, char *buf,
16068ae1aff0SJason Wang 				       int buflen, int len, int pad)
1607ac1f1f6cSJason Wang {
1608ac1f1f6cSJason Wang 	struct sk_buff *skb = build_skb(buf, buflen);
1609ac1f1f6cSJason Wang 
1610ac1f1f6cSJason Wang 	if (!skb)
1611ac1f1f6cSJason Wang 		return ERR_PTR(-ENOMEM);
1612ac1f1f6cSJason Wang 
16138ae1aff0SJason Wang 	skb_reserve(skb, pad);
1614ac1f1f6cSJason Wang 	skb_put(skb, len);
16154b663366SAlexis Bauvin 	skb_set_owner_w(skb, tfile->socket.sk);
1616ac1f1f6cSJason Wang 
1617ac1f1f6cSJason Wang 	get_page(alloc_frag->page);
1618ac1f1f6cSJason Wang 	alloc_frag->offset += buflen;
1619ac1f1f6cSJason Wang 
1620ac1f1f6cSJason Wang 	return skb;
1621ac1f1f6cSJason Wang }
1622ac1f1f6cSJason Wang 
16238ae1aff0SJason Wang static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
16248ae1aff0SJason Wang 		       struct xdp_buff *xdp, u32 act)
16258ae1aff0SJason Wang {
16268ae1aff0SJason Wang 	int err;
16278ae1aff0SJason Wang 
16288ae1aff0SJason Wang 	switch (act) {
16298ae1aff0SJason Wang 	case XDP_REDIRECT:
16308ae1aff0SJason Wang 		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
16318ae1aff0SJason Wang 		if (err)
16328ae1aff0SJason Wang 			return err;
16338ae1aff0SJason Wang 		break;
16348ae1aff0SJason Wang 	case XDP_TX:
16358ae1aff0SJason Wang 		err = tun_xdp_tx(tun->dev, xdp);
16368ae1aff0SJason Wang 		if (err < 0)
16378ae1aff0SJason Wang 			return err;
16388ae1aff0SJason Wang 		break;
16398ae1aff0SJason Wang 	case XDP_PASS:
16408ae1aff0SJason Wang 		break;
16418ae1aff0SJason Wang 	default:
16428ae1aff0SJason Wang 		bpf_warn_invalid_xdp_action(act);
16438ae1aff0SJason Wang 		/* fall through */
16448ae1aff0SJason Wang 	case XDP_ABORTED:
16458ae1aff0SJason Wang 		trace_xdp_exception(tun->dev, xdp_prog, act);
16468ae1aff0SJason Wang 		/* fall through */
16478ae1aff0SJason Wang 	case XDP_DROP:
16488ae1aff0SJason Wang 		this_cpu_inc(tun->pcpu_stats->rx_dropped);
16498ae1aff0SJason Wang 		break;
16508ae1aff0SJason Wang 	}
16518ae1aff0SJason Wang 
16528ae1aff0SJason Wang 	return act;
16538ae1aff0SJason Wang }
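
/* Editor's note: tun_xdp_act() folds the verdict and the error path
 * into one return value: a negative errno on failure, otherwise the
 * act itself (XDP_PASS, XDP_TX, XDP_REDIRECT, XDP_DROP, ...).  Callers
 * such as tun_build_skb() below check "err < 0" first and then branch
 * on the verdict; for XDP_REDIRECT the caller also still owes an
 * xdp_do_flush_map() once the batch is complete.
 */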
16548ae1aff0SJason Wang 
1655761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1656761876c8SJason Wang 				     struct tun_file *tfile,
165766ccbc9cSJason Wang 				     struct iov_iter *from,
1658761876c8SJason Wang 				     struct virtio_net_hdr *hdr,
16591cfe6e93SJason Wang 				     int len, int *skb_xdp)
166066ccbc9cSJason Wang {
16610bbd7dadSEric Dumazet 	struct page_frag *alloc_frag = &current->task_frag;
1662761876c8SJason Wang 	struct bpf_prog *xdp_prog;
16637df13219SJason Wang 	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
166466ccbc9cSJason Wang 	char *buf;
166566ccbc9cSJason Wang 	size_t copied;
16668ae1aff0SJason Wang 	int pad = TUN_RX_PAD;
16678ae1aff0SJason Wang 	int err = 0;
16687df13219SJason Wang 
16697df13219SJason Wang 	rcu_read_lock();
16707df13219SJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
16717df13219SJason Wang 	if (xdp_prog)
16724f23aff8SJason Wang 		pad += XDP_PACKET_HEADROOM;
16737df13219SJason Wang 	buflen += SKB_DATA_ALIGN(len + pad);
16747df13219SJason Wang 	rcu_read_unlock();
167566ccbc9cSJason Wang 
167663b9ab65SJason Wang 	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
167766ccbc9cSJason Wang 	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
167866ccbc9cSJason Wang 		return ERR_PTR(-ENOMEM);
167966ccbc9cSJason Wang 
168066ccbc9cSJason Wang 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
168166ccbc9cSJason Wang 	copied = copy_page_from_iter(alloc_frag->page,
16827df13219SJason Wang 				     alloc_frag->offset + pad,
168366ccbc9cSJason Wang 				     len, from);
168466ccbc9cSJason Wang 	if (copied != len)
168566ccbc9cSJason Wang 		return ERR_PTR(-EFAULT);
168666ccbc9cSJason Wang 
16877df13219SJason Wang 	/* There's a small window in which XDP may be attached after the
16887df13219SJason Wang 	 * check of xdp_prog above. This should be rare, so for simplicity
16897df13219SJason Wang 	 * we do XDP on the skb in case the headroom is not enough.
16907df13219SJason Wang 	 */
1691ac1f1f6cSJason Wang 	if (hdr->gso_type || !xdp_prog) {
16921cfe6e93SJason Wang 		*skb_xdp = 1;
16934b663366SAlexis Bauvin 		return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
16944b663366SAlexis Bauvin 				       pad);
1695ac1f1f6cSJason Wang 	}
1696ac1f1f6cSJason Wang 
16971cfe6e93SJason Wang 	*skb_xdp = 0;
169866ccbc9cSJason Wang 
16996547e387SToshiaki Makita 	local_bh_disable();
1700761876c8SJason Wang 	rcu_read_lock();
1701761876c8SJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
17028ae1aff0SJason Wang 	if (xdp_prog) {
1703761876c8SJason Wang 		struct xdp_buff xdp;
1704761876c8SJason Wang 		u32 act;
1705761876c8SJason Wang 
1706761876c8SJason Wang 		xdp.data_hard_start = buf;
17077df13219SJason Wang 		xdp.data = buf + pad;
1708de8f3a83SDaniel Borkmann 		xdp_set_data_meta_invalid(&xdp);
1709761876c8SJason Wang 		xdp.data_end = xdp.data + len;
17108bf5c4eeSJesper Dangaard Brouer 		xdp.rxq = &tfile->xdp_rxq;
1711761876c8SJason Wang 
17128ae1aff0SJason Wang 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
17138ae1aff0SJason Wang 		if (act == XDP_REDIRECT || act == XDP_TX) {
1714761876c8SJason Wang 			get_page(alloc_frag->page);
1715761876c8SJason Wang 			alloc_frag->offset += buflen;
1716761876c8SJason Wang 		}
17178ae1aff0SJason Wang 		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
17188ae1aff0SJason Wang 		if (err < 0)
17198ae1aff0SJason Wang 			goto err_xdp;
17201a097910SJason Wang 		if (err == XDP_REDIRECT)
17211a097910SJason Wang 			xdp_do_flush_map();
17228ae1aff0SJason Wang 		if (err != XDP_PASS)
17238ae1aff0SJason Wang 			goto out;
17248ae1aff0SJason Wang 
17258ae1aff0SJason Wang 		pad = xdp.data - xdp.data_hard_start;
17268ae1aff0SJason Wang 		len = xdp.data_end - xdp.data;
1727761876c8SJason Wang 	}
1728761876c8SJason Wang 	rcu_read_unlock();
17296547e387SToshiaki Makita 	local_bh_enable();
1730291aeb2bSJason Wang 
17314b663366SAlexis Bauvin 	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
1732761876c8SJason Wang 
17338ae1aff0SJason Wang err_xdp:
1734761876c8SJason Wang 	put_page(alloc_frag->page);
1735f7053b6cSJason Wang out:
1736761876c8SJason Wang 	rcu_read_unlock();
17376547e387SToshiaki Makita 	local_bh_enable();
1738761876c8SJason Wang 	return NULL;
173966ccbc9cSJason Wang }
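
/* Editor's note: tun_build_skb() has three distinct outcomes: a valid
 * skb (continue up the stack), an ERR_PTR on allocation/copy failure,
 * or NULL when XDP consumed the buffer (XDP_TX/XDP_REDIRECT/XDP_DROP),
 * in which case tun_get_user() simply reports the full length as
 * written.  Note xdp_prog is sampled twice: once, under RCU, before
 * the copy to size the headroom, and again afterwards to actually run
 * the program; *skb_xdp = 1 defers to generic XDP for packets the
 * program cannot handle here (e.g. GSO).
 */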
174066ccbc9cSJason Wang 
17411da177e4SLinus Torvalds /* Get packet from user space buffer */
174254f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1743f5ff53b4SAl Viro 			    void *msg_control, struct iov_iter *from,
17445503fcecSJason Wang 			    int noblock, bool more)
17451da177e4SLinus Torvalds {
174609640e63SHarvey Harrison 	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
17471da177e4SLinus Torvalds 	struct sk_buff *skb;
1748f5ff53b4SAl Viro 	size_t total_len = iov_iter_count(from);
1749eaea34b2SPaolo Abeni 	size_t len = total_len, align = tun->align, linear;
1750f43798c2SRusty Russell 	struct virtio_net_hdr gso = { 0 };
1751608b9977SPaolo Abeni 	struct tun_pcpu_stats *stats;
175296f8d9ecSJason Wang 	int good_linear;
17530690899bSMichael S. Tsirkin 	int copylen;
17540690899bSMichael S. Tsirkin 	bool zerocopy = false;
17550690899bSMichael S. Tsirkin 	int err;
175696f84061SJason Wang 	u32 rxhash = 0;
17571cfe6e93SJason Wang 	int skb_xdp = 1;
1758af3fb24eSEric Dumazet 	bool frags = tun_napi_frags_enabled(tfile);
17591da177e4SLinus Torvalds 
176040630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_NO_PI)) {
176115718ea0SDan Carpenter 		if (len < sizeof(pi))
17621da177e4SLinus Torvalds 			return -EINVAL;
176315718ea0SDan Carpenter 		len -= sizeof(pi);
17641da177e4SLinus Torvalds 
1765cbbd26b8SAl Viro 		if (!copy_from_iter_full(&pi, sizeof(pi), from))
17661da177e4SLinus Torvalds 			return -EFAULT;
17671da177e4SLinus Torvalds 	}
17681da177e4SLinus Torvalds 
176940630b82SMichael S. Tsirkin 	if (tun->flags & IFF_VNET_HDR) {
1770e1edab87SWillem de Bruijn 		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1771e1edab87SWillem de Bruijn 
1772e1edab87SWillem de Bruijn 		if (len < vnet_hdr_sz)
1773f43798c2SRusty Russell 			return -EINVAL;
1774e1edab87SWillem de Bruijn 		len -= vnet_hdr_sz;
1775f43798c2SRusty Russell 
1776cbbd26b8SAl Viro 		if (!copy_from_iter_full(&gso, sizeof(gso), from))
1777f43798c2SRusty Russell 			return -EFAULT;
1778f43798c2SRusty Russell 
17794909122fSHerbert Xu 		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
178056f0dcc5SMichael S. Tsirkin 		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
178156f0dcc5SMichael S. Tsirkin 			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
17824909122fSHerbert Xu 
178356f0dcc5SMichael S. Tsirkin 		if (tun16_to_cpu(tun, gso.hdr_len) > len)
1784f43798c2SRusty Russell 			return -EINVAL;
1785e1edab87SWillem de Bruijn 		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1786f43798c2SRusty Russell 	}
1787f43798c2SRusty Russell 
178840630b82SMichael S. Tsirkin 	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1789a504b86eSstephen hemminger 		align += NET_IP_ALIGN;
17900eca93bcSHerbert Xu 		if (unlikely(len < ETH_HLEN ||
179156f0dcc5SMichael S. Tsirkin 			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1792e01bf1c8SRusty Russell 			return -EINVAL;
1793e01bf1c8SRusty Russell 	}
17941da177e4SLinus Torvalds 
179596f8d9ecSJason Wang 	good_linear = SKB_MAX_HEAD(align);
179696f8d9ecSJason Wang 
179788529176SJason Wang 	if (msg_control) {
1798f5ff53b4SAl Viro 		struct iov_iter i = *from;
1799f5ff53b4SAl Viro 
180088529176SJason Wang 		/* There are 256 bytes to be copied into the skb, so there is
180188529176SJason Wang 		 * enough room to expand the skb head in case it is needed.
18020690899bSMichael S. Tsirkin 		 * The rest of the buffer is mapped from userspace.
18030690899bSMichael S. Tsirkin 		 */
180456f0dcc5SMichael S. Tsirkin 		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
180596f8d9ecSJason Wang 		if (copylen > good_linear)
180696f8d9ecSJason Wang 			copylen = good_linear;
18073dd5c330SJason Wang 		linear = copylen;
1808f5ff53b4SAl Viro 		iov_iter_advance(&i, copylen);
1809f5ff53b4SAl Viro 		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
181088529176SJason Wang 			zerocopy = true;
181188529176SJason Wang 	}
181288529176SJason Wang 
181390e33d45SPetar Penkov 	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
18141cfe6e93SJason Wang 		/* For packets that are not easy to process here
18151cfe6e93SJason Wang 		 * (e.g. GSO or jumbo packets), we do XDP after the
18161cfe6e93SJason Wang 		 * skb has been created, via the generic XDP routine.
18171cfe6e93SJason Wang 		 */
18181cfe6e93SJason Wang 		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
181966ccbc9cSJason Wang 		if (IS_ERR(skb)) {
182066ccbc9cSJason Wang 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
182166ccbc9cSJason Wang 			return PTR_ERR(skb);
182266ccbc9cSJason Wang 		}
1823761876c8SJason Wang 		if (!skb)
1824761876c8SJason Wang 			return total_len;
182566ccbc9cSJason Wang 	} else {
182688529176SJason Wang 		if (!zerocopy) {
18270690899bSMichael S. Tsirkin 			copylen = len;
182856f0dcc5SMichael S. Tsirkin 			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
182996f8d9ecSJason Wang 				linear = good_linear;
183096f8d9ecSJason Wang 			else
183156f0dcc5SMichael S. Tsirkin 				linear = tun16_to_cpu(tun, gso.hdr_len);
18323dd5c330SJason Wang 		}
18330690899bSMichael S. Tsirkin 
183490e33d45SPetar Penkov 		if (frags) {
183590e33d45SPetar Penkov 			mutex_lock(&tfile->napi_mutex);
183690e33d45SPetar Penkov 			skb = tun_napi_alloc_frags(tfile, copylen, from);
183790e33d45SPetar Penkov 			/* tun_napi_alloc_frags() enforces a layout for the skb.
183890e33d45SPetar Penkov 			 * If zerocopy is enabled, then this layout will be
183990e33d45SPetar Penkov 			 * overwritten by zerocopy_sg_from_iter().
184090e33d45SPetar Penkov 			 */
184190e33d45SPetar Penkov 			zerocopy = false;
184290e33d45SPetar Penkov 		} else {
184390e33d45SPetar Penkov 			skb = tun_alloc_skb(tfile, align, copylen, linear,
184490e33d45SPetar Penkov 					    noblock);
184590e33d45SPetar Penkov 		}
184690e33d45SPetar Penkov 
184733dccbb0SHerbert Xu 		if (IS_ERR(skb)) {
184833dccbb0SHerbert Xu 			if (PTR_ERR(skb) != -EAGAIN)
1849608b9977SPaolo Abeni 				this_cpu_inc(tun->pcpu_stats->rx_dropped);
185090e33d45SPetar Penkov 			if (frags)
185190e33d45SPetar Penkov 				mutex_unlock(&tfile->napi_mutex);
185233dccbb0SHerbert Xu 			return PTR_ERR(skb);
18531da177e4SLinus Torvalds 		}
18541da177e4SLinus Torvalds 
18550690899bSMichael S. Tsirkin 		if (zerocopy)
1856f5ff53b4SAl Viro 			err = zerocopy_sg_from_iter(skb, from);
1857af1cc7a2SJason Wang 		else
1858f5ff53b4SAl Viro 			err = skb_copy_datagram_from_iter(skb, 0, from, len);
18590690899bSMichael S. Tsirkin 
18600690899bSMichael S. Tsirkin 		if (err) {
18614477138fSEric Dumazet 			err = -EFAULT;
18624477138fSEric Dumazet drop:
1863608b9977SPaolo Abeni 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
18648f22757eSDave Jones 			kfree_skb(skb);
186590e33d45SPetar Penkov 			if (frags) {
186690e33d45SPetar Penkov 				tfile->napi.skb = NULL;
186790e33d45SPetar Penkov 				mutex_unlock(&tfile->napi_mutex);
186890e33d45SPetar Penkov 			}
186990e33d45SPetar Penkov 
18704477138fSEric Dumazet 			return err;
18718f22757eSDave Jones 		}
187266ccbc9cSJason Wang 	}
18731da177e4SLinus Torvalds 
18743e9e40e7SJarno Rajahalme 	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1875df10db98SPaolo Abeni 		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1876df10db98SPaolo Abeni 		kfree_skb(skb);
187790e33d45SPetar Penkov 		if (frags) {
187890e33d45SPetar Penkov 			tfile->napi.skb = NULL;
187990e33d45SPetar Penkov 			mutex_unlock(&tfile->napi_mutex);
188090e33d45SPetar Penkov 		}
188190e33d45SPetar Penkov 
1882df10db98SPaolo Abeni 		return -EINVAL;
1883df10db98SPaolo Abeni 	}
1884df10db98SPaolo Abeni 
18851da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
188640630b82SMichael S. Tsirkin 	case IFF_TUN:
188740630b82SMichael S. Tsirkin 		if (tun->flags & IFF_NO_PI) {
18882580c4c1SAlexander Potapenko 			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
18892580c4c1SAlexander Potapenko 
18902580c4c1SAlexander Potapenko 			switch (ip_version) {
18912580c4c1SAlexander Potapenko 			case 4:
1892f09f7ee2SAng Way Chuang 				pi.proto = htons(ETH_P_IP);
1893f09f7ee2SAng Way Chuang 				break;
18942580c4c1SAlexander Potapenko 			case 6:
1895f09f7ee2SAng Way Chuang 				pi.proto = htons(ETH_P_IPV6);
1896f09f7ee2SAng Way Chuang 				break;
1897f09f7ee2SAng Way Chuang 			default:
1898608b9977SPaolo Abeni 				this_cpu_inc(tun->pcpu_stats->rx_dropped);
1899f09f7ee2SAng Way Chuang 				kfree_skb(skb);
1900f09f7ee2SAng Way Chuang 				return -EINVAL;
1901f09f7ee2SAng Way Chuang 			}
1902f09f7ee2SAng Way Chuang 		}
1903f09f7ee2SAng Way Chuang 
1904459a98edSArnaldo Carvalho de Melo 		skb_reset_mac_header(skb);
19051da177e4SLinus Torvalds 		skb->protocol = pi.proto;
19064c13eb66SArnaldo Carvalho de Melo 		skb->dev = tun->dev;
19071da177e4SLinus Torvalds 		break;
190840630b82SMichael S. Tsirkin 	case IFF_TAP:
190990e33d45SPetar Penkov 		if (!frags)
19101da177e4SLinus Torvalds 			skb->protocol = eth_type_trans(skb, tun->dev);
19111da177e4SLinus Torvalds 		break;
19126403eab1SJoe Perches 	}
19131da177e4SLinus Torvalds 
19140690899bSMichael S. Tsirkin 	/* copy skb_ubuf_info for callback when skb has no error */
19150690899bSMichael S. Tsirkin 	if (zerocopy) {
19160690899bSMichael S. Tsirkin 		skb_shinfo(skb)->destructor_arg = msg_control;
19170690899bSMichael S. Tsirkin 		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1918c9af6db4SPravin B Shelar 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1919af1cc7a2SJason Wang 	} else if (msg_control) {
1920af1cc7a2SJason Wang 		struct ubuf_info *uarg = msg_control;
1921af1cc7a2SJason Wang 		uarg->callback(uarg, false);
19220690899bSMichael S. Tsirkin 	}
19230690899bSMichael S. Tsirkin 
192472f65107SVlad Yasevich 	skb_reset_network_header(skb);
1925d2aa125dSMaxim Mikityanskiy 	skb_probe_transport_header(skb);
192638502af7SJason Wang 
19271cfe6e93SJason Wang 	if (skb_xdp) {
1928761876c8SJason Wang 		struct bpf_prog *xdp_prog;
1929761876c8SJason Wang 		int ret;
1930761876c8SJason Wang 
19316547e387SToshiaki Makita 		local_bh_disable();
1932761876c8SJason Wang 		rcu_read_lock();
1933761876c8SJason Wang 		xdp_prog = rcu_dereference(tun->xdp_prog);
1934761876c8SJason Wang 		if (xdp_prog) {
1935761876c8SJason Wang 			ret = do_xdp_generic(xdp_prog, skb);
1936761876c8SJason Wang 			if (ret != XDP_PASS) {
1937761876c8SJason Wang 				rcu_read_unlock();
19386547e387SToshiaki Makita 				local_bh_enable();
1939761876c8SJason Wang 				return total_len;
1940761876c8SJason Wang 			}
1941761876c8SJason Wang 		}
1942761876c8SJason Wang 		rcu_read_unlock();
19436547e387SToshiaki Makita 		local_bh_enable();
1944761876c8SJason Wang 	}
1945761876c8SJason Wang 
1946cf1a1e07SPaolo Abeni 	/* Compute the costly rx hash only if needed for flow updates.
1947cf1a1e07SPaolo Abeni 	 * There is a very small possibility of out-of-order delivery while
1948cf1a1e07SPaolo Abeni 	 * switching, but it is not worth optimizing for.
1949cf1a1e07SPaolo Abeni 	 */
1950cf1a1e07SPaolo Abeni 	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1951cf1a1e07SPaolo Abeni 	    !tfile->detached)
1952feec084aSJason Wang 		rxhash = __skb_get_hash_symmetric(skb);
195394317099SPetar Penkov 
19544477138fSEric Dumazet 	rcu_read_lock();
19554477138fSEric Dumazet 	if (unlikely(!(tun->dev->flags & IFF_UP))) {
19564477138fSEric Dumazet 		err = -EIO;
19579180bb4fSEric Dumazet 		rcu_read_unlock();
19584477138fSEric Dumazet 		goto drop;
19594477138fSEric Dumazet 	}
19604477138fSEric Dumazet 
196190e33d45SPetar Penkov 	if (frags) {
196290e33d45SPetar Penkov 		/* Exercise flow dissector code path. */
1963c43f1255SStanislav Fomichev 		u32 headlen = eth_get_headlen(tun->dev, skb->data,
1964c43f1255SStanislav Fomichev 					      skb_headlen(skb));
196590e33d45SPetar Penkov 
1966010f245bSEric Dumazet 		if (unlikely(headlen > skb_headlen(skb))) {
196790e33d45SPetar Penkov 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
196890e33d45SPetar Penkov 			napi_free_frags(&tfile->napi);
19694477138fSEric Dumazet 			rcu_read_unlock();
197090e33d45SPetar Penkov 			mutex_unlock(&tfile->napi_mutex);
197190e33d45SPetar Penkov 			WARN_ON(1);
197290e33d45SPetar Penkov 			return -ENOMEM;
197390e33d45SPetar Penkov 		}
197490e33d45SPetar Penkov 
197590e33d45SPetar Penkov 		local_bh_disable();
197690e33d45SPetar Penkov 		napi_gro_frags(&tfile->napi);
197790e33d45SPetar Penkov 		local_bh_enable();
197890e33d45SPetar Penkov 		mutex_unlock(&tfile->napi_mutex);
1979aec72f33SEric Dumazet 	} else if (tfile->napi_enabled) {
198094317099SPetar Penkov 		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
198194317099SPetar Penkov 		int queue_len;
198294317099SPetar Penkov 
198394317099SPetar Penkov 		spin_lock_bh(&queue->lock);
198494317099SPetar Penkov 		__skb_queue_tail(queue, skb);
198594317099SPetar Penkov 		queue_len = skb_queue_len(queue);
198694317099SPetar Penkov 		spin_unlock(&queue->lock);
198794317099SPetar Penkov 
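		/* BH is still disabled at this point: spin_lock_bh() above
		 * disabled it and the plain spin_unlock() deliberately left
		 * it off, so napi_schedule() runs with BH disabled until
		 * the local_bh_enable() below.
		 */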
198894317099SPetar Penkov 		if (!more || queue_len > NAPI_POLL_WEIGHT)
198994317099SPetar Penkov 			napi_schedule(&tfile->napi);
199094317099SPetar Penkov 
199194317099SPetar Penkov 		local_bh_enable();
199294317099SPetar Penkov 	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
19935503fcecSJason Wang 		tun_rx_batched(tun, tfile, skb, more);
199494317099SPetar Penkov 	} else {
19951da177e4SLinus Torvalds 		netif_rx_ni(skb);
199694317099SPetar Penkov 	}
19974477138fSEric Dumazet 	rcu_read_unlock();
19981da177e4SLinus Torvalds 
1999608b9977SPaolo Abeni 	stats = get_cpu_ptr(tun->pcpu_stats);
2000608b9977SPaolo Abeni 	u64_stats_update_begin(&stats->syncp);
20015260dd3eSEric Dumazet 	u64_stats_inc(&stats->rx_packets);
20025260dd3eSEric Dumazet 	u64_stats_add(&stats->rx_bytes, len);
2003608b9977SPaolo Abeni 	u64_stats_update_end(&stats->syncp);
2004608b9977SPaolo Abeni 	put_cpu_ptr(stats);
20051da177e4SLinus Torvalds 
200696f84061SJason Wang 	if (rxhash)
20079e85722dSJason Wang 		tun_flow_update(tun, rxhash, tfile);
200896f84061SJason Wang 
20090690899bSMichael S. Tsirkin 	return total_len;
20101da177e4SLinus Torvalds }
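
/* Editor's note: on the write side each packet arrives from userspace
 * laid out as:
 *
 *	[ struct tun_pi        - 4 bytes, unless IFF_NO_PI           ]
 *	[ struct virtio_net_hdr + padding up to vnet_hdr_sz,
 *	  if IFF_VNET_HDR                                            ]
 *	[ payload ...                                                ]
 *
 * vnet_hdr_sz can exceed sizeof(struct virtio_net_hdr) when userspace
 * grew it via TUNSETVNETHDRSZ; the iov_iter_advance() after reading
 * the gso header skips that tail padding so the payload is picked up
 * at the right offset.
 */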
20111da177e4SLinus Torvalds 
2012f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
20131da177e4SLinus Torvalds {
201433dccbb0SHerbert Xu 	struct file *file = iocb->ki_filp;
201554f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
20169484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
2017631ab46bSEric W. Biederman 	ssize_t result;
20181da177e4SLinus Torvalds 
20191da177e4SLinus Torvalds 	if (!tun)
20201da177e4SLinus Torvalds 		return -EBADFD;
20211da177e4SLinus Torvalds 
20225503fcecSJason Wang 	result = tun_get_user(tun, tfile, NULL, from,
20235503fcecSJason Wang 			      file->f_flags & O_NONBLOCK, false);
2024631ab46bSEric W. Biederman 
2025631ab46bSEric W. Biederman 	tun_put(tun);
2026631ab46bSEric W. Biederman 	return result;
20271da177e4SLinus Torvalds }
20281da177e4SLinus Torvalds 
2029fc72d1d5SJason Wang static ssize_t tun_put_user_xdp(struct tun_struct *tun,
2030fc72d1d5SJason Wang 				struct tun_file *tfile,
20311ffcbc85SJesper Dangaard Brouer 				struct xdp_frame *xdp_frame,
2032fc72d1d5SJason Wang 				struct iov_iter *iter)
2033fc72d1d5SJason Wang {
2034fc72d1d5SJason Wang 	int vnet_hdr_sz = 0;
20351ffcbc85SJesper Dangaard Brouer 	size_t size = xdp_frame->len;
2036fc72d1d5SJason Wang 	struct tun_pcpu_stats *stats;
2037fc72d1d5SJason Wang 	size_t ret;
2038fc72d1d5SJason Wang 
2039fc72d1d5SJason Wang 	if (tun->flags & IFF_VNET_HDR) {
2040fc72d1d5SJason Wang 		struct virtio_net_hdr gso = { 0 };
2041fc72d1d5SJason Wang 
2042fc72d1d5SJason Wang 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2043fc72d1d5SJason Wang 		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
2044fc72d1d5SJason Wang 			return -EINVAL;
2045fc72d1d5SJason Wang 		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
2046fc72d1d5SJason Wang 			     sizeof(gso)))
2047fc72d1d5SJason Wang 			return -EFAULT;
2048fc72d1d5SJason Wang 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2049fc72d1d5SJason Wang 	}
2050fc72d1d5SJason Wang 
20511ffcbc85SJesper Dangaard Brouer 	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
2052fc72d1d5SJason Wang 
2053fc72d1d5SJason Wang 	stats = get_cpu_ptr(tun->pcpu_stats);
2054fc72d1d5SJason Wang 	u64_stats_update_begin(&stats->syncp);
20555260dd3eSEric Dumazet 	u64_stats_inc(&stats->tx_packets);
20565260dd3eSEric Dumazet 	u64_stats_add(&stats->tx_bytes, ret);
2057fc72d1d5SJason Wang 	u64_stats_update_end(&stats->syncp);
2058fc72d1d5SJason Wang 	put_cpu_ptr(tun->pcpu_stats);
2059fc72d1d5SJason Wang 
2060fc72d1d5SJason Wang 	return ret;
2061fc72d1d5SJason Wang }
2062fc72d1d5SJason Wang 
20631da177e4SLinus Torvalds /* Put packet to the user space buffer */
20646f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun,
206554f968d6SJason Wang 			    struct tun_file *tfile,
20661da177e4SLinus Torvalds 			    struct sk_buff *skb,
2067e0b46d0eSHerbert Xu 			    struct iov_iter *iter)
20681da177e4SLinus Torvalds {
20691da177e4SLinus Torvalds 	struct tun_pi pi = { 0, skb->protocol };
2070608b9977SPaolo Abeni 	struct tun_pcpu_stats *stats;
2071e0b46d0eSHerbert Xu 	ssize_t total;
20728c847d25SJason Wang 	int vlan_offset = 0;
2073a8f9bfdfSHerbert Xu 	int vlan_hlen = 0;
20742eb783c4SHerbert Xu 	int vnet_hdr_sz = 0;
2075a8f9bfdfSHerbert Xu 
2076df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
2077a8f9bfdfSHerbert Xu 		vlan_hlen = VLAN_HLEN;
20781da177e4SLinus Torvalds 
207940630b82SMichael S. Tsirkin 	if (tun->flags & IFF_VNET_HDR)
2080e1edab87SWillem de Bruijn 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
20811da177e4SLinus Torvalds 
2082e0b46d0eSHerbert Xu 	total = skb->len + vlan_hlen + vnet_hdr_sz;
2083e0b46d0eSHerbert Xu 
208440630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_NO_PI)) {
2085e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < sizeof(pi))
20861da177e4SLinus Torvalds 			return -EINVAL;
20871da177e4SLinus Torvalds 
2088e0b46d0eSHerbert Xu 		total += sizeof(pi);
2089e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < total) {
20901da177e4SLinus Torvalds 			/* Packet will be stripped (truncated) */
20911da177e4SLinus Torvalds 			pi.flags |= TUN_PKT_STRIP;
20921da177e4SLinus Torvalds 		}
20931da177e4SLinus Torvalds 
2094e0b46d0eSHerbert Xu 		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
20951da177e4SLinus Torvalds 			return -EFAULT;
20961da177e4SLinus Torvalds 	}
20971da177e4SLinus Torvalds 
20982eb783c4SHerbert Xu 	if (vnet_hdr_sz) {
20999403cd7cSJarno Rajahalme 		struct virtio_net_hdr gso;
210034166093SMike Rapoport 
2101e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < vnet_hdr_sz)
2102f43798c2SRusty Russell 			return -EINVAL;
2103f43798c2SRusty Russell 
21043e9e40e7SJarno Rajahalme 		if (virtio_net_hdr_from_skb(skb, &gso,
2105fd3a8862SWillem de Bruijn 					    tun_is_little_endian(tun), true,
2106fd3a8862SWillem de Bruijn 					    vlan_hlen)) {
2107f43798c2SRusty Russell 			struct skb_shared_info *sinfo = skb_shinfo(skb);
21086b8a66eeSJoe Perches 			pr_err("unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
211056f0dcc5SMichael S. Tsirkin 			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
211156f0dcc5SMichael S. Tsirkin 			       tun16_to_cpu(tun, gso.hdr_len));
2112ef3db4a5SMichael S. Tsirkin 			print_hex_dump(KERN_ERR, "tun: ",
2113ef3db4a5SMichael S. Tsirkin 				       DUMP_PREFIX_NONE,
2114ef3db4a5SMichael S. Tsirkin 				       16, 1, skb->head,
211556f0dcc5SMichael S. Tsirkin 				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2116ef3db4a5SMichael S. Tsirkin 			WARN_ON_ONCE(1);
2117ef3db4a5SMichael S. Tsirkin 			return -EINVAL;
2118ef3db4a5SMichael S. Tsirkin 		}
2119f43798c2SRusty Russell 
2120e0b46d0eSHerbert Xu 		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2121f43798c2SRusty Russell 			return -EFAULT;
21228c847d25SJason Wang 
21238c847d25SJason Wang 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2124f43798c2SRusty Russell 	}
2125f43798c2SRusty Russell 
2126a8f9bfdfSHerbert Xu 	if (vlan_hlen) {
2127e0b46d0eSHerbert Xu 		int ret;
2128aff3d70aSJason Wang 		struct veth veth;
21291da177e4SLinus Torvalds 
21306680ec68SJason Wang 		veth.h_vlan_proto = skb->vlan_proto;
2131df8a39deSJiri Pirko 		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
21321da177e4SLinus Torvalds 
21336680ec68SJason Wang 		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
21346680ec68SJason Wang 
2135e0b46d0eSHerbert Xu 		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2136e0b46d0eSHerbert Xu 		if (ret || !iov_iter_count(iter))
21376680ec68SJason Wang 			goto done;
21386680ec68SJason Wang 
2139e0b46d0eSHerbert Xu 		ret = copy_to_iter(&veth, sizeof(veth), iter);
2140e0b46d0eSHerbert Xu 		if (ret != sizeof(veth) || !iov_iter_count(iter))
21416680ec68SJason Wang 			goto done;
21426680ec68SJason Wang 	}
21436680ec68SJason Wang 
2144e0b46d0eSHerbert Xu 	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
21456680ec68SJason Wang 
21466680ec68SJason Wang done:
2147608b9977SPaolo Abeni 	/* caller is in process context */
2148608b9977SPaolo Abeni 	stats = get_cpu_ptr(tun->pcpu_stats);
2149608b9977SPaolo Abeni 	u64_stats_update_begin(&stats->syncp);
21505260dd3eSEric Dumazet 	u64_stats_inc(&stats->tx_packets);
21515260dd3eSEric Dumazet 	u64_stats_add(&stats->tx_bytes, skb->len + vlan_hlen);
2152608b9977SPaolo Abeni 	u64_stats_update_end(&stats->syncp);
2153608b9977SPaolo Abeni 	put_cpu_ptr(tun->pcpu_stats);
21541da177e4SLinus Torvalds 
21551da177e4SLinus Torvalds 	return total;
21561da177e4SLinus Torvalds }
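
/* Editor's note: the read side mirrors the write-side layout:
 *
 *	[ struct tun_pi (unless IFF_NO_PI; TUN_PKT_STRIP on truncation) ]
 *	[ struct virtio_net_hdr + padding up to vnet_hdr_sz             ]
 *	[ ethernet addresses ][ reinserted VLAN tag ][ rest of frame    ]
 *
 * The VLAN splice exists because the tag lives in skb metadata
 * (skb_vlan_tag_present()) rather than in the packet bytes, so it is
 * copied back in between the address fields and the original
 * ethertype before the frame is handed to userspace.
 */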
21571da177e4SLinus Torvalds 
2158fc72d1d5SJason Wang static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
21591576d986SJason Wang {
21601576d986SJason Wang 	DECLARE_WAITQUEUE(wait, current);
2161fc72d1d5SJason Wang 	void *ptr = NULL;
2162f48cc6b2SJason Wang 	int error = 0;
21631576d986SJason Wang 
2164fc72d1d5SJason Wang 	ptr = ptr_ring_consume(&tfile->tx_ring);
2165fc72d1d5SJason Wang 	if (ptr)
21661576d986SJason Wang 		goto out;
21671576d986SJason Wang 	if (noblock) {
2168f48cc6b2SJason Wang 		error = -EAGAIN;
21691576d986SJason Wang 		goto out;
21701576d986SJason Wang 	}
21711576d986SJason Wang 
2172333f7909SAl Viro 	add_wait_queue(&tfile->socket.wq.wait, &wait);
21731576d986SJason Wang 
21741576d986SJason Wang 	while (1) {
217571828b22STimur Celik 		set_current_state(TASK_INTERRUPTIBLE);
2176fc72d1d5SJason Wang 		ptr = ptr_ring_consume(&tfile->tx_ring);
2177fc72d1d5SJason Wang 		if (ptr)
21781576d986SJason Wang 			break;
21791576d986SJason Wang 		if (signal_pending(current)) {
2180f48cc6b2SJason Wang 			error = -ERESTARTSYS;
21811576d986SJason Wang 			break;
21821576d986SJason Wang 		}
21831576d986SJason Wang 		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2184f48cc6b2SJason Wang 			error = -EFAULT;
21851576d986SJason Wang 			break;
21861576d986SJason Wang 		}
21871576d986SJason Wang 
21881576d986SJason Wang 		schedule();
21891576d986SJason Wang 	}
21901576d986SJason Wang 
2191ecef67cbSTimur Celik 	__set_current_state(TASK_RUNNING);
2192333f7909SAl Viro 	remove_wait_queue(&tfile->socket.wq.wait, &wait);
21931576d986SJason Wang 
21941576d986SJason Wang out:
2195f48cc6b2SJason Wang 	*err = error;
2196fc72d1d5SJason Wang 	return ptr;
21971576d986SJason Wang }
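
/* Editor's note: the loop above is the standard lost-wakeup-safe wait:
 * the task is queued on the socket's waitqueue and set
 * TASK_INTERRUPTIBLE *before* each ptr_ring_consume() attempt, so a
 * producer that fills the ring and calls sk_data_ready() between the
 * check and schedule() still finds and wakes the sleeper.  A minimal
 * sketch of the pattern (signal/shutdown checks elided):
 *
 *	add_wait_queue(wq, &wait);
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	remove_wait_queue(wq, &wait);
 */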
21981576d986SJason Wang 
219954f968d6SJason Wang static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
22009b067034SAl Viro 			   struct iov_iter *to,
2201fc72d1d5SJason Wang 			   int noblock, void *ptr)
22021da177e4SLinus Torvalds {
22039b067034SAl Viro 	ssize_t ret;
22041576d986SJason Wang 	int err;
22051da177e4SLinus Torvalds 
22063872baf6SRami Rosen 	tun_debug(KERN_INFO, tun, "tun_do_read\n");
22071da177e4SLinus Torvalds 
2208c33ee15bSWei Xu 	if (!iov_iter_count(to)) {
2209fc72d1d5SJason Wang 		tun_ptr_free(ptr);
22109b067034SAl Viro 		return 0;
2211c33ee15bSWei Xu 	}
22121da177e4SLinus Torvalds 
2213fc72d1d5SJason Wang 	if (!ptr) {
22141576d986SJason Wang 		/* Read frames from ring */
2215fc72d1d5SJason Wang 		ptr = tun_ring_recv(tfile, noblock, &err);
2216fc72d1d5SJason Wang 		if (!ptr)
2217957f094fSAlex Gartrell 			return err;
2218ac77cfd4SJason Wang 	}
2219e0b46d0eSHerbert Xu 
22201ffcbc85SJesper Dangaard Brouer 	if (tun_is_xdp_frame(ptr)) {
22211ffcbc85SJesper Dangaard Brouer 		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2222fc72d1d5SJason Wang 
22231ffcbc85SJesper Dangaard Brouer 		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
222403993094SJesper Dangaard Brouer 		xdp_return_frame(xdpf);
2225fc72d1d5SJason Wang 	} else {
2226fc72d1d5SJason Wang 		struct sk_buff *skb = ptr;
2227fc72d1d5SJason Wang 
22289b067034SAl Viro 		ret = tun_put_user(tun, tfile, skb, to);
2229f51a5e82SJason Wang 		if (unlikely(ret < 0))
22301da177e4SLinus Torvalds 			kfree_skb(skb);
2231f51a5e82SJason Wang 		else
2232f51a5e82SJason Wang 			consume_skb(skb);
2233fc72d1d5SJason Wang 	}
22341da177e4SLinus Torvalds 
223505c2828cSMichael S. Tsirkin 	return ret;
223605c2828cSMichael S. Tsirkin }
223705c2828cSMichael S. Tsirkin 
22389b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
223905c2828cSMichael S. Tsirkin {
224005c2828cSMichael S. Tsirkin 	struct file *file = iocb->ki_filp;
224105c2828cSMichael S. Tsirkin 	struct tun_file *tfile = file->private_data;
22429484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
22439b067034SAl Viro 	ssize_t len = iov_iter_count(to), ret;
224405c2828cSMichael S. Tsirkin 
224505c2828cSMichael S. Tsirkin 	if (!tun)
224605c2828cSMichael S. Tsirkin 		return -EBADFD;
2247ac77cfd4SJason Wang 	ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
224842404c09SDavid S. Miller 	ret = min_t(ssize_t, ret, len);
2249d0b7da8aSZhi Yong Wu 	if (ret > 0)
2250d0b7da8aSZhi Yong Wu 		iocb->ki_pos = ret;
2251631ab46bSEric W. Biederman 	tun_put(tun);
22521da177e4SLinus Torvalds 	return ret;
22531da177e4SLinus Torvalds }
22541da177e4SLinus Torvalds 
2255cd5681d7SJason Wang static void tun_prog_free(struct rcu_head *rcu)
225696f84061SJason Wang {
2257cd5681d7SJason Wang 	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
225896f84061SJason Wang 
225996f84061SJason Wang 	bpf_prog_destroy(prog->prog);
226096f84061SJason Wang 	kfree(prog);
226196f84061SJason Wang }
226296f84061SJason Wang 
22639d6474e4SJason Wang static int __tun_set_ebpf(struct tun_struct *tun,
22649d6474e4SJason Wang 			  struct tun_prog __rcu **prog_p,
226596f84061SJason Wang 			  struct bpf_prog *prog)
226696f84061SJason Wang {
2267cd5681d7SJason Wang 	struct tun_prog *old, *new = NULL;
226896f84061SJason Wang 
226996f84061SJason Wang 	if (prog) {
227096f84061SJason Wang 		new = kmalloc(sizeof(*new), GFP_KERNEL);
227196f84061SJason Wang 		if (!new)
227296f84061SJason Wang 			return -ENOMEM;
227396f84061SJason Wang 		new->prog = prog;
227496f84061SJason Wang 	}
227596f84061SJason Wang 
2276124da8f6SJason Wang 	spin_lock_bh(&tun->lock);
2277cd5681d7SJason Wang 	old = rcu_dereference_protected(*prog_p,
2278124da8f6SJason Wang 					lockdep_is_held(&tun->lock));
2279cd5681d7SJason Wang 	rcu_assign_pointer(*prog_p, new);
2280124da8f6SJason Wang 	spin_unlock_bh(&tun->lock);
228196f84061SJason Wang 
228296f84061SJason Wang 	if (old)
2283cd5681d7SJason Wang 		call_rcu(&old->rcu, tun_prog_free);
228496f84061SJason Wang 
228596f84061SJason Wang 	return 0;
228696f84061SJason Wang }
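
/* Editor's note: the usual RCU publish/retire pattern for a config
 * pointer.  Updaters serialize on tun->lock, readers dereference
 * *prog_p under rcu_read_lock(), and the old program is destroyed only
 * after a grace period via call_rcu(), so an in-flight reader can
 * never see it freed.  Passing prog == NULL uninstalls the program,
 * which is how tun_free_netdev() tears down both hooks.
 */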
228796f84061SJason Wang 
228896442e42SJason Wang static void tun_free_netdev(struct net_device *dev)
228996442e42SJason Wang {
229096442e42SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
229196442e42SJason Wang 
22924008e97fSJason Wang 	BUG_ON(!list_empty(&tun->disabled));
229311fc7d5aSEric Dumazet 
2294608b9977SPaolo Abeni 	free_percpu(tun->pcpu_stats);
229511fc7d5aSEric Dumazet 	/* We clear pcpu_stats so that tun_set_iff() can tell if
229611fc7d5aSEric Dumazet 	 * tun_free_netdev() has been called from register_netdevice().
229711fc7d5aSEric Dumazet 	 */
229811fc7d5aSEric Dumazet 	tun->pcpu_stats = NULL;
229911fc7d5aSEric Dumazet 
230096442e42SJason Wang 	tun_flow_uninit(tun);
23015dbbaf2dSPaul Moore 	security_tun_dev_free_security(tun->security);
2302cd5681d7SJason Wang 	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
2303aff3d70aSJason Wang 	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
230496442e42SJason Wang }
230596442e42SJason Wang 
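/* Setup callback handed to alloc_netdev_mqs(). INVALID_UID/INVALID_GID
 * mean no owner/group restriction has been configured yet.
 */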
23061da177e4SLinus Torvalds static void tun_setup(struct net_device *dev)
23071da177e4SLinus Torvalds {
23081da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
23091da177e4SLinus Torvalds 
23100625c883SEric W. Biederman 	tun->owner = INVALID_UID;
23110625c883SEric W. Biederman 	tun->group = INVALID_GID;
23124e24f2ddSChas Williams 	tun_default_link_ksettings(dev, &tun->link_ksettings);
23131da177e4SLinus Torvalds 
23141da177e4SLinus Torvalds 	dev->ethtool_ops = &tun_ethtool_ops;
2315cf124db5SDavid S. Miller 	dev->needs_free_netdev = true;
2316cf124db5SDavid S. Miller 	dev->priv_destructor = tun_free_netdev;
2317016adb72SJason Wang 	/* We prefer our own queue length */
2318016adb72SJason Wang 	dev->tx_queue_len = TUN_READQ_SIZE;
23191da177e4SLinus Torvalds }
23201da177e4SLinus Torvalds 
2321f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting a tun or tap
2322f019a7a5SEric W. Biederman  * device with netlink.
2323f019a7a5SEric W. Biederman  */
2324a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2325a8b8a889SMatthias Schiffer 			struct netlink_ext_ack *extack)
2326f019a7a5SEric W. Biederman {
232735b827b6SNicolas Dichtel 	NL_SET_ERR_MSG(extack,
232835b827b6SNicolas Dichtel 		       "tun/tap creation via rtnetlink is not supported.");
232935b827b6SNicolas Dichtel 	return -EOPNOTSUPP;
2330f019a7a5SEric W. Biederman }
2331f019a7a5SEric W. Biederman 
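/* rtnl_link_ops get_size/fill_info callbacks: advertise and emit the
 * IFLA_TUN_* attributes so tools such as "ip -d link show" can display
 * tun/tap properties.
 */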
23321ec010e7SSabrina Dubroca static size_t tun_get_size(const struct net_device *dev)
23331ec010e7SSabrina Dubroca {
23341ec010e7SSabrina Dubroca 	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
23351ec010e7SSabrina Dubroca 	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
23361ec010e7SSabrina Dubroca 
23371ec010e7SSabrina Dubroca 	return nla_total_size(sizeof(uid_t)) + /* OWNER */
23381ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(gid_t)) + /* GROUP */
23391ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* TYPE */
23401ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* PI */
23411ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* VNET_HDR */
23421ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* PERSIST */
23431ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
23441ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
23451ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
23461ec010e7SSabrina Dubroca 	       0;
23471ec010e7SSabrina Dubroca }
23481ec010e7SSabrina Dubroca 
23491ec010e7SSabrina Dubroca static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
23501ec010e7SSabrina Dubroca {
23511ec010e7SSabrina Dubroca 	struct tun_struct *tun = netdev_priv(dev);
23521ec010e7SSabrina Dubroca 
23531ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
23541ec010e7SSabrina Dubroca 		goto nla_put_failure;
23551ec010e7SSabrina Dubroca 	if (uid_valid(tun->owner) &&
23561ec010e7SSabrina Dubroca 	    nla_put_u32(skb, IFLA_TUN_OWNER,
23571ec010e7SSabrina Dubroca 			from_kuid_munged(current_user_ns(), tun->owner)))
23581ec010e7SSabrina Dubroca 		goto nla_put_failure;
23591ec010e7SSabrina Dubroca 	if (gid_valid(tun->group) &&
23601ec010e7SSabrina Dubroca 	    nla_put_u32(skb, IFLA_TUN_GROUP,
23611ec010e7SSabrina Dubroca 			from_kgid_munged(current_user_ns(), tun->group)))
23621ec010e7SSabrina Dubroca 		goto nla_put_failure;
23631ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
23641ec010e7SSabrina Dubroca 		goto nla_put_failure;
23651ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
23661ec010e7SSabrina Dubroca 		goto nla_put_failure;
23671ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
23681ec010e7SSabrina Dubroca 		goto nla_put_failure;
23691ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
23701ec010e7SSabrina Dubroca 		       !!(tun->flags & IFF_MULTI_QUEUE)))
23711ec010e7SSabrina Dubroca 		goto nla_put_failure;
23721ec010e7SSabrina Dubroca 	if (tun->flags & IFF_MULTI_QUEUE) {
23731ec010e7SSabrina Dubroca 		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
23741ec010e7SSabrina Dubroca 			goto nla_put_failure;
23751ec010e7SSabrina Dubroca 		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
23761ec010e7SSabrina Dubroca 				tun->numdisabled))
23771ec010e7SSabrina Dubroca 			goto nla_put_failure;
23781ec010e7SSabrina Dubroca 	}
23791ec010e7SSabrina Dubroca 
23801ec010e7SSabrina Dubroca 	return 0;
23811ec010e7SSabrina Dubroca 
23821ec010e7SSabrina Dubroca nla_put_failure:
23831ec010e7SSabrina Dubroca 	return -EMSGSIZE;
23841ec010e7SSabrina Dubroca }
23851ec010e7SSabrina Dubroca 
2386f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = {
2387f019a7a5SEric W. Biederman 	.kind		= DRV_NAME,
2388f019a7a5SEric W. Biederman 	.priv_size	= sizeof(struct tun_struct),
2389f019a7a5SEric W. Biederman 	.setup		= tun_setup,
2390f019a7a5SEric W. Biederman 	.validate	= tun_validate,
23911ec010e7SSabrina Dubroca 	.get_size       = tun_get_size,
23921ec010e7SSabrina Dubroca 	.fill_info      = tun_fill_info,
2393f019a7a5SEric W. Biederman };
2394f019a7a5SEric W. Biederman 
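/* sk_write_space callback: once sndbuf space is available again, wake
 * pollers waiting for EPOLLOUT and deliver SIGIO to O_ASYNC owners.
 */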
239533dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk)
239633dccbb0SHerbert Xu {
239754f968d6SJason Wang 	struct tun_file *tfile;
239843815482SEric Dumazet 	wait_queue_head_t *wqueue;
239933dccbb0SHerbert Xu 
240033dccbb0SHerbert Xu 	if (!sock_writeable(sk))
240133dccbb0SHerbert Xu 		return;
240233dccbb0SHerbert Xu 
24039cd3e072SEric Dumazet 	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
240433dccbb0SHerbert Xu 		return;
240533dccbb0SHerbert Xu 
240643815482SEric Dumazet 	wqueue = sk_sleep(sk);
240743815482SEric Dumazet 	if (wqueue && waitqueue_active(wqueue))
2408a9a08845SLinus Torvalds 		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2409a9a08845SLinus Torvalds 						EPOLLWRNORM | EPOLLWRBAND);
2410c722c625SHerbert Xu 
241154f968d6SJason Wang 	tfile = container_of(sk, struct tun_file, sk);
241254f968d6SJason Wang 	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
241333dccbb0SHerbert Xu }
241433dccbb0SHerbert Xu 
2415f9e06c45SJason Wang static void tun_put_page(struct tun_page *tpage)
2416f9e06c45SJason Wang {
2417f9e06c45SJason Wang 	if (tpage->page)
2418f9e06c45SJason Wang 		__page_frag_cache_drain(tpage->page, tpage->count);
2419f9e06c45SJason Wang }
2420f9e06c45SJason Wang 
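/* Process a single xdp_buff from a TUN_MSG_PTR batch (see
 * tun_sendmsg()). The tun_page accumulator batches page references so
 * that a frag-cache page shared by consecutively dropped buffers is
 * released with one __page_frag_cache_drain() call instead of a
 * put_page() per buffer.
 */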
2421043d222fSJason Wang static int tun_xdp_one(struct tun_struct *tun,
2422043d222fSJason Wang 		       struct tun_file *tfile,
2423f9e06c45SJason Wang 		       struct xdp_buff *xdp, int *flush,
2424f9e06c45SJason Wang 		       struct tun_page *tpage)
2425043d222fSJason Wang {
24264e4b08e5SPrashant Bhole 	unsigned int datasize = xdp->data_end - xdp->data;
2427043d222fSJason Wang 	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
2428043d222fSJason Wang 	struct virtio_net_hdr *gso = &hdr->gso;
2429043d222fSJason Wang 	struct tun_pcpu_stats *stats;
2430043d222fSJason Wang 	struct bpf_prog *xdp_prog;
2431043d222fSJason Wang 	struct sk_buff *skb = NULL;
2432043d222fSJason Wang 	u32 rxhash = 0, act;
2433043d222fSJason Wang 	int buflen = hdr->buflen;
2434043d222fSJason Wang 	int err = 0;
2435043d222fSJason Wang 	bool skb_xdp = false;
2436f9e06c45SJason Wang 	struct page *page;
2437043d222fSJason Wang 
2438043d222fSJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
2439043d222fSJason Wang 	if (xdp_prog) {
2440043d222fSJason Wang 		if (gso->gso_type) {
2441043d222fSJason Wang 			skb_xdp = true;
2442043d222fSJason Wang 			goto build;
2443043d222fSJason Wang 		}
2444043d222fSJason Wang 		xdp_set_data_meta_invalid(xdp);
2445043d222fSJason Wang 		xdp->rxq = &tfile->xdp_rxq;
2446043d222fSJason Wang 
2447043d222fSJason Wang 		act = bpf_prog_run_xdp(xdp_prog, xdp);
2448043d222fSJason Wang 		err = tun_xdp_act(tun, xdp_prog, xdp, act);
2449043d222fSJason Wang 		if (err < 0) {
2450043d222fSJason Wang 			put_page(virt_to_head_page(xdp->data));
2451043d222fSJason Wang 			return err;
2452043d222fSJason Wang 		}
2453043d222fSJason Wang 
2454043d222fSJason Wang 		switch (err) {
2455043d222fSJason Wang 		case XDP_REDIRECT:
2456043d222fSJason Wang 			*flush = true;
2457043d222fSJason Wang 			/* fall through */
2458043d222fSJason Wang 		case XDP_TX:
2459043d222fSJason Wang 			return 0;
2460043d222fSJason Wang 		case XDP_PASS:
2461043d222fSJason Wang 			break;
2462043d222fSJason Wang 		default:
2463f9e06c45SJason Wang 			page = virt_to_head_page(xdp->data);
2464f9e06c45SJason Wang 			if (tpage->page == page) {
2465f9e06c45SJason Wang 				++tpage->count;
2466f9e06c45SJason Wang 			} else {
2467f9e06c45SJason Wang 				tun_put_page(tpage);
2468f9e06c45SJason Wang 				tpage->page = page;
2469f9e06c45SJason Wang 				tpage->count = 1;
2470f9e06c45SJason Wang 			}
2471043d222fSJason Wang 			return 0;
2472043d222fSJason Wang 		}
2473043d222fSJason Wang 	}
2474043d222fSJason Wang 
2475043d222fSJason Wang build:
2476043d222fSJason Wang 	skb = build_skb(xdp->data_hard_start, buflen);
2477043d222fSJason Wang 	if (!skb) {
2478043d222fSJason Wang 		err = -ENOMEM;
2479043d222fSJason Wang 		goto out;
2480043d222fSJason Wang 	}
2481043d222fSJason Wang 
2482043d222fSJason Wang 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
2483043d222fSJason Wang 	skb_put(skb, xdp->data_end - xdp->data);
2484043d222fSJason Wang 
2485043d222fSJason Wang 	if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2486043d222fSJason Wang 		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
2487043d222fSJason Wang 		kfree_skb(skb);
2488043d222fSJason Wang 		err = -EINVAL;
2489043d222fSJason Wang 		goto out;
2490043d222fSJason Wang 	}
2491043d222fSJason Wang 
2492043d222fSJason Wang 	skb->protocol = eth_type_trans(skb, tun->dev);
2493043d222fSJason Wang 	skb_reset_network_header(skb);
2494d2aa125dSMaxim Mikityanskiy 	skb_probe_transport_header(skb);
2495043d222fSJason Wang 
2496043d222fSJason Wang 	if (skb_xdp) {
2497043d222fSJason Wang 		err = do_xdp_generic(xdp_prog, skb);
2498043d222fSJason Wang 		if (err != XDP_PASS)
2499043d222fSJason Wang 			goto out;
2500043d222fSJason Wang 	}
2501043d222fSJason Wang 
2502f29eb2a9SPaolo Abeni 	if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2503f29eb2a9SPaolo Abeni 	    !tfile->detached)
2504043d222fSJason Wang 		rxhash = __skb_get_hash_symmetric(skb);
2505043d222fSJason Wang 
25068ebebcbaSMatthew Cover 	skb_record_rx_queue(skb, tfile->queue_index);
2507043d222fSJason Wang 	netif_receive_skb(skb);
2508043d222fSJason Wang 
25096342ca64SPrashant Bhole 	/* No need for get_cpu_ptr() here since this function is
25106342ca64SPrashant Bhole 	 * always called with bh disabled
25116342ca64SPrashant Bhole 	 */
25126342ca64SPrashant Bhole 	stats = this_cpu_ptr(tun->pcpu_stats);
2513043d222fSJason Wang 	u64_stats_update_begin(&stats->syncp);
25145260dd3eSEric Dumazet 	u64_stats_inc(&stats->rx_packets);
25155260dd3eSEric Dumazet 	u64_stats_add(&stats->rx_bytes, datasize);
2516043d222fSJason Wang 	u64_stats_update_end(&stats->syncp);
2517043d222fSJason Wang 
2518043d222fSJason Wang 	if (rxhash)
2519043d222fSJason Wang 		tun_flow_update(tun, rxhash, tfile);
2520043d222fSJason Wang 
2521043d222fSJason Wang out:
2522043d222fSJason Wang 	return err;
2523043d222fSJason Wang }
2524043d222fSJason Wang 
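/* sendmsg() on the internal socket, used by in-kernel callers such as
 * vhost_net. A tun_msg_ctl of type TUN_MSG_PTR submits an array of
 * xdp_buffs in a single call (run under RCU with bh disabled); anything
 * else takes the regular tun_get_user() copy path.
 */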
25251b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
252605c2828cSMichael S. Tsirkin {
2527043d222fSJason Wang 	int ret, i;
252854f968d6SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
25299484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
2530fe8dd45bSJason Wang 	struct tun_msg_ctl *ctl = m->msg_control;
2531043d222fSJason Wang 	struct xdp_buff *xdp;
253254f968d6SJason Wang 
253354f968d6SJason Wang 	if (!tun)
253454f968d6SJason Wang 		return -EBADFD;
2535f5ff53b4SAl Viro 
2536043d222fSJason Wang 	if (ctl && (ctl->type == TUN_MSG_PTR)) {
25376f0271d9SDavid S. Miller 		struct tun_page tpage;
2538043d222fSJason Wang 		int n = ctl->num;
2539043d222fSJason Wang 		int flush = 0;
2540043d222fSJason Wang 
25416f0271d9SDavid S. Miller 		memset(&tpage, 0, sizeof(tpage));
25426f0271d9SDavid S. Miller 
2543043d222fSJason Wang 		local_bh_disable();
2544043d222fSJason Wang 		rcu_read_lock();
2545043d222fSJason Wang 
2546043d222fSJason Wang 		for (i = 0; i < n; i++) {
2547043d222fSJason Wang 			xdp = &((struct xdp_buff *)ctl->ptr)[i];
2548f9e06c45SJason Wang 			tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
2549043d222fSJason Wang 		}
2550043d222fSJason Wang 
2551043d222fSJason Wang 		if (flush)
2552043d222fSJason Wang 			xdp_do_flush_map();
2553043d222fSJason Wang 
2554043d222fSJason Wang 		rcu_read_unlock();
2555043d222fSJason Wang 		local_bh_enable();
2556043d222fSJason Wang 
2557f9e06c45SJason Wang 		tun_put_page(&tpage);
2558f9e06c45SJason Wang 
2559043d222fSJason Wang 		ret = total_len;
2560043d222fSJason Wang 		goto out;
2561043d222fSJason Wang 	}
2562fe8dd45bSJason Wang 
2563fe8dd45bSJason Wang 	ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
25645503fcecSJason Wang 			   m->msg_flags & MSG_DONTWAIT,
25655503fcecSJason Wang 			   m->msg_flags & MSG_MORE);
2566043d222fSJason Wang out:
256754f968d6SJason Wang 	tun_put(tun);
256854f968d6SJason Wang 	return ret;
256905c2828cSMichael S. Tsirkin }
257005c2828cSMichael S. Tsirkin 
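/* recvmsg() on the internal socket: honours MSG_DONTWAIT, MSG_TRUNC and
 * MSG_ERRQUEUE (tx timestamps). If msg_control is set it is treated as
 * an already-dequeued ring entry, letting tun_do_read() skip the
 * ptr_ring.
 */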
25711b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
257205c2828cSMichael S. Tsirkin 		       int flags)
257305c2828cSMichael S. Tsirkin {
257454f968d6SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
25759484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
2576fc72d1d5SJason Wang 	void *ptr = m->msg_control;
257705c2828cSMichael S. Tsirkin 	int ret;
257854f968d6SJason Wang 
2579c33ee15bSWei Xu 	if (!tun) {
2580c33ee15bSWei Xu 		ret = -EBADFD;
2581fc72d1d5SJason Wang 		goto out_free;
2582c33ee15bSWei Xu 	}
258354f968d6SJason Wang 
2584eda29772SRichard Cochran 	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
25853811ae76SGao feng 		ret = -EINVAL;
2586c33ee15bSWei Xu 		goto out_put_tun;
25873811ae76SGao feng 	}
2588eda29772SRichard Cochran 	if (flags & MSG_ERRQUEUE) {
2589eda29772SRichard Cochran 		ret = sock_recv_errqueue(sock->sk, m, total_len,
2590eda29772SRichard Cochran 					 SOL_PACKET, TUN_TX_TIMESTAMP);
2591eda29772SRichard Cochran 		goto out;
2592eda29772SRichard Cochran 	}
2593fc72d1d5SJason Wang 	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
259487897931SAlex Gartrell 	if (ret > (ssize_t)total_len) {
259542404c09SDavid S. Miller 		m->msg_flags |= MSG_TRUNC;
259642404c09SDavid S. Miller 		ret = flags & MSG_TRUNC ? ret : total_len;
259742404c09SDavid S. Miller 	}
25983811ae76SGao feng out:
259954f968d6SJason Wang 	tun_put(tun);
260005c2828cSMichael S. Tsirkin 	return ret;
2601c33ee15bSWei Xu 
2602c33ee15bSWei Xu out_put_tun:
2603c33ee15bSWei Xu 	tun_put(tun);
2604fc72d1d5SJason Wang out_free:
2605fc72d1d5SJason Wang 	tun_ptr_free(ptr);
2606c33ee15bSWei Xu 	return ret;
260705c2828cSMichael S. Tsirkin }
260805c2828cSMichael S. Tsirkin 
2609fc72d1d5SJason Wang static int tun_ptr_peek_len(void *ptr)
2610fc72d1d5SJason Wang {
2611fc72d1d5SJason Wang 	if (likely(ptr)) {
26121ffcbc85SJesper Dangaard Brouer 		if (tun_is_xdp_frame(ptr)) {
26131ffcbc85SJesper Dangaard Brouer 			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2614fc72d1d5SJason Wang 
26151ffcbc85SJesper Dangaard Brouer 			return xdpf->len;
2616fc72d1d5SJason Wang 		}
2617fc72d1d5SJason Wang 		return __skb_array_len_with_tag(ptr);
2618fc72d1d5SJason Wang 	} else {
2619fc72d1d5SJason Wang 		return 0;
2620fc72d1d5SJason Wang 	}
2621fc72d1d5SJason Wang }
2622fc72d1d5SJason Wang 
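/* Report the length of the next queued entry without consuming it.
 * Exported through proto_ops.peek_len so callers such as vhost_net can
 * size receive buffers before actually reading.
 */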
26231576d986SJason Wang static int tun_peek_len(struct socket *sock)
26241576d986SJason Wang {
26251576d986SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
26261576d986SJason Wang 	struct tun_struct *tun;
26271576d986SJason Wang 	int ret = 0;
26281576d986SJason Wang 
26299484dc74Syuan linyu 	tun = tun_get(tfile);
26301576d986SJason Wang 	if (!tun)
26311576d986SJason Wang 		return 0;
26321576d986SJason Wang 
2633fc72d1d5SJason Wang 	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
26341576d986SJason Wang 	tun_put(tun);
26351576d986SJason Wang 
26361576d986SJason Wang 	return ret;
26371576d986SJason Wang }
26381576d986SJason Wang 
263905c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */
264005c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = {
26411576d986SJason Wang 	.peek_len = tun_peek_len,
264205c2828cSMichael S. Tsirkin 	.sendmsg = tun_sendmsg,
264305c2828cSMichael S. Tsirkin 	.recvmsg = tun_recvmsg,
264405c2828cSMichael S. Tsirkin };
264505c2828cSMichael S. Tsirkin 
264633dccbb0SHerbert Xu static struct proto tun_proto = {
264733dccbb0SHerbert Xu 	.name		= "tun",
264833dccbb0SHerbert Xu 	.owner		= THIS_MODULE,
264954f968d6SJason Wang 	.obj_size	= sizeof(struct tun_file),
265033dccbb0SHerbert Xu };
2651f019a7a5SEric W. Biederman 
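/* Subset of flags reported back to userspace via TUNGETIFF and the
 * read-only sysfs attributes below.
 */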
2652980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun)
2653980c9e8cSDavid Woodhouse {
2654031f5e03SMichael S. Tsirkin 	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2655980c9e8cSDavid Woodhouse }
2656980c9e8cSDavid Woodhouse 
2657980c9e8cSDavid Woodhouse static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
2658980c9e8cSDavid Woodhouse 			      char *buf)
2659980c9e8cSDavid Woodhouse {
2660980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2661980c9e8cSDavid Woodhouse 	return sprintf(buf, "0x%x\n", tun_flags(tun));
2662980c9e8cSDavid Woodhouse }
2663980c9e8cSDavid Woodhouse 
2664980c9e8cSDavid Woodhouse static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
2665980c9e8cSDavid Woodhouse 			      char *buf)
2666980c9e8cSDavid Woodhouse {
2667980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
26680625c883SEric W. Biederman 	return uid_valid(tun->owner) ?
26690625c883SEric W. Biederman 		sprintf(buf, "%u\n",
26700625c883SEric W. Biederman 			from_kuid_munged(current_user_ns(), tun->owner)) :
26710625c883SEric W. Biederman 		sprintf(buf, "-1\n");
2672980c9e8cSDavid Woodhouse }
2673980c9e8cSDavid Woodhouse 
2674980c9e8cSDavid Woodhouse static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
2675980c9e8cSDavid Woodhouse 			      char *buf)
2676980c9e8cSDavid Woodhouse {
2677980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
26780625c883SEric W. Biederman 	return gid_valid(tun->group) ?
26790625c883SEric W. Biederman 		sprintf(buf, "%u\n",
26800625c883SEric W. Biederman 			from_kgid_munged(current_user_ns(), tun->group)) :
26810625c883SEric W. Biederman 		sprintf(buf, "-1\n");
2682980c9e8cSDavid Woodhouse }
2683980c9e8cSDavid Woodhouse 
2684980c9e8cSDavid Woodhouse static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
2685980c9e8cSDavid Woodhouse static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
2686980c9e8cSDavid Woodhouse static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
2687980c9e8cSDavid Woodhouse 
2688c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = {
2689c4d33e24STakashi Iwai 	&dev_attr_tun_flags.attr,
2690c4d33e24STakashi Iwai 	&dev_attr_owner.attr,
2691c4d33e24STakashi Iwai 	&dev_attr_group.attr,
2692c4d33e24STakashi Iwai 	NULL
2693c4d33e24STakashi Iwai };
2694c4d33e24STakashi Iwai 
2695c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = {
2696c4d33e24STakashi Iwai 	.attrs = tun_dev_attrs
2697c4d33e24STakashi Iwai };
2698c4d33e24STakashi Iwai 
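/* Core of the TUNSETIFF ioctl: attach to an existing tun/tap device if
 * ifr_name matches one, otherwise create a new netdev. A minimal
 * userspace calling sequence looks roughly like this (illustrative
 * sketch only, error handling elided):
 *
 *	struct ifreq ifr = { 0 };
 *	char buf[2048];
 *	int fd = open("/dev/net/tun", O_RDWR);
 *
 *	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
 *	ioctl(fd, TUNSETIFF, &ifr);	// name left empty: kernel picks
 *					// "tunN", returned in ifr_name
 *	read(fd, buf, sizeof(buf));	// raw IP packets, no pi header
 */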
2699d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
27001da177e4SLinus Torvalds {
27011da177e4SLinus Torvalds 	struct tun_struct *tun;
270254f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
27031da177e4SLinus Torvalds 	struct net_device *dev;
27041da177e4SLinus Torvalds 	int err;
27051da177e4SLinus Torvalds 
27067c0c3b1aSJason Wang 	if (tfile->detached)
27077c0c3b1aSJason Wang 		return -EINVAL;
27087c0c3b1aSJason Wang 
270990e33d45SPetar Penkov 	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
271090e33d45SPetar Penkov 		if (!capable(CAP_NET_ADMIN))
271190e33d45SPetar Penkov 			return -EPERM;
271290e33d45SPetar Penkov 
271390e33d45SPetar Penkov 		if (!(ifr->ifr_flags & IFF_NAPI) ||
271490e33d45SPetar Penkov 		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
271590e33d45SPetar Penkov 			return -EINVAL;
271690e33d45SPetar Penkov 	}
271790e33d45SPetar Penkov 
271874a3e5a7SEric W. Biederman 	dev = __dev_get_by_name(net, ifr->ifr_name);
271974a3e5a7SEric W. Biederman 	if (dev) {
2720f85ba780SDavid Woodhouse 		if (ifr->ifr_flags & IFF_TUN_EXCL)
2721f85ba780SDavid Woodhouse 			return -EBUSY;
272274a3e5a7SEric W. Biederman 		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
272374a3e5a7SEric W. Biederman 			tun = netdev_priv(dev);
272474a3e5a7SEric W. Biederman 		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
272574a3e5a7SEric W. Biederman 			tun = netdev_priv(dev);
272674a3e5a7SEric W. Biederman 		else
272774a3e5a7SEric W. Biederman 			return -EINVAL;
272874a3e5a7SEric W. Biederman 
27298e6d91aeSJason Wang 		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
273040630b82SMichael S. Tsirkin 		    !!(tun->flags & IFF_MULTI_QUEUE))
27318e6d91aeSJason Wang 			return -EINVAL;
27328e6d91aeSJason Wang 
2733cde8b15fSJason Wang 		if (tun_not_capable(tun))
27342b980dbdSPaul Moore 			return -EPERM;
27355dbbaf2dSPaul Moore 		err = security_tun_dev_open(tun->security);
27362b980dbdSPaul Moore 		if (err < 0)
27372b980dbdSPaul Moore 			return err;
27382b980dbdSPaul Moore 
273994317099SPetar Penkov 		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2740af3fb24eSEric Dumazet 				 ifr->ifr_flags & IFF_NAPI,
274177f22f92SYang Yingliang 				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
2742a7385ba2SEric W. Biederman 		if (err < 0)
2743a7385ba2SEric W. Biederman 			return err;
27444008e97fSJason Wang 
274540630b82SMichael S. Tsirkin 		if (tun->flags & IFF_MULTI_QUEUE &&
2746e8dbad66SJason Wang 		    (tun->numqueues + tun->numdisabled > 1)) {
2747e8dbad66SJason Wang 			/* One or more queues have already been attached, no need
2748e8dbad66SJason Wang 			 * to initialize the device again.
2749e8dbad66SJason Wang 			 */
275083c1f36fSSabrina Dubroca 			netdev_state_change(dev);
2751e8dbad66SJason Wang 			return 0;
2752e8dbad66SJason Wang 		}
27539fffc5c6SSabrina Dubroca 
27549fffc5c6SSabrina Dubroca 		tun->flags = (tun->flags & ~TUN_FEATURES) |
27559fffc5c6SSabrina Dubroca 			      (ifr->ifr_flags & TUN_FEATURES);
275683c1f36fSSabrina Dubroca 
275783c1f36fSSabrina Dubroca 		netdev_state_change(dev);
275883c1f36fSSabrina Dubroca 	} else {
27591da177e4SLinus Torvalds 		char *name;
27601da177e4SLinus Torvalds 		unsigned long flags = 0;
2761edfb6a14SJason Wang 		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2762edfb6a14SJason Wang 			     MAX_TAP_QUEUES : 1;
27631da177e4SLinus Torvalds 
2764c260b772SEric W. Biederman 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2765ca6bb5d7SDavid Woodhouse 			return -EPERM;
27662b980dbdSPaul Moore 		err = security_tun_dev_create();
27672b980dbdSPaul Moore 		if (err < 0)
27682b980dbdSPaul Moore 			return err;
2769ca6bb5d7SDavid Woodhouse 
27701da177e4SLinus Torvalds 		/* Set dev type */
27711da177e4SLinus Torvalds 		if (ifr->ifr_flags & IFF_TUN) {
27721da177e4SLinus Torvalds 			/* TUN device */
277340630b82SMichael S. Tsirkin 			flags |= IFF_TUN;
27741da177e4SLinus Torvalds 			name = "tun%d";
27751da177e4SLinus Torvalds 		} else if (ifr->ifr_flags & IFF_TAP) {
27761da177e4SLinus Torvalds 			/* TAP device */
277740630b82SMichael S. Tsirkin 			flags |= IFF_TAP;
27781da177e4SLinus Torvalds 			name = "tap%d";
27791da177e4SLinus Torvalds 		} else
278036989b90SKusanagi Kouichi 			return -EINVAL;
27811da177e4SLinus Torvalds 
27821da177e4SLinus Torvalds 		if (*ifr->ifr_name)
27831da177e4SLinus Torvalds 			name = ifr->ifr_name;
27841da177e4SLinus Torvalds 
2785c8d68e6bSJason Wang 		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2786c835a677STom Gundersen 				       NET_NAME_UNKNOWN, tun_setup, queues,
2787c835a677STom Gundersen 				       queues);
2788edfb6a14SJason Wang 
27891da177e4SLinus Torvalds 		if (!dev)
27901da177e4SLinus Torvalds 			return -ENOMEM;
27911da177e4SLinus Torvalds 
2792fc54c658SPavel Emelyanov 		dev_net_set(dev, net);
2793f019a7a5SEric W. Biederman 		dev->rtnl_link_ops = &tun_link_ops;
2794fb7589a1SPavel Emelyanov 		dev->ifindex = tfile->ifindex;
2795c4d33e24STakashi Iwai 		dev->sysfs_groups[0] = &tun_attr_group;
2796758e43b7SStephen Hemminger 
27971da177e4SLinus Torvalds 		tun = netdev_priv(dev);
27981da177e4SLinus Torvalds 		tun->dev = dev;
27991da177e4SLinus Torvalds 		tun->flags = flags;
2800f271b2ccSMax Krasnyansky 		tun->txflt.count = 0;
2801d9d52b51SMichael S. Tsirkin 		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
28021da177e4SLinus Torvalds 
2803eaea34b2SPaolo Abeni 		tun->align = NET_SKB_PAD;
280454f968d6SJason Wang 		tun->filter_attached = false;
280554f968d6SJason Wang 		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
28065503fcecSJason Wang 		tun->rx_batched = 0;
280796f84061SJason Wang 		RCU_INIT_POINTER(tun->steering_prog, NULL);
280833dccbb0SHerbert Xu 
2809608b9977SPaolo Abeni 		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
2810608b9977SPaolo Abeni 		if (!tun->pcpu_stats) {
2811608b9977SPaolo Abeni 			err = -ENOMEM;
2812608b9977SPaolo Abeni 			goto err_free_dev;
2813608b9977SPaolo Abeni 		}
2814608b9977SPaolo Abeni 
281596442e42SJason Wang 		spin_lock_init(&tun->lock);
281696442e42SJason Wang 
28175dbbaf2dSPaul Moore 		err = security_tun_dev_alloc_security(&tun->security);
28185dbbaf2dSPaul Moore 		if (err < 0)
2819608b9977SPaolo Abeni 			goto err_free_stat;
28202b980dbdSPaul Moore 
28211da177e4SLinus Torvalds 		tun_net_init(dev);
2822944a1376SPavel Emelyanov 		tun_flow_init(tun);
282396442e42SJason Wang 
282488255375SMichał Mirosław 		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
28256680ec68SJason Wang 				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
28266680ec68SJason Wang 				   NETIF_F_HW_VLAN_STAG_TX;
28272a2bbf17SPaolo Abeni 		dev->features = dev->hw_features | NETIF_F_LLTX;
28286671b224SFernando Luis Vazquez Cao 		dev->vlan_features = dev->features &
28296671b224SFernando Luis Vazquez Cao 				     ~(NETIF_F_HW_VLAN_CTAG_TX |
28306671b224SFernando Luis Vazquez Cao 				       NETIF_F_HW_VLAN_STAG_TX);
283188255375SMichał Mirosław 
28329fffc5c6SSabrina Dubroca 		tun->flags = (tun->flags & ~TUN_FEATURES) |
28339fffc5c6SSabrina Dubroca 			      (ifr->ifr_flags & TUN_FEATURES);
28349fffc5c6SSabrina Dubroca 
28354008e97fSJason Wang 		INIT_LIST_HEAD(&tun->disabled);
2836af3fb24eSEric Dumazet 		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
283777f22f92SYang Yingliang 				 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
2838eb0fb363SJason Wang 		if (err < 0)
2839662ca437SJason Wang 			goto err_free_flow;
2840eb0fb363SJason Wang 
28411da177e4SLinus Torvalds 		err = register_netdevice(tun->dev);
28421da177e4SLinus Torvalds 		if (err < 0)
2843662ca437SJason Wang 			goto err_detach;
284477f22f92SYang Yingliang 		/* free_netdev() won't check refcnt, so to avoid a race
284577f22f92SYang Yingliang 		 * with dev_put() we need to publish tun after registration.
284677f22f92SYang Yingliang 		 */
284777f22f92SYang Yingliang 		rcu_assign_pointer(tfile->tun, tun);
2848af668b3cSMichael S. Tsirkin 	}
2849980c9e8cSDavid Woodhouse 
2850eb0fb363SJason Wang 	netif_carrier_on(tun->dev);
28511da177e4SLinus Torvalds 
28526b8a66eeSJoe Perches 	tun_debug(KERN_INFO, tun, "tun_set_iff\n");
28531da177e4SLinus Torvalds 
2854e35259a9SMax Krasnyansky 	/* Make sure persistent devices do not get stuck in
2855e35259a9SMax Krasnyansky 	 * xoff state.
2856e35259a9SMax Krasnyansky 	 */
2857e35259a9SMax Krasnyansky 	if (netif_running(tun->dev))
2858c8d68e6bSJason Wang 		netif_tx_wake_all_queues(tun->dev);
2859e35259a9SMax Krasnyansky 
28601da177e4SLinus Torvalds 	strcpy(ifr->ifr_name, tun->dev->name);
28611da177e4SLinus Torvalds 	return 0;
28621da177e4SLinus Torvalds 
2863662ca437SJason Wang err_detach:
2864662ca437SJason Wang 	tun_detach_all(dev);
286511fc7d5aSEric Dumazet 	/* We are here because register_netdevice() has failed.
286611fc7d5aSEric Dumazet 	 * If register_netdevice() already called tun_free_netdev()
286711fc7d5aSEric Dumazet 	 * while dealing with the error, tun->pcpu_stats has been cleared.
286811fc7d5aSEric Dumazet 	 */
286911fc7d5aSEric Dumazet 	if (!tun->pcpu_stats)
2870ff244c6bSEric Dumazet 		goto err_free_dev;
2871ff244c6bSEric Dumazet 
2872662ca437SJason Wang err_free_flow:
2873662ca437SJason Wang 	tun_flow_uninit(tun);
2874662ca437SJason Wang 	security_tun_dev_free_security(tun->security);
2875608b9977SPaolo Abeni err_free_stat:
2876608b9977SPaolo Abeni 	free_percpu(tun->pcpu_stats);
28771da177e4SLinus Torvalds err_free_dev:
28781da177e4SLinus Torvalds 	free_netdev(dev);
28791da177e4SLinus Torvalds 	return err;
28801da177e4SLinus Torvalds }
28811da177e4SLinus Torvalds 
288212132768SKirill Tkhai static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
2883e3b99556SMark McLoughlin {
28846b8a66eeSJoe Perches 	tun_debug(KERN_INFO, tun, "tun_get_iff\n");
2885e3b99556SMark McLoughlin 
2886e3b99556SMark McLoughlin 	strcpy(ifr->ifr_name, tun->dev->name);
2887e3b99556SMark McLoughlin 
2888980c9e8cSDavid Woodhouse 	ifr->ifr_flags = tun_flags(tun);
2890e3b99556SMark McLoughlin }
2891e3b99556SMark McLoughlin 
28925228ddc9SRusty Russell /* This is like a cut-down ethtool ops, except done via tun fd so no
28935228ddc9SRusty Russell  * privs required. */
289488255375SMichał Mirosław static int set_offload(struct tun_struct *tun, unsigned long arg)
28955228ddc9SRusty Russell {
2896c8f44affSMichał Mirosław 	netdev_features_t features = 0;
28975228ddc9SRusty Russell 
28985228ddc9SRusty Russell 	if (arg & TUN_F_CSUM) {
289988255375SMichał Mirosław 		features |= NETIF_F_HW_CSUM;
29005228ddc9SRusty Russell 		arg &= ~TUN_F_CSUM;
29015228ddc9SRusty Russell 
29025228ddc9SRusty Russell 		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
29035228ddc9SRusty Russell 			if (arg & TUN_F_TSO_ECN) {
29045228ddc9SRusty Russell 				features |= NETIF_F_TSO_ECN;
29055228ddc9SRusty Russell 				arg &= ~TUN_F_TSO_ECN;
29065228ddc9SRusty Russell 			}
29075228ddc9SRusty Russell 			if (arg & TUN_F_TSO4)
29085228ddc9SRusty Russell 				features |= NETIF_F_TSO;
29095228ddc9SRusty Russell 			if (arg & TUN_F_TSO6)
29105228ddc9SRusty Russell 				features |= NETIF_F_TSO6;
29115228ddc9SRusty Russell 			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
29125228ddc9SRusty Russell 		}
29130c19f846SWillem de Bruijn 
29140c19f846SWillem de Bruijn 		arg &= ~TUN_F_UFO;
29155228ddc9SRusty Russell 	}
29165228ddc9SRusty Russell 
29175228ddc9SRusty Russell 	/* This gives the user a way to test for new features in the future
29185228ddc9SRusty Russell 	 * by trying to set them. */
29195228ddc9SRusty Russell 	if (arg)
29205228ddc9SRusty Russell 		return -EINVAL;
29215228ddc9SRusty Russell 
292288255375SMichał Mirosław 	tun->set_features = features;
292309050957SYaroslav Isakov 	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
292409050957SYaroslav Isakov 	tun->dev->wanted_features |= features;
292588255375SMichał Mirosław 	netdev_update_features(tun->dev);
29265228ddc9SRusty Russell 
29275228ddc9SRusty Russell 	return 0;
29285228ddc9SRusty Russell }
29295228ddc9SRusty Russell 
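/* TUNATTACHFILTER/TUNDETACHFILTER helpers: the classic BPF socket
 * filter is attached to (or removed from) every queue's socket so that
 * multiqueue devices filter consistently.
 */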
2930c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n)
2931c8d68e6bSJason Wang {
2932c8d68e6bSJason Wang 	int i;
2933c8d68e6bSJason Wang 	struct tun_file *tfile;
2934c8d68e6bSJason Wang 
2935c8d68e6bSJason Wang 	for (i = 0; i < n; i++) {
2936b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
29378ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
29388ced425eSHannes Frederic Sowa 		sk_detach_filter(tfile->socket.sk);
29398ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
2940c8d68e6bSJason Wang 	}
2941c8d68e6bSJason Wang 
2942c8d68e6bSJason Wang 	tun->filter_attached = false;
2943c8d68e6bSJason Wang }
2944c8d68e6bSJason Wang 
2945c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun)
2946c8d68e6bSJason Wang {
2947c8d68e6bSJason Wang 	int i, ret = 0;
2948c8d68e6bSJason Wang 	struct tun_file *tfile;
2949c8d68e6bSJason Wang 
2950c8d68e6bSJason Wang 	for (i = 0; i < tun->numqueues; i++) {
2951b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
29528ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
29538ced425eSHannes Frederic Sowa 		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
29548ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
2955c8d68e6bSJason Wang 		if (ret) {
2956c8d68e6bSJason Wang 			tun_detach_filter(tun, i);
2957c8d68e6bSJason Wang 			return ret;
2958c8d68e6bSJason Wang 		}
2959c8d68e6bSJason Wang 	}
2960c8d68e6bSJason Wang 
2961c8d68e6bSJason Wang 	tun->filter_attached = true;
2962c8d68e6bSJason Wang 	return ret;
2963c8d68e6bSJason Wang }
2964c8d68e6bSJason Wang 
2965c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun)
2966c8d68e6bSJason Wang {
2967c8d68e6bSJason Wang 	struct tun_file *tfile;
2968c8d68e6bSJason Wang 	int i;
2969c8d68e6bSJason Wang 
2970c8d68e6bSJason Wang 	for (i = 0; i < tun->numqueues; i++) {
2971b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
2972c8d68e6bSJason Wang 		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2973c8d68e6bSJason Wang 	}
2974c8d68e6bSJason Wang }
2975c8d68e6bSJason Wang 
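/* TUNSETQUEUE: re-enable (IFF_ATTACH_QUEUE) or disable
 * (IFF_DETACH_QUEUE) a single queue of a multiqueue device without
 * closing its file descriptor.
 */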
2976cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr)
2977cde8b15fSJason Wang {
2978cde8b15fSJason Wang 	struct tun_file *tfile = file->private_data;
2979cde8b15fSJason Wang 	struct tun_struct *tun;
2980cde8b15fSJason Wang 	int ret = 0;
2981cde8b15fSJason Wang 
2982cde8b15fSJason Wang 	rtnl_lock();
2983cde8b15fSJason Wang 
2984cde8b15fSJason Wang 	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
29854008e97fSJason Wang 		tun = tfile->detached;
29865dbbaf2dSPaul Moore 		if (!tun) {
2987cde8b15fSJason Wang 			ret = -EINVAL;
29885dbbaf2dSPaul Moore 			goto unlock;
29895dbbaf2dSPaul Moore 		}
29905dbbaf2dSPaul Moore 		ret = security_tun_dev_attach_queue(tun->security);
29915dbbaf2dSPaul Moore 		if (ret < 0)
29925dbbaf2dSPaul Moore 			goto unlock;
2993af3fb24eSEric Dumazet 		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
299477f22f92SYang Yingliang 				 tun->flags & IFF_NAPI_FRAGS, true);
29954008e97fSJason Wang 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2996b8deabd3SJason Wang 		tun = rtnl_dereference(tfile->tun);
299740630b82SMichael S. Tsirkin 		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
29984008e97fSJason Wang 			ret = -EINVAL;
2999cde8b15fSJason Wang 		else
30004008e97fSJason Wang 			__tun_detach(tfile, false);
30014008e97fSJason Wang 	} else
3002cde8b15fSJason Wang 		ret = -EINVAL;
3003cde8b15fSJason Wang 
300483c1f36fSSabrina Dubroca 	if (ret >= 0)
300583c1f36fSSabrina Dubroca 		netdev_state_change(tun->dev);
300683c1f36fSSabrina Dubroca 
30075dbbaf2dSPaul Moore unlock:
3008cde8b15fSJason Wang 	rtnl_unlock();
3009cde8b15fSJason Wang 	return ret;
3010cde8b15fSJason Wang }
3011cde8b15fSJason Wang 
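/* Install a steering or filter program from a BPF program fd supplied
 * by userspace; an fd of -1 removes the currently installed program.
 */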
3012cd5681d7SJason Wang static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
3013cd5681d7SJason Wang 			void __user *data)
301496f84061SJason Wang {
301596f84061SJason Wang 	struct bpf_prog *prog;
301696f84061SJason Wang 	int fd;
301796f84061SJason Wang 
301896f84061SJason Wang 	if (copy_from_user(&fd, data, sizeof(fd)))
301996f84061SJason Wang 		return -EFAULT;
302096f84061SJason Wang 
302196f84061SJason Wang 	if (fd == -1) {
302296f84061SJason Wang 		prog = NULL;
302396f84061SJason Wang 	} else {
302496f84061SJason Wang 		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
302596f84061SJason Wang 		if (IS_ERR(prog))
302696f84061SJason Wang 			return PTR_ERR(prog);
302796f84061SJason Wang 	}
302896f84061SJason Wang 
3029cd5681d7SJason Wang 	return __tun_set_ebpf(tun, prog_p, prog);
303096f84061SJason Wang }
303196f84061SJason Wang 
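/* Main ioctl dispatcher. ifreq_len differs between native and compat
 * callers (see tun_chr_compat_ioctl()); everything else is shared.
 */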
303250857e2aSArnd Bergmann static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
303350857e2aSArnd Bergmann 			    unsigned long arg, int ifreq_len)
30341da177e4SLinus Torvalds {
303536b50babSEric W. Biederman 	struct tun_file *tfile = file->private_data;
3036f663706aSKirill Tkhai 	struct net *net = sock_net(&tfile->sk);
3037631ab46bSEric W. Biederman 	struct tun_struct *tun;
30381da177e4SLinus Torvalds 	void __user *argp = (void __user *)arg;
303926d31925SNicolas Dichtel 	unsigned int ifindex, carrier;
30401da177e4SLinus Torvalds 	struct ifreq ifr;
30410625c883SEric W. Biederman 	kuid_t owner;
30420625c883SEric W. Biederman 	kgid_t group;
304333dccbb0SHerbert Xu 	int sndbuf;
3044d9d52b51SMichael S. Tsirkin 	int vnet_hdr_sz;
30451cf8e410SMichael S. Tsirkin 	int le;
3046f271b2ccSMax Krasnyansky 	int ret;
304783c1f36fSSabrina Dubroca 	bool do_notify = false;
30481da177e4SLinus Torvalds 
3049f2780d6dSKirill Tkhai 	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
3050f2780d6dSKirill Tkhai 	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
305150857e2aSArnd Bergmann 		if (copy_from_user(&ifr, argp, ifreq_len))
30521da177e4SLinus Torvalds 			return -EFAULT;
30538bbb1813SDavid S. Miller 	} else {
3054a117dacdSMathias Krause 		memset(&ifr, 0, sizeof(ifr));
30558bbb1813SDavid S. Miller 	}
3056631ab46bSEric W. Biederman 	if (cmd == TUNGETFEATURES) {
3057631ab46bSEric W. Biederman 		/* Currently this just means: "what IFF flags are valid?".
3058631ab46bSEric W. Biederman 		 * This is needed because we never checked for invalid flags on
3059031f5e03SMichael S. Tsirkin 		 * TUNSETIFF.
3060031f5e03SMichael S. Tsirkin 		 */
3061031f5e03SMichael S. Tsirkin 		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
3062631ab46bSEric W. Biederman 				(unsigned int __user*)argp);
3063f663706aSKirill Tkhai 	} else if (cmd == TUNSETQUEUE) {
3064cde8b15fSJason Wang 		return tun_set_queue(file, &ifr);
3065f663706aSKirill Tkhai 	} else if (cmd == SIOCGSKNS) {
3066f663706aSKirill Tkhai 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3067f663706aSKirill Tkhai 			return -EPERM;
3068f663706aSKirill Tkhai 		return open_related_ns(&net->ns, get_net_ns);
3069f663706aSKirill Tkhai 	}
3070631ab46bSEric W. Biederman 
3071c8d68e6bSJason Wang 	ret = 0;
3072876bfd4dSHerbert Xu 	rtnl_lock();
3073876bfd4dSHerbert Xu 
30749484dc74Syuan linyu 	tun = tun_get(tfile);
30750f16bc13SGao Feng 	if (cmd == TUNSETIFF) {
30760f16bc13SGao Feng 		ret = -EEXIST;
30770f16bc13SGao Feng 		if (tun)
30780f16bc13SGao Feng 			goto unlock;
30790f16bc13SGao Feng 
30801da177e4SLinus Torvalds 		ifr.ifr_name[IFNAMSIZ-1] = '\0';
30811da177e4SLinus Torvalds 
3082f2780d6dSKirill Tkhai 		ret = tun_set_iff(net, file, &ifr);
30831da177e4SLinus Torvalds 
3084876bfd4dSHerbert Xu 		if (ret)
3085876bfd4dSHerbert Xu 			goto unlock;
30861da177e4SLinus Torvalds 
308750857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
3088876bfd4dSHerbert Xu 			ret = -EFAULT;
3089876bfd4dSHerbert Xu 		goto unlock;
30901da177e4SLinus Torvalds 	}
3091fb7589a1SPavel Emelyanov 	if (cmd == TUNSETIFINDEX) {
3092fb7589a1SPavel Emelyanov 		ret = -EPERM;
3093fb7589a1SPavel Emelyanov 		if (tun)
3094fb7589a1SPavel Emelyanov 			goto unlock;
3095fb7589a1SPavel Emelyanov 
3096fb7589a1SPavel Emelyanov 		ret = -EFAULT;
3097fb7589a1SPavel Emelyanov 		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
3098fb7589a1SPavel Emelyanov 			goto unlock;
3099fb7589a1SPavel Emelyanov 
3100fb7589a1SPavel Emelyanov 		ret = 0;
3101fb7589a1SPavel Emelyanov 		tfile->ifindex = ifindex;
3102fb7589a1SPavel Emelyanov 		goto unlock;
3103fb7589a1SPavel Emelyanov 	}
31041da177e4SLinus Torvalds 
3105876bfd4dSHerbert Xu 	ret = -EBADFD;
31061da177e4SLinus Torvalds 	if (!tun)
3107876bfd4dSHerbert Xu 		goto unlock;
31081da177e4SLinus Torvalds 
31091e588338SJason Wang 	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
31101da177e4SLinus Torvalds 
31110c3e0e3bSKirill Tkhai 	net = dev_net(tun->dev);
3112631ab46bSEric W. Biederman 	ret = 0;
31131da177e4SLinus Torvalds 	switch (cmd) {
3114e3b99556SMark McLoughlin 	case TUNGETIFF:
311512132768SKirill Tkhai 		tun_get_iff(tun, &ifr);
3116e3b99556SMark McLoughlin 
31173d407a80SPavel Emelyanov 		if (tfile->detached)
31183d407a80SPavel Emelyanov 			ifr.ifr_flags |= IFF_DETACH_QUEUE;
3119849c9b6fSPavel Emelyanov 		if (!tfile->socket.sk->sk_filter)
3120849c9b6fSPavel Emelyanov 			ifr.ifr_flags |= IFF_NOFILTER;
31213d407a80SPavel Emelyanov 
312250857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
3123631ab46bSEric W. Biederman 			ret = -EFAULT;
3124e3b99556SMark McLoughlin 		break;
3125e3b99556SMark McLoughlin 
31261da177e4SLinus Torvalds 	case TUNSETNOCSUM:
31271da177e4SLinus Torvalds 		/* Disable/Enable checksum */
31281da177e4SLinus Torvalds 
312988255375SMichał Mirosław 		/* [unimplemented] */
313088255375SMichał Mirosław 		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
31316b8a66eeSJoe Perches 			  arg ? "disabled" : "enabled");
31321da177e4SLinus Torvalds 		break;
31331da177e4SLinus Torvalds 
31341da177e4SLinus Torvalds 	case TUNSETPERSIST:
313554f968d6SJason Wang 		/* Disable/Enable persist mode. Keep an extra reference to the
313654f968d6SJason Wang 		 * module to prevent it from being unloaded.
313754f968d6SJason Wang 		 */
313840630b82SMichael S. Tsirkin 		if (arg && !(tun->flags & IFF_PERSIST)) {
313940630b82SMichael S. Tsirkin 			tun->flags |= IFF_PERSIST;
314054f968d6SJason Wang 			__module_get(THIS_MODULE);
314183c1f36fSSabrina Dubroca 			do_notify = true;
3142dd38bd85SJason Wang 		}
314340630b82SMichael S. Tsirkin 		if (!arg && (tun->flags & IFF_PERSIST)) {
314440630b82SMichael S. Tsirkin 			tun->flags &= ~IFF_PERSIST;
314554f968d6SJason Wang 			module_put(THIS_MODULE);
314683c1f36fSSabrina Dubroca 			do_notify = true;
314754f968d6SJason Wang 		}
31481da177e4SLinus Torvalds 
31496b8a66eeSJoe Perches 		tun_debug(KERN_INFO, tun, "persist %s\n",
31506b8a66eeSJoe Perches 			  arg ? "enabled" : "disabled");
31511da177e4SLinus Torvalds 		break;
31521da177e4SLinus Torvalds 
31531da177e4SLinus Torvalds 	case TUNSETOWNER:
31541da177e4SLinus Torvalds 		/* Set owner of the device */
31550625c883SEric W. Biederman 		owner = make_kuid(current_user_ns(), arg);
31560625c883SEric W. Biederman 		if (!uid_valid(owner)) {
31570625c883SEric W. Biederman 			ret = -EINVAL;
31580625c883SEric W. Biederman 			break;
31590625c883SEric W. Biederman 		}
31600625c883SEric W. Biederman 		tun->owner = owner;
316183c1f36fSSabrina Dubroca 		do_notify = true;
31621e588338SJason Wang 		tun_debug(KERN_INFO, tun, "owner set to %u\n",
31630625c883SEric W. Biederman 			  from_kuid(&init_user_ns, tun->owner));
31641da177e4SLinus Torvalds 		break;
31651da177e4SLinus Torvalds 
31668c644623SGuido Guenther 	case TUNSETGROUP:
31678c644623SGuido Guenther 		/* Set group of the device */
31680625c883SEric W. Biederman 		group = make_kgid(current_user_ns(), arg);
31690625c883SEric W. Biederman 		if (!gid_valid(group)) {
31700625c883SEric W. Biederman 			ret = -EINVAL;
31710625c883SEric W. Biederman 			break;
31720625c883SEric W. Biederman 		}
31730625c883SEric W. Biederman 		tun->group = group;
317483c1f36fSSabrina Dubroca 		do_notify = true;
31751e588338SJason Wang 		tun_debug(KERN_INFO, tun, "group set to %u\n",
31760625c883SEric W. Biederman 			  from_kgid(&init_user_ns, tun->group));
31778c644623SGuido Guenther 		break;
31788c644623SGuido Guenther 
3179ff4cc3acSMike Kershaw 	case TUNSETLINK:
3180ff4cc3acSMike Kershaw 		/* Only allow setting the type when the interface is down */
3181ff4cc3acSMike Kershaw 		if (tun->dev->flags & IFF_UP) {
31826b8a66eeSJoe Perches 			tun_debug(KERN_INFO, tun,
31836b8a66eeSJoe Perches 				  "Linktype set failed because interface is up\n");
318448abfe05SDavid S. Miller 			ret = -EBUSY;
3185ff4cc3acSMike Kershaw 		} else {
3186ff4cc3acSMike Kershaw 			tun->dev->type = (int) arg;
31876b8a66eeSJoe Perches 			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
31886b8a66eeSJoe Perches 				  tun->dev->type);
318948abfe05SDavid S. Miller 			ret = 0;
3190ff4cc3acSMike Kershaw 		}
3191631ab46bSEric W. Biederman 		break;
3192ff4cc3acSMike Kershaw 
31931da177e4SLinus Torvalds #ifdef TUN_DEBUG
31941da177e4SLinus Torvalds 	case TUNSETDEBUG:
31951da177e4SLinus Torvalds 		tun->debug = arg;
31961da177e4SLinus Torvalds 		break;
31971da177e4SLinus Torvalds #endif
31985228ddc9SRusty Russell 	case TUNSETOFFLOAD:
319988255375SMichał Mirosław 		ret = set_offload(tun, arg);
3200631ab46bSEric W. Biederman 		break;
32015228ddc9SRusty Russell 
3202f271b2ccSMax Krasnyansky 	case TUNSETTXFILTER:
3203f271b2ccSMax Krasnyansky 		/* Can be set only for TAPs */
3204631ab46bSEric W. Biederman 		ret = -EINVAL;
320540630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3206631ab46bSEric W. Biederman 			break;
3207c0e5a8c2SHarvey Harrison 		ret = update_filter(&tun->txflt, (void __user *)arg);
3208631ab46bSEric W. Biederman 		break;
32091da177e4SLinus Torvalds 
32101da177e4SLinus Torvalds 	case SIOCGIFHWADDR:
3211b595076aSUwe Kleine-König 		/* Get hw address */
3212f271b2ccSMax Krasnyansky 		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
3213f271b2ccSMax Krasnyansky 		ifr.ifr_hwaddr.sa_family = tun->dev->type;
321450857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
3215631ab46bSEric W. Biederman 			ret = -EFAULT;
3216631ab46bSEric W. Biederman 		break;
32171da177e4SLinus Torvalds 
32181da177e4SLinus Torvalds 	case SIOCSIFHWADDR:
3219f271b2ccSMax Krasnyansky 		/* Set hw address */
32206b8a66eeSJoe Perches 		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
32216b8a66eeSJoe Perches 			  ifr.ifr_hwaddr.sa_data);
322240102371SKim B. Heino 
32233a37a963SPetr Machata 		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL);
3224631ab46bSEric W. Biederman 		break;
322533dccbb0SHerbert Xu 
322633dccbb0SHerbert Xu 	case TUNGETSNDBUF:
322754f968d6SJason Wang 		sndbuf = tfile->socket.sk->sk_sndbuf;
322833dccbb0SHerbert Xu 		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
322933dccbb0SHerbert Xu 			ret = -EFAULT;
323033dccbb0SHerbert Xu 		break;
323133dccbb0SHerbert Xu 
323233dccbb0SHerbert Xu 	case TUNSETSNDBUF:
323333dccbb0SHerbert Xu 		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
323433dccbb0SHerbert Xu 			ret = -EFAULT;
323533dccbb0SHerbert Xu 			break;
323633dccbb0SHerbert Xu 		}
323793161922SCraig Gallek 		if (sndbuf <= 0) {
323893161922SCraig Gallek 			ret = -EINVAL;
323993161922SCraig Gallek 			break;
324093161922SCraig Gallek 		}
324133dccbb0SHerbert Xu 
3242c8d68e6bSJason Wang 		tun->sndbuf = sndbuf;
3243c8d68e6bSJason Wang 		tun_set_sndbuf(tun);
324433dccbb0SHerbert Xu 		break;
324533dccbb0SHerbert Xu 
3246d9d52b51SMichael S. Tsirkin 	case TUNGETVNETHDRSZ:
3247d9d52b51SMichael S. Tsirkin 		vnet_hdr_sz = tun->vnet_hdr_sz;
3248d9d52b51SMichael S. Tsirkin 		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
3249d9d52b51SMichael S. Tsirkin 			ret = -EFAULT;
3250d9d52b51SMichael S. Tsirkin 		break;
3251d9d52b51SMichael S. Tsirkin 
3252d9d52b51SMichael S. Tsirkin 	case TUNSETVNETHDRSZ:
3253d9d52b51SMichael S. Tsirkin 		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
3254d9d52b51SMichael S. Tsirkin 			ret = -EFAULT;
3255d9d52b51SMichael S. Tsirkin 			break;
3256d9d52b51SMichael S. Tsirkin 		}
3257d9d52b51SMichael S. Tsirkin 		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
3258d9d52b51SMichael S. Tsirkin 			ret = -EINVAL;
3259d9d52b51SMichael S. Tsirkin 			break;
3260d9d52b51SMichael S. Tsirkin 		}
3261d9d52b51SMichael S. Tsirkin 
3262d9d52b51SMichael S. Tsirkin 		tun->vnet_hdr_sz = vnet_hdr_sz;
3263d9d52b51SMichael S. Tsirkin 		break;
3264d9d52b51SMichael S. Tsirkin 
32651cf8e410SMichael S. Tsirkin 	case TUNGETVNETLE:
32661cf8e410SMichael S. Tsirkin 		le = !!(tun->flags & TUN_VNET_LE);
32671cf8e410SMichael S. Tsirkin 		if (put_user(le, (int __user *)argp))
32681cf8e410SMichael S. Tsirkin 			ret = -EFAULT;
32691cf8e410SMichael S. Tsirkin 		break;
32701cf8e410SMichael S. Tsirkin 
32711cf8e410SMichael S. Tsirkin 	case TUNSETVNETLE:
32721cf8e410SMichael S. Tsirkin 		if (get_user(le, (int __user *)argp)) {
32731cf8e410SMichael S. Tsirkin 			ret = -EFAULT;
32741cf8e410SMichael S. Tsirkin 			break;
32751cf8e410SMichael S. Tsirkin 		}
32761cf8e410SMichael S. Tsirkin 		if (le)
32771cf8e410SMichael S. Tsirkin 			tun->flags |= TUN_VNET_LE;
32781cf8e410SMichael S. Tsirkin 		else
32791cf8e410SMichael S. Tsirkin 			tun->flags &= ~TUN_VNET_LE;
32801cf8e410SMichael S. Tsirkin 		break;
32811cf8e410SMichael S. Tsirkin 
32828b8e658bSGreg Kurz 	case TUNGETVNETBE:
32838b8e658bSGreg Kurz 		ret = tun_get_vnet_be(tun, argp);
32848b8e658bSGreg Kurz 		break;
32858b8e658bSGreg Kurz 
32868b8e658bSGreg Kurz 	case TUNSETVNETBE:
32878b8e658bSGreg Kurz 		ret = tun_set_vnet_be(tun, argp);
32888b8e658bSGreg Kurz 		break;
32898b8e658bSGreg Kurz 
329099405162SMichael S. Tsirkin 	case TUNATTACHFILTER:
329199405162SMichael S. Tsirkin 		/* Can be set only for TAPs */
329299405162SMichael S. Tsirkin 		ret = -EINVAL;
329340630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
329499405162SMichael S. Tsirkin 			break;
329599405162SMichael S. Tsirkin 		ret = -EFAULT;
329654f968d6SJason Wang 		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
329799405162SMichael S. Tsirkin 			break;
329899405162SMichael S. Tsirkin 
3299c8d68e6bSJason Wang 		ret = tun_attach_filter(tun);
330099405162SMichael S. Tsirkin 		break;
330199405162SMichael S. Tsirkin 
330299405162SMichael S. Tsirkin 	case TUNDETACHFILTER:
330399405162SMichael S. Tsirkin 		/* Can be set only for TAPs */
330499405162SMichael S. Tsirkin 		ret = -EINVAL;
330540630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
330699405162SMichael S. Tsirkin 			break;
3307c8d68e6bSJason Wang 		ret = 0;
3308c8d68e6bSJason Wang 		tun_detach_filter(tun, tun->numqueues);
330999405162SMichael S. Tsirkin 		break;
331099405162SMichael S. Tsirkin 
331176975e9cSPavel Emelyanov 	case TUNGETFILTER:
331276975e9cSPavel Emelyanov 		ret = -EINVAL;
331340630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
331476975e9cSPavel Emelyanov 			break;
331576975e9cSPavel Emelyanov 		ret = -EFAULT;
331676975e9cSPavel Emelyanov 		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
331776975e9cSPavel Emelyanov 			break;
331876975e9cSPavel Emelyanov 		ret = 0;
331976975e9cSPavel Emelyanov 		break;
332076975e9cSPavel Emelyanov 
332196f84061SJason Wang 	case TUNSETSTEERINGEBPF:
3322cd5681d7SJason Wang 		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
332396f84061SJason Wang 		break;
332496f84061SJason Wang 
3325aff3d70aSJason Wang 	case TUNSETFILTEREBPF:
3326aff3d70aSJason Wang 		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3327aff3d70aSJason Wang 		break;
3328aff3d70aSJason Wang 
332926d31925SNicolas Dichtel 	case TUNSETCARRIER:
333026d31925SNicolas Dichtel 		ret = -EFAULT;
333126d31925SNicolas Dichtel 		if (copy_from_user(&carrier, argp, sizeof(carrier)))
333226d31925SNicolas Dichtel 			goto unlock;
333326d31925SNicolas Dichtel 
333426d31925SNicolas Dichtel 		ret = tun_net_change_carrier(tun->dev, (bool)carrier);
333526d31925SNicolas Dichtel 		break;
333626d31925SNicolas Dichtel 
33370c3e0e3bSKirill Tkhai 	case TUNGETDEVNETNS:
33380c3e0e3bSKirill Tkhai 		ret = -EPERM;
33390c3e0e3bSKirill Tkhai 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
33400c3e0e3bSKirill Tkhai 			goto unlock;
33410c3e0e3bSKirill Tkhai 		ret = open_related_ns(&net->ns, get_net_ns);
33420c3e0e3bSKirill Tkhai 		break;
33430c3e0e3bSKirill Tkhai 
33441da177e4SLinus Torvalds 	default:
3345631ab46bSEric W. Biederman 		ret = -EINVAL;
3346631ab46bSEric W. Biederman 		break;
3347ee289b64SJoe Perches 	}
33481da177e4SLinus Torvalds 
334983c1f36fSSabrina Dubroca 	if (do_notify)
335083c1f36fSSabrina Dubroca 		netdev_state_change(tun->dev);
335183c1f36fSSabrina Dubroca 
3352876bfd4dSHerbert Xu unlock:
3353876bfd4dSHerbert Xu 	rtnl_unlock();
3354876bfd4dSHerbert Xu 	if (tun)
3355631ab46bSEric W. Biederman 		tun_put(tun);
3356631ab46bSEric W. Biederman 	return ret;
33571da177e4SLinus Torvalds }
33581da177e4SLinus Torvalds 
335950857e2aSArnd Bergmann static long tun_chr_ioctl(struct file *file,
336050857e2aSArnd Bergmann 			  unsigned int cmd, unsigned long arg)
336150857e2aSArnd Bergmann {
336250857e2aSArnd Bergmann 	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
336350857e2aSArnd Bergmann }
336450857e2aSArnd Bergmann 
336550857e2aSArnd Bergmann #ifdef CONFIG_COMPAT
336650857e2aSArnd Bergmann static long tun_chr_compat_ioctl(struct file *file,
336750857e2aSArnd Bergmann 			 unsigned int cmd, unsigned long arg)
336850857e2aSArnd Bergmann {
336950857e2aSArnd Bergmann 	switch (cmd) {
337050857e2aSArnd Bergmann 	case TUNSETIFF:
337150857e2aSArnd Bergmann 	case TUNGETIFF:
337250857e2aSArnd Bergmann 	case TUNSETTXFILTER:
337350857e2aSArnd Bergmann 	case TUNGETSNDBUF:
337450857e2aSArnd Bergmann 	case TUNSETSNDBUF:
337550857e2aSArnd Bergmann 	case SIOCGIFHWADDR:
337650857e2aSArnd Bergmann 	case SIOCSIFHWADDR:
337750857e2aSArnd Bergmann 		arg = (unsigned long)compat_ptr(arg);
337850857e2aSArnd Bergmann 		break;
337950857e2aSArnd Bergmann 	default:
338050857e2aSArnd Bergmann 		arg = (compat_ulong_t)arg;
338150857e2aSArnd Bergmann 		break;
338250857e2aSArnd Bergmann 	}
338350857e2aSArnd Bergmann 
338450857e2aSArnd Bergmann 	/*
338550857e2aSArnd Bergmann 	 * compat_ifreq is shorter than ifreq, so we must not access beyond
338650857e2aSArnd Bergmann 	 * the end of that structure. All fields that are used in this
338750857e2aSArnd Bergmann 	 * driver are compatible though, we don't need to convert the
338850857e2aSArnd Bergmann 	 * contents.
338950857e2aSArnd Bergmann 	 */
339050857e2aSArnd Bergmann 	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
339150857e2aSArnd Bergmann }
339250857e2aSArnd Bergmann #endif /* CONFIG_COMPAT */
339350857e2aSArnd Bergmann 
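/* FASYNC support: toggle SIGIO delivery for this queue. The actual
 * signalling happens from tun_sock_write_space() and the rx wakeup
 * paths.
 */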
33941da177e4SLinus Torvalds static int tun_chr_fasync(int fd, struct file *file, int on)
33951da177e4SLinus Torvalds {
339654f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
33971da177e4SLinus Torvalds 	int ret;
33981da177e4SLinus Torvalds 
339954f968d6SJason Wang 	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
34009d319522SJonathan Corbet 		goto out;
34011da177e4SLinus Torvalds 
34021da177e4SLinus Torvalds 	if (on) {
340301919134SEric W. Biederman 		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
340454f968d6SJason Wang 		tfile->flags |= TUN_FASYNC;
34051da177e4SLinus Torvalds 	} else
340654f968d6SJason Wang 		tfile->flags &= ~TUN_FASYNC;
34079d319522SJonathan Corbet 	ret = 0;
34089d319522SJonathan Corbet out:
34099d319522SJonathan Corbet 	return ret;
34101da177e4SLinus Torvalds }
34111da177e4SLinus Torvalds 
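/* open() of /dev/net/tun: allocate the per-queue tun_file (embedded in
 * a socket) and its tx ptr_ring. The fd is not bound to any device
 * until TUNSETIFF or TUNSETQUEUE is issued.
 */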
34121da177e4SLinus Torvalds static int tun_chr_open(struct inode *inode, struct file *file)
34131da177e4SLinus Torvalds {
3414140e807dSEric W. Biederman 	struct net *net = current->nsproxy->net_ns;
3415631ab46bSEric W. Biederman 	struct tun_file *tfile;
3416deed49fbSThomas Gleixner 
34176b8a66eeSJoe Perches 	DBG1(KERN_INFO, "tunX: tun_chr_open\n");
3418631ab46bSEric W. Biederman 
3419140e807dSEric W. Biederman 	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
342011aa9c28SEric W. Biederman 					    &tun_proto, 0);
3421631ab46bSEric W. Biederman 	if (!tfile)
3422631ab46bSEric W. Biederman 		return -ENOMEM;
3423b196d88aSJason Wang 	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
3424b196d88aSJason Wang 		sk_free(&tfile->sk);
3425b196d88aSJason Wang 		return -ENOMEM;
3426b196d88aSJason Wang 	}
3427b196d88aSJason Wang 
3428c7256f57SEric Dumazet 	mutex_init(&tfile->napi_mutex);
3429c956674bSMonam Agarwal 	RCU_INIT_POINTER(tfile->tun, NULL);
343054f968d6SJason Wang 	tfile->flags = 0;
3431fb7589a1SPavel Emelyanov 	tfile->ifindex = 0;
343254f968d6SJason Wang 
3433333f7909SAl Viro 	init_waitqueue_head(&tfile->socket.wq.wait);
343454f968d6SJason Wang 
343554f968d6SJason Wang 	tfile->socket.file = file;
343654f968d6SJason Wang 	tfile->socket.ops = &tun_socket_ops;
343754f968d6SJason Wang 
343854f968d6SJason Wang 	sock_init_data(&tfile->socket, &tfile->sk);
343954f968d6SJason Wang 
344054f968d6SJason Wang 	tfile->sk.sk_write_space = tun_sock_write_space;
344154f968d6SJason Wang 	tfile->sk.sk_sndbuf = INT_MAX;
344254f968d6SJason Wang 
3443631ab46bSEric W. Biederman 	file->private_data = tfile;
34444008e97fSJason Wang 	INIT_LIST_HEAD(&tfile->next);
344554f968d6SJason Wang 
344619a6afb2SJason Wang 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
344719a6afb2SJason Wang 
34481da177e4SLinus Torvalds 	return 0;
34491da177e4SLinus Torvalds }
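
/*
 * Usage sketch (editorial): each open() of /dev/net/tun creates a
 * fresh, as-yet unattached tfile.  With IFF_MULTI_QUEUE, several such
 * descriptors can be attached to one device, one queue each:
 *
 *	int fd1 = open("/dev/net/tun", O_RDWR);
 *	int fd2 = open("/dev/net/tun", O_RDWR);
 *
 * Issuing TUNSETIFF with IFF_TAP | IFF_MULTI_QUEUE and the same
 * ifr_name on both descriptors attaches them as two queues.
 */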
34501da177e4SLinus Torvalds 
34511da177e4SLinus Torvalds static int tun_chr_close(struct inode *inode, struct file *file)
34521da177e4SLinus Torvalds {
3453631ab46bSEric W. Biederman 	struct tun_file *tfile = file->private_data;
34541da177e4SLinus Torvalds 
3455c8d68e6bSJason Wang 	tun_detach(tfile, true);
34561da177e4SLinus Torvalds 
34571da177e4SLinus Torvalds 	return 0;
34581da177e4SLinus Torvalds }
34591da177e4SLinus Torvalds 
346093e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS
34619484dc74Syuan linyu static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
346293e14b6dSMasatake YAMATO {
34639484dc74Syuan linyu 	struct tun_file *tfile = file->private_data;
346493e14b6dSMasatake YAMATO 	struct tun_struct *tun;
346593e14b6dSMasatake YAMATO 	struct ifreq ifr;
346693e14b6dSMasatake YAMATO 
346793e14b6dSMasatake YAMATO 	memset(&ifr, 0, sizeof(ifr));
346893e14b6dSMasatake YAMATO 
346993e14b6dSMasatake YAMATO 	rtnl_lock();
34709484dc74Syuan linyu 	tun = tun_get(tfile);
347193e14b6dSMasatake YAMATO 	if (tun)
347212132768SKirill Tkhai 		tun_get_iff(tun, &ifr);
347393e14b6dSMasatake YAMATO 	rtnl_unlock();
347493e14b6dSMasatake YAMATO 
347593e14b6dSMasatake YAMATO 	if (tun)
347693e14b6dSMasatake YAMATO 		tun_put(tun);
347793e14b6dSMasatake YAMATO 
3478a3816ab0SJoe Perches 	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
347993e14b6dSMasatake YAMATO }
348093e14b6dSMasatake YAMATO #endif
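
/*
 * Example (editorial; pid and fd number made up): with the fdinfo hook
 * above, the attached interface name is visible through procfs:
 *
 *	$ cat /proc/1234/fdinfo/4
 *	...
 *	iff:	tun0
 *
 * For a descriptor that is not attached to a device, ifr stays zeroed
 * and the iff: line is printed empty.
 */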
348193e14b6dSMasatake YAMATO 
3482d54b1fdbSArjan van de Ven static const struct file_operations tun_fops = {
34831da177e4SLinus Torvalds 	.owner	= THIS_MODULE,
34841da177e4SLinus Torvalds 	.llseek = no_llseek,
34859b067034SAl Viro 	.read_iter  = tun_chr_read_iter,
3486f5ff53b4SAl Viro 	.write_iter = tun_chr_write_iter,
34871da177e4SLinus Torvalds 	.poll	= tun_chr_poll,
3488876bfd4dSHerbert Xu 	.unlocked_ioctl	= tun_chr_ioctl,
348950857e2aSArnd Bergmann #ifdef CONFIG_COMPAT
349050857e2aSArnd Bergmann 	.compat_ioctl = tun_chr_compat_ioctl,
349150857e2aSArnd Bergmann #endif
34921da177e4SLinus Torvalds 	.open	= tun_chr_open,
34931da177e4SLinus Torvalds 	.release = tun_chr_close,
349493e14b6dSMasatake YAMATO 	.fasync = tun_chr_fasync,
349593e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS
349693e14b6dSMasatake YAMATO 	.show_fdinfo = tun_chr_show_fdinfo,
349793e14b6dSMasatake YAMATO #endif
34981da177e4SLinus Torvalds };
34991da177e4SLinus Torvalds 
35001da177e4SLinus Torvalds static struct miscdevice tun_miscdev = {
35011da177e4SLinus Torvalds 	.minor = TUN_MINOR,
35021da177e4SLinus Torvalds 	.name = "tun",
3503e454cea2SKay Sievers 	.nodename = "net/tun",
35041da177e4SLinus Torvalds 	.fops = &tun_fops,
35051da177e4SLinus Torvalds };
35061da177e4SLinus Torvalds 
35071da177e4SLinus Torvalds /* ethtool interface */
35081da177e4SLinus Torvalds 
35094e24f2ddSChas Williams static void tun_default_link_ksettings(struct net_device *dev,
351029ccc49dSPhilippe Reynes 				       struct ethtool_link_ksettings *cmd)
35111da177e4SLinus Torvalds {
351229ccc49dSPhilippe Reynes 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
351329ccc49dSPhilippe Reynes 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
351429ccc49dSPhilippe Reynes 	cmd->base.speed		= SPEED_10;
351529ccc49dSPhilippe Reynes 	cmd->base.duplex	= DUPLEX_FULL;
351629ccc49dSPhilippe Reynes 	cmd->base.port		= PORT_TP;
351729ccc49dSPhilippe Reynes 	cmd->base.phy_address	= 0;
351829ccc49dSPhilippe Reynes 	cmd->base.autoneg	= AUTONEG_DISABLE;
35194e24f2ddSChas Williams }
35204e24f2ddSChas Williams 
35214e24f2ddSChas Williams static int tun_get_link_ksettings(struct net_device *dev,
35224e24f2ddSChas Williams 				  struct ethtool_link_ksettings *cmd)
35234e24f2ddSChas Williams {
35244e24f2ddSChas Williams 	struct tun_struct *tun = netdev_priv(dev);
35254e24f2ddSChas Williams 
35264e24f2ddSChas Williams 	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
35274e24f2ddSChas Williams 	return 0;
35284e24f2ddSChas Williams }
35294e24f2ddSChas Williams 
35304e24f2ddSChas Williams static int tun_set_link_ksettings(struct net_device *dev,
35314e24f2ddSChas Williams 				  const struct ethtool_link_ksettings *cmd)
35324e24f2ddSChas Williams {
35334e24f2ddSChas Williams 	struct tun_struct *tun = netdev_priv(dev);
35344e24f2ddSChas Williams 
35354e24f2ddSChas Williams 	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
35361da177e4SLinus Torvalds 	return 0;
35371da177e4SLinus Torvalds }
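
/*
 * Note (editorial): these ksettings are purely cosmetic for a virtual
 * device.  `ethtool tun0` reports the 10Mb/s full-duplex defaults set
 * by tun_default_link_ksettings(), and they can be overwritten from
 * userspace, e.g.:
 *
 *	# ethtool -s tun0 speed 1000 duplex full
 *
 * Only the reported values change; the datapath is unaffected.
 */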
35381da177e4SLinus Torvalds 
35391da177e4SLinus Torvalds static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
35401da177e4SLinus Torvalds {
35411da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
35421da177e4SLinus Torvalds 
354333a5ba14SRick Jones 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
354433a5ba14SRick Jones 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
35451da177e4SLinus Torvalds 
35461da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
354740630b82SMichael S. Tsirkin 	case IFF_TUN:
354833a5ba14SRick Jones 		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
35491da177e4SLinus Torvalds 		break;
355040630b82SMichael S. Tsirkin 	case IFF_TAP:
355133a5ba14SRick Jones 		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
35521da177e4SLinus Torvalds 		break;
35531da177e4SLinus Torvalds 	}
35541da177e4SLinus Torvalds }
35551da177e4SLinus Torvalds 
35561da177e4SLinus Torvalds static u32 tun_get_msglevel(struct net_device *dev)
35571da177e4SLinus Torvalds {
35581da177e4SLinus Torvalds #ifdef TUN_DEBUG
35591da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
35601da177e4SLinus Torvalds 	return tun->debug;
35611da177e4SLinus Torvalds #else
35621da177e4SLinus Torvalds 	return -EOPNOTSUPP;
35631da177e4SLinus Torvalds #endif
35641da177e4SLinus Torvalds }
35651da177e4SLinus Torvalds 
35661da177e4SLinus Torvalds static void tun_set_msglevel(struct net_device *dev, u32 value)
35671da177e4SLinus Torvalds {
35681da177e4SLinus Torvalds #ifdef TUN_DEBUG
35691da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
35701da177e4SLinus Torvalds 	tun->debug = value;
35711da177e4SLinus Torvalds #endif
35721da177e4SLinus Torvalds }
35731da177e4SLinus Torvalds 
35745503fcecSJason Wang static int tun_get_coalesce(struct net_device *dev,
35755503fcecSJason Wang 			    struct ethtool_coalesce *ec)
35765503fcecSJason Wang {
35775503fcecSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
35785503fcecSJason Wang 
35795503fcecSJason Wang 	ec->rx_max_coalesced_frames = tun->rx_batched;
35805503fcecSJason Wang 
35815503fcecSJason Wang 	return 0;
35825503fcecSJason Wang }
35835503fcecSJason Wang 
35845503fcecSJason Wang static int tun_set_coalesce(struct net_device *dev,
35855503fcecSJason Wang 			    struct ethtool_coalesce *ec)
35865503fcecSJason Wang {
35875503fcecSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
35885503fcecSJason Wang 
35895503fcecSJason Wang 	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
35905503fcecSJason Wang 		tun->rx_batched = NAPI_POLL_WEIGHT;
35915503fcecSJason Wang 	else
35925503fcecSJason Wang 		tun->rx_batched = ec->rx_max_coalesced_frames;
35935503fcecSJason Wang 
35945503fcecSJason Wang 	return 0;
35955503fcecSJason Wang }
35965503fcecSJason Wang 
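
/*
 * Example (editorial): rx_batched bounds how many packets tun_get_user()
 * may accumulate before flushing them into the stack; it is tuned via
 * the standard coalescing knob and clamped to NAPI_POLL_WEIGHT above:
 *
 *	# ethtool -C tun0 rx-frames 32
 */
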
35977282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops = {
35981da177e4SLinus Torvalds 	.get_drvinfo	= tun_get_drvinfo,
35991da177e4SLinus Torvalds 	.get_msglevel	= tun_get_msglevel,
36001da177e4SLinus Torvalds 	.set_msglevel	= tun_set_msglevel,
3601bee31369SNolan Leake 	.get_link	= ethtool_op_get_link,
3602eda29772SRichard Cochran 	.get_ts_info	= ethtool_op_get_ts_info,
36035503fcecSJason Wang 	.get_coalesce   = tun_get_coalesce,
36045503fcecSJason Wang 	.set_coalesce   = tun_set_coalesce,
360529ccc49dSPhilippe Reynes 	.get_link_ksettings = tun_get_link_ksettings,
36064e24f2ddSChas Williams 	.set_link_ksettings = tun_set_link_ksettings,
36071da177e4SLinus Torvalds };
36081da177e4SLinus Torvalds 
36091576d986SJason Wang static int tun_queue_resize(struct tun_struct *tun)
36101576d986SJason Wang {
36111576d986SJason Wang 	struct net_device *dev = tun->dev;
36121576d986SJason Wang 	struct tun_file *tfile;
36135990a305SJason Wang 	struct ptr_ring **rings;
36141576d986SJason Wang 	int n = tun->numqueues + tun->numdisabled;
36151576d986SJason Wang 	int ret, i;
36161576d986SJason Wang 
36175990a305SJason Wang 	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
36185990a305SJason Wang 	if (!rings)
36191576d986SJason Wang 		return -ENOMEM;
36201576d986SJason Wang 
36211576d986SJason Wang 	for (i = 0; i < tun->numqueues; i++) {
36221576d986SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
36235990a305SJason Wang 		rings[i] = &tfile->tx_ring;
36241576d986SJason Wang 	}
36251576d986SJason Wang 	list_for_each_entry(tfile, &tun->disabled, next)
36265990a305SJason Wang 		rings[i++] = &tfile->tx_ring;
36271576d986SJason Wang 
36285990a305SJason Wang 	ret = ptr_ring_resize_multiple(rings, n,
36295990a305SJason Wang 				       dev->tx_queue_len, GFP_KERNEL,
3630fc72d1d5SJason Wang 				       tun_ptr_free);
36311576d986SJason Wang 
36325990a305SJason Wang 	kfree(rings);
36331576d986SJason Wang 	return ret;
36341576d986SJason Wang }
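
/*
 * Example (editorial): tun_queue_resize() runs from the netdevice
 * notifier below on NETDEV_CHANGE_TX_QUEUE_LEN, so every per-queue
 * ptr_ring follows the new length when e.g.:
 *
 *	# ip link set dev tun0 txqueuelen 5000
 */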
36351576d986SJason Wang 
36361576d986SJason Wang static int tun_device_event(struct notifier_block *unused,
36371576d986SJason Wang 			    unsigned long event, void *ptr)
36381576d986SJason Wang {
36391576d986SJason Wang 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
36401576d986SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
364172b319dcSFei Li 	int i;
36421576d986SJason Wang 
364386dfb4acSCraig Gallek 	if (dev->rtnl_link_ops != &tun_link_ops)
364486dfb4acSCraig Gallek 		return NOTIFY_DONE;
364586dfb4acSCraig Gallek 
36461576d986SJason Wang 	switch (event) {
36471576d986SJason Wang 	case NETDEV_CHANGE_TX_QUEUE_LEN:
36481576d986SJason Wang 		if (tun_queue_resize(tun))
36491576d986SJason Wang 			return NOTIFY_BAD;
36501576d986SJason Wang 		break;
365172b319dcSFei Li 	case NETDEV_UP:
365272b319dcSFei Li 		for (i = 0; i < tun->numqueues; i++) {
365372b319dcSFei Li 			struct tun_file *tfile;
365472b319dcSFei Li 
365572b319dcSFei Li 			tfile = rtnl_dereference(tun->tfiles[i]);
365672b319dcSFei Li 			tfile->socket.sk->sk_write_space(tfile->socket.sk);
365772b319dcSFei Li 		}
365872b319dcSFei Li 		break;
36591576d986SJason Wang 	default:
36601576d986SJason Wang 		break;
36611576d986SJason Wang 	}
36621576d986SJason Wang 
36631576d986SJason Wang 	return NOTIFY_DONE;
36641576d986SJason Wang }
36651576d986SJason Wang 
36661576d986SJason Wang static struct notifier_block tun_notifier_block __read_mostly = {
36671576d986SJason Wang 	.notifier_call	= tun_device_event,
36681576d986SJason Wang };
366979d17604SPavel Emelyanov 
36701da177e4SLinus Torvalds static int __init tun_init(void)
36711da177e4SLinus Torvalds {
36721da177e4SLinus Torvalds 	int ret = 0;
36731da177e4SLinus Torvalds 
36746b8a66eeSJoe Perches 	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
36751da177e4SLinus Torvalds 
3676f019a7a5SEric W. Biederman 	ret = rtnl_link_register(&tun_link_ops);
367779d17604SPavel Emelyanov 	if (ret) {
36786b8a66eeSJoe Perches 		pr_err("Can't register link_ops\n");
3679f019a7a5SEric W. Biederman 		goto err_linkops;
368079d17604SPavel Emelyanov 	}
368179d17604SPavel Emelyanov 
36821da177e4SLinus Torvalds 	ret = misc_register(&tun_miscdev);
368379d17604SPavel Emelyanov 	if (ret) {
36846b8a66eeSJoe Perches 		pr_err("Can't register misc device %d\n", TUN_MINOR);
368579d17604SPavel Emelyanov 		goto err_misc;
368679d17604SPavel Emelyanov 	}
36871576d986SJason Wang 
36885edfbd3cSTonghao Zhang 	ret = register_netdevice_notifier(&tun_notifier_block);
36895edfbd3cSTonghao Zhang 	if (ret) {
36905edfbd3cSTonghao Zhang 		pr_err("Can't register netdevice notifier\n");
36915edfbd3cSTonghao Zhang 		goto err_notifier;
36925edfbd3cSTonghao Zhang 	}
36935edfbd3cSTonghao Zhang 
369479d17604SPavel Emelyanov 	return 0;
36955edfbd3cSTonghao Zhang 
36965edfbd3cSTonghao Zhang err_notifier:
36975edfbd3cSTonghao Zhang 	misc_deregister(&tun_miscdev);
369879d17604SPavel Emelyanov err_misc:
3699f019a7a5SEric W. Biederman 	rtnl_link_unregister(&tun_link_ops);
3700f019a7a5SEric W. Biederman err_linkops:
37011da177e4SLinus Torvalds 	return ret;
37021da177e4SLinus Torvalds }
37031da177e4SLinus Torvalds 
37041da177e4SLinus Torvalds static void tun_cleanup(void)
37051da177e4SLinus Torvalds {
37061da177e4SLinus Torvalds 	misc_deregister(&tun_miscdev);
3707f019a7a5SEric W. Biederman 	rtnl_link_unregister(&tun_link_ops);
37081576d986SJason Wang 	unregister_netdevice_notifier(&tun_notifier_block);
37091da177e4SLinus Torvalds }
37101da177e4SLinus Torvalds 
371105c2828cSMichael S. Tsirkin /* Get an underlying socket object from a tun file.  Returns an error unless
371205c2828cSMichael S. Tsirkin  * the file is attached to a device.  The returned object works like a packet
371305c2828cSMichael S. Tsirkin  * socket; it can be used for sock_sendmsg/sock_recvmsg.  The caller must hold
371405c2828cSMichael S. Tsirkin  * a reference to the file for as long as the socket is in use. */
371505c2828cSMichael S. Tsirkin struct socket *tun_get_socket(struct file *file)
371605c2828cSMichael S. Tsirkin {
37176e914fc7SJason Wang 	struct tun_file *tfile;
371805c2828cSMichael S. Tsirkin 	if (file->f_op != &tun_fops)
371905c2828cSMichael S. Tsirkin 		return ERR_PTR(-EINVAL);
37206e914fc7SJason Wang 	tfile = file->private_data;
37216e914fc7SJason Wang 	if (!tfile)
372205c2828cSMichael S. Tsirkin 		return ERR_PTR(-EBADFD);
372354f968d6SJason Wang 	return &tfile->socket;
372405c2828cSMichael S. Tsirkin }
372505c2828cSMichael S. Tsirkin EXPORT_SYMBOL_GPL(tun_get_socket);
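
/*
 * Usage sketch (editorial): vhost_net is the in-tree consumer of
 * tun_get_socket().  Userspace passes it the tun descriptor and the
 * vhost worker then uses sock_sendmsg/sock_recvmsg on the returned
 * socket (types from <linux/vhost.h>):
 *
 *	struct vhost_vring_file backend = { .index = 0, .fd = tun_fd };
 *
 *	ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend);
 */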
372605c2828cSMichael S. Tsirkin 
37275990a305SJason Wang struct ptr_ring *tun_get_tx_ring(struct file *file)
372883339c6bSJason Wang {
372983339c6bSJason Wang 	struct tun_file *tfile;
373083339c6bSJason Wang 
373183339c6bSJason Wang 	if (file->f_op != &tun_fops)
373283339c6bSJason Wang 		return ERR_PTR(-EINVAL);
373383339c6bSJason Wang 	tfile = file->private_data;
373483339c6bSJason Wang 	if (!tfile)
373583339c6bSJason Wang 		return ERR_PTR(-EBADFD);
37365990a305SJason Wang 	return &tfile->tx_ring;
373783339c6bSJason Wang }
37385990a305SJason Wang EXPORT_SYMBOL_GPL(tun_get_tx_ring);
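
/*
 * Note (editorial): like tun_get_socket(), this export exists for
 * vhost_net, which consumes skb/xdp_frame pointers directly from the
 * per-queue ptr_ring instead of going through recvmsg().
 */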
373983339c6bSJason Wang 
37401da177e4SLinus Torvalds module_init(tun_init);
37411da177e4SLinus Torvalds module_exit(tun_cleanup);
37421da177e4SLinus Torvalds MODULE_DESCRIPTION(DRV_DESCRIPTION);
37431da177e4SLinus Torvalds MODULE_AUTHOR(DRV_COPYRIGHT);
37441da177e4SLinus Torvalds MODULE_LICENSE("GPL");
37451da177e4SLinus Torvalds MODULE_ALIAS_MISCDEV(TUN_MINOR);
3746578454ffSKay Sievers MODULE_ALIAS("devname:net/tun");
3747