// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

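/* Illustrative userspace usage (a sketch, not part of the driver; error
 * handling omitted and "tap0" is just an example name). A device is
 * typically obtained by opening the clone device and issuing TUNSETIFF:
 *
 *	struct ifreq ifr = { 0 };
 *	int fd = open("/dev/net/tun", O_RDWR);
 *
 *	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ);
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
 *	ioctl(fd, TUNSETIFF, &ifr);
 *
 * after which read()/write() on fd carry raw frames for tap0. See
 * Documentation/networking/tuntap.rst for the complete interface.
 */
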
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/ip_tunnels.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE     0x80000000
#define TUN_VNET_BE     0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow the number of rx/tx queues to
 * match the maximum number of vCPUs in a guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and
 * tap_filter) needed to serve as one transmit queue for the tuntap
 * device. The sock_fprog and tap_filter are kept in tun_struct since they
 * are used to filter for the netdevice, not for a specific queue (at
 * least I didn't see the requirement for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled; the pointer from one
 * to the other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_page {
	struct page *page;
	int count;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated ____cacheline_aligned_in_smp;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
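/* TUN_NUM_FLOW_ENTRIES is a power of two, so tun_hashfn() below can
 * reduce a 32-bit rxhash with a cheap mask instead of a modulo, e.g.
 * 0x12345678 & TUN_MASK_FLOW_ENTRIES == 0x278, i.e. bucket 632.
 */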

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved into tun_file, the socket filter, sndbuf and
 * vnet header size are restored when a file is attached to a persistent
 * device, in order to preserve the behavior of such devices.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int            numqueues;
	unsigned int 		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
	u32			msg_enable;
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

bool tun_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}
EXPORT_SYMBOL(tun_is_xdp_frame);

void *tun_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_xdp_to_ptr);

void *tun_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_ptr_to_xdp);

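/* Entries in tfile->tx_ring are either sk_buff or xdp_frame pointers.
 * The helpers above tag xdp_frame pointers with TUN_XDP_FLAG in the
 * otherwise-unused low pointer bit so that a consumer can tell the two
 * apart, roughly (a sketch; handle_xdp()/handle_skb() are placeholders):
 *
 *	void *ptr = ptr_ring_consume(&tfile->tx_ring);
 *
 *	if (tun_is_xdp_frame(ptr))
 *		handle_xdp(tun_ptr_to_xdp(ptr));
 *	else
 *		handle_skb((struct sk_buff *)ptr);
 *
 * This works because both object types are aligned well enough to leave
 * the low pointer bit clear.
 */
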
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
				  NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}

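/* With IFF_NAPI, frames written by userspace are queued on
 * sk_write_queue and fed to the stack through the NAPI helpers above,
 * which gives them a chance to be coalesced by GRO. IFF_NAPI_FRAGS
 * additionally lets userspace submit frames as fragments, which is
 * mainly useful for fuzzing the driver's frame handling.
 */
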
#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

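/* vnet header endianness in short: TUN_VNET_LE (set via TUNSETVNETLE)
 * forces little-endian regardless of host byte order; otherwise legacy
 * virtio native endianness is used, unless CONFIG_TUN_VNET_CROSS_LE is
 * enabled and userspace declared a big-endian peer via TUNSETVNETBE.
 */
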
static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}

static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		netif_info(tun, tx_queued, tun->dev,
			   "create flow: hash %u index %u\n",
			   rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
		   e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		if (READ_ONCE(e->queue_index) != queue_index)
			WRITE_ONCE(e->queue_index, queue_index);
		if (e->updated != jiffies)
			e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

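/* Taken together: tun_flow_update() above is called on the userspace
 * write path and records which queue a flow (identified by its
 * symmetric hash) last used; tun_automq_select_queue() below then
 * steers kernel-to-userspace frames of the same flow back to that
 * queue. Idle entries are aged out by flow_gc_timer after ageing_time.
 */
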
/* Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash. The reason that
 * we do not check rxq no. is because some cards (e.g. the 82599) choose
 * the rxq based on the txq where the last packet of the flow went out. As
 * the userspace application moves between processors, we may get a
 * different rxq no. here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
	if (e) {
		tun_flow_save_rps_rxhash(e, txq);
		txq = e->queue_index;
	} else {
		/* use multiply and shift instead of expensive divide */
		txq = ((u64)txq * numqueues) >> 32;
	}

	return txq;
}

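/* The fallback in tun_automq_select_queue() above maps a 32-bit hash h
 * onto [0, numqueues) without a division: ((u64)h * n) >> 32 scales
 * h / 2^32 into n buckets. For example, h = 0x80000000 with n = 4 gives
 * (0x80000000ULL * 4) >> 32 = 2, i.e. the middle of the hash space
 * lands in the middle queue.
 */
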
static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u32 numqueues;
	u16 ret = 0;

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues)
		return 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}

static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;
		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
				   NULL);

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags,
		      bool publish_tun)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to the persistent device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (!err)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	if (rtnl_dereference(tun->xdp_prog))
		sock_set_flag(&tfile->sk, SOCK_XDP);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

	/* Publish tfile->tun and tun->tfiles only after we've fully
	 * initialized tfile; otherwise we risk using a half-initialized
	 * object.
	 */
	if (publish_tun)
		rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;
	tun_set_real_num_queues(tun);
out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}

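/* The top six bits of the CRC select one of the 64 bits in the two-word
 * mask: n >> 5 picks the u32 and n & 31 the bit within it. For example
 * (illustrative), n = 45 sets or tests bit 13 of mask[1].
 */
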
static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks, which is
	 * perfectly safe: we disable it first, so in the worst case we
	 * merely accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed;
	 * any unicast address leaves the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

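/* Illustrative userspace sketch (not part of the driver): the filter is
 * programmed via TUNSETTXFILTER with a struct tun_filter followed
 * directly by the addresses, e.g.:
 *
 *	struct {
 *		struct tun_filter uf;
 *		unsigned char addrs[2][ETH_ALEN];
 *	} req = { .uf = { .flags = 0, .count = 2 } };
 *
 *	memcpy(req.addrs[0], mac0, ETH_ALEN);
 *	memcpy(req.addrs[1], mac1, ETH_ALEN);
 *	ioctl(fd, TUNSETTXFILTER, &req);
 *
 * where mac0/mac1 stand in for the destination MAC addresses to accept.
 */
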
/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_header() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		struct tun_flow_entry *e;
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
		if (e)
			tun_flow_save_rps_rxhash(e, rxhash);
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}

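/* Convention for the value returned by run_ebpf_filter() (see the call
 * site in tun_net_xmit() below): 0 means drop the packet, and a value
 * smaller than skb->len means trim the packet to that many bytes before
 * delivering it.
 */
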
/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (!tfile)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0 || pskb_trim(skb, len))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for an indefinite time.
	 */
	skb_orphan(skb);

	nf_reset_ct(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

1094f271b2ccSMax Krasnyansky static void tun_net_mclist(struct net_device *dev)
10951da177e4SLinus Torvalds {
1096f271b2ccSMax Krasnyansky 	/*
1097f271b2ccSMax Krasnyansky 	 * This callback is supposed to deal with mc filter in
1098f271b2ccSMax Krasnyansky 	 * _rx_ path and has nothing to do with the _tx_ path.
1099f271b2ccSMax Krasnyansky 	 * In rx path we always accept everything userspace gives us.
1100f271b2ccSMax Krasnyansky 	 */
11011da177e4SLinus Torvalds }
11021da177e4SLinus Torvalds 
1103c8f44affSMichał Mirosław static netdev_features_t tun_net_fix_features(struct net_device *dev,
1104c8f44affSMichał Mirosław 	netdev_features_t features)
110588255375SMichał Mirosław {
110688255375SMichał Mirosław 	struct tun_struct *tun = netdev_priv(dev);
110788255375SMichał Mirosław 
110888255375SMichał Mirosław 	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
110988255375SMichał Mirosław }
1110eaea34b2SPaolo Abeni 
1111eaea34b2SPaolo Abeni static void tun_set_headroom(struct net_device *dev, int new_hr)
1112eaea34b2SPaolo Abeni {
1113eaea34b2SPaolo Abeni 	struct tun_struct *tun = netdev_priv(dev);
1114eaea34b2SPaolo Abeni 
1115eaea34b2SPaolo Abeni 	if (new_hr < NET_SKB_PAD)
1116eaea34b2SPaolo Abeni 		new_hr = NET_SKB_PAD;
1117eaea34b2SPaolo Abeni 
1118eaea34b2SPaolo Abeni 	tun->align = new_hr;
1119eaea34b2SPaolo Abeni }
1120eaea34b2SPaolo Abeni 
1121bc1f4470Sstephen hemminger static void
1122608b9977SPaolo Abeni tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1123608b9977SPaolo Abeni {
1124608b9977SPaolo Abeni 	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
1125608b9977SPaolo Abeni 	struct tun_struct *tun = netdev_priv(dev);
1126608b9977SPaolo Abeni 	struct tun_pcpu_stats *p;
1127608b9977SPaolo Abeni 	int i;
1128608b9977SPaolo Abeni 
1129608b9977SPaolo Abeni 	for_each_possible_cpu(i) {
1130608b9977SPaolo Abeni 		u64 rxpackets, rxbytes, txpackets, txbytes;
1131608b9977SPaolo Abeni 		unsigned int start;
1132608b9977SPaolo Abeni 
1133608b9977SPaolo Abeni 		p = per_cpu_ptr(tun->pcpu_stats, i);
1134608b9977SPaolo Abeni 		do {
1135608b9977SPaolo Abeni 			start = u64_stats_fetch_begin(&p->syncp);
11365260dd3eSEric Dumazet 			rxpackets	= u64_stats_read(&p->rx_packets);
11375260dd3eSEric Dumazet 			rxbytes		= u64_stats_read(&p->rx_bytes);
11385260dd3eSEric Dumazet 			txpackets	= u64_stats_read(&p->tx_packets);
11395260dd3eSEric Dumazet 			txbytes		= u64_stats_read(&p->tx_bytes);
1140608b9977SPaolo Abeni 		} while (u64_stats_fetch_retry(&p->syncp, start));
1141608b9977SPaolo Abeni 
1142608b9977SPaolo Abeni 		stats->rx_packets	+= rxpackets;
1143608b9977SPaolo Abeni 		stats->rx_bytes		+= rxbytes;
1144608b9977SPaolo Abeni 		stats->tx_packets	+= txpackets;
1145608b9977SPaolo Abeni 		stats->tx_bytes		+= txbytes;
1146608b9977SPaolo Abeni 
1147608b9977SPaolo Abeni 		/* u32 counters */
1148608b9977SPaolo Abeni 		rx_dropped	+= p->rx_dropped;
1149608b9977SPaolo Abeni 		rx_frame_errors	+= p->rx_frame_errors;
1150608b9977SPaolo Abeni 		tx_dropped	+= p->tx_dropped;
1151608b9977SPaolo Abeni 	}
1152608b9977SPaolo Abeni 	stats->rx_dropped  = rx_dropped;
1153608b9977SPaolo Abeni 	stats->rx_frame_errors = rx_frame_errors;
1154608b9977SPaolo Abeni 	stats->tx_dropped = tx_dropped;
1155608b9977SPaolo Abeni }
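
/*
 * Editorial sketch (not part of the driver): the writer side of the
 * u64_stats pattern consumed by tun_net_get_stats64() above.  On 32-bit
 * CPUs a 64-bit counter read can tear, so writers bump a per-CPU
 * seqcount around updates and the reader above retries with
 * u64_stats_fetch_begin()/u64_stats_fetch_retry().  tun_get_user()
 * further below updates its rx counters exactly like this.
 */
static inline void tun_stats_rx_add_sketch(struct tun_pcpu_stats *stats,
					   unsigned int len)
{
	u64_stats_update_begin(&stats->syncp);
	u64_stats_inc(&stats->rx_packets);
	u64_stats_add(&stats->rx_bytes, len);
	u64_stats_update_end(&stats->syncp);
}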
1156608b9977SPaolo Abeni 
1157761876c8SJason Wang static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1158761876c8SJason Wang 		       struct netlink_ext_ack *extack)
1159761876c8SJason Wang {
1160761876c8SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1161e4a2a304SJason Wang 	struct tun_file *tfile;
1162761876c8SJason Wang 	struct bpf_prog *old_prog;
1163e4a2a304SJason Wang 	int i;
1164761876c8SJason Wang 
1165761876c8SJason Wang 	old_prog = rtnl_dereference(tun->xdp_prog);
1166761876c8SJason Wang 	rcu_assign_pointer(tun->xdp_prog, prog);
1167761876c8SJason Wang 	if (old_prog)
1168761876c8SJason Wang 		bpf_prog_put(old_prog);
1169761876c8SJason Wang 
1170e4a2a304SJason Wang 	for (i = 0; i < tun->numqueues; i++) {
1171e4a2a304SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
1172e4a2a304SJason Wang 		if (prog)
1173e4a2a304SJason Wang 			sock_set_flag(&tfile->sk, SOCK_XDP);
1174e4a2a304SJason Wang 		else
1175e4a2a304SJason Wang 			sock_reset_flag(&tfile->sk, SOCK_XDP);
1176e4a2a304SJason Wang 	}
1177e4a2a304SJason Wang 	list_for_each_entry(tfile, &tun->disabled, next) {
1178e4a2a304SJason Wang 		if (prog)
1179e4a2a304SJason Wang 			sock_set_flag(&tfile->sk, SOCK_XDP);
1180e4a2a304SJason Wang 		else
1181e4a2a304SJason Wang 			sock_reset_flag(&tfile->sk, SOCK_XDP);
1182e4a2a304SJason Wang 	}
1183e4a2a304SJason Wang 
1184761876c8SJason Wang 	return 0;
1185761876c8SJason Wang }
1186761876c8SJason Wang 
1187f4e63525SJakub Kicinski static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1188761876c8SJason Wang {
1189761876c8SJason Wang 	switch (xdp->command) {
1190761876c8SJason Wang 	case XDP_SETUP_PROG:
1191761876c8SJason Wang 		return tun_xdp_set(dev, xdp->prog, xdp->extack);
1192761876c8SJason Wang 	default:
1193761876c8SJason Wang 		return -EINVAL;
1194761876c8SJason Wang 	}
1195761876c8SJason Wang }
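
/*
 * Editorial sketch (not part of the driver): attaching an XDP program
 * to a tap device from userspace with libbpf's (older) attach API; the
 * request reaches tun_xdp() above as XDP_SETUP_PROG.  prog_fd is
 * assumed to be a loaded XDP program fd; error handling is elided.
 */
#include <bpf/libbpf.h>
#include <net/if.h>

int tap_attach_xdp_sketch(const char *ifname, int prog_fd)
{
	int ifindex = if_nametoindex(ifname);

	if (!ifindex)
		return -1;
	/* flags could include XDP_FLAGS_SKB_MODE to force generic XDP */
	return bpf_set_link_xdp_fd(ifindex, prog_fd, 0);
}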
1196761876c8SJason Wang 
119726d31925SNicolas Dichtel static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
119826d31925SNicolas Dichtel {
119926d31925SNicolas Dichtel 	if (new_carrier) {
120026d31925SNicolas Dichtel 		struct tun_struct *tun = netdev_priv(dev);
120126d31925SNicolas Dichtel 
120226d31925SNicolas Dichtel 		if (!tun->numqueues)
120326d31925SNicolas Dichtel 			return -EPERM;
120426d31925SNicolas Dichtel 
120526d31925SNicolas Dichtel 		netif_carrier_on(dev);
120626d31925SNicolas Dichtel 	} else {
120726d31925SNicolas Dichtel 		netif_carrier_off(dev);
120826d31925SNicolas Dichtel 	}
120926d31925SNicolas Dichtel 	return 0;
121026d31925SNicolas Dichtel }
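
/*
 * Editorial sketch (not part of the driver): userspace toggles the
 * carrier with the TUNSETCARRIER ioctl, which ends up in
 * tun_net_change_carrier() above.  fd is an attached tun/tap fd.
 */
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int tun_set_carrier_sketch(int fd, int up)
{
	return ioctl(fd, TUNSETCARRIER, &up);
}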
121126d31925SNicolas Dichtel 
1212758e43b7SStephen Hemminger static const struct net_device_ops tun_netdev_ops = {
1213c70f1829SEric W. Biederman 	.ndo_uninit		= tun_net_uninit,
1214758e43b7SStephen Hemminger 	.ndo_open		= tun_net_open,
1215758e43b7SStephen Hemminger 	.ndo_stop		= tun_net_close,
121600829823SStephen Hemminger 	.ndo_start_xmit		= tun_net_xmit,
121788255375SMichał Mirosław 	.ndo_fix_features	= tun_net_fix_features,
1218c8d68e6bSJason Wang 	.ndo_select_queue	= tun_select_queue,
1219eaea34b2SPaolo Abeni 	.ndo_set_rx_headroom	= tun_set_headroom,
1220608b9977SPaolo Abeni 	.ndo_get_stats64	= tun_net_get_stats64,
122126d31925SNicolas Dichtel 	.ndo_change_carrier	= tun_net_change_carrier,
1222758e43b7SStephen Hemminger };
1223758e43b7SStephen Hemminger 
12240c9d917bSJesper Dangaard Brouer static void __tun_xdp_flush_tfile(struct tun_file *tfile)
12250c9d917bSJesper Dangaard Brouer {
12260c9d917bSJesper Dangaard Brouer 	/* Notify and wake up reader process */
12270c9d917bSJesper Dangaard Brouer 	if (tfile->flags & TUN_FASYNC)
12280c9d917bSJesper Dangaard Brouer 		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
12290c9d917bSJesper Dangaard Brouer 	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
12300c9d917bSJesper Dangaard Brouer }
12310c9d917bSJesper Dangaard Brouer 
123242b33468SJesper Dangaard Brouer static int tun_xdp_xmit(struct net_device *dev, int n,
123342b33468SJesper Dangaard Brouer 			struct xdp_frame **frames, u32 flags)
1234fc72d1d5SJason Wang {
1235fc72d1d5SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1236fc72d1d5SJason Wang 	struct tun_file *tfile;
1237fc72d1d5SJason Wang 	u32 numqueues;
1238735fc405SJesper Dangaard Brouer 	int drops = 0;
1239735fc405SJesper Dangaard Brouer 	int cnt = n;
1240735fc405SJesper Dangaard Brouer 	int i;
1241fc72d1d5SJason Wang 
12420c9d917bSJesper Dangaard Brouer 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
124342b33468SJesper Dangaard Brouer 		return -EINVAL;
124442b33468SJesper Dangaard Brouer 
1245fc72d1d5SJason Wang 	rcu_read_lock();
1246fc72d1d5SJason Wang 
12479871a9e4SJason Wang resample:
1248fc72d1d5SJason Wang 	numqueues = READ_ONCE(tun->numqueues);
1249fc72d1d5SJason Wang 	if (!numqueues) {
1250735fc405SJesper Dangaard Brouer 		rcu_read_unlock();
1251735fc405SJesper Dangaard Brouer 		return -ENXIO; /* Caller will free/return all frames */
1252fc72d1d5SJason Wang 	}
1253fc72d1d5SJason Wang 
1254fc72d1d5SJason Wang 	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1255fc72d1d5SJason Wang 					    numqueues]);
12569871a9e4SJason Wang 	if (unlikely(!tfile))
12579871a9e4SJason Wang 		goto resample;
1258735fc405SJesper Dangaard Brouer 
1259735fc405SJesper Dangaard Brouer 	spin_lock(&tfile->tx_ring.producer_lock);
1260735fc405SJesper Dangaard Brouer 	for (i = 0; i < n; i++) {
1261735fc405SJesper Dangaard Brouer 		struct xdp_frame *xdp = frames[i];
1262fc72d1d5SJason Wang 		/* Encode the XDP flag into the lowest bit so the consumer
1263fc72d1d5SJason Wang 		 * can distinguish an XDP buffer from an sk_buff.
1264fc72d1d5SJason Wang 		 */
1265735fc405SJesper Dangaard Brouer 		void *frame = tun_xdp_to_ptr(xdp);
1266fc72d1d5SJason Wang 
1267735fc405SJesper Dangaard Brouer 		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
1268735fc405SJesper Dangaard Brouer 			this_cpu_inc(tun->pcpu_stats->tx_dropped);
1269735fc405SJesper Dangaard Brouer 			xdp_return_frame_rx_napi(xdp);
1270735fc405SJesper Dangaard Brouer 			drops++;
1271735fc405SJesper Dangaard Brouer 		}
1272735fc405SJesper Dangaard Brouer 	}
1273735fc405SJesper Dangaard Brouer 	spin_unlock(&tfile->tx_ring.producer_lock);
1274735fc405SJesper Dangaard Brouer 
12750c9d917bSJesper Dangaard Brouer 	if (flags & XDP_XMIT_FLUSH)
12760c9d917bSJesper Dangaard Brouer 		__tun_xdp_flush_tfile(tfile);
12770c9d917bSJesper Dangaard Brouer 
1278fc72d1d5SJason Wang 	rcu_read_unlock();
1279735fc405SJesper Dangaard Brouer 	return cnt - drops;
1280fc72d1d5SJason Wang }
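
/*
 * Editorial sketch (not part of this section): the low-bit tagging that
 * tun_xdp_to_ptr()/tun_is_xdp_frame()/tun_ptr_to_xdp() rely on.  Both
 * sk_buff and xdp_frame pointers are at least word aligned, so bit 0 is
 * free to mark XDP frames in the shared tx_ring; roughly:
 */
#define TUN_XDP_FLAG_SKETCH 0x1UL

static inline void *xdp_to_ptr_sketch(struct xdp_frame *xdp)
{
	return (void *)((unsigned long)xdp | TUN_XDP_FLAG_SKETCH);
}

static inline bool is_xdp_frame_sketch(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG_SKETCH;
}

static inline struct xdp_frame *ptr_to_xdp_sketch(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~TUN_XDP_FLAG_SKETCH);
}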
1281fc72d1d5SJason Wang 
128244fa2dbdSJesper Dangaard Brouer static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
128344fa2dbdSJesper Dangaard Brouer {
12841b698fa5SLorenzo Bianconi 	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
128544fa2dbdSJesper Dangaard Brouer 
128644fa2dbdSJesper Dangaard Brouer 	if (unlikely(!frame))
128744fa2dbdSJesper Dangaard Brouer 		return -EOVERFLOW;
128844fa2dbdSJesper Dangaard Brouer 
128942421a56SJesper Dangaard Brouer 	return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
1290fc72d1d5SJason Wang }
1291fc72d1d5SJason Wang 
1292758e43b7SStephen Hemminger static const struct net_device_ops tap_netdev_ops = {
1293c70f1829SEric W. Biederman 	.ndo_uninit		= tun_net_uninit,
1294758e43b7SStephen Hemminger 	.ndo_open		= tun_net_open,
1295758e43b7SStephen Hemminger 	.ndo_stop		= tun_net_close,
129600829823SStephen Hemminger 	.ndo_start_xmit		= tun_net_xmit,
129788255375SMichał Mirosław 	.ndo_fix_features	= tun_net_fix_features,
1298afc4b13dSJiri Pirko 	.ndo_set_rx_mode	= tun_net_mclist,
1299758e43b7SStephen Hemminger 	.ndo_set_mac_address	= eth_mac_addr,
1300758e43b7SStephen Hemminger 	.ndo_validate_addr	= eth_validate_addr,
1301c8d68e6bSJason Wang 	.ndo_select_queue	= tun_select_queue,
13025e52796aSToshiaki Makita 	.ndo_features_check	= passthru_features_check,
1303eaea34b2SPaolo Abeni 	.ndo_set_rx_headroom	= tun_set_headroom,
1304608b9977SPaolo Abeni 	.ndo_get_stats64	= tun_net_get_stats64,
1305f4e63525SJakub Kicinski 	.ndo_bpf		= tun_xdp,
1306fc72d1d5SJason Wang 	.ndo_xdp_xmit		= tun_xdp_xmit,
130726d31925SNicolas Dichtel 	.ndo_change_carrier	= tun_net_change_carrier,
1308758e43b7SStephen Hemminger };
1309758e43b7SStephen Hemminger 
1310944a1376SPavel Emelyanov static void tun_flow_init(struct tun_struct *tun)
131196442e42SJason Wang {
131296442e42SJason Wang 	int i;
131396442e42SJason Wang 
131496442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
131596442e42SJason Wang 		INIT_HLIST_HEAD(&tun->flows[i]);
131696442e42SJason Wang 
131796442e42SJason Wang 	tun->ageing_time = TUN_FLOW_EXPIRE;
1318e99e88a9SKees Cook 	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1319e99e88a9SKees Cook 	mod_timer(&tun->flow_gc_timer,
1320e99e88a9SKees Cook 		  round_jiffies_up(jiffies + tun->ageing_time));
132196442e42SJason Wang }
132296442e42SJason Wang 
132396442e42SJason Wang static void tun_flow_uninit(struct tun_struct *tun)
132496442e42SJason Wang {
132596442e42SJason Wang 	del_timer_sync(&tun->flow_gc_timer);
132696442e42SJason Wang 	tun_flow_flush(tun);
132796442e42SJason Wang }
132896442e42SJason Wang 
132991572088SJarod Wilson #define MIN_MTU 68
133091572088SJarod Wilson #define MAX_MTU 65535
133191572088SJarod Wilson 
13321da177e4SLinus Torvalds /* Initialize net device. */
13331da177e4SLinus Torvalds static void tun_net_init(struct net_device *dev)
13341da177e4SLinus Torvalds {
13351da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
13361da177e4SLinus Torvalds 
13371da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
133840630b82SMichael S. Tsirkin 	case IFF_TUN:
1339758e43b7SStephen Hemminger 		dev->netdev_ops = &tun_netdev_ops;
1340b9815eb1SJason A. Donenfeld 		dev->header_ops = &ip_tunnel_header_ops;
1341758e43b7SStephen Hemminger 
13421da177e4SLinus Torvalds 		/* Point-to-Point TUN Device */
13431da177e4SLinus Torvalds 		dev->hard_header_len = 0;
13441da177e4SLinus Torvalds 		dev->addr_len = 0;
13451da177e4SLinus Torvalds 		dev->mtu = 1500;
13461da177e4SLinus Torvalds 
13471da177e4SLinus Torvalds 		/* Zero header length */
13481da177e4SLinus Torvalds 		dev->type = ARPHRD_NONE;
13491da177e4SLinus Torvalds 		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
13501da177e4SLinus Torvalds 		break;
13511da177e4SLinus Torvalds 
135240630b82SMichael S. Tsirkin 	case IFF_TAP:
13537a0a9608SKusanagi Kouichi 		dev->netdev_ops = &tap_netdev_ops;
13541da177e4SLinus Torvalds 		/* Ethernet TAP Device */
13551da177e4SLinus Torvalds 		ether_setup(dev);
1356550fd08cSNeil Horman 		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1357a676847bSstephen hemminger 		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
135836226a8dSBrian Braunstein 
1359f2cedb63SDanny Kukawka 		eth_hw_addr_random(dev);
136036226a8dSBrian Braunstein 
13611da177e4SLinus Torvalds 		break;
13621da177e4SLinus Torvalds 	}
136391572088SJarod Wilson 
136491572088SJarod Wilson 	dev->min_mtu = MIN_MTU;
136591572088SJarod Wilson 	dev->max_mtu = MAX_MTU - dev->hard_header_len;
13661da177e4SLinus Torvalds }
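
/*
 * Editorial sketch (not part of the driver): the canonical userspace
 * sequence that creates the device tun_net_init() above configures.
 * flags is e.g. IFF_TUN | IFF_NO_PI (raw IP) or IFF_TAP (Ethernet);
 * error handling is mostly elided.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int tun_alloc_sketch(char *name, short flags)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return fd;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = flags;
	if (name && *name)
		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0)
		return -1;
	if (name)
		strcpy(name, ifr.ifr_name);	/* kernel may pick e.g. "tun0" */
	return fd;
}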
13671da177e4SLinus Torvalds 
13682f3ab622SJason Wang static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
13692f3ab622SJason Wang {
13702f3ab622SJason Wang 	struct sock *sk = tfile->socket.sk;
13712f3ab622SJason Wang 
13722f3ab622SJason Wang 	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
13732f3ab622SJason Wang }
13742f3ab622SJason Wang 
13751da177e4SLinus Torvalds /* Character device part */
13761da177e4SLinus Torvalds 
13771da177e4SLinus Torvalds /* Poll */
1378afc9a42bSAl Viro static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
13791da177e4SLinus Torvalds {
1380b2430de3SEric W. Biederman 	struct tun_file *tfile = file->private_data;
13819484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
13823c8a9c63SMariusz Kozlowski 	struct sock *sk;
1383afc9a42bSAl Viro 	__poll_t mask = 0;
13841da177e4SLinus Torvalds 
13851da177e4SLinus Torvalds 	if (!tun)
1386a9a08845SLinus Torvalds 		return EPOLLERR;
13871da177e4SLinus Torvalds 
138854f968d6SJason Wang 	sk = tfile->socket.sk;
13893c8a9c63SMariusz Kozlowski 
13909e641bdcSXi Wang 	poll_wait(file, sk_sleep(sk), wait);
13911da177e4SLinus Torvalds 
13925990a305SJason Wang 	if (!ptr_ring_empty(&tfile->tx_ring))
1393a9a08845SLinus Torvalds 		mask |= EPOLLIN | EPOLLRDNORM;
13941da177e4SLinus Torvalds 
13952f3ab622SJason Wang 	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable, to
13962f3ab622SJason Wang 	 * guarantee that EPOLLOUT is raised either here or by
13972f3ab622SJason Wang 	 * tun_sock_write_space(). This way the process still gets a
13982f3ab622SJason Wang 	 * notification after it writes to a down device and meets -EIO.
13992f3ab622SJason Wang 	 */
14002f3ab622SJason Wang 	if (tun_sock_writeable(tun, tfile) ||
14019cd3e072SEric Dumazet 	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
14022f3ab622SJason Wang 	     tun_sock_writeable(tun, tfile)))
1403a9a08845SLinus Torvalds 		mask |= EPOLLOUT | EPOLLWRNORM;
140433dccbb0SHerbert Xu 
1405c70f1829SEric W. Biederman 	if (tun->dev->reg_state != NETREG_REGISTERED)
1406a9a08845SLinus Torvalds 		mask = EPOLLERR;
1407c70f1829SEric W. Biederman 
1408631ab46bSEric W. Biederman 	tun_put(tun);
14091da177e4SLinus Torvalds 	return mask;
14101da177e4SLinus Torvalds }
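
/*
 * Editorial sketch (not part of the driver): how tun_chr_poll() above
 * looks from userspace.  POLLIN means the tx_ring has packets for
 * read(); POLLOUT means the socket send buffer can take a write().
 * One read() returns exactly one packet.
 */
#include <poll.h>
#include <unistd.h>

static void tun_read_loop_sketch(int fd)
{
	char buf[2048];
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	while (poll(&pfd, 1, -1) > 0) {
		if (pfd.revents & POLLIN) {
			ssize_t n = read(fd, buf, sizeof(buf));

			if (n <= 0)
				break;
			/* buf[0..n) now holds one full packet */
		}
	}
}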
14111da177e4SLinus Torvalds 
141290e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
141390e33d45SPetar Penkov 					    size_t len,
141490e33d45SPetar Penkov 					    const struct iov_iter *it)
141590e33d45SPetar Penkov {
141690e33d45SPetar Penkov 	struct sk_buff *skb;
141790e33d45SPetar Penkov 	size_t linear;
141890e33d45SPetar Penkov 	int err;
141990e33d45SPetar Penkov 	int i;
142090e33d45SPetar Penkov 
142190e33d45SPetar Penkov 	if (it->nr_segs > MAX_SKB_FRAGS + 1)
142290e33d45SPetar Penkov 		return ERR_PTR(-ENOMEM);
142390e33d45SPetar Penkov 
142490e33d45SPetar Penkov 	local_bh_disable();
142590e33d45SPetar Penkov 	skb = napi_get_frags(&tfile->napi);
142690e33d45SPetar Penkov 	local_bh_enable();
142790e33d45SPetar Penkov 	if (!skb)
142890e33d45SPetar Penkov 		return ERR_PTR(-ENOMEM);
142990e33d45SPetar Penkov 
143090e33d45SPetar Penkov 	linear = iov_iter_single_seg_count(it);
143190e33d45SPetar Penkov 	err = __skb_grow(skb, linear);
143290e33d45SPetar Penkov 	if (err)
143390e33d45SPetar Penkov 		goto free;
143490e33d45SPetar Penkov 
143590e33d45SPetar Penkov 	skb->len = len;
143690e33d45SPetar Penkov 	skb->data_len = len - linear;
143790e33d45SPetar Penkov 	skb->truesize += skb->data_len;
143890e33d45SPetar Penkov 
143990e33d45SPetar Penkov 	for (i = 1; i < it->nr_segs; i++) {
144090e33d45SPetar Penkov 		size_t fragsz = it->iov[i].iov_len;
1441aa6daacaSEric Dumazet 		struct page *page;
1442aa6daacaSEric Dumazet 		void *frag;
144390e33d45SPetar Penkov 
144490e33d45SPetar Penkov 		if (fragsz == 0 || fragsz > PAGE_SIZE) {
144590e33d45SPetar Penkov 			err = -EINVAL;
144690e33d45SPetar Penkov 			goto free;
144790e33d45SPetar Penkov 		}
1448aa6daacaSEric Dumazet 		frag = netdev_alloc_frag(fragsz);
1449aa6daacaSEric Dumazet 		if (!frag) {
145090e33d45SPetar Penkov 			err = -ENOMEM;
145190e33d45SPetar Penkov 			goto free;
145290e33d45SPetar Penkov 		}
1453aa6daacaSEric Dumazet 		page = virt_to_head_page(frag);
1454aa6daacaSEric Dumazet 		skb_fill_page_desc(skb, i - 1, page,
1455aa6daacaSEric Dumazet 				   frag - page_address(page), fragsz);
145690e33d45SPetar Penkov 	}
145790e33d45SPetar Penkov 
145890e33d45SPetar Penkov 	return skb;
145990e33d45SPetar Penkov free:
146090e33d45SPetar Penkov 	/* frees skb and all frags allocated with netdev_alloc_frag() */
146190e33d45SPetar Penkov 	napi_free_frags(&tfile->napi);
146290e33d45SPetar Penkov 	return ERR_PTR(err);
146390e33d45SPetar Penkov }
146490e33d45SPetar Penkov 
1465f42157cbSRusty Russell /* prepad is the amount to reserve at the front.  len is the length after that.
1466f42157cbSRusty Russell  * linear is a hint as to how much to copy (usually headers). */
146754f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
146833dccbb0SHerbert Xu 				     size_t prepad, size_t len,
146933dccbb0SHerbert Xu 				     size_t linear, int noblock)
1470f42157cbSRusty Russell {
147154f968d6SJason Wang 	struct sock *sk = tfile->socket.sk;
1472f42157cbSRusty Russell 	struct sk_buff *skb;
147333dccbb0SHerbert Xu 	int err;
1474f42157cbSRusty Russell 
1475f42157cbSRusty Russell 	/* Under a page?  Don't bother with paged skb. */
14760eca93bcSHerbert Xu 	if (prepad + len < PAGE_SIZE || !linear)
147733dccbb0SHerbert Xu 		linear = len;
1478f42157cbSRusty Russell 
147933dccbb0SHerbert Xu 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
148028d64271SEric Dumazet 				   &err, 0);
1481f42157cbSRusty Russell 	if (!skb)
148233dccbb0SHerbert Xu 		return ERR_PTR(err);
1483f42157cbSRusty Russell 
1484f42157cbSRusty Russell 	skb_reserve(skb, prepad);
1485f42157cbSRusty Russell 	skb_put(skb, linear);
148633dccbb0SHerbert Xu 	skb->data_len = len - linear;
148733dccbb0SHerbert Xu 	skb->len += len - linear;
1488f42157cbSRusty Russell 
1489f42157cbSRusty Russell 	return skb;
1490f42157cbSRusty Russell }
1491f42157cbSRusty Russell 
14925503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
14935503fcecSJason Wang 			   struct sk_buff *skb, int more)
14945503fcecSJason Wang {
14955503fcecSJason Wang 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
14965503fcecSJason Wang 	struct sk_buff_head process_queue;
14975503fcecSJason Wang 	u32 rx_batched = tun->rx_batched;
14985503fcecSJason Wang 	bool rcv = false;
14995503fcecSJason Wang 
15005503fcecSJason Wang 	if (!rx_batched || (!more && skb_queue_empty(queue))) {
15015503fcecSJason Wang 		local_bh_disable();
15028ebebcbaSMatthew Cover 		skb_record_rx_queue(skb, tfile->queue_index);
15035503fcecSJason Wang 		netif_receive_skb(skb);
15045503fcecSJason Wang 		local_bh_enable();
15055503fcecSJason Wang 		return;
15065503fcecSJason Wang 	}
15075503fcecSJason Wang 
15085503fcecSJason Wang 	spin_lock(&queue->lock);
15095503fcecSJason Wang 	if (!more || skb_queue_len(queue) == rx_batched) {
15105503fcecSJason Wang 		__skb_queue_head_init(&process_queue);
15115503fcecSJason Wang 		skb_queue_splice_tail_init(queue, &process_queue);
15125503fcecSJason Wang 		rcv = true;
15135503fcecSJason Wang 	} else {
15145503fcecSJason Wang 		__skb_queue_tail(queue, skb);
15155503fcecSJason Wang 	}
15165503fcecSJason Wang 	spin_unlock(&queue->lock);
15175503fcecSJason Wang 
15185503fcecSJason Wang 	if (rcv) {
15195503fcecSJason Wang 		struct sk_buff *nskb;
15205503fcecSJason Wang 
15215503fcecSJason Wang 		local_bh_disable();
15228ebebcbaSMatthew Cover 		while ((nskb = __skb_dequeue(&process_queue))) {
15238ebebcbaSMatthew Cover 			skb_record_rx_queue(nskb, tfile->queue_index);
15245503fcecSJason Wang 			netif_receive_skb(nskb);
15258ebebcbaSMatthew Cover 		}
15268ebebcbaSMatthew Cover 		skb_record_rx_queue(skb, tfile->queue_index);
15275503fcecSJason Wang 		netif_receive_skb(skb);
15285503fcecSJason Wang 		local_bh_enable();
15295503fcecSJason Wang 	}
15305503fcecSJason Wang }
15315503fcecSJason Wang 
153266ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
153366ccbc9cSJason Wang 			      int len, int noblock, bool zerocopy)
153466ccbc9cSJason Wang {
153566ccbc9cSJason Wang 	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
153666ccbc9cSJason Wang 		return false;
153766ccbc9cSJason Wang 
153866ccbc9cSJason Wang 	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
153966ccbc9cSJason Wang 		return false;
154066ccbc9cSJason Wang 
154166ccbc9cSJason Wang 	if (!noblock)
154266ccbc9cSJason Wang 		return false;
154366ccbc9cSJason Wang 
154466ccbc9cSJason Wang 	if (zerocopy)
154566ccbc9cSJason Wang 		return false;
154666ccbc9cSJason Wang 
154766ccbc9cSJason Wang 	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
154866ccbc9cSJason Wang 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
154966ccbc9cSJason Wang 		return false;
155066ccbc9cSJason Wang 
155166ccbc9cSJason Wang 	return true;
155266ccbc9cSJason Wang }
155366ccbc9cSJason Wang 
15544b663366SAlexis Bauvin static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
15554b663366SAlexis Bauvin 				       struct page_frag *alloc_frag, char *buf,
15568ae1aff0SJason Wang 				       int buflen, int len, int pad)
1557ac1f1f6cSJason Wang {
1558ac1f1f6cSJason Wang 	struct sk_buff *skb = build_skb(buf, buflen);
1559ac1f1f6cSJason Wang 
1560ac1f1f6cSJason Wang 	if (!skb)
1561ac1f1f6cSJason Wang 		return ERR_PTR(-ENOMEM);
1562ac1f1f6cSJason Wang 
15638ae1aff0SJason Wang 	skb_reserve(skb, pad);
1564ac1f1f6cSJason Wang 	skb_put(skb, len);
15654b663366SAlexis Bauvin 	skb_set_owner_w(skb, tfile->socket.sk);
1566ac1f1f6cSJason Wang 
1567ac1f1f6cSJason Wang 	get_page(alloc_frag->page);
1568ac1f1f6cSJason Wang 	alloc_frag->offset += buflen;
1569ac1f1f6cSJason Wang 
1570ac1f1f6cSJason Wang 	return skb;
1571ac1f1f6cSJason Wang }
1572ac1f1f6cSJason Wang 
15738ae1aff0SJason Wang static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
15748ae1aff0SJason Wang 		       struct xdp_buff *xdp, u32 act)
15758ae1aff0SJason Wang {
15768ae1aff0SJason Wang 	int err;
15778ae1aff0SJason Wang 
15788ae1aff0SJason Wang 	switch (act) {
15798ae1aff0SJason Wang 	case XDP_REDIRECT:
15808ae1aff0SJason Wang 		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
15818ae1aff0SJason Wang 		if (err)
15828ae1aff0SJason Wang 			return err;
15838ae1aff0SJason Wang 		break;
15848ae1aff0SJason Wang 	case XDP_TX:
15858ae1aff0SJason Wang 		err = tun_xdp_tx(tun->dev, xdp);
15868ae1aff0SJason Wang 		if (err < 0)
15878ae1aff0SJason Wang 			return err;
15888ae1aff0SJason Wang 		break;
15898ae1aff0SJason Wang 	case XDP_PASS:
15908ae1aff0SJason Wang 		break;
15918ae1aff0SJason Wang 	default:
15928ae1aff0SJason Wang 		bpf_warn_invalid_xdp_action(act);
1593*df561f66SGustavo A. R. Silva 		fallthrough;
15948ae1aff0SJason Wang 	case XDP_ABORTED:
15958ae1aff0SJason Wang 		trace_xdp_exception(tun->dev, xdp_prog, act);
1596*df561f66SGustavo A. R. Silva 		fallthrough;
15978ae1aff0SJason Wang 	case XDP_DROP:
15988ae1aff0SJason Wang 		this_cpu_inc(tun->pcpu_stats->rx_dropped);
15998ae1aff0SJason Wang 		break;
16008ae1aff0SJason Wang 	}
16018ae1aff0SJason Wang 
16028ae1aff0SJason Wang 	return act;
16038ae1aff0SJason Wang }
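
/*
 * Editorial sketch (not part of the driver): a minimal XDP program; its
 * return code is the verdict that tun_xdp_act() above dispatches on.
 * Built with clang -target bpf; SEC() is the libbpf convention.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_drop_all_sketch(struct xdp_md *ctx)
{
	return XDP_DROP;	/* counted as rx_dropped by the code above */
}

char _license[] SEC("license") = "GPL";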
16048ae1aff0SJason Wang 
1605761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1606761876c8SJason Wang 				     struct tun_file *tfile,
160766ccbc9cSJason Wang 				     struct iov_iter *from,
1608761876c8SJason Wang 				     struct virtio_net_hdr *hdr,
16091cfe6e93SJason Wang 				     int len, int *skb_xdp)
161066ccbc9cSJason Wang {
16110bbd7dadSEric Dumazet 	struct page_frag *alloc_frag = &current->task_frag;
1612761876c8SJason Wang 	struct bpf_prog *xdp_prog;
16137df13219SJason Wang 	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
161466ccbc9cSJason Wang 	char *buf;
161566ccbc9cSJason Wang 	size_t copied;
16168ae1aff0SJason Wang 	int pad = TUN_RX_PAD;
16178ae1aff0SJason Wang 	int err = 0;
16187df13219SJason Wang 
16197df13219SJason Wang 	rcu_read_lock();
16207df13219SJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
16217df13219SJason Wang 	if (xdp_prog)
16224f23aff8SJason Wang 		pad += XDP_PACKET_HEADROOM;
16237df13219SJason Wang 	buflen += SKB_DATA_ALIGN(len + pad);
16247df13219SJason Wang 	rcu_read_unlock();
162566ccbc9cSJason Wang 
162663b9ab65SJason Wang 	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
162766ccbc9cSJason Wang 	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
162866ccbc9cSJason Wang 		return ERR_PTR(-ENOMEM);
162966ccbc9cSJason Wang 
163066ccbc9cSJason Wang 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
163166ccbc9cSJason Wang 	copied = copy_page_from_iter(alloc_frag->page,
16327df13219SJason Wang 				     alloc_frag->offset + pad,
163366ccbc9cSJason Wang 				     len, from);
163466ccbc9cSJason Wang 	if (copied != len)
163566ccbc9cSJason Wang 		return ERR_PTR(-EFAULT);
163666ccbc9cSJason Wang 
16377df13219SJason Wang 	/* There's a small window in which XDP may be set after the
16387df13219SJason Wang 	 * check of xdp_prog above; this should be rare and for simplicity
16397df13219SJason Wang 	 * we do XDP on the skb in case the headroom is not enough.
16407df13219SJason Wang 	 */
1641ac1f1f6cSJason Wang 	if (hdr->gso_type || !xdp_prog) {
16421cfe6e93SJason Wang 		*skb_xdp = 1;
16434b663366SAlexis Bauvin 		return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
16444b663366SAlexis Bauvin 				       pad);
1645ac1f1f6cSJason Wang 	}
1646ac1f1f6cSJason Wang 
16471cfe6e93SJason Wang 	*skb_xdp = 0;
164866ccbc9cSJason Wang 
16496547e387SToshiaki Makita 	local_bh_disable();
1650761876c8SJason Wang 	rcu_read_lock();
1651761876c8SJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
16528ae1aff0SJason Wang 	if (xdp_prog) {
1653761876c8SJason Wang 		struct xdp_buff xdp;
1654761876c8SJason Wang 		u32 act;
1655761876c8SJason Wang 
1656761876c8SJason Wang 		xdp.data_hard_start = buf;
16577df13219SJason Wang 		xdp.data = buf + pad;
1658de8f3a83SDaniel Borkmann 		xdp_set_data_meta_invalid(&xdp);
1659761876c8SJason Wang 		xdp.data_end = xdp.data + len;
16608bf5c4eeSJesper Dangaard Brouer 		xdp.rxq = &tfile->xdp_rxq;
1661fb3e6e93SJesper Dangaard Brouer 		xdp.frame_sz = buflen;
1662761876c8SJason Wang 
16638ae1aff0SJason Wang 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
16648ae1aff0SJason Wang 		if (act == XDP_REDIRECT || act == XDP_TX) {
1665761876c8SJason Wang 			get_page(alloc_frag->page);
1666761876c8SJason Wang 			alloc_frag->offset += buflen;
1667761876c8SJason Wang 		}
16688ae1aff0SJason Wang 		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
1669bee34890SWill Deacon 		if (err < 0) {
1670bee34890SWill Deacon 			if (act == XDP_REDIRECT || act == XDP_TX)
1671bee34890SWill Deacon 				put_page(alloc_frag->page);
1672bee34890SWill Deacon 			goto out;
1673bee34890SWill Deacon 		}
1674bee34890SWill Deacon 
16751a097910SJason Wang 		if (err == XDP_REDIRECT)
16761d233886SToke Høiland-Jørgensen 			xdp_do_flush();
16778ae1aff0SJason Wang 		if (err != XDP_PASS)
16788ae1aff0SJason Wang 			goto out;
16798ae1aff0SJason Wang 
16808ae1aff0SJason Wang 		pad = xdp.data - xdp.data_hard_start;
16818ae1aff0SJason Wang 		len = xdp.data_end - xdp.data;
1682761876c8SJason Wang 	}
1683761876c8SJason Wang 	rcu_read_unlock();
16846547e387SToshiaki Makita 	local_bh_enable();
1685291aeb2bSJason Wang 
16864b663366SAlexis Bauvin 	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
1687761876c8SJason Wang 
1688f7053b6cSJason Wang out:
1689761876c8SJason Wang 	rcu_read_unlock();
16906547e387SToshiaki Makita 	local_bh_enable();
1691761876c8SJason Wang 	return NULL;
169266ccbc9cSJason Wang }
169366ccbc9cSJason Wang 
16941da177e4SLinus Torvalds /* Get packet from user space buffer */
169554f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1696f5ff53b4SAl Viro 			    void *msg_control, struct iov_iter *from,
16975503fcecSJason Wang 			    int noblock, bool more)
16981da177e4SLinus Torvalds {
169909640e63SHarvey Harrison 	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
17001da177e4SLinus Torvalds 	struct sk_buff *skb;
1701f5ff53b4SAl Viro 	size_t total_len = iov_iter_count(from);
1702eaea34b2SPaolo Abeni 	size_t len = total_len, align = tun->align, linear;
1703f43798c2SRusty Russell 	struct virtio_net_hdr gso = { 0 };
1704608b9977SPaolo Abeni 	struct tun_pcpu_stats *stats;
170596f8d9ecSJason Wang 	int good_linear;
17060690899bSMichael S. Tsirkin 	int copylen;
17070690899bSMichael S. Tsirkin 	bool zerocopy = false;
17080690899bSMichael S. Tsirkin 	int err;
170996f84061SJason Wang 	u32 rxhash = 0;
17101cfe6e93SJason Wang 	int skb_xdp = 1;
1711af3fb24eSEric Dumazet 	bool frags = tun_napi_frags_enabled(tfile);
17121da177e4SLinus Torvalds 
171340630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_NO_PI)) {
171415718ea0SDan Carpenter 		if (len < sizeof(pi))
17151da177e4SLinus Torvalds 			return -EINVAL;
171615718ea0SDan Carpenter 		len -= sizeof(pi);
17171da177e4SLinus Torvalds 
1718cbbd26b8SAl Viro 		if (!copy_from_iter_full(&pi, sizeof(pi), from))
17191da177e4SLinus Torvalds 			return -EFAULT;
17201da177e4SLinus Torvalds 	}
17211da177e4SLinus Torvalds 
172240630b82SMichael S. Tsirkin 	if (tun->flags & IFF_VNET_HDR) {
1723e1edab87SWillem de Bruijn 		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1724e1edab87SWillem de Bruijn 
1725e1edab87SWillem de Bruijn 		if (len < vnet_hdr_sz)
1726f43798c2SRusty Russell 			return -EINVAL;
1727e1edab87SWillem de Bruijn 		len -= vnet_hdr_sz;
1728f43798c2SRusty Russell 
1729cbbd26b8SAl Viro 		if (!copy_from_iter_full(&gso, sizeof(gso), from))
1730f43798c2SRusty Russell 			return -EFAULT;
1731f43798c2SRusty Russell 
17324909122fSHerbert Xu 		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
173356f0dcc5SMichael S. Tsirkin 		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
173456f0dcc5SMichael S. Tsirkin 			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
17354909122fSHerbert Xu 
173656f0dcc5SMichael S. Tsirkin 		if (tun16_to_cpu(tun, gso.hdr_len) > len)
1737f43798c2SRusty Russell 			return -EINVAL;
1738e1edab87SWillem de Bruijn 		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1739f43798c2SRusty Russell 	}
1740f43798c2SRusty Russell 
174140630b82SMichael S. Tsirkin 	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1742a504b86eSstephen hemminger 		align += NET_IP_ALIGN;
17430eca93bcSHerbert Xu 		if (unlikely(len < ETH_HLEN ||
174456f0dcc5SMichael S. Tsirkin 			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1745e01bf1c8SRusty Russell 			return -EINVAL;
1746e01bf1c8SRusty Russell 	}
17471da177e4SLinus Torvalds 
174896f8d9ecSJason Wang 	good_linear = SKB_MAX_HEAD(align);
174996f8d9ecSJason Wang 
175088529176SJason Wang 	if (msg_control) {
1751f5ff53b4SAl Viro 		struct iov_iter i = *from;
1752f5ff53b4SAl Viro 
175388529176SJason Wang 		/* There are 256 bytes to be copied into the skb, so there
175488529176SJason Wang 		 * is enough room to expand the skb head later if needed.
17550690899bSMichael S. Tsirkin 		 * The rest of the buffer is mapped from userspace.
17560690899bSMichael S. Tsirkin 		 */
175756f0dcc5SMichael S. Tsirkin 		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
175896f8d9ecSJason Wang 		if (copylen > good_linear)
175996f8d9ecSJason Wang 			copylen = good_linear;
17603dd5c330SJason Wang 		linear = copylen;
1761f5ff53b4SAl Viro 		iov_iter_advance(&i, copylen);
1762f5ff53b4SAl Viro 		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
176388529176SJason Wang 			zerocopy = true;
176488529176SJason Wang 	}
176588529176SJason Wang 
176690e33d45SPetar Penkov 	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
17671cfe6e93SJason Wang 		/* For packets that are not easy to process here
17681cfe6e93SJason Wang 		 * (e.g. GSO or jumbo packets), we run XDP after the
17691cfe6e93SJason Wang 		 * skb has been created, via the generic XDP routine.
17701cfe6e93SJason Wang 		 */
17711cfe6e93SJason Wang 		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
177266ccbc9cSJason Wang 		if (IS_ERR(skb)) {
177366ccbc9cSJason Wang 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
177466ccbc9cSJason Wang 			return PTR_ERR(skb);
177566ccbc9cSJason Wang 		}
1776761876c8SJason Wang 		if (!skb)
1777761876c8SJason Wang 			return total_len;
177866ccbc9cSJason Wang 	} else {
177988529176SJason Wang 		if (!zerocopy) {
17800690899bSMichael S. Tsirkin 			copylen = len;
178156f0dcc5SMichael S. Tsirkin 			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
178296f8d9ecSJason Wang 				linear = good_linear;
178396f8d9ecSJason Wang 			else
178456f0dcc5SMichael S. Tsirkin 				linear = tun16_to_cpu(tun, gso.hdr_len);
17853dd5c330SJason Wang 		}
17860690899bSMichael S. Tsirkin 
178790e33d45SPetar Penkov 		if (frags) {
178890e33d45SPetar Penkov 			mutex_lock(&tfile->napi_mutex);
178990e33d45SPetar Penkov 			skb = tun_napi_alloc_frags(tfile, copylen, from);
179090e33d45SPetar Penkov 			/* tun_napi_alloc_frags() enforces a layout for the skb.
179190e33d45SPetar Penkov 			 * If zerocopy is enabled, then this layout will be
179290e33d45SPetar Penkov 			 * overwritten by zerocopy_sg_from_iter().
179390e33d45SPetar Penkov 			 */
179490e33d45SPetar Penkov 			zerocopy = false;
179590e33d45SPetar Penkov 		} else {
179690e33d45SPetar Penkov 			skb = tun_alloc_skb(tfile, align, copylen, linear,
179790e33d45SPetar Penkov 					    noblock);
179890e33d45SPetar Penkov 		}
179990e33d45SPetar Penkov 
180033dccbb0SHerbert Xu 		if (IS_ERR(skb)) {
180133dccbb0SHerbert Xu 			if (PTR_ERR(skb) != -EAGAIN)
1802608b9977SPaolo Abeni 				this_cpu_inc(tun->pcpu_stats->rx_dropped);
180390e33d45SPetar Penkov 			if (frags)
180490e33d45SPetar Penkov 				mutex_unlock(&tfile->napi_mutex);
180533dccbb0SHerbert Xu 			return PTR_ERR(skb);
18061da177e4SLinus Torvalds 		}
18071da177e4SLinus Torvalds 
18080690899bSMichael S. Tsirkin 		if (zerocopy)
1809f5ff53b4SAl Viro 			err = zerocopy_sg_from_iter(skb, from);
1810af1cc7a2SJason Wang 		else
1811f5ff53b4SAl Viro 			err = skb_copy_datagram_from_iter(skb, 0, from, len);
18120690899bSMichael S. Tsirkin 
18130690899bSMichael S. Tsirkin 		if (err) {
18144477138fSEric Dumazet 			err = -EFAULT;
18154477138fSEric Dumazet drop:
1816608b9977SPaolo Abeni 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
18178f22757eSDave Jones 			kfree_skb(skb);
181890e33d45SPetar Penkov 			if (frags) {
181990e33d45SPetar Penkov 				tfile->napi.skb = NULL;
182090e33d45SPetar Penkov 				mutex_unlock(&tfile->napi_mutex);
182190e33d45SPetar Penkov 			}
182290e33d45SPetar Penkov 
18234477138fSEric Dumazet 			return err;
18248f22757eSDave Jones 		}
182566ccbc9cSJason Wang 	}
18261da177e4SLinus Torvalds 
18273e9e40e7SJarno Rajahalme 	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1828df10db98SPaolo Abeni 		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1829df10db98SPaolo Abeni 		kfree_skb(skb);
183090e33d45SPetar Penkov 		if (frags) {
183190e33d45SPetar Penkov 			tfile->napi.skb = NULL;
183290e33d45SPetar Penkov 			mutex_unlock(&tfile->napi_mutex);
183390e33d45SPetar Penkov 		}
183490e33d45SPetar Penkov 
1835df10db98SPaolo Abeni 		return -EINVAL;
1836df10db98SPaolo Abeni 	}
1837df10db98SPaolo Abeni 
18381da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
183940630b82SMichael S. Tsirkin 	case IFF_TUN:
184040630b82SMichael S. Tsirkin 		if (tun->flags & IFF_NO_PI) {
18412580c4c1SAlexander Potapenko 			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
18422580c4c1SAlexander Potapenko 
18432580c4c1SAlexander Potapenko 			switch (ip_version) {
18442580c4c1SAlexander Potapenko 			case 4:
1845f09f7ee2SAng Way Chuang 				pi.proto = htons(ETH_P_IP);
1846f09f7ee2SAng Way Chuang 				break;
18472580c4c1SAlexander Potapenko 			case 6:
1848f09f7ee2SAng Way Chuang 				pi.proto = htons(ETH_P_IPV6);
1849f09f7ee2SAng Way Chuang 				break;
1850f09f7ee2SAng Way Chuang 			default:
1851608b9977SPaolo Abeni 				this_cpu_inc(tun->pcpu_stats->rx_dropped);
1852f09f7ee2SAng Way Chuang 				kfree_skb(skb);
1853f09f7ee2SAng Way Chuang 				return -EINVAL;
1854f09f7ee2SAng Way Chuang 			}
1855f09f7ee2SAng Way Chuang 		}
1856f09f7ee2SAng Way Chuang 
1857459a98edSArnaldo Carvalho de Melo 		skb_reset_mac_header(skb);
18581da177e4SLinus Torvalds 		skb->protocol = pi.proto;
18594c13eb66SArnaldo Carvalho de Melo 		skb->dev = tun->dev;
18601da177e4SLinus Torvalds 		break;
186140630b82SMichael S. Tsirkin 	case IFF_TAP:
186296aa1b22SWillem de Bruijn 		if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
186396aa1b22SWillem de Bruijn 			err = -ENOMEM;
186496aa1b22SWillem de Bruijn 			goto drop;
186596aa1b22SWillem de Bruijn 		}
18661da177e4SLinus Torvalds 		skb->protocol = eth_type_trans(skb, tun->dev);
18671da177e4SLinus Torvalds 		break;
18686403eab1SJoe Perches 	}
18691da177e4SLinus Torvalds 
18700690899bSMichael S. Tsirkin 	/* Copy the skb_ubuf_info for the callback when the skb has no error. */
18710690899bSMichael S. Tsirkin 	if (zerocopy) {
18720690899bSMichael S. Tsirkin 		skb_shinfo(skb)->destructor_arg = msg_control;
18730690899bSMichael S. Tsirkin 		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1874c9af6db4SPravin B Shelar 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1875af1cc7a2SJason Wang 	} else if (msg_control) {
1876af1cc7a2SJason Wang 		struct ubuf_info *uarg = msg_control;
1877af1cc7a2SJason Wang 		uarg->callback(uarg, false);
18780690899bSMichael S. Tsirkin 	}
18790690899bSMichael S. Tsirkin 
188072f65107SVlad Yasevich 	skb_reset_network_header(skb);
1881d2aa125dSMaxim Mikityanskiy 	skb_probe_transport_header(skb);
18823fe260e0SGilberto Bertin 	skb_record_rx_queue(skb, tfile->queue_index);
188338502af7SJason Wang 
18841cfe6e93SJason Wang 	if (skb_xdp) {
1885761876c8SJason Wang 		struct bpf_prog *xdp_prog;
1886761876c8SJason Wang 		int ret;
1887761876c8SJason Wang 
18886547e387SToshiaki Makita 		local_bh_disable();
1889761876c8SJason Wang 		rcu_read_lock();
1890761876c8SJason Wang 		xdp_prog = rcu_dereference(tun->xdp_prog);
1891761876c8SJason Wang 		if (xdp_prog) {
1892761876c8SJason Wang 			ret = do_xdp_generic(xdp_prog, skb);
1893761876c8SJason Wang 			if (ret != XDP_PASS) {
1894761876c8SJason Wang 				rcu_read_unlock();
18956547e387SToshiaki Makita 				local_bh_enable();
18961efba987SEric Dumazet 				if (frags) {
18971efba987SEric Dumazet 					tfile->napi.skb = NULL;
18981efba987SEric Dumazet 					mutex_unlock(&tfile->napi_mutex);
18991efba987SEric Dumazet 				}
1900761876c8SJason Wang 				return total_len;
1901761876c8SJason Wang 			}
1902761876c8SJason Wang 		}
1903761876c8SJason Wang 		rcu_read_unlock();
19046547e387SToshiaki Makita 		local_bh_enable();
1905761876c8SJason Wang 	}
1906761876c8SJason Wang 
1907cf1a1e07SPaolo Abeni 	/* Compute the costly rx hash only if needed for flow updates.
1908cf1a1e07SPaolo Abeni 	 * There is a very small possibility of out-of-order delivery
1909cf1a1e07SPaolo Abeni 	 * while switching queues; it is not worth optimizing for.
1910cf1a1e07SPaolo Abeni 	 */
1911cf1a1e07SPaolo Abeni 	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1912cf1a1e07SPaolo Abeni 	    !tfile->detached)
1913feec084aSJason Wang 		rxhash = __skb_get_hash_symmetric(skb);
191494317099SPetar Penkov 
19154477138fSEric Dumazet 	rcu_read_lock();
19164477138fSEric Dumazet 	if (unlikely(!(tun->dev->flags & IFF_UP))) {
19174477138fSEric Dumazet 		err = -EIO;
19189180bb4fSEric Dumazet 		rcu_read_unlock();
19194477138fSEric Dumazet 		goto drop;
19204477138fSEric Dumazet 	}
19214477138fSEric Dumazet 
192290e33d45SPetar Penkov 	if (frags) {
192396aa1b22SWillem de Bruijn 		u32 headlen;
192496aa1b22SWillem de Bruijn 
192590e33d45SPetar Penkov 		/* Exercise flow dissector code path. */
192696aa1b22SWillem de Bruijn 		skb_push(skb, ETH_HLEN);
192796aa1b22SWillem de Bruijn 		headlen = eth_get_headlen(tun->dev, skb->data,
1928c43f1255SStanislav Fomichev 					  skb_headlen(skb));
192990e33d45SPetar Penkov 
1930010f245bSEric Dumazet 		if (unlikely(headlen > skb_headlen(skb))) {
193190e33d45SPetar Penkov 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
193290e33d45SPetar Penkov 			napi_free_frags(&tfile->napi);
19334477138fSEric Dumazet 			rcu_read_unlock();
193490e33d45SPetar Penkov 			mutex_unlock(&tfile->napi_mutex);
193590e33d45SPetar Penkov 			WARN_ON(1);
193690e33d45SPetar Penkov 			return -ENOMEM;
193790e33d45SPetar Penkov 		}
193890e33d45SPetar Penkov 
193990e33d45SPetar Penkov 		local_bh_disable();
194090e33d45SPetar Penkov 		napi_gro_frags(&tfile->napi);
194190e33d45SPetar Penkov 		local_bh_enable();
194290e33d45SPetar Penkov 		mutex_unlock(&tfile->napi_mutex);
1943aec72f33SEric Dumazet 	} else if (tfile->napi_enabled) {
194494317099SPetar Penkov 		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
194594317099SPetar Penkov 		int queue_len;
194694317099SPetar Penkov 
194794317099SPetar Penkov 		spin_lock_bh(&queue->lock);
194894317099SPetar Penkov 		__skb_queue_tail(queue, skb);
194994317099SPetar Penkov 		queue_len = skb_queue_len(queue);
195094317099SPetar Penkov 		spin_unlock(&queue->lock);
195194317099SPetar Penkov 
195294317099SPetar Penkov 		if (!more || queue_len > NAPI_POLL_WEIGHT)
195394317099SPetar Penkov 			napi_schedule(&tfile->napi);
195494317099SPetar Penkov 
195594317099SPetar Penkov 		local_bh_enable();
195694317099SPetar Penkov 	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
19575503fcecSJason Wang 		tun_rx_batched(tun, tfile, skb, more);
195894317099SPetar Penkov 	} else {
19591da177e4SLinus Torvalds 		netif_rx_ni(skb);
196094317099SPetar Penkov 	}
19614477138fSEric Dumazet 	rcu_read_unlock();
19621da177e4SLinus Torvalds 
1963608b9977SPaolo Abeni 	stats = get_cpu_ptr(tun->pcpu_stats);
1964608b9977SPaolo Abeni 	u64_stats_update_begin(&stats->syncp);
19655260dd3eSEric Dumazet 	u64_stats_inc(&stats->rx_packets);
19665260dd3eSEric Dumazet 	u64_stats_add(&stats->rx_bytes, len);
1967608b9977SPaolo Abeni 	u64_stats_update_end(&stats->syncp);
1968608b9977SPaolo Abeni 	put_cpu_ptr(stats);
19691da177e4SLinus Torvalds 
197096f84061SJason Wang 	if (rxhash)
19719e85722dSJason Wang 		tun_flow_update(tun, rxhash, tfile);
197296f84061SJason Wang 
19730690899bSMichael S. Tsirkin 	return total_len;
19741da177e4SLinus Torvalds }
19751da177e4SLinus Torvalds 
1976f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
19771da177e4SLinus Torvalds {
197833dccbb0SHerbert Xu 	struct file *file = iocb->ki_filp;
197954f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
19809484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
1981631ab46bSEric W. Biederman 	ssize_t result;
19821da177e4SLinus Torvalds 
19831da177e4SLinus Torvalds 	if (!tun)
19841da177e4SLinus Torvalds 		return -EBADFD;
19851da177e4SLinus Torvalds 
19865503fcecSJason Wang 	result = tun_get_user(tun, tfile, NULL, from,
19875503fcecSJason Wang 			      file->f_flags & O_NONBLOCK, false);
1988631ab46bSEric W. Biederman 
1989631ab46bSEric W. Biederman 	tun_put(tun);
1990631ab46bSEric W. Biederman 	return result;
19911da177e4SLinus Torvalds }
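
/*
 * Editorial sketch (not part of the driver): injecting a packet through
 * tun_chr_write_iter()/tun_get_user() above on an fd opened with
 * IFF_VNET_HDR.  Each write must be prefixed by a struct virtio_net_hdr
 * (zeroed here: no checksum or GSO offload); this assumes the default
 * vnet header size.
 */
#include <string.h>
#include <sys/uio.h>
#include <linux/virtio_net.h>

static ssize_t tun_write_pkt_sketch(int fd, const void *pkt, size_t len)
{
	struct virtio_net_hdr hdr;
	struct iovec iov[2] = {
		{ .iov_base = &hdr,        .iov_len = sizeof(hdr) },
		{ .iov_base = (void *)pkt, .iov_len = len },
	};

	memset(&hdr, 0, sizeof(hdr));
	return writev(fd, iov, 2);
}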
19921da177e4SLinus Torvalds 
1993fc72d1d5SJason Wang static ssize_t tun_put_user_xdp(struct tun_struct *tun,
1994fc72d1d5SJason Wang 				struct tun_file *tfile,
19951ffcbc85SJesper Dangaard Brouer 				struct xdp_frame *xdp_frame,
1996fc72d1d5SJason Wang 				struct iov_iter *iter)
1997fc72d1d5SJason Wang {
1998fc72d1d5SJason Wang 	int vnet_hdr_sz = 0;
19991ffcbc85SJesper Dangaard Brouer 	size_t size = xdp_frame->len;
2000fc72d1d5SJason Wang 	struct tun_pcpu_stats *stats;
2001fc72d1d5SJason Wang 	size_t ret;
2002fc72d1d5SJason Wang 
2003fc72d1d5SJason Wang 	if (tun->flags & IFF_VNET_HDR) {
2004fc72d1d5SJason Wang 		struct virtio_net_hdr gso = { 0 };
2005fc72d1d5SJason Wang 
2006fc72d1d5SJason Wang 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2007fc72d1d5SJason Wang 		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
2008fc72d1d5SJason Wang 			return -EINVAL;
2009fc72d1d5SJason Wang 		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
2010fc72d1d5SJason Wang 			     sizeof(gso)))
2011fc72d1d5SJason Wang 			return -EFAULT;
2012fc72d1d5SJason Wang 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2013fc72d1d5SJason Wang 	}
2014fc72d1d5SJason Wang 
20151ffcbc85SJesper Dangaard Brouer 	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
2016fc72d1d5SJason Wang 
2017fc72d1d5SJason Wang 	stats = get_cpu_ptr(tun->pcpu_stats);
2018fc72d1d5SJason Wang 	u64_stats_update_begin(&stats->syncp);
20195260dd3eSEric Dumazet 	u64_stats_inc(&stats->tx_packets);
20205260dd3eSEric Dumazet 	u64_stats_add(&stats->tx_bytes, ret);
2021fc72d1d5SJason Wang 	u64_stats_update_end(&stats->syncp);
2022fc72d1d5SJason Wang 	put_cpu_ptr(tun->pcpu_stats);
2023fc72d1d5SJason Wang 
2024fc72d1d5SJason Wang 	return ret;
2025fc72d1d5SJason Wang }
2026fc72d1d5SJason Wang 
20271da177e4SLinus Torvalds /* Put packet to the user space buffer */
20286f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun,
202954f968d6SJason Wang 			    struct tun_file *tfile,
20301da177e4SLinus Torvalds 			    struct sk_buff *skb,
2031e0b46d0eSHerbert Xu 			    struct iov_iter *iter)
20321da177e4SLinus Torvalds {
20331da177e4SLinus Torvalds 	struct tun_pi pi = { 0, skb->protocol };
2034608b9977SPaolo Abeni 	struct tun_pcpu_stats *stats;
2035e0b46d0eSHerbert Xu 	ssize_t total;
20368c847d25SJason Wang 	int vlan_offset = 0;
2037a8f9bfdfSHerbert Xu 	int vlan_hlen = 0;
20382eb783c4SHerbert Xu 	int vnet_hdr_sz = 0;
2039a8f9bfdfSHerbert Xu 
2040df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
2041a8f9bfdfSHerbert Xu 		vlan_hlen = VLAN_HLEN;
20421da177e4SLinus Torvalds 
204340630b82SMichael S. Tsirkin 	if (tun->flags & IFF_VNET_HDR)
2044e1edab87SWillem de Bruijn 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
20451da177e4SLinus Torvalds 
2046e0b46d0eSHerbert Xu 	total = skb->len + vlan_hlen + vnet_hdr_sz;
2047e0b46d0eSHerbert Xu 
204840630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_NO_PI)) {
2049e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < sizeof(pi))
20501da177e4SLinus Torvalds 			return -EINVAL;
20511da177e4SLinus Torvalds 
2052e0b46d0eSHerbert Xu 		total += sizeof(pi);
2053e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < total) {
20541da177e4SLinus Torvalds 			/* Packet will be stripped */
20551da177e4SLinus Torvalds 			pi.flags |= TUN_PKT_STRIP;
20561da177e4SLinus Torvalds 		}
20571da177e4SLinus Torvalds 
2058e0b46d0eSHerbert Xu 		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
20591da177e4SLinus Torvalds 			return -EFAULT;
20601da177e4SLinus Torvalds 	}
20611da177e4SLinus Torvalds 
20622eb783c4SHerbert Xu 	if (vnet_hdr_sz) {
20639403cd7cSJarno Rajahalme 		struct virtio_net_hdr gso;
206434166093SMike Rapoport 
2065e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < vnet_hdr_sz)
2066f43798c2SRusty Russell 			return -EINVAL;
2067f43798c2SRusty Russell 
20683e9e40e7SJarno Rajahalme 		if (virtio_net_hdr_from_skb(skb, &gso,
2069fd3a8862SWillem de Bruijn 					    tun_is_little_endian(tun), true,
2070fd3a8862SWillem de Bruijn 					    vlan_hlen)) {
2071f43798c2SRusty Russell 			struct skb_shared_info *sinfo = skb_shinfo(skb);
20726b8a66eeSJoe Perches 			pr_err("unexpected GSO type: "
2073ef3db4a5SMichael S. Tsirkin 			       "0x%x, gso_size %d, hdr_len %d\n",
207456f0dcc5SMichael S. Tsirkin 			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
207556f0dcc5SMichael S. Tsirkin 			       tun16_to_cpu(tun, gso.hdr_len));
2076ef3db4a5SMichael S. Tsirkin 			print_hex_dump(KERN_ERR, "tun: ",
2077ef3db4a5SMichael S. Tsirkin 				       DUMP_PREFIX_NONE,
2078ef3db4a5SMichael S. Tsirkin 				       16, 1, skb->head,
207956f0dcc5SMichael S. Tsirkin 				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2080ef3db4a5SMichael S. Tsirkin 			WARN_ON_ONCE(1);
2081ef3db4a5SMichael S. Tsirkin 			return -EINVAL;
2082ef3db4a5SMichael S. Tsirkin 		}
2083f43798c2SRusty Russell 
2084e0b46d0eSHerbert Xu 		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2085f43798c2SRusty Russell 			return -EFAULT;
20868c847d25SJason Wang 
20878c847d25SJason Wang 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2088f43798c2SRusty Russell 	}
2089f43798c2SRusty Russell 
2090a8f9bfdfSHerbert Xu 	if (vlan_hlen) {
2091e0b46d0eSHerbert Xu 		int ret;
2092aff3d70aSJason Wang 		struct veth veth;
20931da177e4SLinus Torvalds 
20946680ec68SJason Wang 		veth.h_vlan_proto = skb->vlan_proto;
2095df8a39deSJiri Pirko 		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
20961da177e4SLinus Torvalds 
20976680ec68SJason Wang 		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
20986680ec68SJason Wang 
2099e0b46d0eSHerbert Xu 		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2100e0b46d0eSHerbert Xu 		if (ret || !iov_iter_count(iter))
21016680ec68SJason Wang 			goto done;
21026680ec68SJason Wang 
2103e0b46d0eSHerbert Xu 		ret = copy_to_iter(&veth, sizeof(veth), iter);
2104e0b46d0eSHerbert Xu 		if (ret != sizeof(veth) || !iov_iter_count(iter))
21056680ec68SJason Wang 			goto done;
21066680ec68SJason Wang 	}
21076680ec68SJason Wang 
2108e0b46d0eSHerbert Xu 	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
21096680ec68SJason Wang 
21106680ec68SJason Wang done:
2111608b9977SPaolo Abeni 	/* caller is in process context */
2112608b9977SPaolo Abeni 	stats = get_cpu_ptr(tun->pcpu_stats);
2113608b9977SPaolo Abeni 	u64_stats_update_begin(&stats->syncp);
21145260dd3eSEric Dumazet 	u64_stats_inc(&stats->tx_packets);
21155260dd3eSEric Dumazet 	u64_stats_add(&stats->tx_bytes, skb->len + vlan_hlen);
2116608b9977SPaolo Abeni 	u64_stats_update_end(&stats->syncp);
2117608b9977SPaolo Abeni 	put_cpu_ptr(tun->pcpu_stats);
21181da177e4SLinus Torvalds 
21191da177e4SLinus Torvalds 	return total;
21201da177e4SLinus Torvalds }
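
/*
 * Editorial sketch (not part of the driver): the reader-side framing
 * produced by tun_put_user() above when IFF_NO_PI is unset.  Each
 * packet is preceded by struct tun_pi; TUN_PKT_STRIP in pi.flags means
 * the supplied buffer was too small and the packet was truncated.
 */
#include <string.h>
#include <unistd.h>
#include <linux/if_tun.h>

static ssize_t tun_read_pkt_sketch(int fd, void *buf, size_t len,
				   struct tun_pi *pi)
{
	char tmp[sizeof(*pi) + 2048];
	ssize_t n = read(fd, tmp, sizeof(tmp));

	if (n < (ssize_t)sizeof(*pi))
		return -1;
	memcpy(pi, tmp, sizeof(*pi));	/* flags + ethertype (pi->proto) */
	n -= sizeof(*pi);
	if ((size_t)n > len)
		n = len;
	memcpy(buf, tmp + sizeof(*pi), n);
	return n;
}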
21211da177e4SLinus Torvalds 
2122fc72d1d5SJason Wang static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
21231576d986SJason Wang {
21241576d986SJason Wang 	DECLARE_WAITQUEUE(wait, current);
2125fc72d1d5SJason Wang 	void *ptr = NULL;
2126f48cc6b2SJason Wang 	int error = 0;
21271576d986SJason Wang 
2128fc72d1d5SJason Wang 	ptr = ptr_ring_consume(&tfile->tx_ring);
2129fc72d1d5SJason Wang 	if (ptr)
21301576d986SJason Wang 		goto out;
21311576d986SJason Wang 	if (noblock) {
2132f48cc6b2SJason Wang 		error = -EAGAIN;
21331576d986SJason Wang 		goto out;
21341576d986SJason Wang 	}
21351576d986SJason Wang 
2136333f7909SAl Viro 	add_wait_queue(&tfile->socket.wq.wait, &wait);
21371576d986SJason Wang 
21381576d986SJason Wang 	while (1) {
213971828b22STimur Celik 		set_current_state(TASK_INTERRUPTIBLE);
2140fc72d1d5SJason Wang 		ptr = ptr_ring_consume(&tfile->tx_ring);
2141fc72d1d5SJason Wang 		if (ptr)
21421576d986SJason Wang 			break;
21431576d986SJason Wang 		if (signal_pending(current)) {
2144f48cc6b2SJason Wang 			error = -ERESTARTSYS;
21451576d986SJason Wang 			break;
21461576d986SJason Wang 		}
21471576d986SJason Wang 		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2148f48cc6b2SJason Wang 			error = -EFAULT;
21491576d986SJason Wang 			break;
21501576d986SJason Wang 		}
21511576d986SJason Wang 
21521576d986SJason Wang 		schedule();
21531576d986SJason Wang 	}
21541576d986SJason Wang 
2155ecef67cbSTimur Celik 	__set_current_state(TASK_RUNNING);
2156333f7909SAl Viro 	remove_wait_queue(&tfile->socket.wq.wait, &wait);
21571576d986SJason Wang 
21581576d986SJason Wang out:
2159f48cc6b2SJason Wang 	*err = error;
2160fc72d1d5SJason Wang 	return ptr;
21611576d986SJason Wang }
21621576d986SJason Wang 
216354f968d6SJason Wang static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
21649b067034SAl Viro 			   struct iov_iter *to,
2165fc72d1d5SJason Wang 			   int noblock, void *ptr)
21661da177e4SLinus Torvalds {
21679b067034SAl Viro 	ssize_t ret;
21681576d986SJason Wang 	int err;
21691da177e4SLinus Torvalds 
2170c33ee15bSWei Xu 	if (!iov_iter_count(to)) {
2171fc72d1d5SJason Wang 		tun_ptr_free(ptr);
21729b067034SAl Viro 		return 0;
2173c33ee15bSWei Xu 	}
21741da177e4SLinus Torvalds 
2175fc72d1d5SJason Wang 	if (!ptr) {
21761576d986SJason Wang 		/* Read frames from ring */
2177fc72d1d5SJason Wang 		ptr = tun_ring_recv(tfile, noblock, &err);
2178fc72d1d5SJason Wang 		if (!ptr)
2179957f094fSAlex Gartrell 			return err;
2180ac77cfd4SJason Wang 	}
2181e0b46d0eSHerbert Xu 
21821ffcbc85SJesper Dangaard Brouer 	if (tun_is_xdp_frame(ptr)) {
21831ffcbc85SJesper Dangaard Brouer 		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2184fc72d1d5SJason Wang 
21851ffcbc85SJesper Dangaard Brouer 		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
218603993094SJesper Dangaard Brouer 		xdp_return_frame(xdpf);
2187fc72d1d5SJason Wang 	} else {
2188fc72d1d5SJason Wang 		struct sk_buff *skb = ptr;
2189fc72d1d5SJason Wang 
21909b067034SAl Viro 		ret = tun_put_user(tun, tfile, skb, to);
2191f51a5e82SJason Wang 		if (unlikely(ret < 0))
21921da177e4SLinus Torvalds 			kfree_skb(skb);
2193f51a5e82SJason Wang 		else
2194f51a5e82SJason Wang 			consume_skb(skb);
2195fc72d1d5SJason Wang 	}
21961da177e4SLinus Torvalds 
219705c2828cSMichael S. Tsirkin 	return ret;
219805c2828cSMichael S. Tsirkin }
219905c2828cSMichael S. Tsirkin 
22009b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
220105c2828cSMichael S. Tsirkin {
220205c2828cSMichael S. Tsirkin 	struct file *file = iocb->ki_filp;
220305c2828cSMichael S. Tsirkin 	struct tun_file *tfile = file->private_data;
22049484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
22059b067034SAl Viro 	ssize_t len = iov_iter_count(to), ret;
220605c2828cSMichael S. Tsirkin 
220705c2828cSMichael S. Tsirkin 	if (!tun)
220805c2828cSMichael S. Tsirkin 		return -EBADFD;
2209ac77cfd4SJason Wang 	ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
221042404c09SDavid S. Miller 	ret = min_t(ssize_t, ret, len);
2211d0b7da8aSZhi Yong Wu 	if (ret > 0)
2212d0b7da8aSZhi Yong Wu 		iocb->ki_pos = ret;
2213631ab46bSEric W. Biederman 	tun_put(tun);
22141da177e4SLinus Torvalds 	return ret;
22151da177e4SLinus Torvalds }
22161da177e4SLinus Torvalds 
2217cd5681d7SJason Wang static void tun_prog_free(struct rcu_head *rcu)
221896f84061SJason Wang {
2219cd5681d7SJason Wang 	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
222096f84061SJason Wang 
222196f84061SJason Wang 	bpf_prog_destroy(prog->prog);
222296f84061SJason Wang 	kfree(prog);
222396f84061SJason Wang }
222496f84061SJason Wang 
22259d6474e4SJason Wang static int __tun_set_ebpf(struct tun_struct *tun,
22269d6474e4SJason Wang 			  struct tun_prog __rcu **prog_p,
222796f84061SJason Wang 			  struct bpf_prog *prog)
222896f84061SJason Wang {
2229cd5681d7SJason Wang 	struct tun_prog *old, *new = NULL;
223096f84061SJason Wang 
223196f84061SJason Wang 	if (prog) {
223296f84061SJason Wang 		new = kmalloc(sizeof(*new), GFP_KERNEL);
223396f84061SJason Wang 		if (!new)
223496f84061SJason Wang 			return -ENOMEM;
223596f84061SJason Wang 		new->prog = prog;
223696f84061SJason Wang 	}
223796f84061SJason Wang 
2238124da8f6SJason Wang 	spin_lock_bh(&tun->lock);
2239cd5681d7SJason Wang 	old = rcu_dereference_protected(*prog_p,
2240124da8f6SJason Wang 					lockdep_is_held(&tun->lock));
2241cd5681d7SJason Wang 	rcu_assign_pointer(*prog_p, new);
2242124da8f6SJason Wang 	spin_unlock_bh(&tun->lock);
224396f84061SJason Wang 
224496f84061SJason Wang 	if (old)
2245cd5681d7SJason Wang 		call_rcu(&old->rcu, tun_prog_free);
224696f84061SJason Wang 
224796f84061SJason Wang 	return 0;
224896f84061SJason Wang }
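
/*
 * Editorial sketch (not part of the driver): installing a queue
 * steering program from userspace; the ioctl path ends in
 * __tun_set_ebpf() above.  prog_fd is assumed to be a loaded
 * BPF_PROG_TYPE_SOCKET_FILTER program fd; passing -1 detaches.
 */
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int tun_set_steering_sketch(int fd, int prog_fd)
{
	return ioctl(fd, TUNSETSTEERINGEBPF, &prog_fd);
}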
224996f84061SJason Wang 
225096442e42SJason Wang static void tun_free_netdev(struct net_device *dev)
225196442e42SJason Wang {
225296442e42SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
225396442e42SJason Wang 
22544008e97fSJason Wang 	BUG_ON(!(list_empty(&tun->disabled)));
225511fc7d5aSEric Dumazet 
2256608b9977SPaolo Abeni 	free_percpu(tun->pcpu_stats);
225711fc7d5aSEric Dumazet 	/* We clear pcpu_stats so that tun_set_iff() can tell if
225811fc7d5aSEric Dumazet 	 * tun_free_netdev() has been called from register_netdevice().
225911fc7d5aSEric Dumazet 	 */
226011fc7d5aSEric Dumazet 	tun->pcpu_stats = NULL;
226111fc7d5aSEric Dumazet 
226296442e42SJason Wang 	tun_flow_uninit(tun);
22635dbbaf2dSPaul Moore 	security_tun_dev_free_security(tun->security);
2264cd5681d7SJason Wang 	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
2265aff3d70aSJason Wang 	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
226696442e42SJason Wang }
226796442e42SJason Wang 
22681da177e4SLinus Torvalds static void tun_setup(struct net_device *dev)
22691da177e4SLinus Torvalds {
22701da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
22711da177e4SLinus Torvalds 
22720625c883SEric W. Biederman 	tun->owner = INVALID_UID;
22730625c883SEric W. Biederman 	tun->group = INVALID_GID;
22744e24f2ddSChas Williams 	tun_default_link_ksettings(dev, &tun->link_ksettings);
22751da177e4SLinus Torvalds 
22761da177e4SLinus Torvalds 	dev->ethtool_ops = &tun_ethtool_ops;
2277cf124db5SDavid S. Miller 	dev->needs_free_netdev = true;
2278cf124db5SDavid S. Miller 	dev->priv_destructor = tun_free_netdev;
2279016adb72SJason Wang 	/* We prefer our own queue length */
2280016adb72SJason Wang 	dev->tx_queue_len = TUN_READQ_SIZE;
22811da177e4SLinus Torvalds }
22821da177e4SLinus Torvalds 
2283f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting tun or tap
2284f019a7a5SEric W. Biederman  * device with netlink.
2285f019a7a5SEric W. Biederman  */
2286a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2287a8b8a889SMatthias Schiffer 			struct netlink_ext_ack *extack)
2288f019a7a5SEric W. Biederman {
228935b827b6SNicolas Dichtel 	NL_SET_ERR_MSG(extack,
229035b827b6SNicolas Dichtel 		       "tun/tap creation via rtnetlink is not supported.");
229135b827b6SNicolas Dichtel 	return -EOPNOTSUPP;
2292f019a7a5SEric W. Biederman }
2293f019a7a5SEric W. Biederman 
22941ec010e7SSabrina Dubroca static size_t tun_get_size(const struct net_device *dev)
22951ec010e7SSabrina Dubroca {
22961ec010e7SSabrina Dubroca 	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
22971ec010e7SSabrina Dubroca 	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
22981ec010e7SSabrina Dubroca 
22991ec010e7SSabrina Dubroca 	return nla_total_size(sizeof(uid_t)) + /* OWNER */
23001ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(gid_t)) + /* GROUP */
23011ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* TYPE */
23021ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* PI */
23031ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* VNET_HDR */
23041ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* PERSIST */
23051ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
23061ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
23071ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
23081ec010e7SSabrina Dubroca 	       0;
23091ec010e7SSabrina Dubroca }
23101ec010e7SSabrina Dubroca 
23111ec010e7SSabrina Dubroca static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
23121ec010e7SSabrina Dubroca {
23131ec010e7SSabrina Dubroca 	struct tun_struct *tun = netdev_priv(dev);
23141ec010e7SSabrina Dubroca 
23151ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
23161ec010e7SSabrina Dubroca 		goto nla_put_failure;
23171ec010e7SSabrina Dubroca 	if (uid_valid(tun->owner) &&
23181ec010e7SSabrina Dubroca 	    nla_put_u32(skb, IFLA_TUN_OWNER,
23191ec010e7SSabrina Dubroca 			from_kuid_munged(current_user_ns(), tun->owner)))
23201ec010e7SSabrina Dubroca 		goto nla_put_failure;
23211ec010e7SSabrina Dubroca 	if (gid_valid(tun->group) &&
23221ec010e7SSabrina Dubroca 	    nla_put_u32(skb, IFLA_TUN_GROUP,
23231ec010e7SSabrina Dubroca 			from_kgid_munged(current_user_ns(), tun->group)))
23241ec010e7SSabrina Dubroca 		goto nla_put_failure;
23251ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
23261ec010e7SSabrina Dubroca 		goto nla_put_failure;
23271ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
23281ec010e7SSabrina Dubroca 		goto nla_put_failure;
23291ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
23301ec010e7SSabrina Dubroca 		goto nla_put_failure;
23311ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
23321ec010e7SSabrina Dubroca 		       !!(tun->flags & IFF_MULTI_QUEUE)))
23331ec010e7SSabrina Dubroca 		goto nla_put_failure;
23341ec010e7SSabrina Dubroca 	if (tun->flags & IFF_MULTI_QUEUE) {
23351ec010e7SSabrina Dubroca 		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
23361ec010e7SSabrina Dubroca 			goto nla_put_failure;
23371ec010e7SSabrina Dubroca 		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
23381ec010e7SSabrina Dubroca 				tun->numdisabled))
23391ec010e7SSabrina Dubroca 			goto nla_put_failure;
23401ec010e7SSabrina Dubroca 	}
23411ec010e7SSabrina Dubroca 
23421ec010e7SSabrina Dubroca 	return 0;
23431ec010e7SSabrina Dubroca 
23441ec010e7SSabrina Dubroca nla_put_failure:
23451ec010e7SSabrina Dubroca 	return -EMSGSIZE;
23461ec010e7SSabrina Dubroca }
23471ec010e7SSabrina Dubroca 
2348f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = {
2349f019a7a5SEric W. Biederman 	.kind		= DRV_NAME,
2350f019a7a5SEric W. Biederman 	.priv_size	= sizeof(struct tun_struct),
2351f019a7a5SEric W. Biederman 	.setup		= tun_setup,
2352f019a7a5SEric W. Biederman 	.validate	= tun_validate,
23531ec010e7SSabrina Dubroca 	.get_size       = tun_get_size,
23541ec010e7SSabrina Dubroca 	.fill_info      = tun_fill_info,
2355f019a7a5SEric W. Biederman };
2356f019a7a5SEric W. Biederman 
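/*
 * Example (not part of the driver): because tun_validate() rejects
 * RTM_NEWLINK, "ip link add" cannot create tun/tap devices, but
 * registering tun_link_ops still makes RTM_DELLINK work -- this is
 * what "ip link delete tun0" uses to tear down a persistent device,
 * and tun_fill_info() is what makes the IFLA_TUN_* attributes show up
 * in "ip -d link show".  A minimal userspace sketch (requires
 * CAP_NET_ADMIN; the device name is the caller's choice):
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <net/if.h>

static int delete_tun_device(const char *name)
{
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifi;
	} req;
	int fd, ret;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifi));
	req.nh.nlmsg_type = RTM_DELLINK;
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.ifi.ifi_family = AF_UNSPEC;
	req.ifi.ifi_index = if_nametoindex(name);
	if (!req.ifi.ifi_index)
		return -1;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return -1;
	/* A full implementation would also read back the NLMSG_ERROR ack. */
	ret = sendto(fd, &req, req.nh.nlmsg_len, 0,
		     (struct sockaddr *)&kernel, sizeof(kernel)) < 0 ? -1 : 0;
	close(fd);
	return ret;
}
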
235733dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk)
235833dccbb0SHerbert Xu {
235954f968d6SJason Wang 	struct tun_file *tfile;
236043815482SEric Dumazet 	wait_queue_head_t *wqueue;
236133dccbb0SHerbert Xu 
236233dccbb0SHerbert Xu 	if (!sock_writeable(sk))
236333dccbb0SHerbert Xu 		return;
236433dccbb0SHerbert Xu 
23659cd3e072SEric Dumazet 	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
236633dccbb0SHerbert Xu 		return;
236733dccbb0SHerbert Xu 
236843815482SEric Dumazet 	wqueue = sk_sleep(sk);
236943815482SEric Dumazet 	if (wqueue && waitqueue_active(wqueue))
2370a9a08845SLinus Torvalds 		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2371a9a08845SLinus Torvalds 						EPOLLWRNORM | EPOLLWRBAND);
2372c722c625SHerbert Xu 
237354f968d6SJason Wang 	tfile = container_of(sk, struct tun_file, sk);
237454f968d6SJason Wang 	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
237533dccbb0SHerbert Xu }
237633dccbb0SHerbert Xu 
2377f9e06c45SJason Wang static void tun_put_page(struct tun_page *tpage)
2378f9e06c45SJason Wang {
2379f9e06c45SJason Wang 	if (tpage->page)
2380f9e06c45SJason Wang 		__page_frag_cache_drain(tpage->page, tpage->count);
2381f9e06c45SJason Wang }
2382f9e06c45SJason Wang 
2383043d222fSJason Wang static int tun_xdp_one(struct tun_struct *tun,
2384043d222fSJason Wang 		       struct tun_file *tfile,
2385f9e06c45SJason Wang 		       struct xdp_buff *xdp, int *flush,
2386f9e06c45SJason Wang 		       struct tun_page *tpage)
2387043d222fSJason Wang {
23884e4b08e5SPrashant Bhole 	unsigned int datasize = xdp->data_end - xdp->data;
2389043d222fSJason Wang 	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
2390043d222fSJason Wang 	struct virtio_net_hdr *gso = &hdr->gso;
2391043d222fSJason Wang 	struct tun_pcpu_stats *stats;
2392043d222fSJason Wang 	struct bpf_prog *xdp_prog;
2393043d222fSJason Wang 	struct sk_buff *skb = NULL;
2394043d222fSJason Wang 	u32 rxhash = 0, act;
2395043d222fSJason Wang 	int buflen = hdr->buflen;
2396043d222fSJason Wang 	int err = 0;
2397043d222fSJason Wang 	bool skb_xdp = false;
2398f9e06c45SJason Wang 	struct page *page;
2399043d222fSJason Wang 
2400043d222fSJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
2401043d222fSJason Wang 	if (xdp_prog) {
2402043d222fSJason Wang 		if (gso->gso_type) {
2403043d222fSJason Wang 			skb_xdp = true;
2404043d222fSJason Wang 			goto build;
2405043d222fSJason Wang 		}
2406043d222fSJason Wang 		xdp_set_data_meta_invalid(xdp);
2407043d222fSJason Wang 		xdp->rxq = &tfile->xdp_rxq;
2408fb3e6e93SJesper Dangaard Brouer 		xdp->frame_sz = buflen;
2409043d222fSJason Wang 
2410043d222fSJason Wang 		act = bpf_prog_run_xdp(xdp_prog, xdp);
2411043d222fSJason Wang 		err = tun_xdp_act(tun, xdp_prog, xdp, act);
2412043d222fSJason Wang 		if (err < 0) {
2413043d222fSJason Wang 			put_page(virt_to_head_page(xdp->data));
2414043d222fSJason Wang 			return err;
2415043d222fSJason Wang 		}
2416043d222fSJason Wang 
2417043d222fSJason Wang 		switch (err) {
2418043d222fSJason Wang 		case XDP_REDIRECT:
2419043d222fSJason Wang 			*flush = true;
2420*df561f66SGustavo A. R. Silva 			fallthrough;
2421043d222fSJason Wang 		case XDP_TX:
2422043d222fSJason Wang 			return 0;
2423043d222fSJason Wang 		case XDP_PASS:
2424043d222fSJason Wang 			break;
2425043d222fSJason Wang 		default:
2426f9e06c45SJason Wang 			page = virt_to_head_page(xdp->data);
2427f9e06c45SJason Wang 			if (tpage->page == page) {
2428f9e06c45SJason Wang 				++tpage->count;
2429f9e06c45SJason Wang 			} else {
2430f9e06c45SJason Wang 				tun_put_page(tpage);
2431f9e06c45SJason Wang 				tpage->page = page;
2432f9e06c45SJason Wang 				tpage->count = 1;
2433f9e06c45SJason Wang 			}
2434043d222fSJason Wang 			return 0;
2435043d222fSJason Wang 		}
2436043d222fSJason Wang 	}
2437043d222fSJason Wang 
2438043d222fSJason Wang build:
2439043d222fSJason Wang 	skb = build_skb(xdp->data_hard_start, buflen);
2440043d222fSJason Wang 	if (!skb) {
2441043d222fSJason Wang 		err = -ENOMEM;
2442043d222fSJason Wang 		goto out;
2443043d222fSJason Wang 	}
2444043d222fSJason Wang 
2445043d222fSJason Wang 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
2446043d222fSJason Wang 	skb_put(skb, xdp->data_end - xdp->data);
2447043d222fSJason Wang 
2448043d222fSJason Wang 	if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2449043d222fSJason Wang 		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
2450043d222fSJason Wang 		kfree_skb(skb);
2451043d222fSJason Wang 		err = -EINVAL;
2452043d222fSJason Wang 		goto out;
2453043d222fSJason Wang 	}
2454043d222fSJason Wang 
2455043d222fSJason Wang 	skb->protocol = eth_type_trans(skb, tun->dev);
2456043d222fSJason Wang 	skb_reset_network_header(skb);
2457d2aa125dSMaxim Mikityanskiy 	skb_probe_transport_header(skb);
24583fe260e0SGilberto Bertin 	skb_record_rx_queue(skb, tfile->queue_index);
2459043d222fSJason Wang 
2460043d222fSJason Wang 	if (skb_xdp) {
2461043d222fSJason Wang 		err = do_xdp_generic(xdp_prog, skb);
2462043d222fSJason Wang 		if (err != XDP_PASS)
2463043d222fSJason Wang 			goto out;
2464043d222fSJason Wang 	}
2465043d222fSJason Wang 
2466f29eb2a9SPaolo Abeni 	if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2467f29eb2a9SPaolo Abeni 	    !tfile->detached)
2468043d222fSJason Wang 		rxhash = __skb_get_hash_symmetric(skb);
2469043d222fSJason Wang 
2470043d222fSJason Wang 	netif_receive_skb(skb);
2471043d222fSJason Wang 
24726342ca64SPrashant Bhole 	/* No need for get_cpu_ptr() here since this function is
24736342ca64SPrashant Bhole 	 * always called with bh disabled
24746342ca64SPrashant Bhole 	 */
24756342ca64SPrashant Bhole 	stats = this_cpu_ptr(tun->pcpu_stats);
2476043d222fSJason Wang 	u64_stats_update_begin(&stats->syncp);
24775260dd3eSEric Dumazet 	u64_stats_inc(&stats->rx_packets);
24785260dd3eSEric Dumazet 	u64_stats_add(&stats->rx_bytes, datasize);
2479043d222fSJason Wang 	u64_stats_update_end(&stats->syncp);
2480043d222fSJason Wang 
2481043d222fSJason Wang 	if (rxhash)
2482043d222fSJason Wang 		tun_flow_update(tun, rxhash, tfile);
2483043d222fSJason Wang 
2484043d222fSJason Wang out:
2485043d222fSJason Wang 	return err;
2486043d222fSJason Wang }
2487043d222fSJason Wang 
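/*
 * Example (not part of the driver): tun_xdp_one() above runs whatever
 * XDP program is attached to the device against each batched buffer
 * and honours the usual verdicts -- XDP_REDIRECT (flushed at the end
 * of the batch), XDP_TX, XDP_PASS, and anything else is dropped with
 * the page recycled through tun_page.  A minimal XDP program such a
 * path would execute, built with clang -target bpf (the section name
 * follows the libbpf convention):
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_drop_runts(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* Drop frames shorter than an Ethernet header, pass the rest. */
	if (data + 14 > data_end)
		return XDP_DROP;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
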
24881b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
248905c2828cSMichael S. Tsirkin {
2490043d222fSJason Wang 	int ret, i;
249154f968d6SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
24929484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
2493fe8dd45bSJason Wang 	struct tun_msg_ctl *ctl = m->msg_control;
2494043d222fSJason Wang 	struct xdp_buff *xdp;
249554f968d6SJason Wang 
249654f968d6SJason Wang 	if (!tun)
249754f968d6SJason Wang 		return -EBADFD;
2498f5ff53b4SAl Viro 
2499043d222fSJason Wang 	if (ctl && (ctl->type == TUN_MSG_PTR)) {
25006f0271d9SDavid S. Miller 		struct tun_page tpage;
2501043d222fSJason Wang 		int n = ctl->num;
2502043d222fSJason Wang 		int flush = 0;
2503043d222fSJason Wang 
25046f0271d9SDavid S. Miller 		memset(&tpage, 0, sizeof(tpage));
25056f0271d9SDavid S. Miller 
2506043d222fSJason Wang 		local_bh_disable();
2507043d222fSJason Wang 		rcu_read_lock();
2508043d222fSJason Wang 
2509043d222fSJason Wang 		for (i = 0; i < n; i++) {
2510043d222fSJason Wang 			xdp = &((struct xdp_buff *)ctl->ptr)[i];
2511f9e06c45SJason Wang 			tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
2512043d222fSJason Wang 		}
2513043d222fSJason Wang 
2514043d222fSJason Wang 		if (flush)
25151d233886SToke Høiland-Jørgensen 			xdp_do_flush();
2516043d222fSJason Wang 
2517043d222fSJason Wang 		rcu_read_unlock();
2518043d222fSJason Wang 		local_bh_enable();
2519043d222fSJason Wang 
2520f9e06c45SJason Wang 		tun_put_page(&tpage);
2521f9e06c45SJason Wang 
2522043d222fSJason Wang 		ret = total_len;
2523043d222fSJason Wang 		goto out;
2524043d222fSJason Wang 	}
2525fe8dd45bSJason Wang 
2526fe8dd45bSJason Wang 	ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
25275503fcecSJason Wang 			   m->msg_flags & MSG_DONTWAIT,
25285503fcecSJason Wang 			   m->msg_flags & MSG_MORE);
2529043d222fSJason Wang out:
253054f968d6SJason Wang 	tun_put(tun);
253154f968d6SJason Wang 	return ret;
253205c2828cSMichael S. Tsirkin }
253305c2828cSMichael S. Tsirkin 
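/*
 * Example (not part of the driver): the TUN_MSG_PTR branch above is
 * not reachable from userspace.  It is used by in-kernel callers --
 * vhost-net obtains this socket via tun_get_socket() -- that batch
 * descriptors through msg_control.  A hedged sketch of such a caller;
 * the function name and batch handling are illustrative only:
 */
static int example_submit_batch(struct socket *sock, struct xdp_buff *batch,
				unsigned short n, size_t total_len)
{
	struct tun_msg_ctl ctl = {
		.type = TUN_MSG_PTR,
		.num  = n,
		.ptr  = batch,
	};
	struct msghdr msg = {
		.msg_control = &ctl,
	};

	/* tun_sendmsg() consumes the whole batch and returns total_len. */
	return sock->ops->sendmsg(sock, &msg, total_len);
}
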
25341b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
253505c2828cSMichael S. Tsirkin 		       int flags)
253605c2828cSMichael S. Tsirkin {
253754f968d6SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
25389484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
2539fc72d1d5SJason Wang 	void *ptr = m->msg_control;
254005c2828cSMichael S. Tsirkin 	int ret;
254154f968d6SJason Wang 
2542c33ee15bSWei Xu 	if (!tun) {
2543c33ee15bSWei Xu 		ret = -EBADFD;
2544fc72d1d5SJason Wang 		goto out_free;
2545c33ee15bSWei Xu 	}
254654f968d6SJason Wang 
2547eda29772SRichard Cochran 	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
25483811ae76SGao feng 		ret = -EINVAL;
2549c33ee15bSWei Xu 		goto out_put_tun;
25503811ae76SGao feng 	}
2551eda29772SRichard Cochran 	if (flags & MSG_ERRQUEUE) {
2552eda29772SRichard Cochran 		ret = sock_recv_errqueue(sock->sk, m, total_len,
2553eda29772SRichard Cochran 					 SOL_PACKET, TUN_TX_TIMESTAMP);
2554eda29772SRichard Cochran 		goto out;
2555eda29772SRichard Cochran 	}
2556fc72d1d5SJason Wang 	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
255787897931SAlex Gartrell 	if (ret > (ssize_t)total_len) {
255842404c09SDavid S. Miller 		m->msg_flags |= MSG_TRUNC;
255942404c09SDavid S. Miller 		ret = flags & MSG_TRUNC ? ret : total_len;
256042404c09SDavid S. Miller 	}
25613811ae76SGao feng out:
256254f968d6SJason Wang 	tun_put(tun);
256305c2828cSMichael S. Tsirkin 	return ret;
2564c33ee15bSWei Xu 
2565c33ee15bSWei Xu out_put_tun:
2566c33ee15bSWei Xu 	tun_put(tun);
2567fc72d1d5SJason Wang out_free:
2568fc72d1d5SJason Wang 	tun_ptr_free(ptr);
2569c33ee15bSWei Xu 	return ret;
257005c2828cSMichael S. Tsirkin }
257105c2828cSMichael S. Tsirkin 
2572fc72d1d5SJason Wang static int tun_ptr_peek_len(void *ptr)
2573fc72d1d5SJason Wang {
2574fc72d1d5SJason Wang 	if (likely(ptr)) {
25751ffcbc85SJesper Dangaard Brouer 		if (tun_is_xdp_frame(ptr)) {
25761ffcbc85SJesper Dangaard Brouer 			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2577fc72d1d5SJason Wang 
25781ffcbc85SJesper Dangaard Brouer 			return xdpf->len;
2579fc72d1d5SJason Wang 		}
2580fc72d1d5SJason Wang 		return __skb_array_len_with_tag(ptr);
2581fc72d1d5SJason Wang 	} else {
2582fc72d1d5SJason Wang 		return 0;
2583fc72d1d5SJason Wang 	}
2584fc72d1d5SJason Wang }
2585fc72d1d5SJason Wang 
25861576d986SJason Wang static int tun_peek_len(struct socket *sock)
25871576d986SJason Wang {
25881576d986SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
25891576d986SJason Wang 	struct tun_struct *tun;
25901576d986SJason Wang 	int ret = 0;
25911576d986SJason Wang 
25929484dc74Syuan linyu 	tun = tun_get(tfile);
25931576d986SJason Wang 	if (!tun)
25941576d986SJason Wang 		return 0;
25951576d986SJason Wang 
2596fc72d1d5SJason Wang 	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
25971576d986SJason Wang 	tun_put(tun);
25981576d986SJason Wang 
25991576d986SJason Wang 	return ret;
26001576d986SJason Wang }
26011576d986SJason Wang 
260205c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */
260305c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = {
26041576d986SJason Wang 	.peek_len = tun_peek_len,
260505c2828cSMichael S. Tsirkin 	.sendmsg = tun_sendmsg,
260605c2828cSMichael S. Tsirkin 	.recvmsg = tun_recvmsg,
260705c2828cSMichael S. Tsirkin };
260805c2828cSMichael S. Tsirkin 
260933dccbb0SHerbert Xu static struct proto tun_proto = {
261033dccbb0SHerbert Xu 	.name		= "tun",
261133dccbb0SHerbert Xu 	.owner		= THIS_MODULE,
261254f968d6SJason Wang 	.obj_size	= sizeof(struct tun_file),
261333dccbb0SHerbert Xu };
2614f019a7a5SEric W. Biederman 
2615980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun)
2616980c9e8cSDavid Woodhouse {
2617031f5e03SMichael S. Tsirkin 	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2618980c9e8cSDavid Woodhouse }
2619980c9e8cSDavid Woodhouse 
2620980c9e8cSDavid Woodhouse static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
2621980c9e8cSDavid Woodhouse 			      char *buf)
2622980c9e8cSDavid Woodhouse {
2623980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2624980c9e8cSDavid Woodhouse 	return sprintf(buf, "0x%x\n", tun_flags(tun));
2625980c9e8cSDavid Woodhouse }
2626980c9e8cSDavid Woodhouse 
2627980c9e8cSDavid Woodhouse static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
2628980c9e8cSDavid Woodhouse 			      char *buf)
2629980c9e8cSDavid Woodhouse {
2630980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
26310625c883SEric W. Biederman 	return uid_valid(tun->owner) ?
26320625c883SEric W. Biederman 		sprintf(buf, "%u\n",
26330625c883SEric W. Biederman 			from_kuid_munged(current_user_ns(), tun->owner)) :
26340625c883SEric W. Biederman 		sprintf(buf, "-1\n");
2635980c9e8cSDavid Woodhouse }
2636980c9e8cSDavid Woodhouse 
2637980c9e8cSDavid Woodhouse static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
2638980c9e8cSDavid Woodhouse 			      char *buf)
2639980c9e8cSDavid Woodhouse {
2640980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
26410625c883SEric W. Biederman 	return gid_valid(tun->group) ?
26420625c883SEric W. Biederman 		sprintf(buf, "%u\n",
26430625c883SEric W. Biederman 			from_kgid_munged(current_user_ns(), tun->group)) :
26440625c883SEric W. Biederman 		sprintf(buf, "-1\n");
2645980c9e8cSDavid Woodhouse }
2646980c9e8cSDavid Woodhouse 
2647980c9e8cSDavid Woodhouse static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
2648980c9e8cSDavid Woodhouse static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
2649980c9e8cSDavid Woodhouse static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
2650980c9e8cSDavid Woodhouse 
2651c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = {
2652c4d33e24STakashi Iwai 	&dev_attr_tun_flags.attr,
2653c4d33e24STakashi Iwai 	&dev_attr_owner.attr,
2654c4d33e24STakashi Iwai 	&dev_attr_group.attr,
2655c4d33e24STakashi Iwai 	NULL
2656c4d33e24STakashi Iwai };
2657c4d33e24STakashi Iwai 
2658c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = {
2659c4d33e24STakashi Iwai 	.attrs = tun_dev_attrs
2660c4d33e24STakashi Iwai };
2661c4d33e24STakashi Iwai 
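/*
 * Example (not part of the driver): the attributes above appear as
 * read-only files under /sys/class/net/<dev>/ (tun_flags, owner,
 * group).  A small userspace reader:
 */
#include <stdio.h>

static int read_tun_flags(const char *dev, unsigned int *flags)
{
	char path[128];
	FILE *f;
	int ok;

	snprintf(path, sizeof(path), "/sys/class/net/%s/tun_flags", dev);
	f = fopen(path, "r");
	if (!f)
		return -1;
	/* Matches the "0x%x\n" format written by tun_show_flags(). */
	ok = (fscanf(f, "0x%x", flags) == 1) ? 0 : -1;
	fclose(f);
	return ok;
}
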
2662d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
26631da177e4SLinus Torvalds {
26641da177e4SLinus Torvalds 	struct tun_struct *tun;
266554f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
26661da177e4SLinus Torvalds 	struct net_device *dev;
26671da177e4SLinus Torvalds 	int err;
26681da177e4SLinus Torvalds 
26697c0c3b1aSJason Wang 	if (tfile->detached)
26707c0c3b1aSJason Wang 		return -EINVAL;
26717c0c3b1aSJason Wang 
267290e33d45SPetar Penkov 	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
267390e33d45SPetar Penkov 		if (!capable(CAP_NET_ADMIN))
267490e33d45SPetar Penkov 			return -EPERM;
267590e33d45SPetar Penkov 
267690e33d45SPetar Penkov 		if (!(ifr->ifr_flags & IFF_NAPI) ||
267790e33d45SPetar Penkov 		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
267890e33d45SPetar Penkov 			return -EINVAL;
267990e33d45SPetar Penkov 	}
268090e33d45SPetar Penkov 
268174a3e5a7SEric W. Biederman 	dev = __dev_get_by_name(net, ifr->ifr_name);
268274a3e5a7SEric W. Biederman 	if (dev) {
2683f85ba780SDavid Woodhouse 		if (ifr->ifr_flags & IFF_TUN_EXCL)
2684f85ba780SDavid Woodhouse 			return -EBUSY;
268574a3e5a7SEric W. Biederman 		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
268674a3e5a7SEric W. Biederman 			tun = netdev_priv(dev);
268774a3e5a7SEric W. Biederman 		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
268874a3e5a7SEric W. Biederman 			tun = netdev_priv(dev);
268974a3e5a7SEric W. Biederman 		else
269074a3e5a7SEric W. Biederman 			return -EINVAL;
269174a3e5a7SEric W. Biederman 
26928e6d91aeSJason Wang 		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
269340630b82SMichael S. Tsirkin 		    !!(tun->flags & IFF_MULTI_QUEUE))
26948e6d91aeSJason Wang 			return -EINVAL;
26958e6d91aeSJason Wang 
2696cde8b15fSJason Wang 		if (tun_not_capable(tun))
26972b980dbdSPaul Moore 			return -EPERM;
26985dbbaf2dSPaul Moore 		err = security_tun_dev_open(tun->security);
26992b980dbdSPaul Moore 		if (err < 0)
27002b980dbdSPaul Moore 			return err;
27012b980dbdSPaul Moore 
270294317099SPetar Penkov 		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2703af3fb24eSEric Dumazet 				 ifr->ifr_flags & IFF_NAPI,
270477f22f92SYang Yingliang 				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
2705a7385ba2SEric W. Biederman 		if (err < 0)
2706a7385ba2SEric W. Biederman 			return err;
27074008e97fSJason Wang 
270840630b82SMichael S. Tsirkin 		if (tun->flags & IFF_MULTI_QUEUE &&
2709e8dbad66SJason Wang 		    (tun->numqueues + tun->numdisabled > 1)) {
2710e8dbad66SJason Wang 			/* One or more queues have already been attached, no need
2711e8dbad66SJason Wang 			 * to initialize the device again.
2712e8dbad66SJason Wang 			 */
271383c1f36fSSabrina Dubroca 			netdev_state_change(dev);
2714e8dbad66SJason Wang 			return 0;
2715e8dbad66SJason Wang 		}
27169fffc5c6SSabrina Dubroca 
27179fffc5c6SSabrina Dubroca 		tun->flags = (tun->flags & ~TUN_FEATURES) |
27189fffc5c6SSabrina Dubroca 			      (ifr->ifr_flags & TUN_FEATURES);
271983c1f36fSSabrina Dubroca 
272083c1f36fSSabrina Dubroca 		netdev_state_change(dev);
272183c1f36fSSabrina Dubroca 	} else {
27221da177e4SLinus Torvalds 		char *name;
27231da177e4SLinus Torvalds 		unsigned long flags = 0;
2724edfb6a14SJason Wang 		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2725edfb6a14SJason Wang 			     MAX_TAP_QUEUES : 1;
27261da177e4SLinus Torvalds 
2727c260b772SEric W. Biederman 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2728ca6bb5d7SDavid Woodhouse 			return -EPERM;
27292b980dbdSPaul Moore 		err = security_tun_dev_create();
27302b980dbdSPaul Moore 		if (err < 0)
27312b980dbdSPaul Moore 			return err;
2732ca6bb5d7SDavid Woodhouse 
27331da177e4SLinus Torvalds 		/* Set dev type */
27341da177e4SLinus Torvalds 		if (ifr->ifr_flags & IFF_TUN) {
27351da177e4SLinus Torvalds 			/* TUN device */
273640630b82SMichael S. Tsirkin 			flags |= IFF_TUN;
27371da177e4SLinus Torvalds 			name = "tun%d";
27381da177e4SLinus Torvalds 		} else if (ifr->ifr_flags & IFF_TAP) {
27391da177e4SLinus Torvalds 			/* TAP device */
274040630b82SMichael S. Tsirkin 			flags |= IFF_TAP;
27411da177e4SLinus Torvalds 			name = "tap%d";
27421da177e4SLinus Torvalds 		} else
274336989b90SKusanagi Kouichi 			return -EINVAL;
27441da177e4SLinus Torvalds 
27451da177e4SLinus Torvalds 		if (*ifr->ifr_name)
27461da177e4SLinus Torvalds 			name = ifr->ifr_name;
27471da177e4SLinus Torvalds 
2748c8d68e6bSJason Wang 		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2749c835a677STom Gundersen 				       NET_NAME_UNKNOWN, tun_setup, queues,
2750c835a677STom Gundersen 				       queues);
2751edfb6a14SJason Wang 
27521da177e4SLinus Torvalds 		if (!dev)
27531da177e4SLinus Torvalds 			return -ENOMEM;
27541da177e4SLinus Torvalds 
2755fc54c658SPavel Emelyanov 		dev_net_set(dev, net);
2756f019a7a5SEric W. Biederman 		dev->rtnl_link_ops = &tun_link_ops;
2757fb7589a1SPavel Emelyanov 		dev->ifindex = tfile->ifindex;
2758c4d33e24STakashi Iwai 		dev->sysfs_groups[0] = &tun_attr_group;
2759758e43b7SStephen Hemminger 
27601da177e4SLinus Torvalds 		tun = netdev_priv(dev);
27611da177e4SLinus Torvalds 		tun->dev = dev;
27621da177e4SLinus Torvalds 		tun->flags = flags;
2763f271b2ccSMax Krasnyansky 		tun->txflt.count = 0;
2764d9d52b51SMichael S. Tsirkin 		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
27651da177e4SLinus Torvalds 
2766eaea34b2SPaolo Abeni 		tun->align = NET_SKB_PAD;
276754f968d6SJason Wang 		tun->filter_attached = false;
276854f968d6SJason Wang 		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
27695503fcecSJason Wang 		tun->rx_batched = 0;
277096f84061SJason Wang 		RCU_INIT_POINTER(tun->steering_prog, NULL);
277133dccbb0SHerbert Xu 
2772608b9977SPaolo Abeni 		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
2773608b9977SPaolo Abeni 		if (!tun->pcpu_stats) {
2774608b9977SPaolo Abeni 			err = -ENOMEM;
2775608b9977SPaolo Abeni 			goto err_free_dev;
2776608b9977SPaolo Abeni 		}
2777608b9977SPaolo Abeni 
277896442e42SJason Wang 		spin_lock_init(&tun->lock);
277996442e42SJason Wang 
27805dbbaf2dSPaul Moore 		err = security_tun_dev_alloc_security(&tun->security);
27815dbbaf2dSPaul Moore 		if (err < 0)
2782608b9977SPaolo Abeni 			goto err_free_stat;
27832b980dbdSPaul Moore 
27841da177e4SLinus Torvalds 		tun_net_init(dev);
2785944a1376SPavel Emelyanov 		tun_flow_init(tun);
278696442e42SJason Wang 
278788255375SMichał Mirosław 		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
27886680ec68SJason Wang 				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
27896680ec68SJason Wang 				   NETIF_F_HW_VLAN_STAG_TX;
27902a2bbf17SPaolo Abeni 		dev->features = dev->hw_features | NETIF_F_LLTX;
27916671b224SFernando Luis Vazquez Cao 		dev->vlan_features = dev->features &
27926671b224SFernando Luis Vazquez Cao 				     ~(NETIF_F_HW_VLAN_CTAG_TX |
27936671b224SFernando Luis Vazquez Cao 				       NETIF_F_HW_VLAN_STAG_TX);
279488255375SMichał Mirosław 
27959fffc5c6SSabrina Dubroca 		tun->flags = (tun->flags & ~TUN_FEATURES) |
27969fffc5c6SSabrina Dubroca 			      (ifr->ifr_flags & TUN_FEATURES);
27979fffc5c6SSabrina Dubroca 
27984008e97fSJason Wang 		INIT_LIST_HEAD(&tun->disabled);
2799af3fb24eSEric Dumazet 		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
280077f22f92SYang Yingliang 				 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
2801eb0fb363SJason Wang 		if (err < 0)
2802662ca437SJason Wang 			goto err_free_flow;
2803eb0fb363SJason Wang 
28041da177e4SLinus Torvalds 		err = register_netdevice(tun->dev);
28051da177e4SLinus Torvalds 		if (err < 0)
2806662ca437SJason Wang 			goto err_detach;
280777f22f92SYang Yingliang 		/* free_netdev() won't check refcnt, so to avoid a race
280877f22f92SYang Yingliang 		 * with dev_put() we need to publish tun after registration.
280977f22f92SYang Yingliang 		 */
281077f22f92SYang Yingliang 		rcu_assign_pointer(tfile->tun, tun);
2811af668b3cSMichael S. Tsirkin 	}
2812980c9e8cSDavid Woodhouse 
2813eb0fb363SJason Wang 	netif_carrier_on(tun->dev);
28141da177e4SLinus Torvalds 
2815e35259a9SMax Krasnyansky 	/* Make sure persistent devices do not get stuck in
2816e35259a9SMax Krasnyansky 	 * xoff state.
2817e35259a9SMax Krasnyansky 	 */
2818e35259a9SMax Krasnyansky 	if (netif_running(tun->dev))
2819c8d68e6bSJason Wang 		netif_tx_wake_all_queues(tun->dev);
2820e35259a9SMax Krasnyansky 
28211da177e4SLinus Torvalds 	strcpy(ifr->ifr_name, tun->dev->name);
28221da177e4SLinus Torvalds 	return 0;
28231da177e4SLinus Torvalds 
2824662ca437SJason Wang err_detach:
2825662ca437SJason Wang 	tun_detach_all(dev);
282611fc7d5aSEric Dumazet 	/* We are here because register_netdevice() has failed.
282711fc7d5aSEric Dumazet 	 * If register_netdevice() already called tun_free_netdev()
282811fc7d5aSEric Dumazet 	 * while dealing with the error, tun->pcpu_stats has been cleared.
282911fc7d5aSEric Dumazet 	 */
283011fc7d5aSEric Dumazet 	if (!tun->pcpu_stats)
2831ff244c6bSEric Dumazet 		goto err_free_dev;
2832ff244c6bSEric Dumazet 
2833662ca437SJason Wang err_free_flow:
2834662ca437SJason Wang 	tun_flow_uninit(tun);
2835662ca437SJason Wang 	security_tun_dev_free_security(tun->security);
2836608b9977SPaolo Abeni err_free_stat:
2837608b9977SPaolo Abeni 	free_percpu(tun->pcpu_stats);
28381da177e4SLinus Torvalds err_free_dev:
28391da177e4SLinus Torvalds 	free_netdev(dev);
28401da177e4SLinus Torvalds 	return err;
28411da177e4SLinus Torvalds }
28421da177e4SLinus Torvalds 
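/*
 * Example (not part of the driver): tun_set_iff() is what services the
 * TUNSETIFF ioctl.  The canonical userspace sequence -- open the
 * character device, then request a tun or tap interface by flags --
 * looks like this ("tun%d" lets the kernel pick a free name):
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int open_tun(char *dev_out)	/* dev_out: at least IFNAMSIZ bytes */
{
	struct ifreq ifr;
	int fd;

	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	/* L3 device, no proto header */
	strncpy(ifr.ifr_name, "tun%d", IFNAMSIZ);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	strcpy(dev_out, ifr.ifr_name);	/* kernel writes back the real name */
	return fd;		/* read()/write() now carry raw IP packets */
}
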
284312132768SKirill Tkhai static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
2844e3b99556SMark McLoughlin {
2845e3b99556SMark McLoughlin 	strcpy(ifr->ifr_name, tun->dev->name);
2846e3b99556SMark McLoughlin 
2847980c9e8cSDavid Woodhouse 	ifr->ifr_flags = tun_flags(tun);
2849e3b99556SMark McLoughlin }
2850e3b99556SMark McLoughlin 
28515228ddc9SRusty Russell /* This is like a cut-down ethtool ops, except done via the tun fd so
28525228ddc9SRusty Russell  * no privileges are required. */
285388255375SMichał Mirosław static int set_offload(struct tun_struct *tun, unsigned long arg)
28545228ddc9SRusty Russell {
2855c8f44affSMichał Mirosław 	netdev_features_t features = 0;
28565228ddc9SRusty Russell 
28575228ddc9SRusty Russell 	if (arg & TUN_F_CSUM) {
285888255375SMichał Mirosław 		features |= NETIF_F_HW_CSUM;
28595228ddc9SRusty Russell 		arg &= ~TUN_F_CSUM;
28605228ddc9SRusty Russell 
28615228ddc9SRusty Russell 		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
28625228ddc9SRusty Russell 			if (arg & TUN_F_TSO_ECN) {
28635228ddc9SRusty Russell 				features |= NETIF_F_TSO_ECN;
28645228ddc9SRusty Russell 				arg &= ~TUN_F_TSO_ECN;
28655228ddc9SRusty Russell 			}
28665228ddc9SRusty Russell 			if (arg & TUN_F_TSO4)
28675228ddc9SRusty Russell 				features |= NETIF_F_TSO;
28685228ddc9SRusty Russell 			if (arg & TUN_F_TSO6)
28695228ddc9SRusty Russell 				features |= NETIF_F_TSO6;
28705228ddc9SRusty Russell 			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
28715228ddc9SRusty Russell 		}
28720c19f846SWillem de Bruijn 
28730c19f846SWillem de Bruijn 		arg &= ~TUN_F_UFO;
28745228ddc9SRusty Russell 	}
28755228ddc9SRusty Russell 
28765228ddc9SRusty Russell 	/* This gives the user a way to test for new features in the future by
28775228ddc9SRusty Russell 	 * trying to set them. */
28785228ddc9SRusty Russell 	if (arg)
28795228ddc9SRusty Russell 		return -EINVAL;
28805228ddc9SRusty Russell 
288188255375SMichał Mirosław 	tun->set_features = features;
288209050957SYaroslav Isakov 	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
288309050957SYaroslav Isakov 	tun->dev->wanted_features |= features;
288488255375SMichał Mirosław 	netdev_update_features(tun->dev);
28855228ddc9SRusty Russell 
28865228ddc9SRusty Russell 	return 0;
28875228ddc9SRusty Russell }
28885228ddc9SRusty Russell 
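/*
 * Example (not part of the driver, same headers as the TUNSETIFF
 * sketch above): set_offload() is reached through the TUNSETOFFLOAD
 * ioctl, by which userspace (typically a hypervisor) declares which
 * offloads it can accept on packets it reads from the fd:
 */
static int enable_tap_offloads(int fd)
{
	unsigned long offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
				 TUN_F_TSO_ECN;

	/* Unknown bits make this fail with -EINVAL, which doubles as
	 * the feature probe described in the comment above.
	 */
	return ioctl(fd, TUNSETOFFLOAD, offloads);
}
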
2889c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n)
2890c8d68e6bSJason Wang {
2891c8d68e6bSJason Wang 	int i;
2892c8d68e6bSJason Wang 	struct tun_file *tfile;
2893c8d68e6bSJason Wang 
2894c8d68e6bSJason Wang 	for (i = 0; i < n; i++) {
2895b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
28968ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
28978ced425eSHannes Frederic Sowa 		sk_detach_filter(tfile->socket.sk);
28988ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
2899c8d68e6bSJason Wang 	}
2900c8d68e6bSJason Wang 
2901c8d68e6bSJason Wang 	tun->filter_attached = false;
2902c8d68e6bSJason Wang }
2903c8d68e6bSJason Wang 
2904c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun)
2905c8d68e6bSJason Wang {
2906c8d68e6bSJason Wang 	int i, ret = 0;
2907c8d68e6bSJason Wang 	struct tun_file *tfile;
2908c8d68e6bSJason Wang 
2909c8d68e6bSJason Wang 	for (i = 0; i < tun->numqueues; i++) {
2910b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
29118ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
29128ced425eSHannes Frederic Sowa 		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
29138ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
2914c8d68e6bSJason Wang 		if (ret) {
2915c8d68e6bSJason Wang 			tun_detach_filter(tun, i);
2916c8d68e6bSJason Wang 			return ret;
2917c8d68e6bSJason Wang 		}
2918c8d68e6bSJason Wang 	}
2919c8d68e6bSJason Wang 
2920c8d68e6bSJason Wang 	tun->filter_attached = true;
2921c8d68e6bSJason Wang 	return ret;
2922c8d68e6bSJason Wang }
2923c8d68e6bSJason Wang 
2924c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun)
2925c8d68e6bSJason Wang {
2926c8d68e6bSJason Wang 	struct tun_file *tfile;
2927c8d68e6bSJason Wang 	int i;
2928c8d68e6bSJason Wang 
2929c8d68e6bSJason Wang 	for (i = 0; i < tun->numqueues; i++) {
2930b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
2931c8d68e6bSJason Wang 		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2932c8d68e6bSJason Wang 	}
2933c8d68e6bSJason Wang }
2934c8d68e6bSJason Wang 
2935cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr)
2936cde8b15fSJason Wang {
2937cde8b15fSJason Wang 	struct tun_file *tfile = file->private_data;
2938cde8b15fSJason Wang 	struct tun_struct *tun;
2939cde8b15fSJason Wang 	int ret = 0;
2940cde8b15fSJason Wang 
2941cde8b15fSJason Wang 	rtnl_lock();
2942cde8b15fSJason Wang 
2943cde8b15fSJason Wang 	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
29444008e97fSJason Wang 		tun = tfile->detached;
29455dbbaf2dSPaul Moore 		if (!tun) {
2946cde8b15fSJason Wang 			ret = -EINVAL;
29475dbbaf2dSPaul Moore 			goto unlock;
29485dbbaf2dSPaul Moore 		}
29495dbbaf2dSPaul Moore 		ret = security_tun_dev_attach_queue(tun->security);
29505dbbaf2dSPaul Moore 		if (ret < 0)
29515dbbaf2dSPaul Moore 			goto unlock;
2952af3fb24eSEric Dumazet 		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
295377f22f92SYang Yingliang 				 tun->flags & IFF_NAPI_FRAGS, true);
29544008e97fSJason Wang 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2955b8deabd3SJason Wang 		tun = rtnl_dereference(tfile->tun);
295640630b82SMichael S. Tsirkin 		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
29574008e97fSJason Wang 			ret = -EINVAL;
2958cde8b15fSJason Wang 		else
29594008e97fSJason Wang 			__tun_detach(tfile, false);
29604008e97fSJason Wang 	} else
2961cde8b15fSJason Wang 		ret = -EINVAL;
2962cde8b15fSJason Wang 
296383c1f36fSSabrina Dubroca 	if (ret >= 0)
296483c1f36fSSabrina Dubroca 		netdev_state_change(tun->dev);
296583c1f36fSSabrina Dubroca 
29665dbbaf2dSPaul Moore unlock:
2967cde8b15fSJason Wang 	rtnl_unlock();
2968cde8b15fSJason Wang 	return ret;
2969cde8b15fSJason Wang }
2970cde8b15fSJason Wang 
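/*
 * Example (not part of the driver, same headers as the TUNSETIFF
 * sketch above): tun_set_queue() backs the TUNSETQUEUE ioctl, which
 * temporarily detaches one queue fd from a multiqueue device, or plugs
 * it back in, without closing the fd.  Assumes "fd" was attached with
 * IFF_MULTI_QUEUE:
 */
static int set_queue_enabled(int fd, int enable)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = enable ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE;
	return ioctl(fd, TUNSETQUEUE, &ifr);
}
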
29718f3f330dSJason Wang static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
2972cd5681d7SJason Wang 			void __user *data)
297396f84061SJason Wang {
297496f84061SJason Wang 	struct bpf_prog *prog;
297596f84061SJason Wang 	int fd;
297696f84061SJason Wang 
297796f84061SJason Wang 	if (copy_from_user(&fd, data, sizeof(fd)))
297896f84061SJason Wang 		return -EFAULT;
297996f84061SJason Wang 
298096f84061SJason Wang 	if (fd == -1) {
298196f84061SJason Wang 		prog = NULL;
298296f84061SJason Wang 	} else {
298396f84061SJason Wang 		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
298496f84061SJason Wang 		if (IS_ERR(prog))
298596f84061SJason Wang 			return PTR_ERR(prog);
298696f84061SJason Wang 	}
298796f84061SJason Wang 
2988cd5681d7SJason Wang 	return __tun_set_ebpf(tun, prog_p, prog);
298996f84061SJason Wang }
299096f84061SJason Wang 
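/*
 * Example (not part of the driver, same headers as the TUNSETIFF
 * sketch above): tun_set_ebpf() serves both TUNSETSTEERINGEBPF and
 * TUNSETFILTEREBPF.  Userspace passes the fd of a loaded
 * BPF_PROG_TYPE_SOCKET_FILTER program by address, or -1 to detach:
 */
static int set_steering_prog(int tun_fd, int bpf_prog_fd)
{
	/* bpf_prog_fd comes from bpf(BPF_PROG_LOAD, ...); passing -1
	 * removes a previously attached steering program.
	 */
	return ioctl(tun_fd, TUNSETSTEERINGEBPF, &bpf_prog_fd);
}
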
299150857e2aSArnd Bergmann static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
299250857e2aSArnd Bergmann 			    unsigned long arg, int ifreq_len)
29931da177e4SLinus Torvalds {
299436b50babSEric W. Biederman 	struct tun_file *tfile = file->private_data;
2995f663706aSKirill Tkhai 	struct net *net = sock_net(&tfile->sk);
2996631ab46bSEric W. Biederman 	struct tun_struct *tun;
29971da177e4SLinus Torvalds 	void __user* argp = (void __user*)arg;
299826d31925SNicolas Dichtel 	unsigned int ifindex, carrier;
29991da177e4SLinus Torvalds 	struct ifreq ifr;
30000625c883SEric W. Biederman 	kuid_t owner;
30010625c883SEric W. Biederman 	kgid_t group;
300233dccbb0SHerbert Xu 	int sndbuf;
3003d9d52b51SMichael S. Tsirkin 	int vnet_hdr_sz;
30041cf8e410SMichael S. Tsirkin 	int le;
3005f271b2ccSMax Krasnyansky 	int ret;
300683c1f36fSSabrina Dubroca 	bool do_notify = false;
30071da177e4SLinus Torvalds 
3008f2780d6dSKirill Tkhai 	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
3009f2780d6dSKirill Tkhai 	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
301050857e2aSArnd Bergmann 		if (copy_from_user(&ifr, argp, ifreq_len))
30111da177e4SLinus Torvalds 			return -EFAULT;
30128bbb1813SDavid S. Miller 	} else {
3013a117dacdSMathias Krause 		memset(&ifr, 0, sizeof(ifr));
30148bbb1813SDavid S. Miller 	}
3015631ab46bSEric W. Biederman 	if (cmd == TUNGETFEATURES) {
3016631ab46bSEric W. Biederman 		/* Currently this just means: "what IFF flags are valid?".
3017631ab46bSEric W. Biederman 		 * This is needed because we never checked for invalid flags on
3018031f5e03SMichael S. Tsirkin 		 * TUNSETIFF.
3019031f5e03SMichael S. Tsirkin 		 */
3020031f5e03SMichael S. Tsirkin 		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
3021631ab46bSEric W. Biederman 				(unsigned int __user*)argp);
3022f663706aSKirill Tkhai 	} else if (cmd == TUNSETQUEUE) {
3023cde8b15fSJason Wang 		return tun_set_queue(file, &ifr);
3024f663706aSKirill Tkhai 	} else if (cmd == SIOCGSKNS) {
3025f663706aSKirill Tkhai 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3026f663706aSKirill Tkhai 			return -EPERM;
3027f663706aSKirill Tkhai 		return open_related_ns(&net->ns, get_net_ns);
3028f663706aSKirill Tkhai 	}
3029631ab46bSEric W. Biederman 
3030c8d68e6bSJason Wang 	ret = 0;
3031876bfd4dSHerbert Xu 	rtnl_lock();
3032876bfd4dSHerbert Xu 
30339484dc74Syuan linyu 	tun = tun_get(tfile);
30340f16bc13SGao Feng 	if (cmd == TUNSETIFF) {
30350f16bc13SGao Feng 		ret = -EEXIST;
30360f16bc13SGao Feng 		if (tun)
30370f16bc13SGao Feng 			goto unlock;
30380f16bc13SGao Feng 
30391da177e4SLinus Torvalds 		ifr.ifr_name[IFNAMSIZ-1] = '\0';
30401da177e4SLinus Torvalds 
3041f2780d6dSKirill Tkhai 		ret = tun_set_iff(net, file, &ifr);
30421da177e4SLinus Torvalds 
3043876bfd4dSHerbert Xu 		if (ret)
3044876bfd4dSHerbert Xu 			goto unlock;
30451da177e4SLinus Torvalds 
304650857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
3047876bfd4dSHerbert Xu 			ret = -EFAULT;
3048876bfd4dSHerbert Xu 		goto unlock;
30491da177e4SLinus Torvalds 	}
3050fb7589a1SPavel Emelyanov 	if (cmd == TUNSETIFINDEX) {
3051fb7589a1SPavel Emelyanov 		ret = -EPERM;
3052fb7589a1SPavel Emelyanov 		if (tun)
3053fb7589a1SPavel Emelyanov 			goto unlock;
3054fb7589a1SPavel Emelyanov 
3055fb7589a1SPavel Emelyanov 		ret = -EFAULT;
3056fb7589a1SPavel Emelyanov 		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
3057fb7589a1SPavel Emelyanov 			goto unlock;
3058fb7589a1SPavel Emelyanov 
3059fb7589a1SPavel Emelyanov 		ret = 0;
3060fb7589a1SPavel Emelyanov 		tfile->ifindex = ifindex;
3061fb7589a1SPavel Emelyanov 		goto unlock;
3062fb7589a1SPavel Emelyanov 	}
30631da177e4SLinus Torvalds 
3064876bfd4dSHerbert Xu 	ret = -EBADFD;
30651da177e4SLinus Torvalds 	if (!tun)
3066876bfd4dSHerbert Xu 		goto unlock;
30671da177e4SLinus Torvalds 
30683424170fSMichal Kubecek 	netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);
30691da177e4SLinus Torvalds 
30700c3e0e3bSKirill Tkhai 	net = dev_net(tun->dev);
3071631ab46bSEric W. Biederman 	ret = 0;
30721da177e4SLinus Torvalds 	switch (cmd) {
3073e3b99556SMark McLoughlin 	case TUNGETIFF:
307412132768SKirill Tkhai 		tun_get_iff(tun, &ifr);
3075e3b99556SMark McLoughlin 
30763d407a80SPavel Emelyanov 		if (tfile->detached)
30773d407a80SPavel Emelyanov 			ifr.ifr_flags |= IFF_DETACH_QUEUE;
3078849c9b6fSPavel Emelyanov 		if (!tfile->socket.sk->sk_filter)
3079849c9b6fSPavel Emelyanov 			ifr.ifr_flags |= IFF_NOFILTER;
30803d407a80SPavel Emelyanov 
308150857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
3082631ab46bSEric W. Biederman 			ret = -EFAULT;
3083e3b99556SMark McLoughlin 		break;
3084e3b99556SMark McLoughlin 
30851da177e4SLinus Torvalds 	case TUNSETNOCSUM:
30861da177e4SLinus Torvalds 		/* Disable/Enable checksum */
30871da177e4SLinus Torvalds 
308888255375SMichał Mirosław 		/* [unimplemented] */
30893424170fSMichal Kubecek 		netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
30906b8a66eeSJoe Perches 			   arg ? "disabled" : "enabled");
30911da177e4SLinus Torvalds 		break;
30921da177e4SLinus Torvalds 
30931da177e4SLinus Torvalds 	case TUNSETPERSIST:
309454f968d6SJason Wang 		/* Disable/Enable persist mode. Keep an extra reference to the
309554f968d6SJason Wang 		 * module to prevent the module from being unloaded.
309654f968d6SJason Wang 		 */
309740630b82SMichael S. Tsirkin 		if (arg && !(tun->flags & IFF_PERSIST)) {
309840630b82SMichael S. Tsirkin 			tun->flags |= IFF_PERSIST;
309954f968d6SJason Wang 			__module_get(THIS_MODULE);
310083c1f36fSSabrina Dubroca 			do_notify = true;
3101dd38bd85SJason Wang 		}
310240630b82SMichael S. Tsirkin 		if (!arg && (tun->flags & IFF_PERSIST)) {
310340630b82SMichael S. Tsirkin 			tun->flags &= ~IFF_PERSIST;
310454f968d6SJason Wang 			module_put(THIS_MODULE);
310583c1f36fSSabrina Dubroca 			do_notify = true;
310654f968d6SJason Wang 		}
31071da177e4SLinus Torvalds 
31083424170fSMichal Kubecek 		netif_info(tun, drv, tun->dev, "persist %s\n",
31096b8a66eeSJoe Perches 			   arg ? "enabled" : "disabled");
31101da177e4SLinus Torvalds 		break;
31111da177e4SLinus Torvalds 
31121da177e4SLinus Torvalds 	case TUNSETOWNER:
31131da177e4SLinus Torvalds 		/* Set owner of the device */
31140625c883SEric W. Biederman 		owner = make_kuid(current_user_ns(), arg);
31150625c883SEric W. Biederman 		if (!uid_valid(owner)) {
31160625c883SEric W. Biederman 			ret = -EINVAL;
31170625c883SEric W. Biederman 			break;
31180625c883SEric W. Biederman 		}
31190625c883SEric W. Biederman 		tun->owner = owner;
312083c1f36fSSabrina Dubroca 		do_notify = true;
31213424170fSMichal Kubecek 		netif_info(tun, drv, tun->dev, "owner set to %u\n",
31220625c883SEric W. Biederman 			   from_kuid(&init_user_ns, tun->owner));
31231da177e4SLinus Torvalds 		break;
31241da177e4SLinus Torvalds 
31258c644623SGuido Guenther 	case TUNSETGROUP:
31268c644623SGuido Guenther 		/* Set group of the device */
31270625c883SEric W. Biederman 		group = make_kgid(current_user_ns(), arg);
31280625c883SEric W. Biederman 		if (!gid_valid(group)) {
31290625c883SEric W. Biederman 			ret = -EINVAL;
31300625c883SEric W. Biederman 			break;
31310625c883SEric W. Biederman 		}
31320625c883SEric W. Biederman 		tun->group = group;
313383c1f36fSSabrina Dubroca 		do_notify = true;
31343424170fSMichal Kubecek 		netif_info(tun, drv, tun->dev, "group set to %u\n",
31350625c883SEric W. Biederman 			   from_kgid(&init_user_ns, tun->group));
31368c644623SGuido Guenther 		break;
31378c644623SGuido Guenther 
3138ff4cc3acSMike Kershaw 	case TUNSETLINK:
3139ff4cc3acSMike Kershaw 		/* Only allow setting the type when the interface is down */
3140ff4cc3acSMike Kershaw 		if (tun->dev->flags & IFF_UP) {
31413424170fSMichal Kubecek 			netif_info(tun, drv, tun->dev,
31426b8a66eeSJoe Perches 				   "Linktype set failed because interface is up\n");
314348abfe05SDavid S. Miller 			ret = -EBUSY;
3144ff4cc3acSMike Kershaw 		} else {
3145ff4cc3acSMike Kershaw 			tun->dev->type = (int) arg;
31463424170fSMichal Kubecek 			netif_info(tun, drv, tun->dev, "linktype set to %d\n",
31476b8a66eeSJoe Perches 				   tun->dev->type);
314848abfe05SDavid S. Miller 			ret = 0;
3149ff4cc3acSMike Kershaw 		}
3150631ab46bSEric W. Biederman 		break;
3151ff4cc3acSMike Kershaw 
31521da177e4SLinus Torvalds 	case TUNSETDEBUG:
31533424170fSMichal Kubecek 		tun->msg_enable = (u32)arg;
31541da177e4SLinus Torvalds 		break;
31553424170fSMichal Kubecek 
31565228ddc9SRusty Russell 	case TUNSETOFFLOAD:
315788255375SMichał Mirosław 		ret = set_offload(tun, arg);
3158631ab46bSEric W. Biederman 		break;
31595228ddc9SRusty Russell 
3160f271b2ccSMax Krasnyansky 	case TUNSETTXFILTER:
3161f271b2ccSMax Krasnyansky 		/* Can be set only for TAPs */
3162631ab46bSEric W. Biederman 		ret = -EINVAL;
316340630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3164631ab46bSEric W. Biederman 			break;
3165c0e5a8c2SHarvey Harrison 		ret = update_filter(&tun->txflt, (void __user *)arg);
3166631ab46bSEric W. Biederman 		break;
31671da177e4SLinus Torvalds 
31681da177e4SLinus Torvalds 	case SIOCGIFHWADDR:
3169b595076aSUwe Kleine-König 		/* Get hw address */
3170f271b2ccSMax Krasnyansky 		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
3171f271b2ccSMax Krasnyansky 		ifr.ifr_hwaddr.sa_family = tun->dev->type;
317250857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
3173631ab46bSEric W. Biederman 			ret = -EFAULT;
3174631ab46bSEric W. Biederman 		break;
31751da177e4SLinus Torvalds 
31761da177e4SLinus Torvalds 	case SIOCSIFHWADDR:
3177f271b2ccSMax Krasnyansky 		/* Set hw address */
31783a37a963SPetr Machata 		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL);
3179631ab46bSEric W. Biederman 		break;
318033dccbb0SHerbert Xu 
318133dccbb0SHerbert Xu 	case TUNGETSNDBUF:
318254f968d6SJason Wang 		sndbuf = tfile->socket.sk->sk_sndbuf;
318333dccbb0SHerbert Xu 		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
318433dccbb0SHerbert Xu 			ret = -EFAULT;
318533dccbb0SHerbert Xu 		break;
318633dccbb0SHerbert Xu 
318733dccbb0SHerbert Xu 	case TUNSETSNDBUF:
318833dccbb0SHerbert Xu 		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
318933dccbb0SHerbert Xu 			ret = -EFAULT;
319033dccbb0SHerbert Xu 			break;
319133dccbb0SHerbert Xu 		}
319293161922SCraig Gallek 		if (sndbuf <= 0) {
319393161922SCraig Gallek 			ret = -EINVAL;
319493161922SCraig Gallek 			break;
319593161922SCraig Gallek 		}
319633dccbb0SHerbert Xu 
3197c8d68e6bSJason Wang 		tun->sndbuf = sndbuf;
3198c8d68e6bSJason Wang 		tun_set_sndbuf(tun);
319933dccbb0SHerbert Xu 		break;
320033dccbb0SHerbert Xu 
3201d9d52b51SMichael S. Tsirkin 	case TUNGETVNETHDRSZ:
3202d9d52b51SMichael S. Tsirkin 		vnet_hdr_sz = tun->vnet_hdr_sz;
3203d9d52b51SMichael S. Tsirkin 		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
3204d9d52b51SMichael S. Tsirkin 			ret = -EFAULT;
3205d9d52b51SMichael S. Tsirkin 		break;
3206d9d52b51SMichael S. Tsirkin 
3207d9d52b51SMichael S. Tsirkin 	case TUNSETVNETHDRSZ:
3208d9d52b51SMichael S. Tsirkin 		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
3209d9d52b51SMichael S. Tsirkin 			ret = -EFAULT;
3210d9d52b51SMichael S. Tsirkin 			break;
3211d9d52b51SMichael S. Tsirkin 		}
3212d9d52b51SMichael S. Tsirkin 		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
3213d9d52b51SMichael S. Tsirkin 			ret = -EINVAL;
3214d9d52b51SMichael S. Tsirkin 			break;
3215d9d52b51SMichael S. Tsirkin 		}
3216d9d52b51SMichael S. Tsirkin 
3217d9d52b51SMichael S. Tsirkin 		tun->vnet_hdr_sz = vnet_hdr_sz;
3218d9d52b51SMichael S. Tsirkin 		break;
3219d9d52b51SMichael S. Tsirkin 
32201cf8e410SMichael S. Tsirkin 	case TUNGETVNETLE:
32211cf8e410SMichael S. Tsirkin 		le = !!(tun->flags & TUN_VNET_LE);
32221cf8e410SMichael S. Tsirkin 		if (put_user(le, (int __user *)argp))
32231cf8e410SMichael S. Tsirkin 			ret = -EFAULT;
32241cf8e410SMichael S. Tsirkin 		break;
32251cf8e410SMichael S. Tsirkin 
32261cf8e410SMichael S. Tsirkin 	case TUNSETVNETLE:
32271cf8e410SMichael S. Tsirkin 		if (get_user(le, (int __user *)argp)) {
32281cf8e410SMichael S. Tsirkin 			ret = -EFAULT;
32291cf8e410SMichael S. Tsirkin 			break;
32301cf8e410SMichael S. Tsirkin 		}
32311cf8e410SMichael S. Tsirkin 		if (le)
32321cf8e410SMichael S. Tsirkin 			tun->flags |= TUN_VNET_LE;
32331cf8e410SMichael S. Tsirkin 		else
32341cf8e410SMichael S. Tsirkin 			tun->flags &= ~TUN_VNET_LE;
32351cf8e410SMichael S. Tsirkin 		break;
32361cf8e410SMichael S. Tsirkin 
32378b8e658bSGreg Kurz 	case TUNGETVNETBE:
32388b8e658bSGreg Kurz 		ret = tun_get_vnet_be(tun, argp);
32398b8e658bSGreg Kurz 		break;
32408b8e658bSGreg Kurz 
32418b8e658bSGreg Kurz 	case TUNSETVNETBE:
32428b8e658bSGreg Kurz 		ret = tun_set_vnet_be(tun, argp);
32438b8e658bSGreg Kurz 		break;
32448b8e658bSGreg Kurz 
324599405162SMichael S. Tsirkin 	case TUNATTACHFILTER:
324699405162SMichael S. Tsirkin 		/* Can be set only for TAPs */
324799405162SMichael S. Tsirkin 		ret = -EINVAL;
324840630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
324999405162SMichael S. Tsirkin 			break;
325099405162SMichael S. Tsirkin 		ret = -EFAULT;
325154f968d6SJason Wang 		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
325299405162SMichael S. Tsirkin 			break;
325399405162SMichael S. Tsirkin 
3254c8d68e6bSJason Wang 		ret = tun_attach_filter(tun);
325599405162SMichael S. Tsirkin 		break;
325699405162SMichael S. Tsirkin 
325799405162SMichael S. Tsirkin 	case TUNDETACHFILTER:
325899405162SMichael S. Tsirkin 		/* Can be set only for TAPs */
325999405162SMichael S. Tsirkin 		ret = -EINVAL;
326040630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
326199405162SMichael S. Tsirkin 			break;
3262c8d68e6bSJason Wang 		ret = 0;
3263c8d68e6bSJason Wang 		tun_detach_filter(tun, tun->numqueues);
326499405162SMichael S. Tsirkin 		break;
326599405162SMichael S. Tsirkin 
326676975e9cSPavel Emelyanov 	case TUNGETFILTER:
326776975e9cSPavel Emelyanov 		ret = -EINVAL;
326840630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
326976975e9cSPavel Emelyanov 			break;
327076975e9cSPavel Emelyanov 		ret = -EFAULT;
327176975e9cSPavel Emelyanov 		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
327276975e9cSPavel Emelyanov 			break;
327376975e9cSPavel Emelyanov 		ret = 0;
327476975e9cSPavel Emelyanov 		break;
327576975e9cSPavel Emelyanov 
327696f84061SJason Wang 	case TUNSETSTEERINGEBPF:
3277cd5681d7SJason Wang 		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
327896f84061SJason Wang 		break;
327996f84061SJason Wang 
3280aff3d70aSJason Wang 	case TUNSETFILTEREBPF:
3281aff3d70aSJason Wang 		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3282aff3d70aSJason Wang 		break;
3283aff3d70aSJason Wang 
328426d31925SNicolas Dichtel 	case TUNSETCARRIER:
328526d31925SNicolas Dichtel 		ret = -EFAULT;
328626d31925SNicolas Dichtel 		if (copy_from_user(&carrier, argp, sizeof(carrier)))
328726d31925SNicolas Dichtel 			goto unlock;
328826d31925SNicolas Dichtel 
328926d31925SNicolas Dichtel 		ret = tun_net_change_carrier(tun->dev, (bool)carrier);
329026d31925SNicolas Dichtel 		break;
329126d31925SNicolas Dichtel 
32920c3e0e3bSKirill Tkhai 	case TUNGETDEVNETNS:
32930c3e0e3bSKirill Tkhai 		ret = -EPERM;
32940c3e0e3bSKirill Tkhai 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
32950c3e0e3bSKirill Tkhai 			goto unlock;
32960c3e0e3bSKirill Tkhai 		ret = open_related_ns(&net->ns, get_net_ns);
32970c3e0e3bSKirill Tkhai 		break;
32980c3e0e3bSKirill Tkhai 
32991da177e4SLinus Torvalds 	default:
3300631ab46bSEric W. Biederman 		ret = -EINVAL;
3301631ab46bSEric W. Biederman 		break;
3302ee289b64SJoe Perches 	}
33031da177e4SLinus Torvalds 
330483c1f36fSSabrina Dubroca 	if (do_notify)
330583c1f36fSSabrina Dubroca 		netdev_state_change(tun->dev);
330683c1f36fSSabrina Dubroca 
3307876bfd4dSHerbert Xu unlock:
3308876bfd4dSHerbert Xu 	rtnl_unlock();
3309876bfd4dSHerbert Xu 	if (tun)
3310631ab46bSEric W. Biederman 		tun_put(tun);
3311631ab46bSEric W. Biederman 	return ret;
33121da177e4SLinus Torvalds }
33131da177e4SLinus Torvalds 
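/*
 * Example (not part of the driver, same headers as the TUNSETIFF
 * sketch above): the handler above is also where persistence and
 * ownership are configured.  The sequence below is what tools in the
 * style of "ip tuntap"/"tunctl" perform so a device outlives its
 * creator and is usable by an unprivileged user (the uid/gid values
 * are the caller's choice):
 */
static int make_persistent(int fd, unsigned int uid, unsigned int gid)
{
	if (ioctl(fd, TUNSETOWNER, uid) < 0)
		return -1;
	if (ioctl(fd, TUNSETGROUP, gid) < 0)
		return -1;
	/* 1 = keep the device after close(); 0 drops persistence again. */
	return ioctl(fd, TUNSETPERSIST, 1);
}
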
331450857e2aSArnd Bergmann static long tun_chr_ioctl(struct file *file,
331550857e2aSArnd Bergmann 			  unsigned int cmd, unsigned long arg)
331650857e2aSArnd Bergmann {
331750857e2aSArnd Bergmann 	return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
331850857e2aSArnd Bergmann }
331950857e2aSArnd Bergmann 
332050857e2aSArnd Bergmann #ifdef CONFIG_COMPAT
332150857e2aSArnd Bergmann static long tun_chr_compat_ioctl(struct file *file,
332250857e2aSArnd Bergmann 			 unsigned int cmd, unsigned long arg)
332350857e2aSArnd Bergmann {
332450857e2aSArnd Bergmann 	switch (cmd) {
332550857e2aSArnd Bergmann 	case TUNSETIFF:
332650857e2aSArnd Bergmann 	case TUNGETIFF:
332750857e2aSArnd Bergmann 	case TUNSETTXFILTER:
332850857e2aSArnd Bergmann 	case TUNGETSNDBUF:
332950857e2aSArnd Bergmann 	case TUNSETSNDBUF:
333050857e2aSArnd Bergmann 	case SIOCGIFHWADDR:
333150857e2aSArnd Bergmann 	case SIOCSIFHWADDR:
333250857e2aSArnd Bergmann 		arg = (unsigned long)compat_ptr(arg);
333350857e2aSArnd Bergmann 		break;
333450857e2aSArnd Bergmann 	default:
333550857e2aSArnd Bergmann 		arg = (compat_ulong_t)arg;
333650857e2aSArnd Bergmann 		break;
333750857e2aSArnd Bergmann 	}
333850857e2aSArnd Bergmann 
333950857e2aSArnd Bergmann 	/*
334050857e2aSArnd Bergmann 	 * compat_ifreq is shorter than ifreq, so we must not access beyond
334150857e2aSArnd Bergmann 	 * the end of that structure. All fields that are used in this
334250857e2aSArnd Bergmann 	 * driver are compatible, though, so we don't need to convert the
334350857e2aSArnd Bergmann 	 * contents.
334450857e2aSArnd Bergmann 	 */
334550857e2aSArnd Bergmann 	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
334650857e2aSArnd Bergmann }
334750857e2aSArnd Bergmann #endif /* CONFIG_COMPAT */
334850857e2aSArnd Bergmann 
33491da177e4SLinus Torvalds static int tun_chr_fasync(int fd, struct file *file, int on)
33501da177e4SLinus Torvalds {
335154f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
33521da177e4SLinus Torvalds 	int ret;
33531da177e4SLinus Torvalds 
335454f968d6SJason Wang 	ret = fasync_helper(fd, file, on, &tfile->fasync);
335454f968d6SJason Wang 	if (ret < 0)
33559d319522SJonathan Corbet 		goto out;
33561da177e4SLinus Torvalds 
33571da177e4SLinus Torvalds 	if (on) {
335801919134SEric W. Biederman 		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
335954f968d6SJason Wang 		tfile->flags |= TUN_FASYNC;
33601da177e4SLinus Torvalds 	} else
336154f968d6SJason Wang 		tfile->flags &= ~TUN_FASYNC;
33629d319522SJonathan Corbet 	ret = 0;
33639d319522SJonathan Corbet out:
33649d319522SJonathan Corbet 	return ret;
33651da177e4SLinus Torvalds }
33661da177e4SLinus Torvalds 
33671da177e4SLinus Torvalds static int tun_chr_open(struct inode *inode, struct file * file)
33681da177e4SLinus Torvalds {
3369140e807dSEric W. Biederman 	struct net *net = current->nsproxy->net_ns;
3370631ab46bSEric W. Biederman 	struct tun_file *tfile;
3371deed49fbSThomas Gleixner 
3372140e807dSEric W. Biederman 	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
337311aa9c28SEric W. Biederman 					    &tun_proto, 0);
3374631ab46bSEric W. Biederman 	if (!tfile)
3375631ab46bSEric W. Biederman 		return -ENOMEM;
3376b196d88aSJason Wang 	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
3377b196d88aSJason Wang 		sk_free(&tfile->sk);
3378b196d88aSJason Wang 		return -ENOMEM;
3379b196d88aSJason Wang 	}
3380b196d88aSJason Wang 
3381c7256f57SEric Dumazet 	mutex_init(&tfile->napi_mutex);
3382c956674bSMonam Agarwal 	RCU_INIT_POINTER(tfile->tun, NULL);
338354f968d6SJason Wang 	tfile->flags = 0;
3384fb7589a1SPavel Emelyanov 	tfile->ifindex = 0;
338554f968d6SJason Wang 
3386333f7909SAl Viro 	init_waitqueue_head(&tfile->socket.wq.wait);
338754f968d6SJason Wang 
338854f968d6SJason Wang 	tfile->socket.file = file;
338954f968d6SJason Wang 	tfile->socket.ops = &tun_socket_ops;
339054f968d6SJason Wang 
339154f968d6SJason Wang 	sock_init_data(&tfile->socket, &tfile->sk);
339254f968d6SJason Wang 
339354f968d6SJason Wang 	tfile->sk.sk_write_space = tun_sock_write_space;
339454f968d6SJason Wang 	tfile->sk.sk_sndbuf = INT_MAX;
339554f968d6SJason Wang 
3396631ab46bSEric W. Biederman 	file->private_data = tfile;
33974008e97fSJason Wang 	INIT_LIST_HEAD(&tfile->next);
339854f968d6SJason Wang 
339919a6afb2SJason Wang 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
340019a6afb2SJason Wang 
34011da177e4SLinus Torvalds 	return 0;
34021da177e4SLinus Torvalds }
34031da177e4SLinus Torvalds 
34041da177e4SLinus Torvalds static int tun_chr_close(struct inode *inode, struct file *file)
34051da177e4SLinus Torvalds {
3406631ab46bSEric W. Biederman 	struct tun_file *tfile = file->private_data;
34071da177e4SLinus Torvalds 
3408c8d68e6bSJason Wang 	tun_detach(tfile, true);
34091da177e4SLinus Torvalds 
34101da177e4SLinus Torvalds 	return 0;
34111da177e4SLinus Torvalds }
34121da177e4SLinus Torvalds 
341393e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS
34149484dc74Syuan linyu static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
341593e14b6dSMasatake YAMATO {
34169484dc74Syuan linyu 	struct tun_file *tfile = file->private_data;
341793e14b6dSMasatake YAMATO 	struct tun_struct *tun;
341893e14b6dSMasatake YAMATO 	struct ifreq ifr;
341993e14b6dSMasatake YAMATO 
342093e14b6dSMasatake YAMATO 	memset(&ifr, 0, sizeof(ifr));
342193e14b6dSMasatake YAMATO 
342293e14b6dSMasatake YAMATO 	rtnl_lock();
34239484dc74Syuan linyu 	tun = tun_get(tfile);
342493e14b6dSMasatake YAMATO 	if (tun)
342512132768SKirill Tkhai 		tun_get_iff(tun, &ifr);
342693e14b6dSMasatake YAMATO 	rtnl_unlock();
342793e14b6dSMasatake YAMATO 
342893e14b6dSMasatake YAMATO 	if (tun)
342993e14b6dSMasatake YAMATO 		tun_put(tun);
343093e14b6dSMasatake YAMATO 
3431a3816ab0SJoe Perches 	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
343293e14b6dSMasatake YAMATO }
343393e14b6dSMasatake YAMATO #endif
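
/*
 * Editor's note: the "iff:" line emitted here is what appears in
 * /proc/<pid>/fdinfo/<fd> for a tun fd, letting tools map an fd back to
 * its interface.  A short sketch (fd is the attached tun fd from the
 * sketch above):
 *
 *	#include <stdio.h>
 *
 *	char path[64], line[128];
 *	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
 *	FILE *f = fopen(path, "r");
 *	while (f && fgets(line, sizeof(line), f))
 *		fputs(line, stdout);   // one line reads "iff:\ttun0"
 *	if (f)
 *		fclose(f);
 */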
343493e14b6dSMasatake YAMATO 
3435d54b1fdbSArjan van de Ven static const struct file_operations tun_fops = {
34361da177e4SLinus Torvalds 	.owner	= THIS_MODULE,
34371da177e4SLinus Torvalds 	.llseek = no_llseek,
34389b067034SAl Viro 	.read_iter  = tun_chr_read_iter,
3439f5ff53b4SAl Viro 	.write_iter = tun_chr_write_iter,
34401da177e4SLinus Torvalds 	.poll	= tun_chr_poll,
3441876bfd4dSHerbert Xu 	.unlocked_ioctl	= tun_chr_ioctl,
344250857e2aSArnd Bergmann #ifdef CONFIG_COMPAT
344350857e2aSArnd Bergmann 	.compat_ioctl = tun_chr_compat_ioctl,
344450857e2aSArnd Bergmann #endif
34451da177e4SLinus Torvalds 	.open	= tun_chr_open,
34461da177e4SLinus Torvalds 	.release = tun_chr_close,
344793e14b6dSMasatake YAMATO 	.fasync = tun_chr_fasync,
344893e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS
344993e14b6dSMasatake YAMATO 	.show_fdinfo = tun_chr_show_fdinfo,
345093e14b6dSMasatake YAMATO #endif
34511da177e4SLinus Torvalds };
34521da177e4SLinus Torvalds 
34531da177e4SLinus Torvalds static struct miscdevice tun_miscdev = {
34541da177e4SLinus Torvalds 	.minor = TUN_MINOR,
34551da177e4SLinus Torvalds 	.name = "tun",
3456e454cea2SKay Sievers 	.nodename = "net/tun",
34571da177e4SLinus Torvalds 	.fops = &tun_fops,
34581da177e4SLinus Torvalds };
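
/*
 * Editor's note: misc devices share char major 10 and TUN_MINOR is 200, so
 * the .nodename above makes devtmpfs/udev create /dev/net/tun (and the
 * "devname:net/tun" alias at the bottom of this file loads the module on
 * first open).  On a system without devtmpfs the node could be created by
 * hand; a sketch:
 *
 *	#include <sys/stat.h>
 *	#include <sys/sysmacros.h>
 *
 *	mknod("/dev/net/tun", S_IFCHR | 0600, makedev(10, 200));  // TUN_MINOR
 */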
34591da177e4SLinus Torvalds 
34601da177e4SLinus Torvalds /* ethtool interface */
34611da177e4SLinus Torvalds 
34624e24f2ddSChas Williams static void tun_default_link_ksettings(struct net_device *dev,
346329ccc49dSPhilippe Reynes 				       struct ethtool_link_ksettings *cmd)
34641da177e4SLinus Torvalds {
346529ccc49dSPhilippe Reynes 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
346629ccc49dSPhilippe Reynes 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
346729ccc49dSPhilippe Reynes 	cmd->base.speed		= SPEED_10;
346829ccc49dSPhilippe Reynes 	cmd->base.duplex	= DUPLEX_FULL;
346929ccc49dSPhilippe Reynes 	cmd->base.port		= PORT_TP;
347029ccc49dSPhilippe Reynes 	cmd->base.phy_address	= 0;
347129ccc49dSPhilippe Reynes 	cmd->base.autoneg	= AUTONEG_DISABLE;
34724e24f2ddSChas Williams }
34734e24f2ddSChas Williams 
34744e24f2ddSChas Williams static int tun_get_link_ksettings(struct net_device *dev,
34754e24f2ddSChas Williams 				  struct ethtool_link_ksettings *cmd)
34764e24f2ddSChas Williams {
34774e24f2ddSChas Williams 	struct tun_struct *tun = netdev_priv(dev);
34784e24f2ddSChas Williams 
34794e24f2ddSChas Williams 	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
34804e24f2ddSChas Williams 	return 0;
34814e24f2ddSChas Williams }
34824e24f2ddSChas Williams 
34834e24f2ddSChas Williams static int tun_set_link_ksettings(struct net_device *dev,
34844e24f2ddSChas Williams 				  const struct ethtool_link_ksettings *cmd)
34854e24f2ddSChas Williams {
34864e24f2ddSChas Williams 	struct tun_struct *tun = netdev_priv(dev);
34874e24f2ddSChas Williams 
34884e24f2ddSChas Williams 	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
34891da177e4SLinus Torvalds 	return 0;
34901da177e4SLinus Torvalds }
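
/*
 * Editor's note: tun has no real PHY, so the defaults above just advertise
 * a fixed 10Mb/s full-duplex link, and set_link_ksettings lets userspace
 * overwrite the stored settings wholesale.  A sketch querying them through
 * the legacy ETHTOOL_GSET ioctl ("tun0" and the AF_INET socket sock are
 * assumptions):
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/ethtool.h>
 *	#include <linux/if.h>
 *	#include <linux/sockios.h>
 *
 *	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "tun0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&ecmd;
 *	ioctl(sock, SIOCETHTOOL, &ifr);
 *	// ethtool_cmd_speed(&ecmd) == 10, ecmd.duplex == DUPLEX_FULL
 */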
34911da177e4SLinus Torvalds 
34921da177e4SLinus Torvalds static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
34931da177e4SLinus Torvalds {
34941da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
34951da177e4SLinus Torvalds 
349633a5ba14SRick Jones 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
349733a5ba14SRick Jones 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
34981da177e4SLinus Torvalds 
34991da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
350040630b82SMichael S. Tsirkin 	case IFF_TUN:
350133a5ba14SRick Jones 		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
35021da177e4SLinus Torvalds 		break;
350340630b82SMichael S. Tsirkin 	case IFF_TAP:
350433a5ba14SRick Jones 		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
35051da177e4SLinus Torvalds 		break;
35061da177e4SLinus Torvalds 	}
35071da177e4SLinus Torvalds }
35081da177e4SLinus Torvalds 
35091da177e4SLinus Torvalds static u32 tun_get_msglevel(struct net_device *dev)
35101da177e4SLinus Torvalds {
35111da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
35123424170fSMichal Kubecek 
35133424170fSMichal Kubecek 	return tun->msg_enable;
35141da177e4SLinus Torvalds }
35151da177e4SLinus Torvalds 
35161da177e4SLinus Torvalds static void tun_set_msglevel(struct net_device *dev, u32 value)
35171da177e4SLinus Torvalds {
35181da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
35193424170fSMichal Kubecek 
35203424170fSMichal Kubecek 	tun->msg_enable = value;
35211da177e4SLinus Torvalds }
35221da177e4SLinus Torvalds 
35235503fcecSJason Wang static int tun_get_coalesce(struct net_device *dev,
35245503fcecSJason Wang 			    struct ethtool_coalesce *ec)
35255503fcecSJason Wang {
35265503fcecSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
35275503fcecSJason Wang 
35285503fcecSJason Wang 	ec->rx_max_coalesced_frames = tun->rx_batched;
35295503fcecSJason Wang 
35305503fcecSJason Wang 	return 0;
35315503fcecSJason Wang }
35325503fcecSJason Wang 
35335503fcecSJason Wang static int tun_set_coalesce(struct net_device *dev,
35345503fcecSJason Wang 			    struct ethtool_coalesce *ec)
35355503fcecSJason Wang {
35365503fcecSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
35375503fcecSJason Wang 
35385503fcecSJason Wang 	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
35395503fcecSJason Wang 		tun->rx_batched = NAPI_POLL_WEIGHT;
35405503fcecSJason Wang 	else
35415503fcecSJason Wang 		tun->rx_batched = ec->rx_max_coalesced_frames;
35425503fcecSJason Wang 
35435503fcecSJason Wang 	return 0;
35445503fcecSJason Wang }
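
/*
 * Editor's note: rx_batched bounds how many packets the receive path may
 * accumulate before flushing them into the stack; requests above
 * NAPI_POLL_WEIGHT (64) are quietly clamped rather than rejected.  A
 * sketch reusing ifr and sock from the sketch above:
 *
 *	struct ethtool_coalesce ec = {
 *		.cmd = ETHTOOL_SCOALESCE,
 *		.rx_max_coalesced_frames = 32,
 *	};
 *
 *	ifr.ifr_data = (void *)&ec;
 *	ioctl(sock, SIOCETHTOOL, &ifr);   // tun->rx_batched becomes 32
 */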
35455503fcecSJason Wang 
35467282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops = {
3547e5ad00b3SJakub Kicinski 	.supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
35481da177e4SLinus Torvalds 	.get_drvinfo	= tun_get_drvinfo,
35491da177e4SLinus Torvalds 	.get_msglevel	= tun_get_msglevel,
35501da177e4SLinus Torvalds 	.set_msglevel	= tun_set_msglevel,
3551bee31369SNolan Leake 	.get_link	= ethtool_op_get_link,
3552eda29772SRichard Cochran 	.get_ts_info	= ethtool_op_get_ts_info,
35535503fcecSJason Wang 	.get_coalesce   = tun_get_coalesce,
35545503fcecSJason Wang 	.set_coalesce   = tun_set_coalesce,
355529ccc49dSPhilippe Reynes 	.get_link_ksettings = tun_get_link_ksettings,
35564e24f2ddSChas Williams 	.set_link_ksettings = tun_set_link_ksettings,
35571da177e4SLinus Torvalds };
35581da177e4SLinus Torvalds 
35591576d986SJason Wang static int tun_queue_resize(struct tun_struct *tun)
35601576d986SJason Wang {
35611576d986SJason Wang 	struct net_device *dev = tun->dev;
35621576d986SJason Wang 	struct tun_file *tfile;
35635990a305SJason Wang 	struct ptr_ring **rings;
35641576d986SJason Wang 	int n = tun->numqueues + tun->numdisabled;
35651576d986SJason Wang 	int ret, i;
35661576d986SJason Wang 
35675990a305SJason Wang 	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
35685990a305SJason Wang 	if (!rings)
35691576d986SJason Wang 		return -ENOMEM;
35701576d986SJason Wang 
35711576d986SJason Wang 	for (i = 0; i < tun->numqueues; i++) {
35721576d986SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
35735990a305SJason Wang 		rings[i] = &tfile->tx_ring;
35741576d986SJason Wang 	}
35751576d986SJason Wang 	list_for_each_entry(tfile, &tun->disabled, next)
35765990a305SJason Wang 		rings[i++] = &tfile->tx_ring;
35771576d986SJason Wang 
35785990a305SJason Wang 	ret = ptr_ring_resize_multiple(rings, n,
35795990a305SJason Wang 				       dev->tx_queue_len, GFP_KERNEL,
3580fc72d1d5SJason Wang 				       tun_ptr_free);
35811576d986SJason Wang 
35825990a305SJason Wang 	kfree(rings);
35831576d986SJason Wang 	return ret;
35841576d986SJason Wang }
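
/*
 * Editor's note: every queue -- enabled and disabled alike -- owns a
 * ptr_ring sized by dev->tx_queue_len, so a txqueuelen change has to
 * resize them all in one batch; entries that no longer fit are released
 * through tun_ptr_free(), which understands both skb and XDP-frame
 * pointers.  The resize is reached via the notifier below, e.g. from this
 * userspace sketch ("tun0" and sock are assumptions):
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/sockios.h>
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "tun0", IFNAMSIZ);
 *	ifr.ifr_qlen = 5000;
 *	ioctl(sock, SIOCSIFTXQLEN, &ifr);   // ends up in tun_queue_resize()
 */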
35851576d986SJason Wang 
35861576d986SJason Wang static int tun_device_event(struct notifier_block *unused,
35871576d986SJason Wang 			    unsigned long event, void *ptr)
35881576d986SJason Wang {
35891576d986SJason Wang 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
35901576d986SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
359172b319dcSFei Li 	int i;
35921576d986SJason Wang 
359386dfb4acSCraig Gallek 	if (dev->rtnl_link_ops != &tun_link_ops)
359486dfb4acSCraig Gallek 		return NOTIFY_DONE;
359586dfb4acSCraig Gallek 
35961576d986SJason Wang 	switch (event) {
35971576d986SJason Wang 	case NETDEV_CHANGE_TX_QUEUE_LEN:
35981576d986SJason Wang 		if (tun_queue_resize(tun))
35991576d986SJason Wang 			return NOTIFY_BAD;
36001576d986SJason Wang 		break;
360172b319dcSFei Li 	case NETDEV_UP:
360272b319dcSFei Li 		for (i = 0; i < tun->numqueues; i++) {
360372b319dcSFei Li 			struct tun_file *tfile;
360472b319dcSFei Li 
360572b319dcSFei Li 			tfile = rtnl_dereference(tun->tfiles[i]);
360672b319dcSFei Li 			tfile->socket.sk->sk_write_space(tfile->socket.sk);
360772b319dcSFei Li 		}
360872b319dcSFei Li 		break;
36091576d986SJason Wang 	default:
36101576d986SJason Wang 		break;
36111576d986SJason Wang 	}
36121576d986SJason Wang 
36131576d986SJason Wang 	return NOTIFY_DONE;
36141576d986SJason Wang }
36151576d986SJason Wang 
36161576d986SJason Wang static struct notifier_block tun_notifier_block __read_mostly = {
36171576d986SJason Wang 	.notifier_call	= tun_device_event,
36181576d986SJason Wang };
361979d17604SPavel Emelyanov 
36201da177e4SLinus Torvalds static int __init tun_init(void)
36211da177e4SLinus Torvalds {
36221da177e4SLinus Torvalds 	int ret = 0;
36231da177e4SLinus Torvalds 
36246b8a66eeSJoe Perches 	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
36251da177e4SLinus Torvalds 
3626f019a7a5SEric W. Biederman 	ret = rtnl_link_register(&tun_link_ops);
362779d17604SPavel Emelyanov 	if (ret) {
36286b8a66eeSJoe Perches 		pr_err("Can't register link_ops\n");
3629f019a7a5SEric W. Biederman 		goto err_linkops;
363079d17604SPavel Emelyanov 	}
363179d17604SPavel Emelyanov 
36321da177e4SLinus Torvalds 	ret = misc_register(&tun_miscdev);
363379d17604SPavel Emelyanov 	if (ret) {
36346b8a66eeSJoe Perches 		pr_err("Can't register misc device %d\n", TUN_MINOR);
363579d17604SPavel Emelyanov 		goto err_misc;
363679d17604SPavel Emelyanov 	}
36371576d986SJason Wang 
36385edfbd3cSTonghao Zhang 	ret = register_netdevice_notifier(&tun_notifier_block);
36395edfbd3cSTonghao Zhang 	if (ret) {
36405edfbd3cSTonghao Zhang 		pr_err("Can't register netdevice notifier\n");
36415edfbd3cSTonghao Zhang 		goto err_notifier;
36425edfbd3cSTonghao Zhang 	}
36435edfbd3cSTonghao Zhang 
364479d17604SPavel Emelyanov 	return 0;
36455edfbd3cSTonghao Zhang 
36465edfbd3cSTonghao Zhang err_notifier:
36475edfbd3cSTonghao Zhang 	misc_deregister(&tun_miscdev);
364879d17604SPavel Emelyanov err_misc:
3649f019a7a5SEric W. Biederman 	rtnl_link_unregister(&tun_link_ops);
3650f019a7a5SEric W. Biederman err_linkops:
36511da177e4SLinus Torvalds 	return ret;
36521da177e4SLinus Torvalds }
36531da177e4SLinus Torvalds 
36541da177e4SLinus Torvalds static void tun_cleanup(void)
36551da177e4SLinus Torvalds {
36561da177e4SLinus Torvalds 	misc_deregister(&tun_miscdev);
3657f019a7a5SEric W. Biederman 	rtnl_link_unregister(&tun_link_ops);
36581576d986SJason Wang 	unregister_netdevice_notifier(&tun_notifier_block);
36591da177e4SLinus Torvalds }
36601da177e4SLinus Torvalds 
366205c2828cSMichael S. Tsirkin /* Get the underlying socket object from a tun file.  Returns an error
366305c2828cSMichael S. Tsirkin  * pointer unless the file is a TUN/TAP file.  The returned socket works
366405c2828cSMichael S. Tsirkin  * like a packet socket: it can be used for sock_sendmsg/sock_recvmsg.
366505c2828cSMichael S. Tsirkin  * The caller must hold a reference to the file as long as the socket is in use. */
366505c2828cSMichael S. Tsirkin struct socket *tun_get_socket(struct file *file)
366605c2828cSMichael S. Tsirkin {
36676e914fc7SJason Wang 	struct tun_file *tfile;
366805c2828cSMichael S. Tsirkin 	if (file->f_op != &tun_fops)
366905c2828cSMichael S. Tsirkin 		return ERR_PTR(-EINVAL);
36706e914fc7SJason Wang 	tfile = file->private_data;
36716e914fc7SJason Wang 	if (!tfile)
367205c2828cSMichael S. Tsirkin 		return ERR_PTR(-EBADFD);
367354f968d6SJason Wang 	return &tfile->socket;
367405c2828cSMichael S. Tsirkin }
367505c2828cSMichael S. Tsirkin EXPORT_SYMBOL_GPL(tun_get_socket);
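
/*
 * Editor's note: the in-tree consumer of this export is vhost-net, which
 * sends and receives directly on the returned socket.  A hedged
 * kernel-side sketch of the calling pattern:
 *
 *	struct socket *sock = tun_get_socket(file);
 *
 *	if (IS_ERR(sock))
 *		return PTR_ERR(sock);
 *	// keep a reference on file, then use sock_sendmsg()/sock_recvmsg()
 */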
367605c2828cSMichael S. Tsirkin 
36775990a305SJason Wang struct ptr_ring *tun_get_tx_ring(struct file *file)
367883339c6bSJason Wang {
367983339c6bSJason Wang 	struct tun_file *tfile;
368083339c6bSJason Wang 
368183339c6bSJason Wang 	if (file->f_op != &tun_fops)
368283339c6bSJason Wang 		return ERR_PTR(-EINVAL);
368383339c6bSJason Wang 	tfile = file->private_data;
368483339c6bSJason Wang 	if (!tfile)
368583339c6bSJason Wang 		return ERR_PTR(-EBADFD);
36865990a305SJason Wang 	return &tfile->tx_ring;
368783339c6bSJason Wang }
36885990a305SJason Wang EXPORT_SYMBOL_GPL(tun_get_tx_ring);
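
/*
 * Editor's note: this companion export lets vhost-net bypass the socket
 * layer and pop packet pointers straight off a queue's tx_ring; entries
 * are skbs or tagged XDP frames, which is why tun_ptr_free() is used when
 * a ring is torn down.  A hedged sketch:
 *
 *	struct ptr_ring *ring = tun_get_tx_ring(file);
 *	void *ptr;
 *
 *	if (IS_ERR(ring))
 *		return PTR_ERR(ring);
 *	ptr = ptr_ring_consume(ring);   // skb or tagged xdp frame, or NULL
 */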
368983339c6bSJason Wang 
36901da177e4SLinus Torvalds module_init(tun_init);
36911da177e4SLinus Torvalds module_exit(tun_cleanup);
36921da177e4SLinus Torvalds MODULE_DESCRIPTION(DRV_DESCRIPTION);
36931da177e4SLinus Torvalds MODULE_AUTHOR(DRV_COPYRIGHT);
36941da177e4SLinus Torvalds MODULE_LICENSE("GPL");
36951da177e4SLinus Torvalds MODULE_ALIAS_MISCDEV(TUN_MINOR);
3696578454ffSKay Sievers MODULE_ALIAS("devname:net/tun");
3697