// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/ip_tunnels.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags, so it is
 * overloaded to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE     0x80000000
#define TUN_VNET_BE     0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket-related structures (except sock_fprog and
 * tap_filter) and serves as one transmit queue for the tuntap device. The
 * sock_fprog and tap_filter are kept in tun_struct since they are used for
 * filtering on the netdevice, not on a specific queue (at least no such
 * per-queue requirement has come up).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled; the pointer from one to
 * the other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

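/* For reference, the reader side of this coupling looks like:
 *
 *	rcu_read_lock();
 *	tun = rcu_dereference(tfile->tun);
 *	if (tun)
 *		dev_hold(tun->dev);
 *	rcu_read_unlock();
 *
 * which is exactly the pattern tun_get() below implements.
 */
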
struct tun_page {
	struct page *page;
	int count;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated ____cacheline_aligned_in_smp;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when the file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int            numqueues;
	unsigned int		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
	u32			msg_enable;
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

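/* The splice-out/splice-back pattern above keeps sk_write_queue's lock
 * hold times short: the backlog is moved to a private list in one locked
 * operation, GRO then runs on that list without the lock, and whatever is
 * left when the budget runs out is spliced back to the head so packet
 * order is preserved for the next poll.
 */
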
static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
				  NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}

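/* Example use (a sketch; assumes a local struct virtio_net_hdr gso that
 * has been read from userspace):
 *
 *	u16 gso_size = tun16_to_cpu(tun, gso.gso_size);
 *
 * All virtio header fields go through these helpers so a single build
 * handles both TUN_VNET_LE and legacy cross-endian (TUN_VNET_BE)
 * configurations.
 */
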
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

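/* tun_hashfn() above relies on TUN_NUM_FLOW_ENTRIES (1024) being a power
 * of two: rxhash & (N - 1) is then equivalent to rxhash % N. E.g.
 * rxhash 0x12345678 & 0x3ff selects bucket 0x278.
 */
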
static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		netif_info(tun, tx_queued, tun->dev,
			   "create flow: hash %u index %u\n",
			   rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
		   e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		if (READ_ONCE(e->queue_index) != queue_index)
			WRITE_ONCE(e->queue_index, queue_index);
		if (e->updated != jiffies)
			e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

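/* Note on tun_flow_update(): the hot path (existing entry) runs under RCU
 * only. queue_index is therefore updated with WRITE_ONCE() and compared
 * with READ_ONCE() so lockless readers in the xmit path see a consistent
 * value, and e->updated is rewritten only when jiffies actually changed,
 * to avoid dirtying a shared cache line on every packet. The slow path
 * (creating an entry and re-arming the GC timer) takes tun->lock.
 */
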
/* Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash. The reason that we do not
 * check rxq no. is because some cards (e.g. 82599) choose the rxq based on
 * the txq where the last packet of the flow was sent. As the userspace
 * application moves between processors, we may get a different rxq no.
 * here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
	if (e) {
		tun_flow_save_rps_rxhash(e, txq);
		txq = e->queue_index;
	} else {
		/* use multiply and shift instead of expensive divide */
		txq = ((u64)txq * numqueues) >> 32;
	}

	return txq;
}

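/* A worked example of the multiply-and-shift in tun_automq_select_queue():
 * the 64-bit product (u64)txq * numqueues, shifted right by 32, scales
 * txq / 2^32 into [0, numqueues) without a divide. With numqueues = 4 and
 * txq = 0x80000000, ((u64)0x80000000 * 4) >> 32 = 2.
 */
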
static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u32 numqueues;
	u16 ret = 0;

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues)
		return 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}

static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

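/* Entries in tx_ring are type-tagged pointers: either a plain sk_buff or
 * an xdp_frame pointer carrying a tag bit (see tun_is_xdp_frame() and
 * tun_ptr_to_xdp() used above, from <linux/if_tun.h>). Consumers such as
 * tun_ptr_free() must therefore check the kind before freeing.
 */
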
static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;
		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
				   NULL);

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags,
		      bool publish_tun)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to a persistent device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (!err)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	if (rtnl_dereference(tun->xdp_prog))
		sock_set_flag(&tfile->sk, SOCK_XDP);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

	/* Publish tfile->tun and tun->tfiles only after we've fully
	 * initialized tfile; otherwise we risk using half-initialized
	 * object.
	 */
	if (publish_tun)
		rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;
	tun_set_real_num_queues(tun);
out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}

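/* ether_crc() >> 26 keeps the top 6 bits of the CRC, i.e. a value n in
 * [0, 63] that indexes the 64-bit hash mask stored as two u32 words:
 * n >> 5 selects the word and n & 31 the bit within it. For example,
 * n = 40 sets bit 8 of mask[1].
 */
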
static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks, which is
	 * perfectly safe: we disable it first, and in the worst case
	 * we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed;
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		struct tun_flow_entry *e;
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
		if (e)
			tun_flow_save_rps_rxhash(e, rxhash);
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (!tfile)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * The filter can be enabled only for TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0 || pskb_trim(skb, len))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for an indefinite time.
	 */
	skb_orphan(skb);

	nf_reset_ct(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}

static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_pcpu_stats *p;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(tun->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets	= u64_stats_read(&p->rx_packets);
			rxbytes		= u64_stats_read(&p->rx_bytes);
			txpackets	= u64_stats_read(&p->tx_packets);
			txbytes		= u64_stats_read(&p->tx_bytes);
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;

		/* u32 counters */
		rx_dropped	+= p->rx_dropped;
		rx_frame_errors	+= p->rx_frame_errors;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped  = rx_dropped;
	stats->rx_frame_errors = rx_frame_errors;
	stats->tx_dropped = tx_dropped;
}

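/* The u64_stats_fetch_begin()/u64_stats_fetch_retry() loop above rereads
 * the per-cpu snapshot until the writer-side sequence count is stable, so
 * the 64-bit counters are observed tear-free even on 32-bit SMP kernels
 * where a u64 load is not atomic. The u32 drop/error counters are read
 * directly; an occasionally stale value there is acceptable.
 */
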
static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	struct bpf_prog *old_prog;
	int i;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}

	return 0;
}

static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return tun_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier) {
		struct tun_struct *tun = netdev_priv(dev);

		if (!tun->numqueues)
			return -EPERM;

		netif_carrier_on(dev);
	} else {
		netif_carrier_off(dev);
	}
	return 0;
}

1194758e43b7SStephen Hemminger static const struct net_device_ops tun_netdev_ops = {
1195c70f1829SEric W. Biederman 	.ndo_uninit		= tun_net_uninit,
1196758e43b7SStephen Hemminger 	.ndo_open		= tun_net_open,
1197758e43b7SStephen Hemminger 	.ndo_stop		= tun_net_close,
119800829823SStephen Hemminger 	.ndo_start_xmit		= tun_net_xmit,
119988255375SMichał Mirosław 	.ndo_fix_features	= tun_net_fix_features,
1200c8d68e6bSJason Wang 	.ndo_select_queue	= tun_select_queue,
1201eaea34b2SPaolo Abeni 	.ndo_set_rx_headroom	= tun_set_headroom,
1202608b9977SPaolo Abeni 	.ndo_get_stats64	= tun_net_get_stats64,
120326d31925SNicolas Dichtel 	.ndo_change_carrier	= tun_net_change_carrier,
1204758e43b7SStephen Hemminger };
1205758e43b7SStephen Hemminger 
12060c9d917bSJesper Dangaard Brouer static void __tun_xdp_flush_tfile(struct tun_file *tfile)
12070c9d917bSJesper Dangaard Brouer {
12080c9d917bSJesper Dangaard Brouer 	/* Notify and wake up reader process */
12090c9d917bSJesper Dangaard Brouer 	if (tfile->flags & TUN_FASYNC)
12100c9d917bSJesper Dangaard Brouer 		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
12110c9d917bSJesper Dangaard Brouer 	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
12120c9d917bSJesper Dangaard Brouer }
12130c9d917bSJesper Dangaard Brouer 
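/* ndo_xdp_xmit: queue up to @n xdp_frames on the tx_ring of the queue
 * picked by the current CPU. Frames that do not fit in the ring are
 * returned with xdp_return_frame_rx_napi() and counted as tx_dropped;
 * the return value is the number of frames actually queued.
 */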
121442b33468SJesper Dangaard Brouer static int tun_xdp_xmit(struct net_device *dev, int n,
121542b33468SJesper Dangaard Brouer 			struct xdp_frame **frames, u32 flags)
1216fc72d1d5SJason Wang {
1217fc72d1d5SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1218fc72d1d5SJason Wang 	struct tun_file *tfile;
1219fc72d1d5SJason Wang 	u32 numqueues;
1220735fc405SJesper Dangaard Brouer 	int drops = 0;
1221735fc405SJesper Dangaard Brouer 	int cnt = n;
1222735fc405SJesper Dangaard Brouer 	int i;
1223fc72d1d5SJason Wang 
12240c9d917bSJesper Dangaard Brouer 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
122542b33468SJesper Dangaard Brouer 		return -EINVAL;
122642b33468SJesper Dangaard Brouer 
1227fc72d1d5SJason Wang 	rcu_read_lock();
1228fc72d1d5SJason Wang 
12299871a9e4SJason Wang resample:
1230fc72d1d5SJason Wang 	numqueues = READ_ONCE(tun->numqueues);
1231fc72d1d5SJason Wang 	if (!numqueues) {
1232735fc405SJesper Dangaard Brouer 		rcu_read_unlock();
1233735fc405SJesper Dangaard Brouer 		return -ENXIO; /* Caller will free/return all frames */
1234fc72d1d5SJason Wang 	}
1235fc72d1d5SJason Wang 
1236fc72d1d5SJason Wang 	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1237fc72d1d5SJason Wang 					    numqueues]);
12389871a9e4SJason Wang 	if (unlikely(!tfile))
12399871a9e4SJason Wang 		goto resample;
1240735fc405SJesper Dangaard Brouer 
1241735fc405SJesper Dangaard Brouer 	spin_lock(&tfile->tx_ring.producer_lock);
1242735fc405SJesper Dangaard Brouer 	for (i = 0; i < n; i++) {
1243735fc405SJesper Dangaard Brouer 		struct xdp_frame *xdp = frames[i];
1244fc72d1d5SJason Wang 		/* Encode the XDP flag into the lowest bit so the consumer can
1245fc72d1d5SJason Wang 		 * distinguish an XDP buffer from an sk_buff.
1246fc72d1d5SJason Wang 		 */
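		/* A minimal sketch of the tagging, assuming TUN_XDP_FLAG is
		 * the low tag bit (0x1UL) as defined earlier in this file:
		 *   tagged = (void *)((unsigned long)xdp | TUN_XDP_FLAG);
		 * The consumer tests the bit with tun_is_xdp_frame() and
		 * clears it with tun_ptr_to_xdp() before use.
		 */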
1247735fc405SJesper Dangaard Brouer 		void *frame = tun_xdp_to_ptr(xdp);
1248fc72d1d5SJason Wang 
1249735fc405SJesper Dangaard Brouer 		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
1250735fc405SJesper Dangaard Brouer 			this_cpu_inc(tun->pcpu_stats->tx_dropped);
1251735fc405SJesper Dangaard Brouer 			xdp_return_frame_rx_napi(xdp);
1252735fc405SJesper Dangaard Brouer 			drops++;
1253735fc405SJesper Dangaard Brouer 		}
1254735fc405SJesper Dangaard Brouer 	}
1255735fc405SJesper Dangaard Brouer 	spin_unlock(&tfile->tx_ring.producer_lock);
1256735fc405SJesper Dangaard Brouer 
12570c9d917bSJesper Dangaard Brouer 	if (flags & XDP_XMIT_FLUSH)
12580c9d917bSJesper Dangaard Brouer 		__tun_xdp_flush_tfile(tfile);
12590c9d917bSJesper Dangaard Brouer 
1260fc72d1d5SJason Wang 	rcu_read_unlock();
1261735fc405SJesper Dangaard Brouer 	return cnt - drops;
1262fc72d1d5SJason Wang }
1263fc72d1d5SJason Wang 
126444fa2dbdSJesper Dangaard Brouer static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
126544fa2dbdSJesper Dangaard Brouer {
12661b698fa5SLorenzo Bianconi 	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
126744fa2dbdSJesper Dangaard Brouer 
126844fa2dbdSJesper Dangaard Brouer 	if (unlikely(!frame))
126944fa2dbdSJesper Dangaard Brouer 		return -EOVERFLOW;
127044fa2dbdSJesper Dangaard Brouer 
127142421a56SJesper Dangaard Brouer 	return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
1272fc72d1d5SJason Wang }
1273fc72d1d5SJason Wang 
1274758e43b7SStephen Hemminger static const struct net_device_ops tap_netdev_ops = {
1275c70f1829SEric W. Biederman 	.ndo_uninit		= tun_net_uninit,
1276758e43b7SStephen Hemminger 	.ndo_open		= tun_net_open,
1277758e43b7SStephen Hemminger 	.ndo_stop		= tun_net_close,
127800829823SStephen Hemminger 	.ndo_start_xmit		= tun_net_xmit,
127988255375SMichał Mirosław 	.ndo_fix_features	= tun_net_fix_features,
1280afc4b13dSJiri Pirko 	.ndo_set_rx_mode	= tun_net_mclist,
1281758e43b7SStephen Hemminger 	.ndo_set_mac_address	= eth_mac_addr,
1282758e43b7SStephen Hemminger 	.ndo_validate_addr	= eth_validate_addr,
1283c8d68e6bSJason Wang 	.ndo_select_queue	= tun_select_queue,
12845e52796aSToshiaki Makita 	.ndo_features_check	= passthru_features_check,
1285eaea34b2SPaolo Abeni 	.ndo_set_rx_headroom	= tun_set_headroom,
1286608b9977SPaolo Abeni 	.ndo_get_stats64	= tun_net_get_stats64,
1287f4e63525SJakub Kicinski 	.ndo_bpf		= tun_xdp,
1288fc72d1d5SJason Wang 	.ndo_xdp_xmit		= tun_xdp_xmit,
128926d31925SNicolas Dichtel 	.ndo_change_carrier	= tun_net_change_carrier,
1290758e43b7SStephen Hemminger };
1291758e43b7SStephen Hemminger 
1292944a1376SPavel Emelyanov static void tun_flow_init(struct tun_struct *tun)
129396442e42SJason Wang {
129496442e42SJason Wang 	int i;
129596442e42SJason Wang 
129696442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
129796442e42SJason Wang 		INIT_HLIST_HEAD(&tun->flows[i]);
129896442e42SJason Wang 
129996442e42SJason Wang 	tun->ageing_time = TUN_FLOW_EXPIRE;
1300e99e88a9SKees Cook 	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1301e99e88a9SKees Cook 	mod_timer(&tun->flow_gc_timer,
1302e99e88a9SKees Cook 		  round_jiffies_up(jiffies + tun->ageing_time));
130396442e42SJason Wang }
130496442e42SJason Wang 
130596442e42SJason Wang static void tun_flow_uninit(struct tun_struct *tun)
130696442e42SJason Wang {
130796442e42SJason Wang 	del_timer_sync(&tun->flow_gc_timer);
130896442e42SJason Wang 	tun_flow_flush(tun);
130996442e42SJason Wang }
131096442e42SJason Wang 
131191572088SJarod Wilson #define MIN_MTU 68
131291572088SJarod Wilson #define MAX_MTU 65535
131391572088SJarod Wilson 
13141da177e4SLinus Torvalds /* Initialize net device. */
13151da177e4SLinus Torvalds static void tun_net_init(struct net_device *dev)
13161da177e4SLinus Torvalds {
13171da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
13181da177e4SLinus Torvalds 
13191da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
132040630b82SMichael S. Tsirkin 	case IFF_TUN:
1321758e43b7SStephen Hemminger 		dev->netdev_ops = &tun_netdev_ops;
1322b9815eb1SJason A. Donenfeld 		dev->header_ops = &ip_tunnel_header_ops;
1323758e43b7SStephen Hemminger 
13241da177e4SLinus Torvalds 		/* Point-to-Point TUN Device */
13251da177e4SLinus Torvalds 		dev->hard_header_len = 0;
13261da177e4SLinus Torvalds 		dev->addr_len = 0;
13271da177e4SLinus Torvalds 		dev->mtu = 1500;
13281da177e4SLinus Torvalds 
13291da177e4SLinus Torvalds 		/* Zero header length */
13301da177e4SLinus Torvalds 		dev->type = ARPHRD_NONE;
13311da177e4SLinus Torvalds 		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
13321da177e4SLinus Torvalds 		break;
13331da177e4SLinus Torvalds 
133440630b82SMichael S. Tsirkin 	case IFF_TAP:
13357a0a9608SKusanagi Kouichi 		dev->netdev_ops = &tap_netdev_ops;
13361da177e4SLinus Torvalds 		/* Ethernet TAP Device */
13371da177e4SLinus Torvalds 		ether_setup(dev);
1338550fd08cSNeil Horman 		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1339a676847bSstephen hemminger 		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
134036226a8dSBrian Braunstein 
1341f2cedb63SDanny Kukawka 		eth_hw_addr_random(dev);
134236226a8dSBrian Braunstein 
13431da177e4SLinus Torvalds 		break;
13441da177e4SLinus Torvalds 	}
134591572088SJarod Wilson 
134691572088SJarod Wilson 	dev->min_mtu = MIN_MTU;
134791572088SJarod Wilson 	dev->max_mtu = MAX_MTU - dev->hard_header_len;
13481da177e4SLinus Torvalds }
13491da177e4SLinus Torvalds 
13502f3ab622SJason Wang static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
13512f3ab622SJason Wang {
13522f3ab622SJason Wang 	struct sock *sk = tfile->socket.sk;
13532f3ab622SJason Wang 
13542f3ab622SJason Wang 	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
13552f3ab622SJason Wang }
13562f3ab622SJason Wang 
13571da177e4SLinus Torvalds /* Character device part */
13581da177e4SLinus Torvalds 
13591da177e4SLinus Torvalds /* Poll */
1360afc9a42bSAl Viro static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
13611da177e4SLinus Torvalds {
1362b2430de3SEric W. Biederman 	struct tun_file *tfile = file->private_data;
13639484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
13643c8a9c63SMariusz Kozlowski 	struct sock *sk;
1365afc9a42bSAl Viro 	__poll_t mask = 0;
13661da177e4SLinus Torvalds 
13671da177e4SLinus Torvalds 	if (!tun)
1368a9a08845SLinus Torvalds 		return EPOLLERR;
13691da177e4SLinus Torvalds 
137054f968d6SJason Wang 	sk = tfile->socket.sk;
13713c8a9c63SMariusz Kozlowski 
13729e641bdcSXi Wang 	poll_wait(file, sk_sleep(sk), wait);
13731da177e4SLinus Torvalds 
13745990a305SJason Wang 	if (!ptr_ring_empty(&tfile->tx_ring))
1375a9a08845SLinus Torvalds 		mask |= EPOLLIN | EPOLLRDNORM;
13761da177e4SLinus Torvalds 
13772f3ab622SJason Wang 	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable, to
13782f3ab622SJason Wang 	 * guarantee that EPOLLOUT is raised either here or by
13792f3ab622SJason Wang 	 * tun_sock_write_space(). A process can then get the notification
13802f3ab622SJason Wang 	 * after it writes to a down device and meets -EIO.
13812f3ab622SJason Wang 	 */
13822f3ab622SJason Wang 	if (tun_sock_writeable(tun, tfile) ||
13839cd3e072SEric Dumazet 	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
13842f3ab622SJason Wang 	     tun_sock_writeable(tun, tfile)))
1385a9a08845SLinus Torvalds 		mask |= EPOLLOUT | EPOLLWRNORM;
138633dccbb0SHerbert Xu 
1387c70f1829SEric W. Biederman 	if (tun->dev->reg_state != NETREG_REGISTERED)
1388a9a08845SLinus Torvalds 		mask = EPOLLERR;
1389c70f1829SEric W. Biederman 
1390631ab46bSEric W. Biederman 	tun_put(tun);
13911da177e4SLinus Torvalds 	return mask;
13921da177e4SLinus Torvalds }
13931da177e4SLinus Torvalds 
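/* Build an skb for the IFF_NAPI_FRAGS path: the first iovec segment
 * becomes the linear area of a napi_get_frags() skb and every further
 * segment is attached as a page fragment, so the packet can later be
 * fed to napi_gro_frags().
 */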
139490e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
139590e33d45SPetar Penkov 					    size_t len,
139690e33d45SPetar Penkov 					    const struct iov_iter *it)
139790e33d45SPetar Penkov {
139890e33d45SPetar Penkov 	struct sk_buff *skb;
139990e33d45SPetar Penkov 	size_t linear;
140090e33d45SPetar Penkov 	int err;
140190e33d45SPetar Penkov 	int i;
140290e33d45SPetar Penkov 
140390e33d45SPetar Penkov 	if (it->nr_segs > MAX_SKB_FRAGS + 1)
140490e33d45SPetar Penkov 		return ERR_PTR(-ENOMEM);
140590e33d45SPetar Penkov 
140690e33d45SPetar Penkov 	local_bh_disable();
140790e33d45SPetar Penkov 	skb = napi_get_frags(&tfile->napi);
140890e33d45SPetar Penkov 	local_bh_enable();
140990e33d45SPetar Penkov 	if (!skb)
141090e33d45SPetar Penkov 		return ERR_PTR(-ENOMEM);
141190e33d45SPetar Penkov 
141290e33d45SPetar Penkov 	linear = iov_iter_single_seg_count(it);
141390e33d45SPetar Penkov 	err = __skb_grow(skb, linear);
141490e33d45SPetar Penkov 	if (err)
141590e33d45SPetar Penkov 		goto free;
141690e33d45SPetar Penkov 
141790e33d45SPetar Penkov 	skb->len = len;
141890e33d45SPetar Penkov 	skb->data_len = len - linear;
141990e33d45SPetar Penkov 	skb->truesize += skb->data_len;
142090e33d45SPetar Penkov 
142190e33d45SPetar Penkov 	for (i = 1; i < it->nr_segs; i++) {
142290e33d45SPetar Penkov 		size_t fragsz = it->iov[i].iov_len;
1423aa6daacaSEric Dumazet 		struct page *page;
1424aa6daacaSEric Dumazet 		void *frag;
142590e33d45SPetar Penkov 
142690e33d45SPetar Penkov 		if (fragsz == 0 || fragsz > PAGE_SIZE) {
142790e33d45SPetar Penkov 			err = -EINVAL;
142890e33d45SPetar Penkov 			goto free;
142990e33d45SPetar Penkov 		}
1430aa6daacaSEric Dumazet 		frag = netdev_alloc_frag(fragsz);
1431aa6daacaSEric Dumazet 		if (!frag) {
143290e33d45SPetar Penkov 			err = -ENOMEM;
143390e33d45SPetar Penkov 			goto free;
143490e33d45SPetar Penkov 		}
1435aa6daacaSEric Dumazet 		page = virt_to_head_page(frag);
1436aa6daacaSEric Dumazet 		skb_fill_page_desc(skb, i - 1, page,
1437aa6daacaSEric Dumazet 				   frag - page_address(page), fragsz);
143890e33d45SPetar Penkov 	}
143990e33d45SPetar Penkov 
144090e33d45SPetar Penkov 	return skb;
144190e33d45SPetar Penkov free:
144290e33d45SPetar Penkov 	/* frees skb and all frags allocated with napi_alloc_frag() */
144390e33d45SPetar Penkov 	/* frees skb and all frags allocated with netdev_alloc_frag() */
144490e33d45SPetar Penkov 	return ERR_PTR(err);
144590e33d45SPetar Penkov }
144690e33d45SPetar Penkov 
1447f42157cbSRusty Russell /* prepad is the amount to reserve at front.  len is length after that.
1448f42157cbSRusty Russell  * linear is a hint as to how much to copy (usually headers). */
144954f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
145033dccbb0SHerbert Xu 				     size_t prepad, size_t len,
145133dccbb0SHerbert Xu 				     size_t linear, int noblock)
1452f42157cbSRusty Russell {
145354f968d6SJason Wang 	struct sock *sk = tfile->socket.sk;
1454f42157cbSRusty Russell 	struct sk_buff *skb;
145533dccbb0SHerbert Xu 	int err;
1456f42157cbSRusty Russell 
1457f42157cbSRusty Russell 	/* Under a page?  Don't bother with paged skb. */
14580eca93bcSHerbert Xu 	if (prepad + len < PAGE_SIZE || !linear)
145933dccbb0SHerbert Xu 		linear = len;
1460f42157cbSRusty Russell 
146133dccbb0SHerbert Xu 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
146228d64271SEric Dumazet 				   &err, 0);
1463f42157cbSRusty Russell 	if (!skb)
146433dccbb0SHerbert Xu 		return ERR_PTR(err);
1465f42157cbSRusty Russell 
1466f42157cbSRusty Russell 	skb_reserve(skb, prepad);
1467f42157cbSRusty Russell 	skb_put(skb, linear);
146833dccbb0SHerbert Xu 	skb->data_len = len - linear;
146933dccbb0SHerbert Xu 	skb->len += len - linear;
1470f42157cbSRusty Russell 
1471f42157cbSRusty Russell 	return skb;
1472f42157cbSRusty Russell }
1473f42157cbSRusty Russell 
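/* Deliver a received skb, optionally coalescing: while the sender
 * signals more data (@more) and fewer than tun->rx_batched skbs are
 * pending, queue on sk_write_queue; otherwise splice the queue out and
 * feed everything to netif_receive_skb() with BHs disabled.
 */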
14745503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
14755503fcecSJason Wang 			   struct sk_buff *skb, int more)
14765503fcecSJason Wang {
14775503fcecSJason Wang 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
14785503fcecSJason Wang 	struct sk_buff_head process_queue;
14795503fcecSJason Wang 	u32 rx_batched = tun->rx_batched;
14805503fcecSJason Wang 	bool rcv = false;
14815503fcecSJason Wang 
14825503fcecSJason Wang 	if (!rx_batched || (!more && skb_queue_empty(queue))) {
14835503fcecSJason Wang 		local_bh_disable();
14848ebebcbaSMatthew Cover 		skb_record_rx_queue(skb, tfile->queue_index);
14855503fcecSJason Wang 		netif_receive_skb(skb);
14865503fcecSJason Wang 		local_bh_enable();
14875503fcecSJason Wang 		return;
14885503fcecSJason Wang 	}
14895503fcecSJason Wang 
14905503fcecSJason Wang 	spin_lock(&queue->lock);
14915503fcecSJason Wang 	if (!more || skb_queue_len(queue) == rx_batched) {
14925503fcecSJason Wang 		__skb_queue_head_init(&process_queue);
14935503fcecSJason Wang 		skb_queue_splice_tail_init(queue, &process_queue);
14945503fcecSJason Wang 		rcv = true;
14955503fcecSJason Wang 	} else {
14965503fcecSJason Wang 		__skb_queue_tail(queue, skb);
14975503fcecSJason Wang 	}
14985503fcecSJason Wang 	spin_unlock(&queue->lock);
14995503fcecSJason Wang 
15005503fcecSJason Wang 	if (rcv) {
15015503fcecSJason Wang 		struct sk_buff *nskb;
15025503fcecSJason Wang 
15035503fcecSJason Wang 		local_bh_disable();
15048ebebcbaSMatthew Cover 		while ((nskb = __skb_dequeue(&process_queue))) {
15058ebebcbaSMatthew Cover 			skb_record_rx_queue(nskb, tfile->queue_index);
15065503fcecSJason Wang 			netif_receive_skb(nskb);
15078ebebcbaSMatthew Cover 		}
15088ebebcbaSMatthew Cover 		skb_record_rx_queue(skb, tfile->queue_index);
15095503fcecSJason Wang 		netif_receive_skb(skb);
15105503fcecSJason Wang 		local_bh_enable();
15115503fcecSJason Wang 	}
15125503fcecSJason Wang }
15135503fcecSJason Wang 
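/* The single-page tun_build_skb() fast path applies only when all of
 * the following hold: TAP mode, default (unlimited) sndbuf, a
 * non-blocking write, no zerocopy, and the padded frame plus
 * skb_shared_info fits in one page.
 */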
151466ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
151566ccbc9cSJason Wang 			      int len, int noblock, bool zerocopy)
151666ccbc9cSJason Wang {
151766ccbc9cSJason Wang 	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
151866ccbc9cSJason Wang 		return false;
151966ccbc9cSJason Wang 
152066ccbc9cSJason Wang 	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
152166ccbc9cSJason Wang 		return false;
152266ccbc9cSJason Wang 
152366ccbc9cSJason Wang 	if (!noblock)
152466ccbc9cSJason Wang 		return false;
152566ccbc9cSJason Wang 
152666ccbc9cSJason Wang 	if (zerocopy)
152766ccbc9cSJason Wang 		return false;
152866ccbc9cSJason Wang 
152966ccbc9cSJason Wang 	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
153066ccbc9cSJason Wang 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
153166ccbc9cSJason Wang 		return false;
153266ccbc9cSJason Wang 
153366ccbc9cSJason Wang 	return true;
153466ccbc9cSJason Wang }
153566ccbc9cSJason Wang 
15364b663366SAlexis Bauvin static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
15374b663366SAlexis Bauvin 				       struct page_frag *alloc_frag, char *buf,
15388ae1aff0SJason Wang 				       int buflen, int len, int pad)
1539ac1f1f6cSJason Wang {
1540ac1f1f6cSJason Wang 	struct sk_buff *skb = build_skb(buf, buflen);
1541ac1f1f6cSJason Wang 
1542ac1f1f6cSJason Wang 	if (!skb)
1543ac1f1f6cSJason Wang 		return ERR_PTR(-ENOMEM);
1544ac1f1f6cSJason Wang 
15458ae1aff0SJason Wang 	skb_reserve(skb, pad);
1546ac1f1f6cSJason Wang 	skb_put(skb, len);
15474b663366SAlexis Bauvin 	skb_set_owner_w(skb, tfile->socket.sk);
1548ac1f1f6cSJason Wang 
1549ac1f1f6cSJason Wang 	get_page(alloc_frag->page);
1550ac1f1f6cSJason Wang 	alloc_frag->offset += buflen;
1551ac1f1f6cSJason Wang 
1552ac1f1f6cSJason Wang 	return skb;
1553ac1f1f6cSJason Wang }
1554ac1f1f6cSJason Wang 
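/* Apply an XDP verdict: XDP_REDIRECT and XDP_TX forward the buffer,
 * XDP_PASS lets it continue up the stack, and anything else (including
 * XDP_ABORTED and XDP_DROP) falls through to the rx_dropped counter.
 * Returns the (non-negative) verdict, or a negative errno on a failed
 * redirect or transmit.
 */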
15558ae1aff0SJason Wang static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
15568ae1aff0SJason Wang 		       struct xdp_buff *xdp, u32 act)
15578ae1aff0SJason Wang {
15588ae1aff0SJason Wang 	int err;
15598ae1aff0SJason Wang 
15608ae1aff0SJason Wang 	switch (act) {
15618ae1aff0SJason Wang 	case XDP_REDIRECT:
15628ae1aff0SJason Wang 		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
15638ae1aff0SJason Wang 		if (err)
15648ae1aff0SJason Wang 			return err;
15658ae1aff0SJason Wang 		break;
15668ae1aff0SJason Wang 	case XDP_TX:
15678ae1aff0SJason Wang 		err = tun_xdp_tx(tun->dev, xdp);
15688ae1aff0SJason Wang 		if (err < 0)
15698ae1aff0SJason Wang 			return err;
15708ae1aff0SJason Wang 		break;
15718ae1aff0SJason Wang 	case XDP_PASS:
15728ae1aff0SJason Wang 		break;
15738ae1aff0SJason Wang 	default:
15748ae1aff0SJason Wang 		bpf_warn_invalid_xdp_action(act);
1575df561f66SGustavo A. R. Silva 		fallthrough;
15768ae1aff0SJason Wang 	case XDP_ABORTED:
15778ae1aff0SJason Wang 		trace_xdp_exception(tun->dev, xdp_prog, act);
1578df561f66SGustavo A. R. Silva 		fallthrough;
15798ae1aff0SJason Wang 	case XDP_DROP:
15808ae1aff0SJason Wang 		this_cpu_inc(tun->pcpu_stats->rx_dropped);
15818ae1aff0SJason Wang 		break;
15828ae1aff0SJason Wang 	}
15838ae1aff0SJason Wang 
15848ae1aff0SJason Wang 	return act;
15858ae1aff0SJason Wang }
15868ae1aff0SJason Wang 
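/* Fast path: copy the datagram into a page fragment, run the XDP
 * program (if any) directly on that buffer, and only then wrap the
 * result in an skb via __tun_build_skb(). *skb_xdp tells the caller
 * whether XDP still has to be run later on the skb instead.
 */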
1587761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1588761876c8SJason Wang 				     struct tun_file *tfile,
158966ccbc9cSJason Wang 				     struct iov_iter *from,
1590761876c8SJason Wang 				     struct virtio_net_hdr *hdr,
15911cfe6e93SJason Wang 				     int len, int *skb_xdp)
159266ccbc9cSJason Wang {
15930bbd7dadSEric Dumazet 	struct page_frag *alloc_frag = &current->task_frag;
1594761876c8SJason Wang 	struct bpf_prog *xdp_prog;
15957df13219SJason Wang 	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
159666ccbc9cSJason Wang 	char *buf;
159766ccbc9cSJason Wang 	size_t copied;
15988ae1aff0SJason Wang 	int pad = TUN_RX_PAD;
15998ae1aff0SJason Wang 	int err = 0;
16007df13219SJason Wang 
16017df13219SJason Wang 	rcu_read_lock();
16027df13219SJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
16037df13219SJason Wang 	if (xdp_prog)
16044f23aff8SJason Wang 		pad += XDP_PACKET_HEADROOM;
16057df13219SJason Wang 	buflen += SKB_DATA_ALIGN(len + pad);
16067df13219SJason Wang 	rcu_read_unlock();
160766ccbc9cSJason Wang 
160863b9ab65SJason Wang 	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
160966ccbc9cSJason Wang 	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
161066ccbc9cSJason Wang 		return ERR_PTR(-ENOMEM);
161166ccbc9cSJason Wang 
161266ccbc9cSJason Wang 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
161366ccbc9cSJason Wang 	copied = copy_page_from_iter(alloc_frag->page,
16147df13219SJason Wang 				     alloc_frag->offset + pad,
161566ccbc9cSJason Wang 				     len, from);
161666ccbc9cSJason Wang 	if (copied != len)
161766ccbc9cSJason Wang 		return ERR_PTR(-EFAULT);
161866ccbc9cSJason Wang 
16197df13219SJason Wang 	/* There's a small window in which XDP may be set after the check
16207df13219SJason Wang 	 * of xdp_prog above; this should be rare, and for simplicity
16217df13219SJason Wang 	 * we do XDP on the skb in case the headroom is not enough.
16227df13219SJason Wang 	 */
1623ac1f1f6cSJason Wang 	if (hdr->gso_type || !xdp_prog) {
16241cfe6e93SJason Wang 		*skb_xdp = 1;
16254b663366SAlexis Bauvin 		return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
16264b663366SAlexis Bauvin 				       pad);
1627ac1f1f6cSJason Wang 	}
1628ac1f1f6cSJason Wang 
16291cfe6e93SJason Wang 	*skb_xdp = 0;
163066ccbc9cSJason Wang 
16316547e387SToshiaki Makita 	local_bh_disable();
1632761876c8SJason Wang 	rcu_read_lock();
1633761876c8SJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
16348ae1aff0SJason Wang 	if (xdp_prog) {
1635761876c8SJason Wang 		struct xdp_buff xdp;
1636761876c8SJason Wang 		u32 act;
1637761876c8SJason Wang 
1638761876c8SJason Wang 		xdp.data_hard_start = buf;
16397df13219SJason Wang 		xdp.data = buf + pad;
1640de8f3a83SDaniel Borkmann 		xdp_set_data_meta_invalid(&xdp);
1641761876c8SJason Wang 		xdp.data_end = xdp.data + len;
16428bf5c4eeSJesper Dangaard Brouer 		xdp.rxq = &tfile->xdp_rxq;
1643fb3e6e93SJesper Dangaard Brouer 		xdp.frame_sz = buflen;
1644761876c8SJason Wang 
16458ae1aff0SJason Wang 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
16468ae1aff0SJason Wang 		if (act == XDP_REDIRECT || act == XDP_TX) {
1647761876c8SJason Wang 			get_page(alloc_frag->page);
1648761876c8SJason Wang 			alloc_frag->offset += buflen;
1649761876c8SJason Wang 		}
16508ae1aff0SJason Wang 		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
1651bee34890SWill Deacon 		if (err < 0) {
1652bee34890SWill Deacon 			if (act == XDP_REDIRECT || act == XDP_TX)
1653bee34890SWill Deacon 				put_page(alloc_frag->page);
1654bee34890SWill Deacon 			goto out;
1655bee34890SWill Deacon 		}
1656bee34890SWill Deacon 
16571a097910SJason Wang 		if (err == XDP_REDIRECT)
16581d233886SToke Høiland-Jørgensen 			xdp_do_flush();
16598ae1aff0SJason Wang 		if (err != XDP_PASS)
16608ae1aff0SJason Wang 			goto out;
16618ae1aff0SJason Wang 
16628ae1aff0SJason Wang 		pad = xdp.data - xdp.data_hard_start;
16638ae1aff0SJason Wang 		len = xdp.data_end - xdp.data;
1664761876c8SJason Wang 	}
1665761876c8SJason Wang 	rcu_read_unlock();
16666547e387SToshiaki Makita 	local_bh_enable();
1667291aeb2bSJason Wang 
16684b663366SAlexis Bauvin 	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
1669761876c8SJason Wang 
1670f7053b6cSJason Wang out:
1671761876c8SJason Wang 	rcu_read_unlock();
16726547e387SToshiaki Makita 	local_bh_enable();
1673761876c8SJason Wang 	return NULL;
167466ccbc9cSJason Wang }
167566ccbc9cSJason Wang 
16761da177e4SLinus Torvalds /* Get packet from user space buffer */
167754f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1678f5ff53b4SAl Viro 			    void *msg_control, struct iov_iter *from,
16795503fcecSJason Wang 			    int noblock, bool more)
16801da177e4SLinus Torvalds {
168109640e63SHarvey Harrison 	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
16821da177e4SLinus Torvalds 	struct sk_buff *skb;
1683f5ff53b4SAl Viro 	size_t total_len = iov_iter_count(from);
1684eaea34b2SPaolo Abeni 	size_t len = total_len, align = tun->align, linear;
1685f43798c2SRusty Russell 	struct virtio_net_hdr gso = { 0 };
1686608b9977SPaolo Abeni 	struct tun_pcpu_stats *stats;
168796f8d9ecSJason Wang 	int good_linear;
16880690899bSMichael S. Tsirkin 	int copylen;
16890690899bSMichael S. Tsirkin 	bool zerocopy = false;
16900690899bSMichael S. Tsirkin 	int err;
169196f84061SJason Wang 	u32 rxhash = 0;
16921cfe6e93SJason Wang 	int skb_xdp = 1;
1693af3fb24eSEric Dumazet 	bool frags = tun_napi_frags_enabled(tfile);
16941da177e4SLinus Torvalds 
169540630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_NO_PI)) {
169615718ea0SDan Carpenter 		if (len < sizeof(pi))
16971da177e4SLinus Torvalds 			return -EINVAL;
169815718ea0SDan Carpenter 		len -= sizeof(pi);
16991da177e4SLinus Torvalds 
1700cbbd26b8SAl Viro 		if (!copy_from_iter_full(&pi, sizeof(pi), from))
17011da177e4SLinus Torvalds 			return -EFAULT;
17021da177e4SLinus Torvalds 	}
17031da177e4SLinus Torvalds 
170440630b82SMichael S. Tsirkin 	if (tun->flags & IFF_VNET_HDR) {
1705e1edab87SWillem de Bruijn 		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1706e1edab87SWillem de Bruijn 
1707e1edab87SWillem de Bruijn 		if (len < vnet_hdr_sz)
1708f43798c2SRusty Russell 			return -EINVAL;
1709e1edab87SWillem de Bruijn 		len -= vnet_hdr_sz;
1710f43798c2SRusty Russell 
1711cbbd26b8SAl Viro 		if (!copy_from_iter_full(&gso, sizeof(gso), from))
1712f43798c2SRusty Russell 			return -EFAULT;
1713f43798c2SRusty Russell 
17144909122fSHerbert Xu 		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
171556f0dcc5SMichael S. Tsirkin 		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
171656f0dcc5SMichael S. Tsirkin 			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
17174909122fSHerbert Xu 
171856f0dcc5SMichael S. Tsirkin 		if (tun16_to_cpu(tun, gso.hdr_len) > len)
1719f43798c2SRusty Russell 			return -EINVAL;
1720e1edab87SWillem de Bruijn 		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1721f43798c2SRusty Russell 	}
1722f43798c2SRusty Russell 
172340630b82SMichael S. Tsirkin 	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1724a504b86eSstephen hemminger 		align += NET_IP_ALIGN;
17250eca93bcSHerbert Xu 		if (unlikely(len < ETH_HLEN ||
172656f0dcc5SMichael S. Tsirkin 			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1727e01bf1c8SRusty Russell 			return -EINVAL;
1728e01bf1c8SRusty Russell 	}
17291da177e4SLinus Torvalds 
173096f8d9ecSJason Wang 	good_linear = SKB_MAX_HEAD(align);
173196f8d9ecSJason Wang 
173288529176SJason Wang 	if (msg_control) {
1733f5ff53b4SAl Viro 		struct iov_iter i = *from;
1734f5ff53b4SAl Viro 
173588529176SJason Wang 		/* There are 256 bytes to be copied into the skb, so there is
173688529176SJason Wang 		 * enough room to expand the skb head in case that is needed.
17370690899bSMichael S. Tsirkin 		 * The rest of the buffer is mapped from userspace.
17380690899bSMichael S. Tsirkin 		 */
173956f0dcc5SMichael S. Tsirkin 		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
174096f8d9ecSJason Wang 		if (copylen > good_linear)
174196f8d9ecSJason Wang 			copylen = good_linear;
17423dd5c330SJason Wang 		linear = copylen;
1743f5ff53b4SAl Viro 		iov_iter_advance(&i, copylen);
1744f5ff53b4SAl Viro 		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
174588529176SJason Wang 			zerocopy = true;
174688529176SJason Wang 	}
174788529176SJason Wang 
174890e33d45SPetar Penkov 	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
17491cfe6e93SJason Wang 		/* For a packet that is not easy to process
17501cfe6e93SJason Wang 		 * (e.g. a gso or jumbo packet), we will do it after the
17511cfe6e93SJason Wang 		 * skb has been created, with the generic XDP routine.
17521cfe6e93SJason Wang 		 */
17531cfe6e93SJason Wang 		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
175466ccbc9cSJason Wang 		if (IS_ERR(skb)) {
175566ccbc9cSJason Wang 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
175666ccbc9cSJason Wang 			return PTR_ERR(skb);
175766ccbc9cSJason Wang 		}
1758761876c8SJason Wang 		if (!skb)
1759761876c8SJason Wang 			return total_len;
176066ccbc9cSJason Wang 	} else {
176188529176SJason Wang 		if (!zerocopy) {
17620690899bSMichael S. Tsirkin 			copylen = len;
176356f0dcc5SMichael S. Tsirkin 			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
176496f8d9ecSJason Wang 				linear = good_linear;
176596f8d9ecSJason Wang 			else
176656f0dcc5SMichael S. Tsirkin 				linear = tun16_to_cpu(tun, gso.hdr_len);
17673dd5c330SJason Wang 		}
17680690899bSMichael S. Tsirkin 
176990e33d45SPetar Penkov 		if (frags) {
177090e33d45SPetar Penkov 			mutex_lock(&tfile->napi_mutex);
177190e33d45SPetar Penkov 			skb = tun_napi_alloc_frags(tfile, copylen, from);
177290e33d45SPetar Penkov 			/* tun_napi_alloc_frags() enforces a layout for the skb.
177390e33d45SPetar Penkov 			 * If zerocopy is enabled, then this layout will be
177490e33d45SPetar Penkov 			 * overwritten by zerocopy_sg_from_iter().
177590e33d45SPetar Penkov 			 */
177690e33d45SPetar Penkov 			zerocopy = false;
177790e33d45SPetar Penkov 		} else {
177890e33d45SPetar Penkov 			skb = tun_alloc_skb(tfile, align, copylen, linear,
177990e33d45SPetar Penkov 					    noblock);
178090e33d45SPetar Penkov 		}
178190e33d45SPetar Penkov 
178233dccbb0SHerbert Xu 		if (IS_ERR(skb)) {
178333dccbb0SHerbert Xu 			if (PTR_ERR(skb) != -EAGAIN)
1784608b9977SPaolo Abeni 				this_cpu_inc(tun->pcpu_stats->rx_dropped);
178590e33d45SPetar Penkov 			if (frags)
178690e33d45SPetar Penkov 				mutex_unlock(&tfile->napi_mutex);
178733dccbb0SHerbert Xu 			return PTR_ERR(skb);
17881da177e4SLinus Torvalds 		}
17891da177e4SLinus Torvalds 
17900690899bSMichael S. Tsirkin 		if (zerocopy)
1791f5ff53b4SAl Viro 			err = zerocopy_sg_from_iter(skb, from);
1792af1cc7a2SJason Wang 		else
1793f5ff53b4SAl Viro 			err = skb_copy_datagram_from_iter(skb, 0, from, len);
17940690899bSMichael S. Tsirkin 
17950690899bSMichael S. Tsirkin 		if (err) {
17964477138fSEric Dumazet 			err = -EFAULT;
17974477138fSEric Dumazet drop:
1798608b9977SPaolo Abeni 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
17998f22757eSDave Jones 			kfree_skb(skb);
180090e33d45SPetar Penkov 			if (frags) {
180190e33d45SPetar Penkov 				tfile->napi.skb = NULL;
180290e33d45SPetar Penkov 				mutex_unlock(&tfile->napi_mutex);
180390e33d45SPetar Penkov 			}
180490e33d45SPetar Penkov 
18054477138fSEric Dumazet 			return err;
18068f22757eSDave Jones 		}
180766ccbc9cSJason Wang 	}
18081da177e4SLinus Torvalds 
18093e9e40e7SJarno Rajahalme 	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1810df10db98SPaolo Abeni 		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1811df10db98SPaolo Abeni 		kfree_skb(skb);
181290e33d45SPetar Penkov 		if (frags) {
181390e33d45SPetar Penkov 			tfile->napi.skb = NULL;
181490e33d45SPetar Penkov 			mutex_unlock(&tfile->napi_mutex);
181590e33d45SPetar Penkov 		}
181690e33d45SPetar Penkov 
1817df10db98SPaolo Abeni 		return -EINVAL;
1818df10db98SPaolo Abeni 	}
1819df10db98SPaolo Abeni 
18201da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
182140630b82SMichael S. Tsirkin 	case IFF_TUN:
182240630b82SMichael S. Tsirkin 		if (tun->flags & IFF_NO_PI) {
18232580c4c1SAlexander Potapenko 			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
18242580c4c1SAlexander Potapenko 
18252580c4c1SAlexander Potapenko 			switch (ip_version) {
18262580c4c1SAlexander Potapenko 			case 4:
1827f09f7ee2SAng Way Chuang 				pi.proto = htons(ETH_P_IP);
1828f09f7ee2SAng Way Chuang 				break;
18292580c4c1SAlexander Potapenko 			case 6:
1830f09f7ee2SAng Way Chuang 				pi.proto = htons(ETH_P_IPV6);
1831f09f7ee2SAng Way Chuang 				break;
1832f09f7ee2SAng Way Chuang 			default:
1833608b9977SPaolo Abeni 				this_cpu_inc(tun->pcpu_stats->rx_dropped);
1834f09f7ee2SAng Way Chuang 				kfree_skb(skb);
1835f09f7ee2SAng Way Chuang 				return -EINVAL;
1836f09f7ee2SAng Way Chuang 			}
1837f09f7ee2SAng Way Chuang 		}
1838f09f7ee2SAng Way Chuang 
1839459a98edSArnaldo Carvalho de Melo 		skb_reset_mac_header(skb);
18401da177e4SLinus Torvalds 		skb->protocol = pi.proto;
18414c13eb66SArnaldo Carvalho de Melo 		skb->dev = tun->dev;
18421da177e4SLinus Torvalds 		break;
184340630b82SMichael S. Tsirkin 	case IFF_TAP:
184496aa1b22SWillem de Bruijn 		if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
184596aa1b22SWillem de Bruijn 			err = -ENOMEM;
184696aa1b22SWillem de Bruijn 			goto drop;
184796aa1b22SWillem de Bruijn 		}
18481da177e4SLinus Torvalds 		skb->protocol = eth_type_trans(skb, tun->dev);
18491da177e4SLinus Torvalds 		break;
18506403eab1SJoe Perches 	}
18511da177e4SLinus Torvalds 
18520690899bSMichael S. Tsirkin 	/* copy skb_ubuf_info for callback when skb has no error */
18530690899bSMichael S. Tsirkin 	if (zerocopy) {
18540690899bSMichael S. Tsirkin 		skb_shinfo(skb)->destructor_arg = msg_control;
18550690899bSMichael S. Tsirkin 		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1856c9af6db4SPravin B Shelar 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1857af1cc7a2SJason Wang 	} else if (msg_control) {
1858af1cc7a2SJason Wang 		struct ubuf_info *uarg = msg_control;
1859af1cc7a2SJason Wang 		uarg->callback(uarg, false);
18600690899bSMichael S. Tsirkin 	}
18610690899bSMichael S. Tsirkin 
186272f65107SVlad Yasevich 	skb_reset_network_header(skb);
1863d2aa125dSMaxim Mikityanskiy 	skb_probe_transport_header(skb);
18643fe260e0SGilberto Bertin 	skb_record_rx_queue(skb, tfile->queue_index);
186538502af7SJason Wang 
18661cfe6e93SJason Wang 	if (skb_xdp) {
1867761876c8SJason Wang 		struct bpf_prog *xdp_prog;
1868761876c8SJason Wang 		int ret;
1869761876c8SJason Wang 
18706547e387SToshiaki Makita 		local_bh_disable();
1871761876c8SJason Wang 		rcu_read_lock();
1872761876c8SJason Wang 		xdp_prog = rcu_dereference(tun->xdp_prog);
1873761876c8SJason Wang 		if (xdp_prog) {
1874761876c8SJason Wang 			ret = do_xdp_generic(xdp_prog, skb);
1875761876c8SJason Wang 			if (ret != XDP_PASS) {
1876761876c8SJason Wang 				rcu_read_unlock();
18776547e387SToshiaki Makita 				local_bh_enable();
18781efba987SEric Dumazet 				if (frags) {
18791efba987SEric Dumazet 					tfile->napi.skb = NULL;
18801efba987SEric Dumazet 					mutex_unlock(&tfile->napi_mutex);
18811efba987SEric Dumazet 				}
1882761876c8SJason Wang 				return total_len;
1883761876c8SJason Wang 			}
1884761876c8SJason Wang 		}
1885761876c8SJason Wang 		rcu_read_unlock();
18866547e387SToshiaki Makita 		local_bh_enable();
1887761876c8SJason Wang 	}
1888761876c8SJason Wang 
1889cf1a1e07SPaolo Abeni 	/* Compute the costly rx hash only if needed for flow updates.
1890cf1a1e07SPaolo Abeni 	 * There is a very small possibility of OOO during switching; it is
1891cf1a1e07SPaolo Abeni 	 * not worth optimizing for.
1892cf1a1e07SPaolo Abeni 	 */
1893cf1a1e07SPaolo Abeni 	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1894cf1a1e07SPaolo Abeni 	    !tfile->detached)
1895feec084aSJason Wang 		rxhash = __skb_get_hash_symmetric(skb);
189694317099SPetar Penkov 
18974477138fSEric Dumazet 	rcu_read_lock();
18984477138fSEric Dumazet 	if (unlikely(!(tun->dev->flags & IFF_UP))) {
18994477138fSEric Dumazet 		err = -EIO;
19009180bb4fSEric Dumazet 		rcu_read_unlock();
19014477138fSEric Dumazet 		goto drop;
19024477138fSEric Dumazet 	}
19034477138fSEric Dumazet 
190490e33d45SPetar Penkov 	if (frags) {
190596aa1b22SWillem de Bruijn 		u32 headlen;
190696aa1b22SWillem de Bruijn 
190790e33d45SPetar Penkov 		/* Exercise flow dissector code path. */
190896aa1b22SWillem de Bruijn 		skb_push(skb, ETH_HLEN);
190996aa1b22SWillem de Bruijn 		headlen = eth_get_headlen(tun->dev, skb->data,
1910c43f1255SStanislav Fomichev 					  skb_headlen(skb));
191190e33d45SPetar Penkov 
1912010f245bSEric Dumazet 		if (unlikely(headlen > skb_headlen(skb))) {
191390e33d45SPetar Penkov 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
191490e33d45SPetar Penkov 			napi_free_frags(&tfile->napi);
19154477138fSEric Dumazet 			rcu_read_unlock();
191690e33d45SPetar Penkov 			mutex_unlock(&tfile->napi_mutex);
191790e33d45SPetar Penkov 			WARN_ON(1);
191890e33d45SPetar Penkov 			return -ENOMEM;
191990e33d45SPetar Penkov 		}
192090e33d45SPetar Penkov 
192190e33d45SPetar Penkov 		local_bh_disable();
192290e33d45SPetar Penkov 		napi_gro_frags(&tfile->napi);
192390e33d45SPetar Penkov 		local_bh_enable();
192490e33d45SPetar Penkov 		mutex_unlock(&tfile->napi_mutex);
1925aec72f33SEric Dumazet 	} else if (tfile->napi_enabled) {
192694317099SPetar Penkov 		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
192794317099SPetar Penkov 		int queue_len;
192894317099SPetar Penkov 
192994317099SPetar Penkov 		spin_lock_bh(&queue->lock);
193094317099SPetar Penkov 		__skb_queue_tail(queue, skb);
193194317099SPetar Penkov 		queue_len = skb_queue_len(queue);
193294317099SPetar Penkov 		spin_unlock(&queue->lock);
193394317099SPetar Penkov 
193494317099SPetar Penkov 		if (!more || queue_len > NAPI_POLL_WEIGHT)
193594317099SPetar Penkov 			napi_schedule(&tfile->napi);
193694317099SPetar Penkov 
193794317099SPetar Penkov 		local_bh_enable();
193894317099SPetar Penkov 	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
19395503fcecSJason Wang 		tun_rx_batched(tun, tfile, skb, more);
194094317099SPetar Penkov 	} else {
19411da177e4SLinus Torvalds 		netif_rx_ni(skb);
194294317099SPetar Penkov 	}
19434477138fSEric Dumazet 	rcu_read_unlock();
19441da177e4SLinus Torvalds 
1945608b9977SPaolo Abeni 	stats = get_cpu_ptr(tun->pcpu_stats);
1946608b9977SPaolo Abeni 	u64_stats_update_begin(&stats->syncp);
19475260dd3eSEric Dumazet 	u64_stats_inc(&stats->rx_packets);
19485260dd3eSEric Dumazet 	u64_stats_add(&stats->rx_bytes, len);
1949608b9977SPaolo Abeni 	u64_stats_update_end(&stats->syncp);
1950608b9977SPaolo Abeni 	put_cpu_ptr(stats);
19511da177e4SLinus Torvalds 
195296f84061SJason Wang 	if (rxhash)
19539e85722dSJason Wang 		tun_flow_update(tun, rxhash, tfile);
195496f84061SJason Wang 
19550690899bSMichael S. Tsirkin 	return total_len;
19561da177e4SLinus Torvalds }
19571da177e4SLinus Torvalds 
1958f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
19591da177e4SLinus Torvalds {
196033dccbb0SHerbert Xu 	struct file *file = iocb->ki_filp;
196154f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
19629484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
1963631ab46bSEric W. Biederman 	ssize_t result;
1964*5aac0390SJens Axboe 	int noblock = 0;
19651da177e4SLinus Torvalds 
19661da177e4SLinus Torvalds 	if (!tun)
19671da177e4SLinus Torvalds 		return -EBADFD;
19681da177e4SLinus Torvalds 
1969*5aac0390SJens Axboe 	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
1970*5aac0390SJens Axboe 		noblock = 1;
1971*5aac0390SJens Axboe 
1972*5aac0390SJens Axboe 	result = tun_get_user(tun, tfile, NULL, from, noblock, false);
1973631ab46bSEric W. Biederman 
1974631ab46bSEric W. Biederman 	tun_put(tun);
1975631ab46bSEric W. Biederman 	return result;
19761da177e4SLinus Torvalds }
19771da177e4SLinus Torvalds 
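/* Copy an xdp_frame out to userspace, prefixed with a zeroed
 * virtio_net_hdr when IFF_VNET_HDR is enabled (XDP frames carry no
 * offload metadata), and account the bytes in the per-cpu tx stats.
 */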
1978fc72d1d5SJason Wang static ssize_t tun_put_user_xdp(struct tun_struct *tun,
1979fc72d1d5SJason Wang 				struct tun_file *tfile,
19801ffcbc85SJesper Dangaard Brouer 				struct xdp_frame *xdp_frame,
1981fc72d1d5SJason Wang 				struct iov_iter *iter)
1982fc72d1d5SJason Wang {
1983fc72d1d5SJason Wang 	int vnet_hdr_sz = 0;
19841ffcbc85SJesper Dangaard Brouer 	size_t size = xdp_frame->len;
1985fc72d1d5SJason Wang 	struct tun_pcpu_stats *stats;
1986fc72d1d5SJason Wang 	size_t ret;
1987fc72d1d5SJason Wang 
1988fc72d1d5SJason Wang 	if (tun->flags & IFF_VNET_HDR) {
1989fc72d1d5SJason Wang 		struct virtio_net_hdr gso = { 0 };
1990fc72d1d5SJason Wang 
1991fc72d1d5SJason Wang 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1992fc72d1d5SJason Wang 		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
1993fc72d1d5SJason Wang 			return -EINVAL;
1994fc72d1d5SJason Wang 		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
1995fc72d1d5SJason Wang 			     sizeof(gso)))
1996fc72d1d5SJason Wang 			return -EFAULT;
1997fc72d1d5SJason Wang 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
1998fc72d1d5SJason Wang 	}
1999fc72d1d5SJason Wang 
20001ffcbc85SJesper Dangaard Brouer 	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
2001fc72d1d5SJason Wang 
2002fc72d1d5SJason Wang 	stats = get_cpu_ptr(tun->pcpu_stats);
2003fc72d1d5SJason Wang 	u64_stats_update_begin(&stats->syncp);
20045260dd3eSEric Dumazet 	u64_stats_inc(&stats->tx_packets);
20055260dd3eSEric Dumazet 	u64_stats_add(&stats->tx_bytes, ret);
2006fc72d1d5SJason Wang 	u64_stats_update_end(&stats->syncp);
2007fc72d1d5SJason Wang 	put_cpu_ptr(tun->pcpu_stats);
2008fc72d1d5SJason Wang 
2009fc72d1d5SJason Wang 	return ret;
2010fc72d1d5SJason Wang }
2011fc72d1d5SJason Wang 
20121da177e4SLinus Torvalds /* Put a packet into the user space buffer */
20136f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun,
201454f968d6SJason Wang 			    struct tun_file *tfile,
20151da177e4SLinus Torvalds 			    struct sk_buff *skb,
2016e0b46d0eSHerbert Xu 			    struct iov_iter *iter)
20171da177e4SLinus Torvalds {
20181da177e4SLinus Torvalds 	struct tun_pi pi = { 0, skb->protocol };
2019608b9977SPaolo Abeni 	struct tun_pcpu_stats *stats;
2020e0b46d0eSHerbert Xu 	ssize_t total;
20218c847d25SJason Wang 	int vlan_offset = 0;
2022a8f9bfdfSHerbert Xu 	int vlan_hlen = 0;
20232eb783c4SHerbert Xu 	int vnet_hdr_sz = 0;
2024a8f9bfdfSHerbert Xu 
2025df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
2026a8f9bfdfSHerbert Xu 		vlan_hlen = VLAN_HLEN;
20271da177e4SLinus Torvalds 
202840630b82SMichael S. Tsirkin 	if (tun->flags & IFF_VNET_HDR)
2029e1edab87SWillem de Bruijn 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
20301da177e4SLinus Torvalds 
2031e0b46d0eSHerbert Xu 	total = skb->len + vlan_hlen + vnet_hdr_sz;
2032e0b46d0eSHerbert Xu 
203340630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_NO_PI)) {
2034e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < sizeof(pi))
20351da177e4SLinus Torvalds 			return -EINVAL;
20361da177e4SLinus Torvalds 
2037e0b46d0eSHerbert Xu 		total += sizeof(pi);
2038e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < total) {
20391da177e4SLinus Torvalds 			/* Packet will be stripped */
20401da177e4SLinus Torvalds 			pi.flags |= TUN_PKT_STRIP;
20411da177e4SLinus Torvalds 		}
20421da177e4SLinus Torvalds 
2043e0b46d0eSHerbert Xu 		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
20441da177e4SLinus Torvalds 			return -EFAULT;
20451da177e4SLinus Torvalds 	}
20461da177e4SLinus Torvalds 
20472eb783c4SHerbert Xu 	if (vnet_hdr_sz) {
20489403cd7cSJarno Rajahalme 		struct virtio_net_hdr gso;
204934166093SMike Rapoport 
2050e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < vnet_hdr_sz)
2051f43798c2SRusty Russell 			return -EINVAL;
2052f43798c2SRusty Russell 
20533e9e40e7SJarno Rajahalme 		if (virtio_net_hdr_from_skb(skb, &gso,
2054fd3a8862SWillem de Bruijn 					    tun_is_little_endian(tun), true,
2055fd3a8862SWillem de Bruijn 					    vlan_hlen)) {
2056f43798c2SRusty Russell 			struct skb_shared_info *sinfo = skb_shinfo(skb);
20576b8a66eeSJoe Perches 			pr_err("unexpected GSO type: "
2058ef3db4a5SMichael S. Tsirkin 			       "0x%x, gso_size %d, hdr_len %d\n",
205956f0dcc5SMichael S. Tsirkin 			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
206056f0dcc5SMichael S. Tsirkin 			       tun16_to_cpu(tun, gso.hdr_len));
2061ef3db4a5SMichael S. Tsirkin 			print_hex_dump(KERN_ERR, "tun: ",
2062ef3db4a5SMichael S. Tsirkin 				       DUMP_PREFIX_NONE,
2063ef3db4a5SMichael S. Tsirkin 				       16, 1, skb->head,
206456f0dcc5SMichael S. Tsirkin 				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2065ef3db4a5SMichael S. Tsirkin 			WARN_ON_ONCE(1);
2066ef3db4a5SMichael S. Tsirkin 			return -EINVAL;
2067ef3db4a5SMichael S. Tsirkin 		}
2068f43798c2SRusty Russell 
2069e0b46d0eSHerbert Xu 		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2070f43798c2SRusty Russell 			return -EFAULT;
20718c847d25SJason Wang 
20728c847d25SJason Wang 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2073f43798c2SRusty Russell 	}
2074f43798c2SRusty Russell 
2075a8f9bfdfSHerbert Xu 	if (vlan_hlen) {
2076e0b46d0eSHerbert Xu 		int ret;
2077aff3d70aSJason Wang 		struct veth veth;
20781da177e4SLinus Torvalds 
20796680ec68SJason Wang 		veth.h_vlan_proto = skb->vlan_proto;
2080df8a39deSJiri Pirko 		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
20811da177e4SLinus Torvalds 
20826680ec68SJason Wang 		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
20836680ec68SJason Wang 
2084e0b46d0eSHerbert Xu 		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2085e0b46d0eSHerbert Xu 		if (ret || !iov_iter_count(iter))
20866680ec68SJason Wang 			goto done;
20876680ec68SJason Wang 
2088e0b46d0eSHerbert Xu 		ret = copy_to_iter(&veth, sizeof(veth), iter);
2089e0b46d0eSHerbert Xu 		if (ret != sizeof(veth) || !iov_iter_count(iter))
20906680ec68SJason Wang 			goto done;
20916680ec68SJason Wang 	}
20926680ec68SJason Wang 
2093e0b46d0eSHerbert Xu 	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
20946680ec68SJason Wang 
20956680ec68SJason Wang done:
2096608b9977SPaolo Abeni 	/* caller is in process context */
2097608b9977SPaolo Abeni 	stats = get_cpu_ptr(tun->pcpu_stats);
2098608b9977SPaolo Abeni 	u64_stats_update_begin(&stats->syncp);
20995260dd3eSEric Dumazet 	u64_stats_inc(&stats->tx_packets);
21005260dd3eSEric Dumazet 	u64_stats_add(&stats->tx_bytes, skb->len + vlan_hlen);
2101608b9977SPaolo Abeni 	u64_stats_update_end(&stats->syncp);
2102608b9977SPaolo Abeni 	put_cpu_ptr(tun->pcpu_stats);
21031da177e4SLinus Torvalds 
21041da177e4SLinus Torvalds 	return total;
21051da177e4SLinus Torvalds }
21061da177e4SLinus Torvalds 
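/* Consume one entry from the tx_ring. When the ring is empty and
 * @noblock is not set, sleep on the socket waitqueue until a producer
 * wakes us; *err reports -EAGAIN, -ERESTARTSYS or -EFAULT (socket
 * shut down) as appropriate.
 */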
2107fc72d1d5SJason Wang static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
21081576d986SJason Wang {
21091576d986SJason Wang 	DECLARE_WAITQUEUE(wait, current);
2110fc72d1d5SJason Wang 	void *ptr = NULL;
2111f48cc6b2SJason Wang 	int error = 0;
21121576d986SJason Wang 
2113fc72d1d5SJason Wang 	ptr = ptr_ring_consume(&tfile->tx_ring);
2114fc72d1d5SJason Wang 	if (ptr)
21151576d986SJason Wang 		goto out;
21161576d986SJason Wang 	if (noblock) {
2117f48cc6b2SJason Wang 		error = -EAGAIN;
21181576d986SJason Wang 		goto out;
21191576d986SJason Wang 	}
21201576d986SJason Wang 
2121333f7909SAl Viro 	add_wait_queue(&tfile->socket.wq.wait, &wait);
21221576d986SJason Wang 
21231576d986SJason Wang 	while (1) {
212471828b22STimur Celik 		set_current_state(TASK_INTERRUPTIBLE);
2125fc72d1d5SJason Wang 		ptr = ptr_ring_consume(&tfile->tx_ring);
2126fc72d1d5SJason Wang 		if (ptr)
21271576d986SJason Wang 			break;
21281576d986SJason Wang 		if (signal_pending(current)) {
2129f48cc6b2SJason Wang 			error = -ERESTARTSYS;
21301576d986SJason Wang 			break;
21311576d986SJason Wang 		}
21321576d986SJason Wang 		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2133f48cc6b2SJason Wang 			error = -EFAULT;
21341576d986SJason Wang 			break;
21351576d986SJason Wang 		}
21361576d986SJason Wang 
21371576d986SJason Wang 		schedule();
21381576d986SJason Wang 	}
21391576d986SJason Wang 
2140ecef67cbSTimur Celik 	__set_current_state(TASK_RUNNING);
2141333f7909SAl Viro 	remove_wait_queue(&tfile->socket.wq.wait, &wait);
21421576d986SJason Wang 
21431576d986SJason Wang out:
2144f48cc6b2SJason Wang 	*err = error;
2145fc72d1d5SJason Wang 	return ptr;
21461576d986SJason Wang }
21471576d986SJason Wang 
214854f968d6SJason Wang static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
21499b067034SAl Viro 			   struct iov_iter *to,
2150fc72d1d5SJason Wang 			   int noblock, void *ptr)
21511da177e4SLinus Torvalds {
21529b067034SAl Viro 	ssize_t ret;
21531576d986SJason Wang 	int err;
21541da177e4SLinus Torvalds 
2155c33ee15bSWei Xu 	if (!iov_iter_count(to)) {
2156fc72d1d5SJason Wang 		tun_ptr_free(ptr);
21579b067034SAl Viro 		return 0;
2158c33ee15bSWei Xu 	}
21591da177e4SLinus Torvalds 
2160fc72d1d5SJason Wang 	if (!ptr) {
21611576d986SJason Wang 		/* Read frames from ring */
2162fc72d1d5SJason Wang 		ptr = tun_ring_recv(tfile, noblock, &err);
2163fc72d1d5SJason Wang 		if (!ptr)
2164957f094fSAlex Gartrell 			return err;
2165ac77cfd4SJason Wang 	}
2166e0b46d0eSHerbert Xu 
21671ffcbc85SJesper Dangaard Brouer 	if (tun_is_xdp_frame(ptr)) {
21681ffcbc85SJesper Dangaard Brouer 		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2169fc72d1d5SJason Wang 
21701ffcbc85SJesper Dangaard Brouer 		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
217103993094SJesper Dangaard Brouer 		xdp_return_frame(xdpf);
2172fc72d1d5SJason Wang 	} else {
2173fc72d1d5SJason Wang 		struct sk_buff *skb = ptr;
2174fc72d1d5SJason Wang 
21759b067034SAl Viro 		ret = tun_put_user(tun, tfile, skb, to);
2176f51a5e82SJason Wang 		if (unlikely(ret < 0))
21771da177e4SLinus Torvalds 			kfree_skb(skb);
2178f51a5e82SJason Wang 		else
2179f51a5e82SJason Wang 			consume_skb(skb);
2180fc72d1d5SJason Wang 	}
21811da177e4SLinus Torvalds 
218205c2828cSMichael S. Tsirkin 	return ret;
218305c2828cSMichael S. Tsirkin }
218405c2828cSMichael S. Tsirkin 
21859b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
218605c2828cSMichael S. Tsirkin {
218705c2828cSMichael S. Tsirkin 	struct file *file = iocb->ki_filp;
218805c2828cSMichael S. Tsirkin 	struct tun_file *tfile = file->private_data;
21899484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
21909b067034SAl Viro 	ssize_t len = iov_iter_count(to), ret;
2191*5aac0390SJens Axboe 	int noblock = 0;
219205c2828cSMichael S. Tsirkin 
219305c2828cSMichael S. Tsirkin 	if (!tun)
219405c2828cSMichael S. Tsirkin 		return -EBADFD;
2195*5aac0390SJens Axboe 
2196*5aac0390SJens Axboe 	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
2197*5aac0390SJens Axboe 		noblock = 1;
2198*5aac0390SJens Axboe 
2199*5aac0390SJens Axboe 	ret = tun_do_read(tun, tfile, to, noblock, NULL);
220042404c09SDavid S. Miller 	ret = min_t(ssize_t, ret, len);
2201d0b7da8aSZhi Yong Wu 	if (ret > 0)
2202d0b7da8aSZhi Yong Wu 		iocb->ki_pos = ret;
2203631ab46bSEric W. Biederman 	tun_put(tun);
22041da177e4SLinus Torvalds 	return ret;
22051da177e4SLinus Torvalds }
22061da177e4SLinus Torvalds 
2207cd5681d7SJason Wang static void tun_prog_free(struct rcu_head *rcu)
220896f84061SJason Wang {
2209cd5681d7SJason Wang 	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
221096f84061SJason Wang 
221196f84061SJason Wang 	bpf_prog_destroy(prog->prog);
221296f84061SJason Wang 	kfree(prog);
221396f84061SJason Wang }
221496f84061SJason Wang 
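/* Swap in a new steering/filter program under tun->lock and publish it
 * with rcu_assign_pointer(); the old program is released after a grace
 * period via call_rcu() -> tun_prog_free().
 */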
22159d6474e4SJason Wang static int __tun_set_ebpf(struct tun_struct *tun,
22169d6474e4SJason Wang 			  struct tun_prog __rcu **prog_p,
221796f84061SJason Wang 			  struct bpf_prog *prog)
221896f84061SJason Wang {
2219cd5681d7SJason Wang 	struct tun_prog *old, *new = NULL;
222096f84061SJason Wang 
222196f84061SJason Wang 	if (prog) {
222296f84061SJason Wang 		new = kmalloc(sizeof(*new), GFP_KERNEL);
222396f84061SJason Wang 		if (!new)
222496f84061SJason Wang 			return -ENOMEM;
222596f84061SJason Wang 		new->prog = prog;
222696f84061SJason Wang 	}
222796f84061SJason Wang 
2228124da8f6SJason Wang 	spin_lock_bh(&tun->lock);
2229cd5681d7SJason Wang 	old = rcu_dereference_protected(*prog_p,
2230124da8f6SJason Wang 					lockdep_is_held(&tun->lock));
2231cd5681d7SJason Wang 	rcu_assign_pointer(*prog_p, new);
2232124da8f6SJason Wang 	spin_unlock_bh(&tun->lock);
223396f84061SJason Wang 
223496f84061SJason Wang 	if (old)
2235cd5681d7SJason Wang 		call_rcu(&old->rcu, tun_prog_free);
223696f84061SJason Wang 
223796f84061SJason Wang 	return 0;
223896f84061SJason Wang }
223996f84061SJason Wang 
224096442e42SJason Wang static void tun_free_netdev(struct net_device *dev)
224196442e42SJason Wang {
224296442e42SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
224396442e42SJason Wang 
22444008e97fSJason Wang 	BUG_ON(!(list_empty(&tun->disabled)));
224511fc7d5aSEric Dumazet 
2246608b9977SPaolo Abeni 	free_percpu(tun->pcpu_stats);
224711fc7d5aSEric Dumazet 	/* We clear pcpu_stats so that tun_set_iff() can tell if
224811fc7d5aSEric Dumazet 	 * tun_free_netdev() has been called from register_netdevice().
224911fc7d5aSEric Dumazet 	 */
225011fc7d5aSEric Dumazet 	tun->pcpu_stats = NULL;
225111fc7d5aSEric Dumazet 
225296442e42SJason Wang 	tun_flow_uninit(tun);
22535dbbaf2dSPaul Moore 	security_tun_dev_free_security(tun->security);
2254cd5681d7SJason Wang 	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
2255aff3d70aSJason Wang 	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
225696442e42SJason Wang }
225796442e42SJason Wang 
22581da177e4SLinus Torvalds static void tun_setup(struct net_device *dev)
22591da177e4SLinus Torvalds {
22601da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
22611da177e4SLinus Torvalds 
22620625c883SEric W. Biederman 	tun->owner = INVALID_UID;
22630625c883SEric W. Biederman 	tun->group = INVALID_GID;
22644e24f2ddSChas Williams 	tun_default_link_ksettings(dev, &tun->link_ksettings);
22651da177e4SLinus Torvalds 
22661da177e4SLinus Torvalds 	dev->ethtool_ops = &tun_ethtool_ops;
2267cf124db5SDavid S. Miller 	dev->needs_free_netdev = true;
2268cf124db5SDavid S. Miller 	dev->priv_destructor = tun_free_netdev;
2269016adb72SJason Wang 	/* We prefer our own queue length */
2270016adb72SJason Wang 	dev->tx_queue_len = TUN_READQ_SIZE;
22711da177e4SLinus Torvalds }
22721da177e4SLinus Torvalds 
2273f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting tun or tap
2274f019a7a5SEric W. Biederman  * device with netlink.
2275f019a7a5SEric W. Biederman  */
2276a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2277a8b8a889SMatthias Schiffer 			struct netlink_ext_ack *extack)
2278f019a7a5SEric W. Biederman {
227935b827b6SNicolas Dichtel 	NL_SET_ERR_MSG(extack,
228035b827b6SNicolas Dichtel 		       "tun/tap creation via rtnetlink is not supported.");
228135b827b6SNicolas Dichtel 	return -EOPNOTSUPP;
2282f019a7a5SEric W. Biederman }
2283f019a7a5SEric W. Biederman 
22841ec010e7SSabrina Dubroca static size_t tun_get_size(const struct net_device *dev)
22851ec010e7SSabrina Dubroca {
22861ec010e7SSabrina Dubroca 	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
22871ec010e7SSabrina Dubroca 	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
22881ec010e7SSabrina Dubroca 
22891ec010e7SSabrina Dubroca 	return nla_total_size(sizeof(uid_t)) + /* OWNER */
22901ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(gid_t)) + /* GROUP */
22911ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* TYPE */
22921ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* PI */
22931ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* VNET_HDR */
22941ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* PERSIST */
22951ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
22961ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
22971ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
22981ec010e7SSabrina Dubroca 	       0;
22991ec010e7SSabrina Dubroca }
23001ec010e7SSabrina Dubroca 
23011ec010e7SSabrina Dubroca static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
23021ec010e7SSabrina Dubroca {
23031ec010e7SSabrina Dubroca 	struct tun_struct *tun = netdev_priv(dev);
23041ec010e7SSabrina Dubroca 
23051ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
23061ec010e7SSabrina Dubroca 		goto nla_put_failure;
23071ec010e7SSabrina Dubroca 	if (uid_valid(tun->owner) &&
23081ec010e7SSabrina Dubroca 	    nla_put_u32(skb, IFLA_TUN_OWNER,
23091ec010e7SSabrina Dubroca 			from_kuid_munged(current_user_ns(), tun->owner)))
23101ec010e7SSabrina Dubroca 		goto nla_put_failure;
23111ec010e7SSabrina Dubroca 	if (gid_valid(tun->group) &&
23121ec010e7SSabrina Dubroca 	    nla_put_u32(skb, IFLA_TUN_GROUP,
23131ec010e7SSabrina Dubroca 			from_kgid_munged(current_user_ns(), tun->group)))
23141ec010e7SSabrina Dubroca 		goto nla_put_failure;
23151ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
23161ec010e7SSabrina Dubroca 		goto nla_put_failure;
23171ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
23181ec010e7SSabrina Dubroca 		goto nla_put_failure;
23191ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
23201ec010e7SSabrina Dubroca 		goto nla_put_failure;
23211ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
23221ec010e7SSabrina Dubroca 		       !!(tun->flags & IFF_MULTI_QUEUE)))
23231ec010e7SSabrina Dubroca 		goto nla_put_failure;
23241ec010e7SSabrina Dubroca 	if (tun->flags & IFF_MULTI_QUEUE) {
23251ec010e7SSabrina Dubroca 		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
23261ec010e7SSabrina Dubroca 			goto nla_put_failure;
23271ec010e7SSabrina Dubroca 		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
23281ec010e7SSabrina Dubroca 				tun->numdisabled))
23291ec010e7SSabrina Dubroca 			goto nla_put_failure;
23301ec010e7SSabrina Dubroca 	}
23311ec010e7SSabrina Dubroca 
23321ec010e7SSabrina Dubroca 	return 0;
23331ec010e7SSabrina Dubroca 
23341ec010e7SSabrina Dubroca nla_put_failure:
23351ec010e7SSabrina Dubroca 	return -EMSGSIZE;
23361ec010e7SSabrina Dubroca }
23371ec010e7SSabrina Dubroca 
2338f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = {
2339f019a7a5SEric W. Biederman 	.kind		= DRV_NAME,
2340f019a7a5SEric W. Biederman 	.priv_size	= sizeof(struct tun_struct),
2341f019a7a5SEric W. Biederman 	.setup		= tun_setup,
2342f019a7a5SEric W. Biederman 	.validate	= tun_validate,
23431ec010e7SSabrina Dubroca 	.get_size       = tun_get_size,
23441ec010e7SSabrina Dubroca 	.fill_info      = tun_fill_info,
2345f019a7a5SEric W. Biederman };
2346f019a7a5SEric W. Biederman 
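/* sk_write_space callback: when the socket becomes writeable again,
 * wake up poll()ers waiting for EPOLLOUT and send SIGIO to fasync
 * subscribers, mirroring what a real socket would do.
 */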
234733dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk)
234833dccbb0SHerbert Xu {
234954f968d6SJason Wang 	struct tun_file *tfile;
235043815482SEric Dumazet 	wait_queue_head_t *wqueue;
235133dccbb0SHerbert Xu 
235233dccbb0SHerbert Xu 	if (!sock_writeable(sk))
235333dccbb0SHerbert Xu 		return;
235433dccbb0SHerbert Xu 
23559cd3e072SEric Dumazet 	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
235633dccbb0SHerbert Xu 		return;
235733dccbb0SHerbert Xu 
235843815482SEric Dumazet 	wqueue = sk_sleep(sk);
235943815482SEric Dumazet 	if (wqueue && waitqueue_active(wqueue))
2360a9a08845SLinus Torvalds 		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2361a9a08845SLinus Torvalds 						EPOLLWRNORM | EPOLLWRBAND);
2362c722c625SHerbert Xu 
236354f968d6SJason Wang 	tfile = container_of(sk, struct tun_file, sk);
236454f968d6SJason Wang 	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
236533dccbb0SHerbert Xu }
236633dccbb0SHerbert Xu 
2367f9e06c45SJason Wang static void tun_put_page(struct tun_page *tpage)
2368f9e06c45SJason Wang {
2369f9e06c45SJason Wang 	if (tpage->page)
2370f9e06c45SJason Wang 		__page_frag_cache_drain(tpage->page, tpage->count);
2371f9e06c45SJason Wang }
2372f9e06c45SJason Wang 
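/* Process one XDP buffer from the batched sendmsg() path.  If an XDP
 * program is attached and the buffer carries no GSO metadata, run the
 * program directly on the buffer: XDP_REDIRECT defers the flush to the
 * caller, XDP_TX is already handled inside tun_xdp_act(), and dropped
 * buffers have their pages accumulated in *tpage so the whole batch
 * can be released with a single __page_frag_cache_drain().  Everything
 * else is converted to an skb and handed to the stack.
 */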
2373043d222fSJason Wang static int tun_xdp_one(struct tun_struct *tun,
2374043d222fSJason Wang 		       struct tun_file *tfile,
2375f9e06c45SJason Wang 		       struct xdp_buff *xdp, int *flush,
2376f9e06c45SJason Wang 		       struct tun_page *tpage)
2377043d222fSJason Wang {
23784e4b08e5SPrashant Bhole 	unsigned int datasize = xdp->data_end - xdp->data;
2379043d222fSJason Wang 	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
2380043d222fSJason Wang 	struct virtio_net_hdr *gso = &hdr->gso;
2381043d222fSJason Wang 	struct tun_pcpu_stats *stats;
2382043d222fSJason Wang 	struct bpf_prog *xdp_prog;
2383043d222fSJason Wang 	struct sk_buff *skb = NULL;
2384043d222fSJason Wang 	u32 rxhash = 0, act;
2385043d222fSJason Wang 	int buflen = hdr->buflen;
2386043d222fSJason Wang 	int err = 0;
2387043d222fSJason Wang 	bool skb_xdp = false;
2388f9e06c45SJason Wang 	struct page *page;
2389043d222fSJason Wang 
2390043d222fSJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
2391043d222fSJason Wang 	if (xdp_prog) {
2392043d222fSJason Wang 		if (gso->gso_type) {
2393043d222fSJason Wang 			skb_xdp = true;
2394043d222fSJason Wang 			goto build;
2395043d222fSJason Wang 		}
2396043d222fSJason Wang 		xdp_set_data_meta_invalid(xdp);
2397043d222fSJason Wang 		xdp->rxq = &tfile->xdp_rxq;
2398fb3e6e93SJesper Dangaard Brouer 		xdp->frame_sz = buflen;
2399043d222fSJason Wang 
2400043d222fSJason Wang 		act = bpf_prog_run_xdp(xdp_prog, xdp);
2401043d222fSJason Wang 		err = tun_xdp_act(tun, xdp_prog, xdp, act);
2402043d222fSJason Wang 		if (err < 0) {
2403043d222fSJason Wang 			put_page(virt_to_head_page(xdp->data));
2404043d222fSJason Wang 			return err;
2405043d222fSJason Wang 		}
2406043d222fSJason Wang 
2407043d222fSJason Wang 		switch (err) {
2408043d222fSJason Wang 		case XDP_REDIRECT:
2409043d222fSJason Wang 			*flush = true;
2410df561f66SGustavo A. R. Silva 			fallthrough;
2411043d222fSJason Wang 		case XDP_TX:
2412043d222fSJason Wang 			return 0;
2413043d222fSJason Wang 		case XDP_PASS:
2414043d222fSJason Wang 			break;
2415043d222fSJason Wang 		default:
2416f9e06c45SJason Wang 			page = virt_to_head_page(xdp->data);
2417f9e06c45SJason Wang 			if (tpage->page == page) {
2418f9e06c45SJason Wang 				++tpage->count;
2419f9e06c45SJason Wang 			} else {
2420f9e06c45SJason Wang 				tun_put_page(tpage);
2421f9e06c45SJason Wang 				tpage->page = page;
2422f9e06c45SJason Wang 				tpage->count = 1;
2423f9e06c45SJason Wang 			}
2424043d222fSJason Wang 			return 0;
2425043d222fSJason Wang 		}
2426043d222fSJason Wang 	}
2427043d222fSJason Wang 
2428043d222fSJason Wang build:
2429043d222fSJason Wang 	skb = build_skb(xdp->data_hard_start, buflen);
2430043d222fSJason Wang 	if (!skb) {
2431043d222fSJason Wang 		err = -ENOMEM;
2432043d222fSJason Wang 		goto out;
2433043d222fSJason Wang 	}
2434043d222fSJason Wang 
2435043d222fSJason Wang 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
2436043d222fSJason Wang 	skb_put(skb, xdp->data_end - xdp->data);
2437043d222fSJason Wang 
2438043d222fSJason Wang 	if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2439043d222fSJason Wang 		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
2440043d222fSJason Wang 		kfree_skb(skb);
2441043d222fSJason Wang 		err = -EINVAL;
2442043d222fSJason Wang 		goto out;
2443043d222fSJason Wang 	}
2444043d222fSJason Wang 
2445043d222fSJason Wang 	skb->protocol = eth_type_trans(skb, tun->dev);
2446043d222fSJason Wang 	skb_reset_network_header(skb);
2447d2aa125dSMaxim Mikityanskiy 	skb_probe_transport_header(skb);
24483fe260e0SGilberto Bertin 	skb_record_rx_queue(skb, tfile->queue_index);
2449043d222fSJason Wang 
2450043d222fSJason Wang 	if (skb_xdp) {
2451043d222fSJason Wang 		err = do_xdp_generic(xdp_prog, skb);
2452043d222fSJason Wang 		if (err != XDP_PASS)
2453043d222fSJason Wang 			goto out;
2454043d222fSJason Wang 	}
2455043d222fSJason Wang 
2456f29eb2a9SPaolo Abeni 	if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2457f29eb2a9SPaolo Abeni 	    !tfile->detached)
2458043d222fSJason Wang 		rxhash = __skb_get_hash_symmetric(skb);
2459043d222fSJason Wang 
2460043d222fSJason Wang 	netif_receive_skb(skb);
2461043d222fSJason Wang 
24626342ca64SPrashant Bhole 	/* No need for get_cpu_ptr() here since this function is
24636342ca64SPrashant Bhole 	 * always called with bh disabled
24646342ca64SPrashant Bhole 	 */
24656342ca64SPrashant Bhole 	stats = this_cpu_ptr(tun->pcpu_stats);
2466043d222fSJason Wang 	u64_stats_update_begin(&stats->syncp);
24675260dd3eSEric Dumazet 	u64_stats_inc(&stats->rx_packets);
24685260dd3eSEric Dumazet 	u64_stats_add(&stats->rx_bytes, datasize);
2469043d222fSJason Wang 	u64_stats_update_end(&stats->syncp);
2470043d222fSJason Wang 
2471043d222fSJason Wang 	if (rxhash)
2472043d222fSJason Wang 		tun_flow_update(tun, rxhash, tfile);
2473043d222fSJason Wang 
2474043d222fSJason Wang out:
2475043d222fSJason Wang 	return err;
2476043d222fSJason Wang }
2477043d222fSJason Wang 
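/* sendmsg() on the tun socket.  A TUN_MSG_PTR control block (as passed
 * by vhost-net) carries an array of XDP buffers that are processed as
 * one batch under a single rcu_read_lock()/local_bh_disable() section
 * with at most one xdp_do_flush() at the end; anything else falls back
 * to the ordinary tun_get_user() copy path.
 */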
24781b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
247905c2828cSMichael S. Tsirkin {
2480043d222fSJason Wang 	int ret, i;
248154f968d6SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
24829484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
2483fe8dd45bSJason Wang 	struct tun_msg_ctl *ctl = m->msg_control;
2484043d222fSJason Wang 	struct xdp_buff *xdp;
248554f968d6SJason Wang 
248654f968d6SJason Wang 	if (!tun)
248754f968d6SJason Wang 		return -EBADFD;
2488f5ff53b4SAl Viro 
2489043d222fSJason Wang 	if (ctl && (ctl->type == TUN_MSG_PTR)) {
24906f0271d9SDavid S. Miller 		struct tun_page tpage;
2491043d222fSJason Wang 		int n = ctl->num;
2492043d222fSJason Wang 		int flush = 0;
2493043d222fSJason Wang 
24946f0271d9SDavid S. Miller 		memset(&tpage, 0, sizeof(tpage));
24956f0271d9SDavid S. Miller 
2496043d222fSJason Wang 		local_bh_disable();
2497043d222fSJason Wang 		rcu_read_lock();
2498043d222fSJason Wang 
2499043d222fSJason Wang 		for (i = 0; i < n; i++) {
2500043d222fSJason Wang 			xdp = &((struct xdp_buff *)ctl->ptr)[i];
2501f9e06c45SJason Wang 			tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
2502043d222fSJason Wang 		}
2503043d222fSJason Wang 
2504043d222fSJason Wang 		if (flush)
25051d233886SToke Høiland-Jørgensen 			xdp_do_flush();
2506043d222fSJason Wang 
2507043d222fSJason Wang 		rcu_read_unlock();
2508043d222fSJason Wang 		local_bh_enable();
2509043d222fSJason Wang 
2510f9e06c45SJason Wang 		tun_put_page(&tpage);
2511f9e06c45SJason Wang 
2512043d222fSJason Wang 		ret = total_len;
2513043d222fSJason Wang 		goto out;
2514043d222fSJason Wang 	}
2515fe8dd45bSJason Wang 
2516fe8dd45bSJason Wang 	ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
25175503fcecSJason Wang 			   m->msg_flags & MSG_DONTWAIT,
25185503fcecSJason Wang 			   m->msg_flags & MSG_MORE);
2519043d222fSJason Wang out:
252054f968d6SJason Wang 	tun_put(tun);
252154f968d6SJason Wang 	return ret;
252205c2828cSMichael S. Tsirkin }
252305c2828cSMichael S. Tsirkin 
25241b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
252505c2828cSMichael S. Tsirkin 		       int flags)
252605c2828cSMichael S. Tsirkin {
252754f968d6SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
25289484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
2529fc72d1d5SJason Wang 	void *ptr = m->msg_control;
253005c2828cSMichael S. Tsirkin 	int ret;
253154f968d6SJason Wang 
2532c33ee15bSWei Xu 	if (!tun) {
2533c33ee15bSWei Xu 		ret = -EBADFD;
2534fc72d1d5SJason Wang 		goto out_free;
2535c33ee15bSWei Xu 	}
253654f968d6SJason Wang 
2537eda29772SRichard Cochran 	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
25383811ae76SGao feng 		ret = -EINVAL;
2539c33ee15bSWei Xu 		goto out_put_tun;
25403811ae76SGao feng 	}
2541eda29772SRichard Cochran 	if (flags & MSG_ERRQUEUE) {
2542eda29772SRichard Cochran 		ret = sock_recv_errqueue(sock->sk, m, total_len,
2543eda29772SRichard Cochran 					 SOL_PACKET, TUN_TX_TIMESTAMP);
2544eda29772SRichard Cochran 		goto out;
2545eda29772SRichard Cochran 	}
2546fc72d1d5SJason Wang 	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
254787897931SAlex Gartrell 	if (ret > (ssize_t)total_len) {
254842404c09SDavid S. Miller 		m->msg_flags |= MSG_TRUNC;
254942404c09SDavid S. Miller 		ret = flags & MSG_TRUNC ? ret : total_len;
255042404c09SDavid S. Miller 	}
25513811ae76SGao feng out:
255254f968d6SJason Wang 	tun_put(tun);
255305c2828cSMichael S. Tsirkin 	return ret;
2554c33ee15bSWei Xu 
2555c33ee15bSWei Xu out_put_tun:
2556c33ee15bSWei Xu 	tun_put(tun);
2557fc72d1d5SJason Wang out_free:
2558fc72d1d5SJason Wang 	tun_ptr_free(ptr);
2559c33ee15bSWei Xu 	return ret;
256005c2828cSMichael S. Tsirkin }
256105c2828cSMichael S. Tsirkin 
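/* Entries in the tx_ring are tagged pointers: either an sk_buff or an
 * xdp_frame, distinguished by tun_is_xdp_frame().  Return the packet
 * length without dequeueing, for tun_peek_len() below.
 */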
2562fc72d1d5SJason Wang static int tun_ptr_peek_len(void *ptr)
2563fc72d1d5SJason Wang {
2564fc72d1d5SJason Wang 	if (likely(ptr)) {
25651ffcbc85SJesper Dangaard Brouer 		if (tun_is_xdp_frame(ptr)) {
25661ffcbc85SJesper Dangaard Brouer 			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2567fc72d1d5SJason Wang 
25681ffcbc85SJesper Dangaard Brouer 			return xdpf->len;
2569fc72d1d5SJason Wang 		}
2570fc72d1d5SJason Wang 		return __skb_array_len_with_tag(ptr);
2571fc72d1d5SJason Wang 	} else {
2572fc72d1d5SJason Wang 		return 0;
2573fc72d1d5SJason Wang 	}
2574fc72d1d5SJason Wang }
2575fc72d1d5SJason Wang 
25761576d986SJason Wang static int tun_peek_len(struct socket *sock)
25771576d986SJason Wang {
25781576d986SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
25791576d986SJason Wang 	struct tun_struct *tun;
25801576d986SJason Wang 	int ret = 0;
25811576d986SJason Wang 
25829484dc74Syuan linyu 	tun = tun_get(tfile);
25831576d986SJason Wang 	if (!tun)
25841576d986SJason Wang 		return 0;
25851576d986SJason Wang 
2586fc72d1d5SJason Wang 	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
25871576d986SJason Wang 	tun_put(tun);
25881576d986SJason Wang 
25891576d986SJason Wang 	return ret;
25901576d986SJason Wang }
25911576d986SJason Wang 
259205c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */
259305c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = {
25941576d986SJason Wang 	.peek_len = tun_peek_len,
259505c2828cSMichael S. Tsirkin 	.sendmsg = tun_sendmsg,
259605c2828cSMichael S. Tsirkin 	.recvmsg = tun_recvmsg,
259705c2828cSMichael S. Tsirkin };
259805c2828cSMichael S. Tsirkin 
259933dccbb0SHerbert Xu static struct proto tun_proto = {
260033dccbb0SHerbert Xu 	.name		= "tun",
260133dccbb0SHerbert Xu 	.owner		= THIS_MODULE,
260254f968d6SJason Wang 	.obj_size	= sizeof(struct tun_file),
260333dccbb0SHerbert Xu };
2604f019a7a5SEric W. Biederman 
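/* The tun_flags/owner/group sysfs attributes below expose the device's
 * flags and ownership so they can be inspected without holding the
 * tun fd.
 */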
2605980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun)
2606980c9e8cSDavid Woodhouse {
2607031f5e03SMichael S. Tsirkin 	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2608980c9e8cSDavid Woodhouse }
2609980c9e8cSDavid Woodhouse 
2610980c9e8cSDavid Woodhouse static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
2611980c9e8cSDavid Woodhouse 			      char *buf)
2612980c9e8cSDavid Woodhouse {
2613980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2614980c9e8cSDavid Woodhouse 	return sprintf(buf, "0x%x\n", tun_flags(tun));
2615980c9e8cSDavid Woodhouse }
2616980c9e8cSDavid Woodhouse 
2617980c9e8cSDavid Woodhouse static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
2618980c9e8cSDavid Woodhouse 			      char *buf)
2619980c9e8cSDavid Woodhouse {
2620980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
26210625c883SEric W. Biederman 	return uid_valid(tun->owner) ?
26220625c883SEric W. Biederman 		sprintf(buf, "%u\n",
26230625c883SEric W. Biederman 			from_kuid_munged(current_user_ns(), tun->owner)) :
26240625c883SEric W. Biederman 		sprintf(buf, "-1\n");
2625980c9e8cSDavid Woodhouse }
2626980c9e8cSDavid Woodhouse 
2627980c9e8cSDavid Woodhouse static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
2628980c9e8cSDavid Woodhouse 			      char *buf)
2629980c9e8cSDavid Woodhouse {
2630980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
26310625c883SEric W. Biederman 	return gid_valid(tun->group) ?
26320625c883SEric W. Biederman 		sprintf(buf, "%u\n",
26330625c883SEric W. Biederman 			from_kgid_munged(current_user_ns(), tun->group)) :
26340625c883SEric W. Biederman 		sprintf(buf, "-1\n");
2635980c9e8cSDavid Woodhouse }
2636980c9e8cSDavid Woodhouse 
2637980c9e8cSDavid Woodhouse static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
2638980c9e8cSDavid Woodhouse static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
2639980c9e8cSDavid Woodhouse static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
2640980c9e8cSDavid Woodhouse 
2641c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = {
2642c4d33e24STakashi Iwai 	&dev_attr_tun_flags.attr,
2643c4d33e24STakashi Iwai 	&dev_attr_owner.attr,
2644c4d33e24STakashi Iwai 	&dev_attr_group.attr,
2645c4d33e24STakashi Iwai 	NULL
2646c4d33e24STakashi Iwai };
2647c4d33e24STakashi Iwai 
2648c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = {
2649c4d33e24STakashi Iwai 	.attrs = tun_dev_attrs
2650c4d33e24STakashi Iwai };
2651c4d33e24STakashi Iwai 
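/* TUNSETIFF: create a new tun/tap device or attach to an existing one.
 * A rough userspace sketch of the usual sequence (illustrative only,
 * not part of this driver):
 *
 *	struct ifreq ifr;
 *	int fd = open("/dev/net/tun", O_RDWR);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
 *	strncpy(ifr.ifr_name, "tun0", IFNAMSIZ);
 *	ioctl(fd, TUNSETIFF, &ifr);		<-- ends up here
 *
 * Attaching to an existing device requires passing the capability and
 * LSM checks, and extra queues may only be attached if the device was
 * created with IFF_MULTI_QUEUE; creating a new device requires
 * CAP_NET_ADMIN in the owning user namespace.
 */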
2652d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
26531da177e4SLinus Torvalds {
26541da177e4SLinus Torvalds 	struct tun_struct *tun;
265554f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
26561da177e4SLinus Torvalds 	struct net_device *dev;
26571da177e4SLinus Torvalds 	int err;
26581da177e4SLinus Torvalds 
26597c0c3b1aSJason Wang 	if (tfile->detached)
26607c0c3b1aSJason Wang 		return -EINVAL;
26617c0c3b1aSJason Wang 
266290e33d45SPetar Penkov 	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
266390e33d45SPetar Penkov 		if (!capable(CAP_NET_ADMIN))
266490e33d45SPetar Penkov 			return -EPERM;
266590e33d45SPetar Penkov 
266690e33d45SPetar Penkov 		if (!(ifr->ifr_flags & IFF_NAPI) ||
266790e33d45SPetar Penkov 		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
266890e33d45SPetar Penkov 			return -EINVAL;
266990e33d45SPetar Penkov 	}
267090e33d45SPetar Penkov 
267174a3e5a7SEric W. Biederman 	dev = __dev_get_by_name(net, ifr->ifr_name);
267274a3e5a7SEric W. Biederman 	if (dev) {
2673f85ba780SDavid Woodhouse 		if (ifr->ifr_flags & IFF_TUN_EXCL)
2674f85ba780SDavid Woodhouse 			return -EBUSY;
267574a3e5a7SEric W. Biederman 		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
267674a3e5a7SEric W. Biederman 			tun = netdev_priv(dev);
267774a3e5a7SEric W. Biederman 		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
267874a3e5a7SEric W. Biederman 			tun = netdev_priv(dev);
267974a3e5a7SEric W. Biederman 		else
268074a3e5a7SEric W. Biederman 			return -EINVAL;
268174a3e5a7SEric W. Biederman 
26828e6d91aeSJason Wang 		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
268340630b82SMichael S. Tsirkin 		    !!(tun->flags & IFF_MULTI_QUEUE))
26848e6d91aeSJason Wang 			return -EINVAL;
26858e6d91aeSJason Wang 
2686cde8b15fSJason Wang 		if (tun_not_capable(tun))
26872b980dbdSPaul Moore 			return -EPERM;
26885dbbaf2dSPaul Moore 		err = security_tun_dev_open(tun->security);
26892b980dbdSPaul Moore 		if (err < 0)
26902b980dbdSPaul Moore 			return err;
26912b980dbdSPaul Moore 
269294317099SPetar Penkov 		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2693af3fb24eSEric Dumazet 				 ifr->ifr_flags & IFF_NAPI,
269477f22f92SYang Yingliang 				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
2695a7385ba2SEric W. Biederman 		if (err < 0)
2696a7385ba2SEric W. Biederman 			return err;
26974008e97fSJason Wang 
269840630b82SMichael S. Tsirkin 		if (tun->flags & IFF_MULTI_QUEUE &&
2699e8dbad66SJason Wang 		    (tun->numqueues + tun->numdisabled > 1)) {
2700e8dbad66SJason Wang 			/* One or more queues have already been attached; no
2701e8dbad66SJason Wang 			 * need to initialize the device again.
2702e8dbad66SJason Wang 			 */
270383c1f36fSSabrina Dubroca 			netdev_state_change(dev);
2704e8dbad66SJason Wang 			return 0;
2705e8dbad66SJason Wang 		}
27069fffc5c6SSabrina Dubroca 
27079fffc5c6SSabrina Dubroca 		tun->flags = (tun->flags & ~TUN_FEATURES) |
27089fffc5c6SSabrina Dubroca 			      (ifr->ifr_flags & TUN_FEATURES);
270983c1f36fSSabrina Dubroca 
271083c1f36fSSabrina Dubroca 		netdev_state_change(dev);
271183c1f36fSSabrina Dubroca 	} else {
27121da177e4SLinus Torvalds 		char *name;
27131da177e4SLinus Torvalds 		unsigned long flags = 0;
2714edfb6a14SJason Wang 		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2715edfb6a14SJason Wang 			     MAX_TAP_QUEUES : 1;
27161da177e4SLinus Torvalds 
2717c260b772SEric W. Biederman 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2718ca6bb5d7SDavid Woodhouse 			return -EPERM;
27192b980dbdSPaul Moore 		err = security_tun_dev_create();
27202b980dbdSPaul Moore 		if (err < 0)
27212b980dbdSPaul Moore 			return err;
2722ca6bb5d7SDavid Woodhouse 
27231da177e4SLinus Torvalds 		/* Set dev type */
27241da177e4SLinus Torvalds 		if (ifr->ifr_flags & IFF_TUN) {
27251da177e4SLinus Torvalds 			/* TUN device */
272640630b82SMichael S. Tsirkin 			flags |= IFF_TUN;
27271da177e4SLinus Torvalds 			name = "tun%d";
27281da177e4SLinus Torvalds 		} else if (ifr->ifr_flags & IFF_TAP) {
27291da177e4SLinus Torvalds 			/* TAP device */
273040630b82SMichael S. Tsirkin 			flags |= IFF_TAP;
27311da177e4SLinus Torvalds 			name = "tap%d";
27321da177e4SLinus Torvalds 		} else
273336989b90SKusanagi Kouichi 			return -EINVAL;
27341da177e4SLinus Torvalds 
27351da177e4SLinus Torvalds 		if (*ifr->ifr_name)
27361da177e4SLinus Torvalds 			name = ifr->ifr_name;
27371da177e4SLinus Torvalds 
2738c8d68e6bSJason Wang 		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2739c835a677STom Gundersen 				       NET_NAME_UNKNOWN, tun_setup, queues,
2740c835a677STom Gundersen 				       queues);
2741edfb6a14SJason Wang 
27421da177e4SLinus Torvalds 		if (!dev)
27431da177e4SLinus Torvalds 			return -ENOMEM;
27441da177e4SLinus Torvalds 
2745fc54c658SPavel Emelyanov 		dev_net_set(dev, net);
2746f019a7a5SEric W. Biederman 		dev->rtnl_link_ops = &tun_link_ops;
2747fb7589a1SPavel Emelyanov 		dev->ifindex = tfile->ifindex;
2748c4d33e24STakashi Iwai 		dev->sysfs_groups[0] = &tun_attr_group;
2749758e43b7SStephen Hemminger 
27501da177e4SLinus Torvalds 		tun = netdev_priv(dev);
27511da177e4SLinus Torvalds 		tun->dev = dev;
27521da177e4SLinus Torvalds 		tun->flags = flags;
2753f271b2ccSMax Krasnyansky 		tun->txflt.count = 0;
2754d9d52b51SMichael S. Tsirkin 		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
27551da177e4SLinus Torvalds 
2756eaea34b2SPaolo Abeni 		tun->align = NET_SKB_PAD;
275754f968d6SJason Wang 		tun->filter_attached = false;
275854f968d6SJason Wang 		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
27595503fcecSJason Wang 		tun->rx_batched = 0;
276096f84061SJason Wang 		RCU_INIT_POINTER(tun->steering_prog, NULL);
276133dccbb0SHerbert Xu 
2762608b9977SPaolo Abeni 		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
2763608b9977SPaolo Abeni 		if (!tun->pcpu_stats) {
2764608b9977SPaolo Abeni 			err = -ENOMEM;
2765608b9977SPaolo Abeni 			goto err_free_dev;
2766608b9977SPaolo Abeni 		}
2767608b9977SPaolo Abeni 
276896442e42SJason Wang 		spin_lock_init(&tun->lock);
276996442e42SJason Wang 
27705dbbaf2dSPaul Moore 		err = security_tun_dev_alloc_security(&tun->security);
27715dbbaf2dSPaul Moore 		if (err < 0)
2772608b9977SPaolo Abeni 			goto err_free_stat;
27732b980dbdSPaul Moore 
27741da177e4SLinus Torvalds 		tun_net_init(dev);
2775944a1376SPavel Emelyanov 		tun_flow_init(tun);
277696442e42SJason Wang 
277788255375SMichał Mirosław 		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
27786680ec68SJason Wang 				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
27796680ec68SJason Wang 				   NETIF_F_HW_VLAN_STAG_TX;
27802a2bbf17SPaolo Abeni 		dev->features = dev->hw_features | NETIF_F_LLTX;
27816671b224SFernando Luis Vazquez Cao 		dev->vlan_features = dev->features &
27826671b224SFernando Luis Vazquez Cao 				     ~(NETIF_F_HW_VLAN_CTAG_TX |
27836671b224SFernando Luis Vazquez Cao 				       NETIF_F_HW_VLAN_STAG_TX);
278488255375SMichał Mirosław 
27859fffc5c6SSabrina Dubroca 		tun->flags = (tun->flags & ~TUN_FEATURES) |
27869fffc5c6SSabrina Dubroca 			      (ifr->ifr_flags & TUN_FEATURES);
27879fffc5c6SSabrina Dubroca 
27884008e97fSJason Wang 		INIT_LIST_HEAD(&tun->disabled);
2789af3fb24eSEric Dumazet 		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
279077f22f92SYang Yingliang 				 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
2791eb0fb363SJason Wang 		if (err < 0)
2792662ca437SJason Wang 			goto err_free_flow;
2793eb0fb363SJason Wang 
27941da177e4SLinus Torvalds 		err = register_netdevice(tun->dev);
27951da177e4SLinus Torvalds 		if (err < 0)
2796662ca437SJason Wang 			goto err_detach;
279877f22f92SYang Yingliang 	/* free_netdev() won't check refcnt; to avoid a race
279977f22f92SYang Yingliang 	 * with dev_put() we must publish tun only after registration.
279977f22f92SYang Yingliang 		 */
280077f22f92SYang Yingliang 		rcu_assign_pointer(tfile->tun, tun);
2801af668b3cSMichael S. Tsirkin 	}
2802980c9e8cSDavid Woodhouse 
2803eb0fb363SJason Wang 	netif_carrier_on(tun->dev);
28041da177e4SLinus Torvalds 
2805e35259a9SMax Krasnyansky 	/* Make sure persistent devices do not get stuck in
2806e35259a9SMax Krasnyansky 	 * xoff state.
2807e35259a9SMax Krasnyansky 	 */
2808e35259a9SMax Krasnyansky 	if (netif_running(tun->dev))
2809c8d68e6bSJason Wang 		netif_tx_wake_all_queues(tun->dev);
2810e35259a9SMax Krasnyansky 
28111da177e4SLinus Torvalds 	strcpy(ifr->ifr_name, tun->dev->name);
28121da177e4SLinus Torvalds 	return 0;
28131da177e4SLinus Torvalds 
2814662ca437SJason Wang err_detach:
2815662ca437SJason Wang 	tun_detach_all(dev);
281611fc7d5aSEric Dumazet 	/* We are here because register_netdevice() has failed.
281711fc7d5aSEric Dumazet 	 * If register_netdevice() already called tun_free_netdev()
281811fc7d5aSEric Dumazet 	 * while dealing with the error, tun->pcpu_stats has been cleared.
281911fc7d5aSEric Dumazet 	 */
282011fc7d5aSEric Dumazet 	if (!tun->pcpu_stats)
2821ff244c6bSEric Dumazet 		goto err_free_dev;
2822ff244c6bSEric Dumazet 
2823662ca437SJason Wang err_free_flow:
2824662ca437SJason Wang 	tun_flow_uninit(tun);
2825662ca437SJason Wang 	security_tun_dev_free_security(tun->security);
2826608b9977SPaolo Abeni err_free_stat:
2827608b9977SPaolo Abeni 	free_percpu(tun->pcpu_stats);
28281da177e4SLinus Torvalds err_free_dev:
28291da177e4SLinus Torvalds 	free_netdev(dev);
28301da177e4SLinus Torvalds 	return err;
28311da177e4SLinus Torvalds }
28321da177e4SLinus Torvalds 
283312132768SKirill Tkhai static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
2834e3b99556SMark McLoughlin {
2835e3b99556SMark McLoughlin 	strcpy(ifr->ifr_name, tun->dev->name);
2836e3b99556SMark McLoughlin 
2837980c9e8cSDavid Woodhouse 	ifr->ifr_flags = tun_flags(tun);
2839e3b99556SMark McLoughlin }
2840e3b99556SMark McLoughlin 
28415228ddc9SRusty Russell /* This is like a cut-down ethtool_ops, except it is driven via the tun
28425228ddc9SRusty Russell  * fd so no privileges are required. */
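/* Note that TUN_F_CSUM is a prerequisite for the TSO bits: without it
 * every segmentation-offload request is rejected.  TUN_F_UFO is
 * accepted alongside TUN_F_CSUM but ignored, and any unknown flag makes
 * the ioctl fail with -EINVAL, which is how userspace probes for new
 * offloads.
 */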
284388255375SMichał Mirosław static int set_offload(struct tun_struct *tun, unsigned long arg)
28445228ddc9SRusty Russell {
2845c8f44affSMichał Mirosław 	netdev_features_t features = 0;
28465228ddc9SRusty Russell 
28475228ddc9SRusty Russell 	if (arg & TUN_F_CSUM) {
284888255375SMichał Mirosław 		features |= NETIF_F_HW_CSUM;
28495228ddc9SRusty Russell 		arg &= ~TUN_F_CSUM;
28505228ddc9SRusty Russell 
28515228ddc9SRusty Russell 		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
28525228ddc9SRusty Russell 			if (arg & TUN_F_TSO_ECN) {
28535228ddc9SRusty Russell 				features |= NETIF_F_TSO_ECN;
28545228ddc9SRusty Russell 				arg &= ~TUN_F_TSO_ECN;
28555228ddc9SRusty Russell 			}
28565228ddc9SRusty Russell 			if (arg & TUN_F_TSO4)
28575228ddc9SRusty Russell 				features |= NETIF_F_TSO;
28585228ddc9SRusty Russell 			if (arg & TUN_F_TSO6)
28595228ddc9SRusty Russell 				features |= NETIF_F_TSO6;
28605228ddc9SRusty Russell 			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
28615228ddc9SRusty Russell 		}
28620c19f846SWillem de Bruijn 
28630c19f846SWillem de Bruijn 		arg &= ~TUN_F_UFO;
28645228ddc9SRusty Russell 	}
28655228ddc9SRusty Russell 
28665228ddc9SRusty Russell 	/* This gives the user a way to test for new features in future by
28675228ddc9SRusty Russell 	 * trying to set them. */
28685228ddc9SRusty Russell 	if (arg)
28695228ddc9SRusty Russell 		return -EINVAL;
28705228ddc9SRusty Russell 
287188255375SMichał Mirosław 	tun->set_features = features;
287209050957SYaroslav Isakov 	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
287309050957SYaroslav Isakov 	tun->dev->wanted_features |= features;
287488255375SMichał Mirosław 	netdev_update_features(tun->dev);
28755228ddc9SRusty Russell 
28765228ddc9SRusty Russell 	return 0;
28775228ddc9SRusty Russell }
28785228ddc9SRusty Russell 
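/* Attach/detach a classic BPF socket filter on every queue.  The
 * program is kept in tun->fprog so the same filter can be applied to
 * queues attached later; if attaching fails on some queue, the filters
 * installed so far are rolled back.
 */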
2879c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n)
2880c8d68e6bSJason Wang {
2881c8d68e6bSJason Wang 	int i;
2882c8d68e6bSJason Wang 	struct tun_file *tfile;
2883c8d68e6bSJason Wang 
2884c8d68e6bSJason Wang 	for (i = 0; i < n; i++) {
2885b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
28868ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
28878ced425eSHannes Frederic Sowa 		sk_detach_filter(tfile->socket.sk);
28888ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
2889c8d68e6bSJason Wang 	}
2890c8d68e6bSJason Wang 
2891c8d68e6bSJason Wang 	tun->filter_attached = false;
2892c8d68e6bSJason Wang }
2893c8d68e6bSJason Wang 
2894c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun)
2895c8d68e6bSJason Wang {
2896c8d68e6bSJason Wang 	int i, ret = 0;
2897c8d68e6bSJason Wang 	struct tun_file *tfile;
2898c8d68e6bSJason Wang 
2899c8d68e6bSJason Wang 	for (i = 0; i < tun->numqueues; i++) {
2900b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
29018ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
29028ced425eSHannes Frederic Sowa 		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
29038ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
2904c8d68e6bSJason Wang 		if (ret) {
2905c8d68e6bSJason Wang 			tun_detach_filter(tun, i);
2906c8d68e6bSJason Wang 			return ret;
2907c8d68e6bSJason Wang 		}
2908c8d68e6bSJason Wang 	}
2909c8d68e6bSJason Wang 
2910c8d68e6bSJason Wang 	tun->filter_attached = true;
2911c8d68e6bSJason Wang 	return ret;
2912c8d68e6bSJason Wang }
2913c8d68e6bSJason Wang 
2914c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun)
2915c8d68e6bSJason Wang {
2916c8d68e6bSJason Wang 	struct tun_file *tfile;
2917c8d68e6bSJason Wang 	int i;
2918c8d68e6bSJason Wang 
2919c8d68e6bSJason Wang 	for (i = 0; i < tun->numqueues; i++) {
2920b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
2921c8d68e6bSJason Wang 		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2922c8d68e6bSJason Wang 	}
2923c8d68e6bSJason Wang }
2924c8d68e6bSJason Wang 
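/* TUNSETQUEUE: with IFF_ATTACH_QUEUE, re-attach a file that was
 * previously detached from its multi-queue device; with
 * IFF_DETACH_QUEUE, disable one queue of a multi-queue device.
 * Anything else is -EINVAL.
 */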
2925cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr)
2926cde8b15fSJason Wang {
2927cde8b15fSJason Wang 	struct tun_file *tfile = file->private_data;
2928cde8b15fSJason Wang 	struct tun_struct *tun;
2929cde8b15fSJason Wang 	int ret = 0;
2930cde8b15fSJason Wang 
2931cde8b15fSJason Wang 	rtnl_lock();
2932cde8b15fSJason Wang 
2933cde8b15fSJason Wang 	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
29344008e97fSJason Wang 		tun = tfile->detached;
29355dbbaf2dSPaul Moore 		if (!tun) {
2936cde8b15fSJason Wang 			ret = -EINVAL;
29375dbbaf2dSPaul Moore 			goto unlock;
29385dbbaf2dSPaul Moore 		}
29395dbbaf2dSPaul Moore 		ret = security_tun_dev_attach_queue(tun->security);
29405dbbaf2dSPaul Moore 		if (ret < 0)
29415dbbaf2dSPaul Moore 			goto unlock;
2942af3fb24eSEric Dumazet 		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
294377f22f92SYang Yingliang 				 tun->flags & IFF_NAPI_FRAGS, true);
29444008e97fSJason Wang 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2945b8deabd3SJason Wang 		tun = rtnl_dereference(tfile->tun);
294640630b82SMichael S. Tsirkin 		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
29474008e97fSJason Wang 			ret = -EINVAL;
2948cde8b15fSJason Wang 		else
29494008e97fSJason Wang 			__tun_detach(tfile, false);
29504008e97fSJason Wang 	} else
2951cde8b15fSJason Wang 		ret = -EINVAL;
2952cde8b15fSJason Wang 
295383c1f36fSSabrina Dubroca 	if (ret >= 0)
295483c1f36fSSabrina Dubroca 		netdev_state_change(tun->dev);
295583c1f36fSSabrina Dubroca 
29565dbbaf2dSPaul Moore unlock:
2957cde8b15fSJason Wang 	rtnl_unlock();
2958cde8b15fSJason Wang 	return ret;
2959cde8b15fSJason Wang }
2960cde8b15fSJason Wang 
29618f3f330dSJason Wang static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
2962cd5681d7SJason Wang 			void __user *data)
296396f84061SJason Wang {
296496f84061SJason Wang 	struct bpf_prog *prog;
296596f84061SJason Wang 	int fd;
296696f84061SJason Wang 
296796f84061SJason Wang 	if (copy_from_user(&fd, data, sizeof(fd)))
296896f84061SJason Wang 		return -EFAULT;
296996f84061SJason Wang 
297096f84061SJason Wang 	if (fd == -1) {
297196f84061SJason Wang 		prog = NULL;
297296f84061SJason Wang 	} else {
297396f84061SJason Wang 		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
297496f84061SJason Wang 		if (IS_ERR(prog))
297596f84061SJason Wang 			return PTR_ERR(prog);
297696f84061SJason Wang 	}
297796f84061SJason Wang 
2978cd5681d7SJason Wang 	return __tun_set_ebpf(tun, prog_p, prog);
297996f84061SJason Wang }
298096f84061SJason Wang 
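/* Common ioctl handler.  ifreq_len is sizeof(struct ifreq) for native
 * calls and sizeof(struct compat_ifreq) for the compat path, so the
 * same copy_from_user()/copy_to_user() logic serves both without a
 * separate translation layer (see tun_chr_compat_ioctl() below).
 */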
298150857e2aSArnd Bergmann static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
298250857e2aSArnd Bergmann 			    unsigned long arg, int ifreq_len)
29831da177e4SLinus Torvalds {
298436b50babSEric W. Biederman 	struct tun_file *tfile = file->private_data;
2985f663706aSKirill Tkhai 	struct net *net = sock_net(&tfile->sk);
2986631ab46bSEric W. Biederman 	struct tun_struct *tun;
29871da177e4SLinus Torvalds 	void __user *argp = (void __user *)arg;
298826d31925SNicolas Dichtel 	unsigned int ifindex, carrier;
29891da177e4SLinus Torvalds 	struct ifreq ifr;
29900625c883SEric W. Biederman 	kuid_t owner;
29910625c883SEric W. Biederman 	kgid_t group;
299233dccbb0SHerbert Xu 	int sndbuf;
2993d9d52b51SMichael S. Tsirkin 	int vnet_hdr_sz;
29941cf8e410SMichael S. Tsirkin 	int le;
2995f271b2ccSMax Krasnyansky 	int ret;
299683c1f36fSSabrina Dubroca 	bool do_notify = false;
29971da177e4SLinus Torvalds 
2998f2780d6dSKirill Tkhai 	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
2999f2780d6dSKirill Tkhai 	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
300050857e2aSArnd Bergmann 		if (copy_from_user(&ifr, argp, ifreq_len))
30011da177e4SLinus Torvalds 			return -EFAULT;
30028bbb1813SDavid S. Miller 	} else {
3003a117dacdSMathias Krause 		memset(&ifr, 0, sizeof(ifr));
30048bbb1813SDavid S. Miller 	}
3005631ab46bSEric W. Biederman 	if (cmd == TUNGETFEATURES) {
3006631ab46bSEric W. Biederman 		/* Currently this just means: "what IFF flags are valid?".
3007631ab46bSEric W. Biederman 		 * This is needed because we never checked for invalid flags on
3008031f5e03SMichael S. Tsirkin 		 * TUNSETIFF.
3009031f5e03SMichael S. Tsirkin 		 */
3010031f5e03SMichael S. Tsirkin 		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
3011631ab46bSEric W. Biederman 				(unsigned int __user *)argp);
3012f663706aSKirill Tkhai 	} else if (cmd == TUNSETQUEUE) {
3013cde8b15fSJason Wang 		return tun_set_queue(file, &ifr);
3014f663706aSKirill Tkhai 	} else if (cmd == SIOCGSKNS) {
3015f663706aSKirill Tkhai 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3016f663706aSKirill Tkhai 			return -EPERM;
3017f663706aSKirill Tkhai 		return open_related_ns(&net->ns, get_net_ns);
3018f663706aSKirill Tkhai 	}
3019631ab46bSEric W. Biederman 
3020c8d68e6bSJason Wang 	ret = 0;
3021876bfd4dSHerbert Xu 	rtnl_lock();
3022876bfd4dSHerbert Xu 
30239484dc74Syuan linyu 	tun = tun_get(tfile);
30240f16bc13SGao Feng 	if (cmd == TUNSETIFF) {
30250f16bc13SGao Feng 		ret = -EEXIST;
30260f16bc13SGao Feng 		if (tun)
30270f16bc13SGao Feng 			goto unlock;
30280f16bc13SGao Feng 
30291da177e4SLinus Torvalds 		ifr.ifr_name[IFNAMSIZ-1] = '\0';
30301da177e4SLinus Torvalds 
3031f2780d6dSKirill Tkhai 		ret = tun_set_iff(net, file, &ifr);
30321da177e4SLinus Torvalds 
3033876bfd4dSHerbert Xu 		if (ret)
3034876bfd4dSHerbert Xu 			goto unlock;
30351da177e4SLinus Torvalds 
303650857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
3037876bfd4dSHerbert Xu 			ret = -EFAULT;
3038876bfd4dSHerbert Xu 		goto unlock;
30391da177e4SLinus Torvalds 	}
3040fb7589a1SPavel Emelyanov 	if (cmd == TUNSETIFINDEX) {
3041fb7589a1SPavel Emelyanov 		ret = -EPERM;
3042fb7589a1SPavel Emelyanov 		if (tun)
3043fb7589a1SPavel Emelyanov 			goto unlock;
3044fb7589a1SPavel Emelyanov 
3045fb7589a1SPavel Emelyanov 		ret = -EFAULT;
3046fb7589a1SPavel Emelyanov 		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
3047fb7589a1SPavel Emelyanov 			goto unlock;
3048fb7589a1SPavel Emelyanov 
3049fb7589a1SPavel Emelyanov 		ret = 0;
3050fb7589a1SPavel Emelyanov 		tfile->ifindex = ifindex;
3051fb7589a1SPavel Emelyanov 		goto unlock;
3052fb7589a1SPavel Emelyanov 	}
30531da177e4SLinus Torvalds 
3054876bfd4dSHerbert Xu 	ret = -EBADFD;
30551da177e4SLinus Torvalds 	if (!tun)
3056876bfd4dSHerbert Xu 		goto unlock;
30571da177e4SLinus Torvalds 
30583424170fSMichal Kubecek 	netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);
30591da177e4SLinus Torvalds 
30600c3e0e3bSKirill Tkhai 	net = dev_net(tun->dev);
3061631ab46bSEric W. Biederman 	ret = 0;
30621da177e4SLinus Torvalds 	switch (cmd) {
3063e3b99556SMark McLoughlin 	case TUNGETIFF:
306412132768SKirill Tkhai 		tun_get_iff(tun, &ifr);
3065e3b99556SMark McLoughlin 
30663d407a80SPavel Emelyanov 		if (tfile->detached)
30673d407a80SPavel Emelyanov 			ifr.ifr_flags |= IFF_DETACH_QUEUE;
3068849c9b6fSPavel Emelyanov 		if (!tfile->socket.sk->sk_filter)
3069849c9b6fSPavel Emelyanov 			ifr.ifr_flags |= IFF_NOFILTER;
30703d407a80SPavel Emelyanov 
307150857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
3072631ab46bSEric W. Biederman 			ret = -EFAULT;
3073e3b99556SMark McLoughlin 		break;
3074e3b99556SMark McLoughlin 
30751da177e4SLinus Torvalds 	case TUNSETNOCSUM:
30761da177e4SLinus Torvalds 		/* Disable/Enable checksum */
30771da177e4SLinus Torvalds 
307888255375SMichał Mirosław 		/* [unimplemented] */
30793424170fSMichal Kubecek 		netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
30806b8a66eeSJoe Perches 			   arg ? "disabled" : "enabled");
30811da177e4SLinus Torvalds 		break;
30821da177e4SLinus Torvalds 
30831da177e4SLinus Torvalds 	case TUNSETPERSIST:
308454f968d6SJason Wang 		/* Disable/Enable persist mode. Keep an extra reference to
308554f968d6SJason Wang 		 * the module to prevent it from being unloaded.
308654f968d6SJason Wang 		 */
308740630b82SMichael S. Tsirkin 		if (arg && !(tun->flags & IFF_PERSIST)) {
308840630b82SMichael S. Tsirkin 			tun->flags |= IFF_PERSIST;
308954f968d6SJason Wang 			__module_get(THIS_MODULE);
309083c1f36fSSabrina Dubroca 			do_notify = true;
3091dd38bd85SJason Wang 		}
309240630b82SMichael S. Tsirkin 		if (!arg && (tun->flags & IFF_PERSIST)) {
309340630b82SMichael S. Tsirkin 			tun->flags &= ~IFF_PERSIST;
309454f968d6SJason Wang 			module_put(THIS_MODULE);
309583c1f36fSSabrina Dubroca 			do_notify = true;
309654f968d6SJason Wang 		}
30971da177e4SLinus Torvalds 
30983424170fSMichal Kubecek 		netif_info(tun, drv, tun->dev, "persist %s\n",
30996b8a66eeSJoe Perches 			   arg ? "enabled" : "disabled");
31001da177e4SLinus Torvalds 		break;
31011da177e4SLinus Torvalds 
31021da177e4SLinus Torvalds 	case TUNSETOWNER:
31031da177e4SLinus Torvalds 		/* Set owner of the device */
31040625c883SEric W. Biederman 		owner = make_kuid(current_user_ns(), arg);
31050625c883SEric W. Biederman 		if (!uid_valid(owner)) {
31060625c883SEric W. Biederman 			ret = -EINVAL;
31070625c883SEric W. Biederman 			break;
31080625c883SEric W. Biederman 		}
31090625c883SEric W. Biederman 		tun->owner = owner;
311083c1f36fSSabrina Dubroca 		do_notify = true;
31113424170fSMichal Kubecek 		netif_info(tun, drv, tun->dev, "owner set to %u\n",
31120625c883SEric W. Biederman 			   from_kuid(&init_user_ns, tun->owner));
31131da177e4SLinus Torvalds 		break;
31141da177e4SLinus Torvalds 
31158c644623SGuido Guenther 	case TUNSETGROUP:
31168c644623SGuido Guenther 		/* Set group of the device */
31170625c883SEric W. Biederman 		group = make_kgid(current_user_ns(), arg);
31180625c883SEric W. Biederman 		if (!gid_valid(group)) {
31190625c883SEric W. Biederman 			ret = -EINVAL;
31200625c883SEric W. Biederman 			break;
31210625c883SEric W. Biederman 		}
31220625c883SEric W. Biederman 		tun->group = group;
312383c1f36fSSabrina Dubroca 		do_notify = true;
31243424170fSMichal Kubecek 		netif_info(tun, drv, tun->dev, "group set to %u\n",
31250625c883SEric W. Biederman 			   from_kgid(&init_user_ns, tun->group));
31268c644623SGuido Guenther 		break;
31278c644623SGuido Guenther 
3128ff4cc3acSMike Kershaw 	case TUNSETLINK:
3129ff4cc3acSMike Kershaw 		/* Only allow setting the type when the interface is down */
3130ff4cc3acSMike Kershaw 		if (tun->dev->flags & IFF_UP) {
31313424170fSMichal Kubecek 			netif_info(tun, drv, tun->dev,
31326b8a66eeSJoe Perches 				   "Linktype set failed because interface is up\n");
313348abfe05SDavid S. Miller 			ret = -EBUSY;
3134ff4cc3acSMike Kershaw 		} else {
3135ff4cc3acSMike Kershaw 			tun->dev->type = (int) arg;
31363424170fSMichal Kubecek 			netif_info(tun, drv, tun->dev, "linktype set to %d\n",
31376b8a66eeSJoe Perches 				   tun->dev->type);
313848abfe05SDavid S. Miller 			ret = 0;
3139ff4cc3acSMike Kershaw 		}
3140631ab46bSEric W. Biederman 		break;
3141ff4cc3acSMike Kershaw 
31421da177e4SLinus Torvalds 	case TUNSETDEBUG:
31433424170fSMichal Kubecek 		tun->msg_enable = (u32)arg;
31441da177e4SLinus Torvalds 		break;
31453424170fSMichal Kubecek 
31465228ddc9SRusty Russell 	case TUNSETOFFLOAD:
314788255375SMichał Mirosław 		ret = set_offload(tun, arg);
3148631ab46bSEric W. Biederman 		break;
31495228ddc9SRusty Russell 
3150f271b2ccSMax Krasnyansky 	case TUNSETTXFILTER:
3151f271b2ccSMax Krasnyansky 		/* Can be set only for TAPs */
3152631ab46bSEric W. Biederman 		ret = -EINVAL;
315340630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3154631ab46bSEric W. Biederman 			break;
3155c0e5a8c2SHarvey Harrison 		ret = update_filter(&tun->txflt, (void __user *)arg);
3156631ab46bSEric W. Biederman 		break;
31571da177e4SLinus Torvalds 
31581da177e4SLinus Torvalds 	case SIOCGIFHWADDR:
3159b595076aSUwe Kleine-König 		/* Get hw address */
3160f271b2ccSMax Krasnyansky 		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
3161f271b2ccSMax Krasnyansky 		ifr.ifr_hwaddr.sa_family = tun->dev->type;
316250857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
3163631ab46bSEric W. Biederman 			ret = -EFAULT;
3164631ab46bSEric W. Biederman 		break;
31651da177e4SLinus Torvalds 
31661da177e4SLinus Torvalds 	case SIOCSIFHWADDR:
3167f271b2ccSMax Krasnyansky 		/* Set hw address */
31683a37a963SPetr Machata 		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL);
3169631ab46bSEric W. Biederman 		break;
317033dccbb0SHerbert Xu 
317133dccbb0SHerbert Xu 	case TUNGETSNDBUF:
317254f968d6SJason Wang 		sndbuf = tfile->socket.sk->sk_sndbuf;
317333dccbb0SHerbert Xu 		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
317433dccbb0SHerbert Xu 			ret = -EFAULT;
317533dccbb0SHerbert Xu 		break;
317633dccbb0SHerbert Xu 
317733dccbb0SHerbert Xu 	case TUNSETSNDBUF:
317833dccbb0SHerbert Xu 		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
317933dccbb0SHerbert Xu 			ret = -EFAULT;
318033dccbb0SHerbert Xu 			break;
318133dccbb0SHerbert Xu 		}
318293161922SCraig Gallek 		if (sndbuf <= 0) {
318393161922SCraig Gallek 			ret = -EINVAL;
318493161922SCraig Gallek 			break;
318593161922SCraig Gallek 		}
318633dccbb0SHerbert Xu 
3187c8d68e6bSJason Wang 		tun->sndbuf = sndbuf;
3188c8d68e6bSJason Wang 		tun_set_sndbuf(tun);
318933dccbb0SHerbert Xu 		break;
319033dccbb0SHerbert Xu 
3191d9d52b51SMichael S. Tsirkin 	case TUNGETVNETHDRSZ:
3192d9d52b51SMichael S. Tsirkin 		vnet_hdr_sz = tun->vnet_hdr_sz;
3193d9d52b51SMichael S. Tsirkin 		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
3194d9d52b51SMichael S. Tsirkin 			ret = -EFAULT;
3195d9d52b51SMichael S. Tsirkin 		break;
3196d9d52b51SMichael S. Tsirkin 
3197d9d52b51SMichael S. Tsirkin 	case TUNSETVNETHDRSZ:
3198d9d52b51SMichael S. Tsirkin 		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
3199d9d52b51SMichael S. Tsirkin 			ret = -EFAULT;
3200d9d52b51SMichael S. Tsirkin 			break;
3201d9d52b51SMichael S. Tsirkin 		}
3202d9d52b51SMichael S. Tsirkin 		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
3203d9d52b51SMichael S. Tsirkin 			ret = -EINVAL;
3204d9d52b51SMichael S. Tsirkin 			break;
3205d9d52b51SMichael S. Tsirkin 		}
3206d9d52b51SMichael S. Tsirkin 
3207d9d52b51SMichael S. Tsirkin 		tun->vnet_hdr_sz = vnet_hdr_sz;
3208d9d52b51SMichael S. Tsirkin 		break;
3209d9d52b51SMichael S. Tsirkin 
32101cf8e410SMichael S. Tsirkin 	case TUNGETVNETLE:
32111cf8e410SMichael S. Tsirkin 		le = !!(tun->flags & TUN_VNET_LE);
32121cf8e410SMichael S. Tsirkin 		if (put_user(le, (int __user *)argp))
32131cf8e410SMichael S. Tsirkin 			ret = -EFAULT;
32141cf8e410SMichael S. Tsirkin 		break;
32151cf8e410SMichael S. Tsirkin 
32161cf8e410SMichael S. Tsirkin 	case TUNSETVNETLE:
32171cf8e410SMichael S. Tsirkin 		if (get_user(le, (int __user *)argp)) {
32181cf8e410SMichael S. Tsirkin 			ret = -EFAULT;
32191cf8e410SMichael S. Tsirkin 			break;
32201cf8e410SMichael S. Tsirkin 		}
32211cf8e410SMichael S. Tsirkin 		if (le)
32221cf8e410SMichael S. Tsirkin 			tun->flags |= TUN_VNET_LE;
32231cf8e410SMichael S. Tsirkin 		else
32241cf8e410SMichael S. Tsirkin 			tun->flags &= ~TUN_VNET_LE;
32251cf8e410SMichael S. Tsirkin 		break;
32261cf8e410SMichael S. Tsirkin 
32278b8e658bSGreg Kurz 	case TUNGETVNETBE:
32288b8e658bSGreg Kurz 		ret = tun_get_vnet_be(tun, argp);
32298b8e658bSGreg Kurz 		break;
32308b8e658bSGreg Kurz 
32318b8e658bSGreg Kurz 	case TUNSETVNETBE:
32328b8e658bSGreg Kurz 		ret = tun_set_vnet_be(tun, argp);
32338b8e658bSGreg Kurz 		break;
32348b8e658bSGreg Kurz 
323599405162SMichael S. Tsirkin 	case TUNATTACHFILTER:
323699405162SMichael S. Tsirkin 		/* Can be set only for TAPs */
323799405162SMichael S. Tsirkin 		ret = -EINVAL;
323840630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
323999405162SMichael S. Tsirkin 			break;
324099405162SMichael S. Tsirkin 		ret = -EFAULT;
324154f968d6SJason Wang 		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
324299405162SMichael S. Tsirkin 			break;
324399405162SMichael S. Tsirkin 
3244c8d68e6bSJason Wang 		ret = tun_attach_filter(tun);
324599405162SMichael S. Tsirkin 		break;
324699405162SMichael S. Tsirkin 
324799405162SMichael S. Tsirkin 	case TUNDETACHFILTER:
324899405162SMichael S. Tsirkin 		/* Can be set only for TAPs */
324999405162SMichael S. Tsirkin 		ret = -EINVAL;
325040630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
325199405162SMichael S. Tsirkin 			break;
3252c8d68e6bSJason Wang 		ret = 0;
3253c8d68e6bSJason Wang 		tun_detach_filter(tun, tun->numqueues);
325499405162SMichael S. Tsirkin 		break;
325599405162SMichael S. Tsirkin 
325676975e9cSPavel Emelyanov 	case TUNGETFILTER:
325776975e9cSPavel Emelyanov 		ret = -EINVAL;
325840630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
325976975e9cSPavel Emelyanov 			break;
326076975e9cSPavel Emelyanov 		ret = -EFAULT;
326176975e9cSPavel Emelyanov 		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
326276975e9cSPavel Emelyanov 			break;
326376975e9cSPavel Emelyanov 		ret = 0;
326476975e9cSPavel Emelyanov 		break;
326576975e9cSPavel Emelyanov 
326696f84061SJason Wang 	case TUNSETSTEERINGEBPF:
3267cd5681d7SJason Wang 		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
326896f84061SJason Wang 		break;
326996f84061SJason Wang 
3270aff3d70aSJason Wang 	case TUNSETFILTEREBPF:
3271aff3d70aSJason Wang 		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3272aff3d70aSJason Wang 		break;
3273aff3d70aSJason Wang 
327426d31925SNicolas Dichtel 	case TUNSETCARRIER:
327526d31925SNicolas Dichtel 		ret = -EFAULT;
327626d31925SNicolas Dichtel 		if (copy_from_user(&carrier, argp, sizeof(carrier)))
327726d31925SNicolas Dichtel 			goto unlock;
327826d31925SNicolas Dichtel 
327926d31925SNicolas Dichtel 		ret = tun_net_change_carrier(tun->dev, (bool)carrier);
328026d31925SNicolas Dichtel 		break;
328126d31925SNicolas Dichtel 
32820c3e0e3bSKirill Tkhai 	case TUNGETDEVNETNS:
32830c3e0e3bSKirill Tkhai 		ret = -EPERM;
32840c3e0e3bSKirill Tkhai 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
32850c3e0e3bSKirill Tkhai 			goto unlock;
32860c3e0e3bSKirill Tkhai 		ret = open_related_ns(&net->ns, get_net_ns);
32870c3e0e3bSKirill Tkhai 		break;
32880c3e0e3bSKirill Tkhai 
32891da177e4SLinus Torvalds 	default:
3290631ab46bSEric W. Biederman 		ret = -EINVAL;
3291631ab46bSEric W. Biederman 		break;
3292ee289b64SJoe Perches 	}
32931da177e4SLinus Torvalds 
329483c1f36fSSabrina Dubroca 	if (do_notify)
329583c1f36fSSabrina Dubroca 		netdev_state_change(tun->dev);
329683c1f36fSSabrina Dubroca 
3297876bfd4dSHerbert Xu unlock:
3298876bfd4dSHerbert Xu 	rtnl_unlock();
3299876bfd4dSHerbert Xu 	if (tun)
3300631ab46bSEric W. Biederman 		tun_put(tun);
3301631ab46bSEric W. Biederman 	return ret;
33021da177e4SLinus Torvalds }
33031da177e4SLinus Torvalds 
330450857e2aSArnd Bergmann static long tun_chr_ioctl(struct file *file,
330550857e2aSArnd Bergmann 			  unsigned int cmd, unsigned long arg)
330650857e2aSArnd Bergmann {
330750857e2aSArnd Bergmann 	return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
330850857e2aSArnd Bergmann }
330950857e2aSArnd Bergmann 
331050857e2aSArnd Bergmann #ifdef CONFIG_COMPAT
331150857e2aSArnd Bergmann static long tun_chr_compat_ioctl(struct file *file,
331250857e2aSArnd Bergmann 			 unsigned int cmd, unsigned long arg)
331350857e2aSArnd Bergmann {
331450857e2aSArnd Bergmann 	switch (cmd) {
331550857e2aSArnd Bergmann 	case TUNSETIFF:
331650857e2aSArnd Bergmann 	case TUNGETIFF:
331750857e2aSArnd Bergmann 	case TUNSETTXFILTER:
331850857e2aSArnd Bergmann 	case TUNGETSNDBUF:
331950857e2aSArnd Bergmann 	case TUNSETSNDBUF:
332050857e2aSArnd Bergmann 	case SIOCGIFHWADDR:
332150857e2aSArnd Bergmann 	case SIOCSIFHWADDR:
332250857e2aSArnd Bergmann 		arg = (unsigned long)compat_ptr(arg);
332350857e2aSArnd Bergmann 		break;
332450857e2aSArnd Bergmann 	default:
332550857e2aSArnd Bergmann 		arg = (compat_ulong_t)arg;
332650857e2aSArnd Bergmann 		break;
332750857e2aSArnd Bergmann 	}
332850857e2aSArnd Bergmann 
332950857e2aSArnd Bergmann 	/*
333050857e2aSArnd Bergmann 	 * compat_ifreq is shorter than ifreq, so we must not access beyond
333150857e2aSArnd Bergmann 	 * the end of that structure. All fields that are used in this
333250857e2aSArnd Bergmann 	 * driver are compatible though, we don't need to convert the
333350857e2aSArnd Bergmann 	 * contents.
333450857e2aSArnd Bergmann 	 */
333550857e2aSArnd Bergmann 	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
333650857e2aSArnd Bergmann }
333750857e2aSArnd Bergmann #endif /* CONFIG_COMPAT */
333850857e2aSArnd Bergmann 
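/* FASYNC support: subscribe or unsubscribe this file for SIGIO, which
 * is delivered from tun_sock_write_space() and from the transmit path
 * when packets become available for reading.
 */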
33391da177e4SLinus Torvalds static int tun_chr_fasync(int fd, struct file *file, int on)
33401da177e4SLinus Torvalds {
334154f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
33421da177e4SLinus Torvalds 	int ret;
33431da177e4SLinus Torvalds 
334454f968d6SJason Wang 	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
33459d319522SJonathan Corbet 		goto out;
33461da177e4SLinus Torvalds 
33471da177e4SLinus Torvalds 	if (on) {
334801919134SEric W. Biederman 		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
334954f968d6SJason Wang 		tfile->flags |= TUN_FASYNC;
33501da177e4SLinus Torvalds 	} else
335154f968d6SJason Wang 		tfile->flags &= ~TUN_FASYNC;
33529d319522SJonathan Corbet 	ret = 0;
33539d319522SJonathan Corbet out:
33549d319522SJonathan Corbet 	return ret;
33551da177e4SLinus Torvalds }
33561da177e4SLinus Torvalds 
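/* Each open of the misc device allocates a tun_file that doubles as a
 * socket (tun_socket_ops/tun_proto), so sendmsg()/recvmsg() and
 * sk_attach_filter() work on it.  The file is not bound to any device
 * until TUNSETIFF or TUNSETQUEUE.
 */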
33571da177e4SLinus Torvalds static int tun_chr_open(struct inode *inode, struct file *file)
33581da177e4SLinus Torvalds {
3359140e807dSEric W. Biederman 	struct net *net = current->nsproxy->net_ns;
3360631ab46bSEric W. Biederman 	struct tun_file *tfile;
3361deed49fbSThomas Gleixner 
3362140e807dSEric W. Biederman 	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
336311aa9c28SEric W. Biederman 					    &tun_proto, 0);
3364631ab46bSEric W. Biederman 	if (!tfile)
3365631ab46bSEric W. Biederman 		return -ENOMEM;
3366b196d88aSJason Wang 	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
3367b196d88aSJason Wang 		sk_free(&tfile->sk);
3368b196d88aSJason Wang 		return -ENOMEM;
3369b196d88aSJason Wang 	}
3370b196d88aSJason Wang 
3371c7256f57SEric Dumazet 	mutex_init(&tfile->napi_mutex);
3372c956674bSMonam Agarwal 	RCU_INIT_POINTER(tfile->tun, NULL);
337354f968d6SJason Wang 	tfile->flags = 0;
3374fb7589a1SPavel Emelyanov 	tfile->ifindex = 0;
337554f968d6SJason Wang 
3376333f7909SAl Viro 	init_waitqueue_head(&tfile->socket.wq.wait);
337754f968d6SJason Wang 
337854f968d6SJason Wang 	tfile->socket.file = file;
337954f968d6SJason Wang 	tfile->socket.ops = &tun_socket_ops;
338054f968d6SJason Wang 
338154f968d6SJason Wang 	sock_init_data(&tfile->socket, &tfile->sk);
338254f968d6SJason Wang 
338354f968d6SJason Wang 	tfile->sk.sk_write_space = tun_sock_write_space;
338454f968d6SJason Wang 	tfile->sk.sk_sndbuf = INT_MAX;
338554f968d6SJason Wang 
3386631ab46bSEric W. Biederman 	file->private_data = tfile;
33874008e97fSJason Wang 	INIT_LIST_HEAD(&tfile->next);
338854f968d6SJason Wang 
338919a6afb2SJason Wang 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
339019a6afb2SJason Wang 
33911da177e4SLinus Torvalds 	return 0;
33921da177e4SLinus Torvalds }
33931da177e4SLinus Torvalds 
33941da177e4SLinus Torvalds static int tun_chr_close(struct inode *inode, struct file *file)
33951da177e4SLinus Torvalds {
3396631ab46bSEric W. Biederman 	struct tun_file *tfile = file->private_data;
33971da177e4SLinus Torvalds 
3398c8d68e6bSJason Wang 	tun_detach(tfile, true);
33991da177e4SLinus Torvalds 
34001da177e4SLinus Torvalds 	return 0;
34011da177e4SLinus Torvalds }
34021da177e4SLinus Torvalds 
340393e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS
34049484dc74Syuan linyu static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
340593e14b6dSMasatake YAMATO {
34069484dc74Syuan linyu 	struct tun_file *tfile = file->private_data;
340793e14b6dSMasatake YAMATO 	struct tun_struct *tun;
340893e14b6dSMasatake YAMATO 	struct ifreq ifr;
340993e14b6dSMasatake YAMATO 
341093e14b6dSMasatake YAMATO 	memset(&ifr, 0, sizeof(ifr));
341193e14b6dSMasatake YAMATO 
341293e14b6dSMasatake YAMATO 	rtnl_lock();
34139484dc74Syuan linyu 	tun = tun_get(tfile);
341493e14b6dSMasatake YAMATO 	if (tun)
341512132768SKirill Tkhai 		tun_get_iff(tun, &ifr);
341693e14b6dSMasatake YAMATO 	rtnl_unlock();
341793e14b6dSMasatake YAMATO 
341893e14b6dSMasatake YAMATO 	if (tun)
341993e14b6dSMasatake YAMATO 		tun_put(tun);
342093e14b6dSMasatake YAMATO 
3421a3816ab0SJoe Perches 	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
342293e14b6dSMasatake YAMATO }
342393e14b6dSMasatake YAMATO #endif
342493e14b6dSMasatake YAMATO 
3425d54b1fdbSArjan van de Ven static const struct file_operations tun_fops = {
34261da177e4SLinus Torvalds 	.owner	= THIS_MODULE,
34271da177e4SLinus Torvalds 	.llseek = no_llseek,
34289b067034SAl Viro 	.read_iter  = tun_chr_read_iter,
3429f5ff53b4SAl Viro 	.write_iter = tun_chr_write_iter,
34301da177e4SLinus Torvalds 	.poll	= tun_chr_poll,
3431876bfd4dSHerbert Xu 	.unlocked_ioctl	= tun_chr_ioctl,
343250857e2aSArnd Bergmann #ifdef CONFIG_COMPAT
343350857e2aSArnd Bergmann 	.compat_ioctl = tun_chr_compat_ioctl,
343450857e2aSArnd Bergmann #endif
34351da177e4SLinus Torvalds 	.open	= tun_chr_open,
34361da177e4SLinus Torvalds 	.release = tun_chr_close,
343793e14b6dSMasatake YAMATO 	.fasync = tun_chr_fasync,
343893e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS
343993e14b6dSMasatake YAMATO 	.show_fdinfo = tun_chr_show_fdinfo,
344093e14b6dSMasatake YAMATO #endif
34411da177e4SLinus Torvalds };
34421da177e4SLinus Torvalds 
34431da177e4SLinus Torvalds static struct miscdevice tun_miscdev = {
34441da177e4SLinus Torvalds 	.minor = TUN_MINOR,
34451da177e4SLinus Torvalds 	.name = "tun",
3446e454cea2SKay Sievers 	.nodename = "net/tun",
34471da177e4SLinus Torvalds 	.fops = &tun_fops,
34481da177e4SLinus Torvalds };
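
/*
 * misc_register() on this structure creates character device 10:TUN_MINOR
 * (the misc major and minor 200) with the devtmpfs name /dev/net/tun;
 * combined with the MODULE_ALIAS("devname:net/tun") at the bottom of this
 * file, opening that node can demand-load the module.
 */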
34491da177e4SLinus Torvalds 
34501da177e4SLinus Torvalds /* ethtool interface */
34511da177e4SLinus Torvalds 
34524e24f2ddSChas Williams static void tun_default_link_ksettings(struct net_device *dev,
345329ccc49dSPhilippe Reynes 				       struct ethtool_link_ksettings *cmd)
34541da177e4SLinus Torvalds {
345529ccc49dSPhilippe Reynes 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
345629ccc49dSPhilippe Reynes 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
345729ccc49dSPhilippe Reynes 	cmd->base.speed		= SPEED_10;
345829ccc49dSPhilippe Reynes 	cmd->base.duplex	= DUPLEX_FULL;
345929ccc49dSPhilippe Reynes 	cmd->base.port		= PORT_TP;
346029ccc49dSPhilippe Reynes 	cmd->base.phy_address	= 0;
346129ccc49dSPhilippe Reynes 	cmd->base.autoneg	= AUTONEG_DISABLE;
34624e24f2ddSChas Williams }
34634e24f2ddSChas Williams 
34644e24f2ddSChas Williams static int tun_get_link_ksettings(struct net_device *dev,
34654e24f2ddSChas Williams 				  struct ethtool_link_ksettings *cmd)
34664e24f2ddSChas Williams {
34674e24f2ddSChas Williams 	struct tun_struct *tun = netdev_priv(dev);
34684e24f2ddSChas Williams 
34694e24f2ddSChas Williams 	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
34704e24f2ddSChas Williams 	return 0;
34714e24f2ddSChas Williams }
34724e24f2ddSChas Williams 
34734e24f2ddSChas Williams static int tun_set_link_ksettings(struct net_device *dev,
34744e24f2ddSChas Williams 				  const struct ethtool_link_ksettings *cmd)
34754e24f2ddSChas Williams {
34764e24f2ddSChas Williams 	struct tun_struct *tun = netdev_priv(dev);
34774e24f2ddSChas Williams 
34784e24f2ddSChas Williams 	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
34791da177e4SLinus Torvalds 	return 0;
34801da177e4SLinus Torvalds }
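
/*
 * These link settings are purely advisory: a tun device has no real link,
 * so "get" returns the stored values (SPEED_10/DUPLEX_FULL by default) and
 * "set" merely overwrites the stored copy.  E.g. (interface name
 * illustrative):
 *
 *	# ethtool tun0                  -> Speed: 10Mb/s
 *	# ethtool -s tun0 speed 1000
 *	# ethtool tun0                  -> Speed: 1000Mb/s
 */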
34811da177e4SLinus Torvalds 
34821da177e4SLinus Torvalds static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
34831da177e4SLinus Torvalds {
34841da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
34851da177e4SLinus Torvalds 
348633a5ba14SRick Jones 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
348733a5ba14SRick Jones 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
34881da177e4SLinus Torvalds 
34891da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
349040630b82SMichael S. Tsirkin 	case IFF_TUN:
349133a5ba14SRick Jones 		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
34921da177e4SLinus Torvalds 		break;
349340630b82SMichael S. Tsirkin 	case IFF_TAP:
349433a5ba14SRick Jones 		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
34951da177e4SLinus Torvalds 		break;
34961da177e4SLinus Torvalds 	}
34971da177e4SLinus Torvalds }
34981da177e4SLinus Torvalds 
34991da177e4SLinus Torvalds static u32 tun_get_msglevel(struct net_device *dev)
35001da177e4SLinus Torvalds {
35011da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
35023424170fSMichal Kubecek 
35033424170fSMichal Kubecek 	return tun->msg_enable;
35041da177e4SLinus Torvalds }
35051da177e4SLinus Torvalds 
35061da177e4SLinus Torvalds static void tun_set_msglevel(struct net_device *dev, u32 value)
35071da177e4SLinus Torvalds {
35081da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
35093424170fSMichal Kubecek 
35103424170fSMichal Kubecek 	tun->msg_enable = value;
35111da177e4SLinus Torvalds }
35121da177e4SLinus Torvalds 
35135503fcecSJason Wang static int tun_get_coalesce(struct net_device *dev,
35145503fcecSJason Wang 			    struct ethtool_coalesce *ec)
35155503fcecSJason Wang {
35165503fcecSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
35175503fcecSJason Wang 
35185503fcecSJason Wang 	ec->rx_max_coalesced_frames = tun->rx_batched;
35195503fcecSJason Wang 
35205503fcecSJason Wang 	return 0;
35215503fcecSJason Wang }
35225503fcecSJason Wang 
35235503fcecSJason Wang static int tun_set_coalesce(struct net_device *dev,
35245503fcecSJason Wang 			    struct ethtool_coalesce *ec)
35255503fcecSJason Wang {
35265503fcecSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
35275503fcecSJason Wang 
35285503fcecSJason Wang 	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
35295503fcecSJason Wang 		tun->rx_batched = NAPI_POLL_WEIGHT;
35305503fcecSJason Wang 	else
35315503fcecSJason Wang 		tun->rx_batched = ec->rx_max_coalesced_frames;
35325503fcecSJason Wang 
35335503fcecSJason Wang 	return 0;
35345503fcecSJason Wang }
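
/*
 * rx-frames maps to tun->rx_batched, which bounds how many packets
 * tun_rx_batched() accumulates before flushing them to the network stack in
 * one go; requests above NAPI_POLL_WEIGHT (64) are silently capped.  E.g.
 * (interface name illustrative):
 *
 *	# ethtool -C tun0 rx-frames 32
 */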
35355503fcecSJason Wang 
35367282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops = {
3537e5ad00b3SJakub Kicinski 	.supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
35381da177e4SLinus Torvalds 	.get_drvinfo	= tun_get_drvinfo,
35391da177e4SLinus Torvalds 	.get_msglevel	= tun_get_msglevel,
35401da177e4SLinus Torvalds 	.set_msglevel	= tun_set_msglevel,
3541bee31369SNolan Leake 	.get_link	= ethtool_op_get_link,
3542eda29772SRichard Cochran 	.get_ts_info	= ethtool_op_get_ts_info,
35435503fcecSJason Wang 	.get_coalesce   = tun_get_coalesce,
35445503fcecSJason Wang 	.set_coalesce   = tun_set_coalesce,
354529ccc49dSPhilippe Reynes 	.get_link_ksettings = tun_get_link_ksettings,
35464e24f2ddSChas Williams 	.set_link_ksettings = tun_set_link_ksettings,
35471da177e4SLinus Torvalds };
35481da177e4SLinus Torvalds 
35491576d986SJason Wang static int tun_queue_resize(struct tun_struct *tun)
35501576d986SJason Wang {
35511576d986SJason Wang 	struct net_device *dev = tun->dev;
35521576d986SJason Wang 	struct tun_file *tfile;
35535990a305SJason Wang 	struct ptr_ring **rings;
35541576d986SJason Wang 	int n = tun->numqueues + tun->numdisabled;
35551576d986SJason Wang 	int ret, i;
35561576d986SJason Wang 
35575990a305SJason Wang 	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
35585990a305SJason Wang 	if (!rings)
35591576d986SJason Wang 		return -ENOMEM;
35601576d986SJason Wang 
35611576d986SJason Wang 	for (i = 0; i < tun->numqueues; i++) {
35621576d986SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
35635990a305SJason Wang 		rings[i] = &tfile->tx_ring;
35641576d986SJason Wang 	}
35651576d986SJason Wang 	list_for_each_entry(tfile, &tun->disabled, next)
35665990a305SJason Wang 		rings[i++] = &tfile->tx_ring;
35671576d986SJason Wang 
35685990a305SJason Wang 	ret = ptr_ring_resize_multiple(rings, n,
35695990a305SJason Wang 				       dev->tx_queue_len, GFP_KERNEL,
3570fc72d1d5SJason Wang 				       tun_ptr_free);
35711576d986SJason Wang 
35725990a305SJason Wang 	kfree(rings);
35731576d986SJason Wang 	return ret;
35741576d986SJason Wang }
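
/*
 * tun_queue_resize() runs from the notifier below on
 * NETDEV_CHANGE_TX_QUEUE_LEN, e.g. after
 *
 *	# ip link set tun0 txqueuelen 5000
 *
 * It resizes the tx_ring of every queue, attached and disabled alike, in a
 * single ptr_ring_resize_multiple() call; entries that no longer fit are
 * released through tun_ptr_free.
 */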
35751576d986SJason Wang 
35761576d986SJason Wang static int tun_device_event(struct notifier_block *unused,
35771576d986SJason Wang 			    unsigned long event, void *ptr)
35781576d986SJason Wang {
35791576d986SJason Wang 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
35801576d986SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
358172b319dcSFei Li 	int i;
35821576d986SJason Wang 
358386dfb4acSCraig Gallek 	if (dev->rtnl_link_ops != &tun_link_ops)
358486dfb4acSCraig Gallek 		return NOTIFY_DONE;
358586dfb4acSCraig Gallek 
35861576d986SJason Wang 	switch (event) {
35871576d986SJason Wang 	case NETDEV_CHANGE_TX_QUEUE_LEN:
35881576d986SJason Wang 		if (tun_queue_resize(tun))
35891576d986SJason Wang 			return NOTIFY_BAD;
35901576d986SJason Wang 		break;
359172b319dcSFei Li 	case NETDEV_UP:
359272b319dcSFei Li 		for (i = 0; i < tun->numqueues; i++) {
359372b319dcSFei Li 			struct tun_file *tfile;
359472b319dcSFei Li 
359572b319dcSFei Li 			tfile = rtnl_dereference(tun->tfiles[i]);
359672b319dcSFei Li 			tfile->socket.sk->sk_write_space(tfile->socket.sk);
359772b319dcSFei Li 		}
359872b319dcSFei Li 		break;
35991576d986SJason Wang 	default:
36001576d986SJason Wang 		break;
36011576d986SJason Wang 	}
36021576d986SJason Wang 
36031576d986SJason Wang 	return NOTIFY_DONE;
36041576d986SJason Wang }
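
/*
 * The NETDEV_UP arm re-runs sk_write_space on every attached queue so that
 * pollers and writers which blocked while the device was down are woken
 * once it can carry traffic again.
 */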
36051576d986SJason Wang 
36061576d986SJason Wang static struct notifier_block tun_notifier_block __read_mostly = {
36071576d986SJason Wang 	.notifier_call	= tun_device_event,
36081576d986SJason Wang };
360979d17604SPavel Emelyanov 
36101da177e4SLinus Torvalds static int __init tun_init(void)
36111da177e4SLinus Torvalds {
36121da177e4SLinus Torvalds 	int ret = 0;
36131da177e4SLinus Torvalds 
36146b8a66eeSJoe Perches 	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
36151da177e4SLinus Torvalds 
3616f019a7a5SEric W. Biederman 	ret = rtnl_link_register(&tun_link_ops);
361779d17604SPavel Emelyanov 	if (ret) {
36186b8a66eeSJoe Perches 		pr_err("Can't register link_ops\n");
3619f019a7a5SEric W. Biederman 		goto err_linkops;
362079d17604SPavel Emelyanov 	}
362179d17604SPavel Emelyanov 
36221da177e4SLinus Torvalds 	ret = misc_register(&tun_miscdev);
362379d17604SPavel Emelyanov 	if (ret) {
36246b8a66eeSJoe Perches 		pr_err("Can't register misc device %d\n", TUN_MINOR);
362579d17604SPavel Emelyanov 		goto err_misc;
362679d17604SPavel Emelyanov 	}
36271576d986SJason Wang 
36285edfbd3cSTonghao Zhang 	ret = register_netdevice_notifier(&tun_notifier_block);
36295edfbd3cSTonghao Zhang 	if (ret) {
36305edfbd3cSTonghao Zhang 		pr_err("Can't register netdevice notifier\n");
36315edfbd3cSTonghao Zhang 		goto err_notifier;
36325edfbd3cSTonghao Zhang 	}
36335edfbd3cSTonghao Zhang 
363479d17604SPavel Emelyanov 	return 0;
36355edfbd3cSTonghao Zhang 
36365edfbd3cSTonghao Zhang err_notifier:
36375edfbd3cSTonghao Zhang 	misc_deregister(&tun_miscdev);
363879d17604SPavel Emelyanov err_misc:
3639f019a7a5SEric W. Biederman 	rtnl_link_unregister(&tun_link_ops);
3640f019a7a5SEric W. Biederman err_linkops:
36411da177e4SLinus Torvalds 	return ret;
36421da177e4SLinus Torvalds }
36431da177e4SLinus Torvalds 
36441da177e4SLinus Torvalds static void tun_cleanup(void)
36451da177e4SLinus Torvalds {
36461da177e4SLinus Torvalds 	misc_deregister(&tun_miscdev);
3647f019a7a5SEric W. Biederman 	rtnl_link_unregister(&tun_link_ops);
36481576d986SJason Wang 	unregister_netdevice_notifier(&tun_notifier_block);
36491da177e4SLinus Torvalds }
36501da177e4SLinus Torvalds 
365105c2828cSMichael S. Tsirkin /* Get an underlying socket object from a tun file.  Returns error unless file
365205c2828cSMichael S. Tsirkin  * is attached to a device.  The returned object works like a packet socket;
365305c2828cSMichael S. Tsirkin  * it can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible
365405c2828cSMichael S. Tsirkin  * for holding a reference to the file for as long as the socket is in use. */
365505c2828cSMichael S. Tsirkin struct socket *tun_get_socket(struct file *file)
365605c2828cSMichael S. Tsirkin {
36576e914fc7SJason Wang 	struct tun_file *tfile;
365805c2828cSMichael S. Tsirkin 	if (file->f_op != &tun_fops)
365905c2828cSMichael S. Tsirkin 		return ERR_PTR(-EINVAL);
36606e914fc7SJason Wang 	tfile = file->private_data;
36616e914fc7SJason Wang 	if (!tfile)
366205c2828cSMichael S. Tsirkin 		return ERR_PTR(-EBADFD);
366354f968d6SJason Wang 	return &tfile->socket;
366405c2828cSMichael S. Tsirkin }
366505c2828cSMichael S. Tsirkin EXPORT_SYMBOL_GPL(tun_get_socket);
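
/*
 * Sketch of a typical in-kernel caller (vhost-net does essentially this
 * when userspace hands it a tun fd; error handling abbreviated):
 *
 *	struct file *file = fget(fd);
 *	struct socket *sock;
 *
 *	if (!file)
 *		return ERR_PTR(-EBADF);
 *	sock = tun_get_socket(file);
 *	if (IS_ERR(sock))
 *		fput(file);
 *	return sock;
 */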
366605c2828cSMichael S. Tsirkin 
36675990a305SJason Wang struct ptr_ring *tun_get_tx_ring(struct file *file)
366883339c6bSJason Wang {
366983339c6bSJason Wang 	struct tun_file *tfile;
367083339c6bSJason Wang 
367183339c6bSJason Wang 	if (file->f_op != &tun_fops)
367283339c6bSJason Wang 		return ERR_PTR(-EINVAL);
367383339c6bSJason Wang 	tfile = file->private_data;
367483339c6bSJason Wang 	if (!tfile)
367583339c6bSJason Wang 		return ERR_PTR(-EBADFD);
36765990a305SJason Wang 	return &tfile->tx_ring;
367783339c6bSJason Wang }
36785990a305SJason Wang EXPORT_SYMBOL_GPL(tun_get_tx_ring);
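
/*
 * Like tun_get_socket(), this is meant for in-kernel consumers; vhost-net
 * uses the returned ring to peek and consume queued packets (skbs or XDP
 * frames) directly, bypassing recvmsg.
 */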
367983339c6bSJason Wang 
36801da177e4SLinus Torvalds module_init(tun_init);
36811da177e4SLinus Torvalds module_exit(tun_cleanup);
36821da177e4SLinus Torvalds MODULE_DESCRIPTION(DRV_DESCRIPTION);
36831da177e4SLinus Torvalds MODULE_AUTHOR(DRV_COPYRIGHT);
36841da177e4SLinus Torvalds MODULE_LICENSE("GPL");
36851da177e4SLinus Torvalds MODULE_ALIAS_MISCDEV(TUN_MINOR);
3686578454ffSKay Sievers MODULE_ALIAS("devname:net/tun");
3687