xref: /openbmc/linux/drivers/net/tun.c (revision 16d083e28f1a4f6deef82be92d6a0f5aa2fe7e08)
1c942fddfSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  *  TUN - Universal TUN/TAP device driver.
41da177e4SLinus Torvalds  *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
71da177e4SLinus Torvalds  */
81da177e4SLinus Torvalds 
91da177e4SLinus Torvalds /*
101da177e4SLinus Torvalds  *  Changes:
111da177e4SLinus Torvalds  *
12ff4cc3acSMike Kershaw  *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
13ff4cc3acSMike Kershaw  *    Add TUNSETLINK ioctl to set the link encapsulation
14ff4cc3acSMike Kershaw  *
151da177e4SLinus Torvalds  *  Mark Smith <markzzzsmith@yahoo.com.au>
16344dc8edSJoe Perches  *    Use eth_random_addr() for tap MAC address.
171da177e4SLinus Torvalds  *
181da177e4SLinus Torvalds  *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
191da177e4SLinus Torvalds  *    Fixes in packet dropping, queue length setting and queue wakeup.
201da177e4SLinus Torvalds  *    Increased default tx queue length.
211da177e4SLinus Torvalds  *    Added ethtool API.
221da177e4SLinus Torvalds  *    Minor cleanups
231da177e4SLinus Torvalds  *
241da177e4SLinus Torvalds  *  Daniel Podlejski <underley@underley.eu.org>
251da177e4SLinus Torvalds  *    Modifications for 2.3.99-pre5 kernel.
261da177e4SLinus Torvalds  */
271da177e4SLinus Torvalds 
286b8a66eeSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
296b8a66eeSJoe Perches 
301da177e4SLinus Torvalds #define DRV_NAME	"tun"
311da177e4SLinus Torvalds #define DRV_VERSION	"1.6"
321da177e4SLinus Torvalds #define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
331da177e4SLinus Torvalds #define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
341da177e4SLinus Torvalds 
351da177e4SLinus Torvalds #include <linux/module.h>
361da177e4SLinus Torvalds #include <linux/errno.h>
371da177e4SLinus Torvalds #include <linux/kernel.h>
38174cd4b1SIngo Molnar #include <linux/sched/signal.h>
391da177e4SLinus Torvalds #include <linux/major.h>
401da177e4SLinus Torvalds #include <linux/slab.h>
411da177e4SLinus Torvalds #include <linux/poll.h>
421da177e4SLinus Torvalds #include <linux/fcntl.h>
431da177e4SLinus Torvalds #include <linux/init.h>
441da177e4SLinus Torvalds #include <linux/skbuff.h>
451da177e4SLinus Torvalds #include <linux/netdevice.h>
461da177e4SLinus Torvalds #include <linux/etherdevice.h>
471da177e4SLinus Torvalds #include <linux/miscdevice.h>
481da177e4SLinus Torvalds #include <linux/ethtool.h>
491da177e4SLinus Torvalds #include <linux/rtnetlink.h>
5050857e2aSArnd Bergmann #include <linux/compat.h>
511da177e4SLinus Torvalds #include <linux/if.h>
521da177e4SLinus Torvalds #include <linux/if_arp.h>
531da177e4SLinus Torvalds #include <linux/if_ether.h>
541da177e4SLinus Torvalds #include <linux/if_tun.h>
556680ec68SJason Wang #include <linux/if_vlan.h>
561da177e4SLinus Torvalds #include <linux/crc32.h>
57d647a591SPavel Emelyanov #include <linux/nsproxy.h>
58f43798c2SRusty Russell #include <linux/virtio_net.h>
5999405162SMichael S. Tsirkin #include <linux/rcupdate.h>
60881d966bSEric W. Biederman #include <net/net_namespace.h>
6179d17604SPavel Emelyanov #include <net/netns/generic.h>
62f019a7a5SEric W. Biederman #include <net/rtnetlink.h>
6333dccbb0SHerbert Xu #include <net/sock.h>
64735fc405SJesper Dangaard Brouer #include <net/xdp.h>
65b9815eb1SJason A. Donenfeld #include <net/ip_tunnels.h>
6693e14b6dSMasatake YAMATO #include <linux/seq_file.h>
67e0b46d0eSHerbert Xu #include <linux/uio.h>
681576d986SJason Wang #include <linux/skb_array.h>
69761876c8SJason Wang #include <linux/bpf.h>
70761876c8SJason Wang #include <linux/bpf_trace.h>
7190e33d45SPetar Penkov #include <linux/mutex.h>
72cca8ea3bSPhillip Potter #include <linux/ieee802154.h>
73cca8ea3bSPhillip Potter #include <linux/if_ltalk.h>
74cca8ea3bSPhillip Potter #include <uapi/linux/if_fddi.h>
75cca8ea3bSPhillip Potter #include <uapi/linux/if_hippi.h>
76cca8ea3bSPhillip Potter #include <uapi/linux/if_fc.h>
77cca8ea3bSPhillip Potter #include <net/ax25.h>
78cca8ea3bSPhillip Potter #include <net/rose.h>
79cca8ea3bSPhillip Potter #include <net/6lowpan.h>
801da177e4SLinus Torvalds 
817c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
82f2780d6dSKirill Tkhai #include <linux/proc_fs.h>
831da177e4SLinus Torvalds 
844e24f2ddSChas Williams static void tun_default_link_ksettings(struct net_device *dev,
854e24f2ddSChas Williams 				       struct ethtool_link_ksettings *cmd);
864e24f2ddSChas Williams 
877df13219SJason Wang #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
8866ccbc9cSJason Wang 
89031f5e03SMichael S. Tsirkin /* TUN device flags */
90031f5e03SMichael S. Tsirkin 
91031f5e03SMichael S. Tsirkin /* IFF_ATTACH_QUEUE is never stored in device flags, so it is overloaded
92031f5e03SMichael S. Tsirkin  * to mean fasync when stored in the per-file flags.
93031f5e03SMichael S. Tsirkin  */
94031f5e03SMichael S. Tsirkin #define TUN_FASYNC	IFF_ATTACH_QUEUE
951cf8e410SMichael S. Tsirkin /* High bits in flags field are unused. */
961cf8e410SMichael S. Tsirkin #define TUN_VNET_LE     0x80000000
978b8e658bSGreg Kurz #define TUN_VNET_BE     0x40000000
98031f5e03SMichael S. Tsirkin 
99031f5e03SMichael S. Tsirkin #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
10090e33d45SPetar Penkov 		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
10190e33d45SPetar Penkov 
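/* Editorial note (illustrative, not part of the driver): TUN_FEATURES lists
 * the ifr_flags bits that userspace may set through TUNSETIFF. A minimal
 * hedged sketch of opening a multi-queue tap with these flags follows; the
 * device name "tap0" and the lack of cleanup on error are assumptions made
 * only for illustration.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	static int tap_open(void)
 *	{
 *		struct ifreq ifr;
 *		int fd = open("/dev/net/tun", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
 *		strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);
 *		if (ioctl(fd, TUNSETIFF, &ifr) < 0)
 *			return -1;
 *		return fd;
 *	}
 */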
1020690899bSMichael S. Tsirkin #define GOODCOPY_LEN 128
1030690899bSMichael S. Tsirkin 
104f271b2ccSMax Krasnyansky #define FLT_EXACT_COUNT 8
105f271b2ccSMax Krasnyansky struct tap_filter {
106f271b2ccSMax Krasnyansky 	unsigned int    count;    /* Number of addrs. Zero means disabled */
107f271b2ccSMax Krasnyansky 	u32             mask[2];  /* Mask of the hashed addrs */
108f271b2ccSMax Krasnyansky 	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
109f271b2ccSMax Krasnyansky };
110f271b2ccSMax Krasnyansky 
111baf71c5cSPankaj Gupta /* MAX_TAP_QUEUES 256 is chosen to allow the number of rx/tx queues to equal
112baf71c5cSPankaj Gupta  * the maximum number of vCPUs in a guest. */
113baf71c5cSPankaj Gupta #define MAX_TAP_QUEUES 256
114b8732fb7SJason Wang #define MAX_TAP_FLOWS  4096
115c8d68e6bSJason Wang 
11696442e42SJason Wang #define TUN_FLOW_EXPIRE (3 * HZ)
11796442e42SJason Wang 
11854f968d6SJason Wang /* A tun_file connects an open character device to a tuntap netdevice. It
11992d4ea6eSstephen hemminger  * also contains all socket-related structures (except sock_fprog and tap_filter)
12054f968d6SJason Wang  * and serves as one transmit queue for the tuntap device. The sock_fprog and
12154f968d6SJason Wang  * tap_filter are kept in tun_struct since they are used for filtering on the
12236fe8c09SRami Rosen  * netdevice, not for a specific queue (at least I didn't see a requirement for
12354f968d6SJason Wang  * this).
1246e914fc7SJason Wang  *
1256e914fc7SJason Wang  * RCU usage:
12636fe8c09SRami Rosen  * The tun_file and tun_struct are loosely coupled; the pointer from one to the
1276e914fc7SJason Wang  * other can only be read while rcu_read_lock or rtnl_lock is held.
12854f968d6SJason Wang  */
129631ab46bSEric W. Biederman struct tun_file {
13054f968d6SJason Wang 	struct sock sk;
13154f968d6SJason Wang 	struct socket socket;
1326e914fc7SJason Wang 	struct tun_struct __rcu *tun;
13354f968d6SJason Wang 	struct fasync_struct *fasync;
13454f968d6SJason Wang 	/* only used for fasync */
13554f968d6SJason Wang 	unsigned int flags;
136fb7589a1SPavel Emelyanov 	union {
137c8d68e6bSJason Wang 		u16 queue_index;
138fb7589a1SPavel Emelyanov 		unsigned int ifindex;
139fb7589a1SPavel Emelyanov 	};
14094317099SPetar Penkov 	struct napi_struct napi;
141aec72f33SEric Dumazet 	bool napi_enabled;
142af3fb24eSEric Dumazet 	bool napi_frags_enabled;
14390e33d45SPetar Penkov 	struct mutex napi_mutex;	/* Protects access to the above napi */
1444008e97fSJason Wang 	struct list_head next;
1454008e97fSJason Wang 	struct tun_struct *detached;
1465990a305SJason Wang 	struct ptr_ring tx_ring;
1478bf5c4eeSJesper Dangaard Brouer 	struct xdp_rxq_info xdp_rxq;
148631ab46bSEric W. Biederman };
149631ab46bSEric W. Biederman 
150f9e06c45SJason Wang struct tun_page {
151f9e06c45SJason Wang 	struct page *page;
152f9e06c45SJason Wang 	int count;
153f9e06c45SJason Wang };
154f9e06c45SJason Wang 
15596442e42SJason Wang struct tun_flow_entry {
15696442e42SJason Wang 	struct hlist_node hash_link;
15796442e42SJason Wang 	struct rcu_head rcu;
15896442e42SJason Wang 	struct tun_struct *tun;
15996442e42SJason Wang 
16096442e42SJason Wang 	u32 rxhash;
1619bc88939STom Herbert 	u32 rps_rxhash;
16296442e42SJason Wang 	int queue_index;
16383b1bc12SLi RongQing 	unsigned long updated ____cacheline_aligned_in_smp;
16496442e42SJason Wang };
16596442e42SJason Wang 
16696442e42SJason Wang #define TUN_NUM_FLOW_ENTRIES 1024
167f13b5468SLi RongQing #define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
16896442e42SJason Wang 
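/* Editorial note: tun_hashfn() below masks with TUN_MASK_FLOW_ENTRIES, which
 * is only equivalent to a modulo because TUN_NUM_FLOW_ENTRIES is a power of
 * two.
 */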
169cd5681d7SJason Wang struct tun_prog {
17096f84061SJason Wang 	struct rcu_head rcu;
17196f84061SJason Wang 	struct bpf_prog *prog;
17296f84061SJason Wang };
17396f84061SJason Wang 
17454f968d6SJason Wang /* Since the socket was moved to tun_file, the socket filter, sndbuf and vnet
17536fe8c09SRami Rosen  * header size are restored when the file is attached to a persistent device,
17654f968d6SJason Wang  * so that the behavior of a persistent device is preserved.
17754f968d6SJason Wang  */
17814daa021SRusty Russell struct tun_struct {
179c8d68e6bSJason Wang 	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
180c8d68e6bSJason Wang 	unsigned int            numqueues;
181f271b2ccSMax Krasnyansky 	unsigned int 		flags;
1820625c883SEric W. Biederman 	kuid_t			owner;
1830625c883SEric W. Biederman 	kgid_t			group;
18414daa021SRusty Russell 
18514daa021SRusty Russell 	struct net_device	*dev;
186c8f44affSMichał Mirosław 	netdev_features_t	set_features;
18788255375SMichał Mirosław #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
188d591a1f3SDavid S. Miller 			  NETIF_F_TSO6)
189d9d52b51SMichael S. Tsirkin 
190eaea34b2SPaolo Abeni 	int			align;
191d9d52b51SMichael S. Tsirkin 	int			vnet_hdr_sz;
19254f968d6SJason Wang 	int			sndbuf;
19354f968d6SJason Wang 	struct tap_filter	txflt;
19454f968d6SJason Wang 	struct sock_fprog	fprog;
19554f968d6SJason Wang 	/* protected by rtnl lock */
19654f968d6SJason Wang 	bool			filter_attached;
1973424170fSMichal Kubecek 	u32			msg_enable;
19896442e42SJason Wang 	spinlock_t lock;
19996442e42SJason Wang 	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
20096442e42SJason Wang 	struct timer_list flow_gc_timer;
20196442e42SJason Wang 	unsigned long ageing_time;
2024008e97fSJason Wang 	unsigned int numdisabled;
2034008e97fSJason Wang 	struct list_head disabled;
2045dbbaf2dSPaul Moore 	void *security;
205b8732fb7SJason Wang 	u32 flow_count;
2065503fcecSJason Wang 	u32 rx_batched;
207497a5757SHeiner Kallweit 	atomic_long_t rx_frame_errors;
208761876c8SJason Wang 	struct bpf_prog __rcu *xdp_prog;
209cd5681d7SJason Wang 	struct tun_prog __rcu *steering_prog;
210aff3d70aSJason Wang 	struct tun_prog __rcu *filter_prog;
2114e24f2ddSChas Williams 	struct ethtool_link_ksettings link_ksettings;
212158b515fSGeorge Kennedy 	/* init args */
213158b515fSGeorge Kennedy 	struct file *file;
214158b515fSGeorge Kennedy 	struct ifreq *ifr;
21514daa021SRusty Russell };
21614daa021SRusty Russell 
217aff3d70aSJason Wang struct veth {
218aff3d70aSJason Wang 	__be16 h_vlan_proto;
219aff3d70aSJason Wang 	__be16 h_vlan_TCI;
2201da177e4SLinus Torvalds };
2211da177e4SLinus Torvalds 
222158b515fSGeorge Kennedy static void tun_flow_init(struct tun_struct *tun);
223158b515fSGeorge Kennedy static void tun_flow_uninit(struct tun_struct *tun);
224158b515fSGeorge Kennedy 
22594317099SPetar Penkov static int tun_napi_receive(struct napi_struct *napi, int budget)
22694317099SPetar Penkov {
22794317099SPetar Penkov 	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
22894317099SPetar Penkov 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
22994317099SPetar Penkov 	struct sk_buff_head process_queue;
23094317099SPetar Penkov 	struct sk_buff *skb;
23194317099SPetar Penkov 	int received = 0;
23294317099SPetar Penkov 
23394317099SPetar Penkov 	__skb_queue_head_init(&process_queue);
23494317099SPetar Penkov 
23594317099SPetar Penkov 	spin_lock(&queue->lock);
23694317099SPetar Penkov 	skb_queue_splice_tail_init(queue, &process_queue);
23794317099SPetar Penkov 	spin_unlock(&queue->lock);
23894317099SPetar Penkov 
23994317099SPetar Penkov 	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
24094317099SPetar Penkov 		napi_gro_receive(napi, skb);
24194317099SPetar Penkov 		++received;
24294317099SPetar Penkov 	}
24394317099SPetar Penkov 
24494317099SPetar Penkov 	if (!skb_queue_empty(&process_queue)) {
24594317099SPetar Penkov 		spin_lock(&queue->lock);
24694317099SPetar Penkov 		skb_queue_splice(&process_queue, queue);
24794317099SPetar Penkov 		spin_unlock(&queue->lock);
24894317099SPetar Penkov 	}
24994317099SPetar Penkov 
25094317099SPetar Penkov 	return received;
25194317099SPetar Penkov }
25294317099SPetar Penkov 
25394317099SPetar Penkov static int tun_napi_poll(struct napi_struct *napi, int budget)
25494317099SPetar Penkov {
25594317099SPetar Penkov 	unsigned int received;
25694317099SPetar Penkov 
25794317099SPetar Penkov 	received = tun_napi_receive(napi, budget);
25894317099SPetar Penkov 
25994317099SPetar Penkov 	if (received < budget)
26094317099SPetar Penkov 		napi_complete_done(napi, received);
26194317099SPetar Penkov 
26294317099SPetar Penkov 	return received;
26394317099SPetar Penkov }
26494317099SPetar Penkov 
26594317099SPetar Penkov static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
266af3fb24eSEric Dumazet 			  bool napi_en, bool napi_frags)
26794317099SPetar Penkov {
268aec72f33SEric Dumazet 	tfile->napi_enabled = napi_en;
269af3fb24eSEric Dumazet 	tfile->napi_frags_enabled = napi_en && napi_frags;
27094317099SPetar Penkov 	if (napi_en) {
271*16d083e2SJakub Kicinski 		netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll);
27294317099SPetar Penkov 		napi_enable(&tfile->napi);
27394317099SPetar Penkov 	}
27494317099SPetar Penkov }
27594317099SPetar Penkov 
27606e55addSEric Dumazet static void tun_napi_disable(struct tun_file *tfile)
27794317099SPetar Penkov {
278aec72f33SEric Dumazet 	if (tfile->napi_enabled)
27994317099SPetar Penkov 		napi_disable(&tfile->napi);
28094317099SPetar Penkov }
28194317099SPetar Penkov 
28206e55addSEric Dumazet static void tun_napi_del(struct tun_file *tfile)
28394317099SPetar Penkov {
284aec72f33SEric Dumazet 	if (tfile->napi_enabled)
28594317099SPetar Penkov 		netif_napi_del(&tfile->napi);
28694317099SPetar Penkov }
28794317099SPetar Penkov 
288af3fb24eSEric Dumazet static bool tun_napi_frags_enabled(const struct tun_file *tfile)
28990e33d45SPetar Penkov {
290af3fb24eSEric Dumazet 	return tfile->napi_frags_enabled;
29190e33d45SPetar Penkov }
29290e33d45SPetar Penkov 
2938b8e658bSGreg Kurz #ifdef CONFIG_TUN_VNET_CROSS_LE
2948b8e658bSGreg Kurz static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
2958b8e658bSGreg Kurz {
2968b8e658bSGreg Kurz 	return tun->flags & TUN_VNET_BE ? false :
2978b8e658bSGreg Kurz 		virtio_legacy_is_little_endian();
2988b8e658bSGreg Kurz }
2998b8e658bSGreg Kurz 
3008b8e658bSGreg Kurz static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
3018b8e658bSGreg Kurz {
3028b8e658bSGreg Kurz 	int be = !!(tun->flags & TUN_VNET_BE);
3038b8e658bSGreg Kurz 
3048b8e658bSGreg Kurz 	if (put_user(be, argp))
3058b8e658bSGreg Kurz 		return -EFAULT;
3068b8e658bSGreg Kurz 
3078b8e658bSGreg Kurz 	return 0;
3088b8e658bSGreg Kurz }
3098b8e658bSGreg Kurz 
3108b8e658bSGreg Kurz static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
3118b8e658bSGreg Kurz {
3128b8e658bSGreg Kurz 	int be;
3138b8e658bSGreg Kurz 
3148b8e658bSGreg Kurz 	if (get_user(be, argp))
3158b8e658bSGreg Kurz 		return -EFAULT;
3168b8e658bSGreg Kurz 
3178b8e658bSGreg Kurz 	if (be)
3188b8e658bSGreg Kurz 		tun->flags |= TUN_VNET_BE;
3198b8e658bSGreg Kurz 	else
3208b8e658bSGreg Kurz 		tun->flags &= ~TUN_VNET_BE;
3218b8e658bSGreg Kurz 
3228b8e658bSGreg Kurz 	return 0;
3238b8e658bSGreg Kurz }
3248b8e658bSGreg Kurz #else
3258b8e658bSGreg Kurz static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
3268b8e658bSGreg Kurz {
3278b8e658bSGreg Kurz 	return virtio_legacy_is_little_endian();
3288b8e658bSGreg Kurz }
3298b8e658bSGreg Kurz 
3308b8e658bSGreg Kurz static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
3318b8e658bSGreg Kurz {
3328b8e658bSGreg Kurz 	return -EINVAL;
3338b8e658bSGreg Kurz }
3348b8e658bSGreg Kurz 
3358b8e658bSGreg Kurz static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
3368b8e658bSGreg Kurz {
3378b8e658bSGreg Kurz 	return -EINVAL;
3388b8e658bSGreg Kurz }
3398b8e658bSGreg Kurz #endif /* CONFIG_TUN_VNET_CROSS_LE */
3408b8e658bSGreg Kurz 
34125bd55bbSGreg Kurz static inline bool tun_is_little_endian(struct tun_struct *tun)
34225bd55bbSGreg Kurz {
3437d824109SGreg Kurz 	return tun->flags & TUN_VNET_LE ||
3448b8e658bSGreg Kurz 		tun_legacy_is_little_endian(tun);
34525bd55bbSGreg Kurz }
34625bd55bbSGreg Kurz 
34756f0dcc5SMichael S. Tsirkin static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
34856f0dcc5SMichael S. Tsirkin {
34925bd55bbSGreg Kurz 	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
35056f0dcc5SMichael S. Tsirkin }
35156f0dcc5SMichael S. Tsirkin 
35256f0dcc5SMichael S. Tsirkin static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
35356f0dcc5SMichael S. Tsirkin {
35425bd55bbSGreg Kurz 	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
35556f0dcc5SMichael S. Tsirkin }
35656f0dcc5SMichael S. Tsirkin 
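/* Editorial note (illustrative, not part of the driver): the helpers above
 * select the byte order used for the virtio_net_hdr exchanged with userspace
 * when IFF_VNET_HDR is enabled. A minimal hedged sketch of forcing
 * little-endian headers from userspace follows; "fd" is assumed to be an
 * already-open tun/tap file descriptor.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/if_tun.h>
 *
 *	static int tap_force_vnet_le(int fd)
 *	{
 *		int le = 1;
 *
 *		return ioctl(fd, TUNSETVNETLE, &le);
 *	}
 */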
35796442e42SJason Wang static inline u32 tun_hashfn(u32 rxhash)
35896442e42SJason Wang {
359f13b5468SLi RongQing 	return rxhash & TUN_MASK_FLOW_ENTRIES;
36096442e42SJason Wang }
36196442e42SJason Wang 
36296442e42SJason Wang static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
36396442e42SJason Wang {
36496442e42SJason Wang 	struct tun_flow_entry *e;
36596442e42SJason Wang 
366b67bfe0dSSasha Levin 	hlist_for_each_entry_rcu(e, head, hash_link) {
36796442e42SJason Wang 		if (e->rxhash == rxhash)
36896442e42SJason Wang 			return e;
36996442e42SJason Wang 	}
37096442e42SJason Wang 	return NULL;
37196442e42SJason Wang }
37296442e42SJason Wang 
37396442e42SJason Wang static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
37496442e42SJason Wang 					      struct hlist_head *head,
37596442e42SJason Wang 					      u32 rxhash, u16 queue_index)
37696442e42SJason Wang {
3779fdc6befSEric Dumazet 	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
3789fdc6befSEric Dumazet 
37996442e42SJason Wang 	if (e) {
3803424170fSMichal Kubecek 		netif_info(tun, tx_queued, tun->dev,
3813424170fSMichal Kubecek 			   "create flow: hash %u index %u\n",
38296442e42SJason Wang 			   rxhash, queue_index);
38396442e42SJason Wang 		e->updated = jiffies;
38496442e42SJason Wang 		e->rxhash = rxhash;
3859bc88939STom Herbert 		e->rps_rxhash = 0;
38696442e42SJason Wang 		e->queue_index = queue_index;
38796442e42SJason Wang 		e->tun = tun;
38896442e42SJason Wang 		hlist_add_head_rcu(&e->hash_link, head);
389b8732fb7SJason Wang 		++tun->flow_count;
39096442e42SJason Wang 	}
39196442e42SJason Wang 	return e;
39296442e42SJason Wang }
39396442e42SJason Wang 
39496442e42SJason Wang static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
39596442e42SJason Wang {
3963424170fSMichal Kubecek 	netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
39796442e42SJason Wang 		   e->rxhash, e->queue_index);
39896442e42SJason Wang 	hlist_del_rcu(&e->hash_link);
3999fdc6befSEric Dumazet 	kfree_rcu(e, rcu);
400b8732fb7SJason Wang 	--tun->flow_count;
40196442e42SJason Wang }
40296442e42SJason Wang 
40396442e42SJason Wang static void tun_flow_flush(struct tun_struct *tun)
40496442e42SJason Wang {
40596442e42SJason Wang 	int i;
40696442e42SJason Wang 
40796442e42SJason Wang 	spin_lock_bh(&tun->lock);
40896442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
40996442e42SJason Wang 		struct tun_flow_entry *e;
410b67bfe0dSSasha Levin 		struct hlist_node *n;
41196442e42SJason Wang 
412b67bfe0dSSasha Levin 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
41396442e42SJason Wang 			tun_flow_delete(tun, e);
41496442e42SJason Wang 	}
41596442e42SJason Wang 	spin_unlock_bh(&tun->lock);
41696442e42SJason Wang }
41796442e42SJason Wang 
41896442e42SJason Wang static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
41996442e42SJason Wang {
42096442e42SJason Wang 	int i;
42196442e42SJason Wang 
42296442e42SJason Wang 	spin_lock_bh(&tun->lock);
42396442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
42496442e42SJason Wang 		struct tun_flow_entry *e;
425b67bfe0dSSasha Levin 		struct hlist_node *n;
42696442e42SJason Wang 
427b67bfe0dSSasha Levin 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
42896442e42SJason Wang 			if (e->queue_index == queue_index)
42996442e42SJason Wang 				tun_flow_delete(tun, e);
43096442e42SJason Wang 		}
43196442e42SJason Wang 	}
43296442e42SJason Wang 	spin_unlock_bh(&tun->lock);
43396442e42SJason Wang }
43496442e42SJason Wang 
435e99e88a9SKees Cook static void tun_flow_cleanup(struct timer_list *t)
43696442e42SJason Wang {
437e99e88a9SKees Cook 	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
43896442e42SJason Wang 	unsigned long delay = tun->ageing_time;
43996442e42SJason Wang 	unsigned long next_timer = jiffies + delay;
44096442e42SJason Wang 	unsigned long count = 0;
44196442e42SJason Wang 	int i;
44296442e42SJason Wang 
4437dbfb4efSEric Dumazet 	spin_lock(&tun->lock);
44496442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
44596442e42SJason Wang 		struct tun_flow_entry *e;
446b67bfe0dSSasha Levin 		struct hlist_node *n;
44796442e42SJason Wang 
448b67bfe0dSSasha Levin 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
44996442e42SJason Wang 			unsigned long this_timer;
45081d98fa4SEric Dumazet 
45196442e42SJason Wang 			this_timer = e->updated + delay;
45281d98fa4SEric Dumazet 			if (time_before_eq(this_timer, jiffies)) {
45396442e42SJason Wang 				tun_flow_delete(tun, e);
45481d98fa4SEric Dumazet 				continue;
45581d98fa4SEric Dumazet 			}
45681d98fa4SEric Dumazet 			count++;
45781d98fa4SEric Dumazet 			if (time_before(this_timer, next_timer))
45896442e42SJason Wang 				next_timer = this_timer;
45996442e42SJason Wang 		}
46096442e42SJason Wang 	}
46196442e42SJason Wang 
46296442e42SJason Wang 	if (count)
46396442e42SJason Wang 		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
4647dbfb4efSEric Dumazet 	spin_unlock(&tun->lock);
46596442e42SJason Wang }
46696442e42SJason Wang 
46749974420SEric Dumazet static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
4689e85722dSJason Wang 			    struct tun_file *tfile)
46996442e42SJason Wang {
47096442e42SJason Wang 	struct hlist_head *head;
47196442e42SJason Wang 	struct tun_flow_entry *e;
47296442e42SJason Wang 	unsigned long delay = tun->ageing_time;
4739e85722dSJason Wang 	u16 queue_index = tfile->queue_index;
47496442e42SJason Wang 
47596442e42SJason Wang 	head = &tun->flows[tun_hashfn(rxhash)];
47696442e42SJason Wang 
47796442e42SJason Wang 	rcu_read_lock();
47896442e42SJason Wang 
47996442e42SJason Wang 	e = tun_flow_find(head, rxhash);
48096442e42SJason Wang 	if (likely(e)) {
48196442e42SJason Wang 		/* TODO: keep queueing to old queue until it's empty? */
4824ffdd22eSEric Dumazet 		if (READ_ONCE(e->queue_index) != queue_index)
4834ffdd22eSEric Dumazet 			WRITE_ONCE(e->queue_index, queue_index);
48483b1bc12SLi RongQing 		if (e->updated != jiffies)
48596442e42SJason Wang 			e->updated = jiffies;
4869bc88939STom Herbert 		sock_rps_record_flow_hash(e->rps_rxhash);
48796442e42SJason Wang 	} else {
48896442e42SJason Wang 		spin_lock_bh(&tun->lock);
489b8732fb7SJason Wang 		if (!tun_flow_find(head, rxhash) &&
490b8732fb7SJason Wang 		    tun->flow_count < MAX_TAP_FLOWS)
49196442e42SJason Wang 			tun_flow_create(tun, head, rxhash, queue_index);
49296442e42SJason Wang 
49396442e42SJason Wang 		if (!timer_pending(&tun->flow_gc_timer))
49496442e42SJason Wang 			mod_timer(&tun->flow_gc_timer,
49596442e42SJason Wang 				  round_jiffies_up(jiffies + delay));
49696442e42SJason Wang 		spin_unlock_bh(&tun->lock);
49796442e42SJason Wang 	}
49896442e42SJason Wang 
49996442e42SJason Wang 	rcu_read_unlock();
50096442e42SJason Wang }
50196442e42SJason Wang 
502516c512bSMichal Kubecek /* Save the hash received in the stack receive path and update the
5039bc88939STom Herbert  * flow_hash table accordingly.
5049bc88939STom Herbert  */
5059bc88939STom Herbert static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
5069bc88939STom Herbert {
507567e4b79SEric Dumazet 	if (unlikely(e->rps_rxhash != hash))
5089bc88939STom Herbert 		e->rps_rxhash = hash;
5099bc88939STom Herbert }
5109bc88939STom Herbert 
5114b035271SWang Li /* We try to identify a flow through its rxhash. The reason we do not
51292d4ea6eSstephen hemminger  * check the rxq no. is that some cards (e.g. 82599) choose the rxq based
513c8d68e6bSJason Wang  * on the txq where the last packet of the flow was sent. As the
514c8d68e6bSJason Wang  * userspace application moves between processors, we may get a
5154b035271SWang Li  * different rxq no. here.
516c8d68e6bSJason Wang  */
51796f84061SJason Wang static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
518c8d68e6bSJason Wang {
51996442e42SJason Wang 	struct tun_flow_entry *e;
520c8d68e6bSJason Wang 	u32 txq = 0;
521c8d68e6bSJason Wang 	u32 numqueues = 0;
522c8d68e6bSJason Wang 
5236aa7de05SMark Rutland 	numqueues = READ_ONCE(tun->numqueues);
524c8d68e6bSJason Wang 
525feec084aSJason Wang 	txq = __skb_get_hash_symmetric(skb);
52696442e42SJason Wang 	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
5279bc88939STom Herbert 	if (e) {
5289bc88939STom Herbert 		tun_flow_save_rps_rxhash(e, txq);
529fbe4d456SZhi Yong Wu 		txq = e->queue_index;
5304b035271SWang Li 	} else {
531c8d68e6bSJason Wang 		/* use multiply and shift instead of expensive divide */
532c8d68e6bSJason Wang 		txq = ((u64)txq * numqueues) >> 32;
533c8d68e6bSJason Wang 	}
534c8d68e6bSJason Wang 
535c8d68e6bSJason Wang 	return txq;
536c8d68e6bSJason Wang }
537c8d68e6bSJason Wang 
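/* Editorial note: the multiply-and-shift in tun_automq_select_queue() above
 * maps a 32-bit hash h onto [0, numqueues) without a division, since
 * ((u64)h * numqueues) >> 32 equals floor(h * numqueues / 2^32). Worked
 * example with illustrative values: for numqueues = 4 and h = 0x80000000,
 * (0x80000000ULL * 4) >> 32 = 2, i.e. a hash halfway through the 32-bit
 * range lands on queue 2 of 4.
 */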
53896f84061SJason Wang static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
53996f84061SJason Wang {
540cd5681d7SJason Wang 	struct tun_prog *prog;
541a35d310fSJason Wang 	u32 numqueues;
54296f84061SJason Wang 	u16 ret = 0;
54396f84061SJason Wang 
544a35d310fSJason Wang 	numqueues = READ_ONCE(tun->numqueues);
545a35d310fSJason Wang 	if (!numqueues)
546a35d310fSJason Wang 		return 0;
547a35d310fSJason Wang 
54896f84061SJason Wang 	prog = rcu_dereference(tun->steering_prog);
54996f84061SJason Wang 	if (prog)
55096f84061SJason Wang 		ret = bpf_prog_run_clear_cb(prog->prog, skb);
55196f84061SJason Wang 
552a35d310fSJason Wang 	return ret % numqueues;
55396f84061SJason Wang }
55496f84061SJason Wang 
55596f84061SJason Wang static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
556a350ecceSPaolo Abeni 			    struct net_device *sb_dev)
55796f84061SJason Wang {
55896f84061SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
55996f84061SJason Wang 	u16 ret;
56096f84061SJason Wang 
56196f84061SJason Wang 	rcu_read_lock();
56296f84061SJason Wang 	if (rcu_dereference(tun->steering_prog))
56396f84061SJason Wang 		ret = tun_ebpf_select_queue(tun, skb);
56496f84061SJason Wang 	else
56596f84061SJason Wang 		ret = tun_automq_select_queue(tun, skb);
56696f84061SJason Wang 	rcu_read_unlock();
56796f84061SJason Wang 
56896f84061SJason Wang 	return ret;
56996f84061SJason Wang }
57096f84061SJason Wang 
571cde8b15fSJason Wang static inline bool tun_not_capable(struct tun_struct *tun)
572cde8b15fSJason Wang {
573cde8b15fSJason Wang 	const struct cred *cred = current_cred();
574c260b772SEric W. Biederman 	struct net *net = dev_net(tun->dev);
575cde8b15fSJason Wang 
576cde8b15fSJason Wang 	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
577cde8b15fSJason Wang 		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
578c260b772SEric W. Biederman 		!ns_capable(net->user_ns, CAP_NET_ADMIN);
579cde8b15fSJason Wang }
580cde8b15fSJason Wang 
581c8d68e6bSJason Wang static void tun_set_real_num_queues(struct tun_struct *tun)
582c8d68e6bSJason Wang {
583c8d68e6bSJason Wang 	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
584c8d68e6bSJason Wang 	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
585c8d68e6bSJason Wang }
586c8d68e6bSJason Wang 
5874008e97fSJason Wang static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
5884008e97fSJason Wang {
5894008e97fSJason Wang 	tfile->detached = tun;
5904008e97fSJason Wang 	list_add_tail(&tfile->next, &tun->disabled);
5914008e97fSJason Wang 	++tun->numdisabled;
5924008e97fSJason Wang }
5934008e97fSJason Wang 
594d32649d1SJason Wang static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
5954008e97fSJason Wang {
5964008e97fSJason Wang 	struct tun_struct *tun = tfile->detached;
5974008e97fSJason Wang 
5984008e97fSJason Wang 	tfile->detached = NULL;
5994008e97fSJason Wang 	list_del_init(&tfile->next);
6004008e97fSJason Wang 	--tun->numdisabled;
6014008e97fSJason Wang 	return tun;
6024008e97fSJason Wang }
6034008e97fSJason Wang 
6043a403076SJason Wang void tun_ptr_free(void *ptr)
605fc72d1d5SJason Wang {
606fc72d1d5SJason Wang 	if (!ptr)
607fc72d1d5SJason Wang 		return;
6081ffcbc85SJesper Dangaard Brouer 	if (tun_is_xdp_frame(ptr)) {
6091ffcbc85SJesper Dangaard Brouer 		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
610fc72d1d5SJason Wang 
61103993094SJesper Dangaard Brouer 		xdp_return_frame(xdpf);
612fc72d1d5SJason Wang 	} else {
613fc72d1d5SJason Wang 		__skb_array_destroy_skb(ptr);
614fc72d1d5SJason Wang 	}
615fc72d1d5SJason Wang }
6163a403076SJason Wang EXPORT_SYMBOL_GPL(tun_ptr_free);
617fc72d1d5SJason Wang 
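/* Editorial note: entries in tfile->tx_ring are either sk_buff pointers or
 * xdp_frame pointers tagged with a low bit (see tun_is_xdp_frame() and
 * tun_ptr_to_xdp() used above), which is why tun_ptr_free() must dispatch
 * on the tag before freeing.
 */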
6184bfb0513SJason Wang static void tun_queue_purge(struct tun_file *tfile)
6194bfb0513SJason Wang {
620fc72d1d5SJason Wang 	void *ptr;
6211576d986SJason Wang 
622fc72d1d5SJason Wang 	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
623fc72d1d5SJason Wang 		tun_ptr_free(ptr);
6241576d986SJason Wang 
6255503fcecSJason Wang 	skb_queue_purge(&tfile->sk.sk_write_queue);
6264bfb0513SJason Wang 	skb_queue_purge(&tfile->sk.sk_error_queue);
6274bfb0513SJason Wang }
6284bfb0513SJason Wang 
629c8d68e6bSJason Wang static void __tun_detach(struct tun_file *tfile, bool clean)
630c8d68e6bSJason Wang {
631c8d68e6bSJason Wang 	struct tun_file *ntfile;
632c8d68e6bSJason Wang 	struct tun_struct *tun;
633c8d68e6bSJason Wang 
634b8deabd3SJason Wang 	tun = rtnl_dereference(tfile->tun);
635b8deabd3SJason Wang 
63694317099SPetar Penkov 	if (tun && clean) {
63706e55addSEric Dumazet 		tun_napi_disable(tfile);
63806e55addSEric Dumazet 		tun_napi_del(tfile);
63994317099SPetar Penkov 	}
64094317099SPetar Penkov 
6419e85722dSJason Wang 	if (tun && !tfile->detached) {
642c8d68e6bSJason Wang 		u16 index = tfile->queue_index;
643c8d68e6bSJason Wang 		BUG_ON(index >= tun->numqueues);
644c8d68e6bSJason Wang 
645c8d68e6bSJason Wang 		rcu_assign_pointer(tun->tfiles[index],
646c8d68e6bSJason Wang 				   tun->tfiles[tun->numqueues - 1]);
647b8deabd3SJason Wang 		ntfile = rtnl_dereference(tun->tfiles[index]);
648c8d68e6bSJason Wang 		ntfile->queue_index = index;
6499871a9e4SJason Wang 		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
6509871a9e4SJason Wang 				   NULL);
651c8d68e6bSJason Wang 
652c8d68e6bSJason Wang 		--tun->numqueues;
6539e85722dSJason Wang 		if (clean) {
654c956674bSMonam Agarwal 			RCU_INIT_POINTER(tfile->tun, NULL);
655c8d68e6bSJason Wang 			sock_put(&tfile->sk);
6569e85722dSJason Wang 		} else
6574008e97fSJason Wang 			tun_disable_queue(tun, tfile);
658c8d68e6bSJason Wang 
659c8d68e6bSJason Wang 		synchronize_net();
66096442e42SJason Wang 		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
661c8d68e6bSJason Wang 		/* Drop read queue */
6624bfb0513SJason Wang 		tun_queue_purge(tfile);
663c8d68e6bSJason Wang 		tun_set_real_num_queues(tun);
664dd38bd85SJason Wang 	} else if (tfile->detached && clean) {
6654008e97fSJason Wang 		tun = tun_enable_queue(tfile);
666dd38bd85SJason Wang 		sock_put(&tfile->sk);
667dd38bd85SJason Wang 	}
668c8d68e6bSJason Wang 
669c8d68e6bSJason Wang 	if (clean) {
670af668b3cSMichael S. Tsirkin 		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
671af668b3cSMichael S. Tsirkin 			netif_carrier_off(tun->dev);
672af668b3cSMichael S. Tsirkin 
67340630b82SMichael S. Tsirkin 			if (!(tun->flags & IFF_PERSIST) &&
674af668b3cSMichael S. Tsirkin 			    tun->dev->reg_state == NETREG_REGISTERED)
6754008e97fSJason Wang 				unregister_netdevice(tun->dev);
676af668b3cSMichael S. Tsirkin 		}
677b196d88aSJason Wang 		if (tun)
678b196d88aSJason Wang 			xdp_rxq_info_unreg(&tfile->xdp_rxq);
6797063efd3SJason Wang 		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
680140e807dSEric W. Biederman 		sock_put(&tfile->sk);
681c8d68e6bSJason Wang 	}
682c8d68e6bSJason Wang }
683c8d68e6bSJason Wang 
684c8d68e6bSJason Wang static void tun_detach(struct tun_file *tfile, bool clean)
685c8d68e6bSJason Wang {
68683c1f36fSSabrina Dubroca 	struct tun_struct *tun;
68783c1f36fSSabrina Dubroca 	struct net_device *dev;
68883c1f36fSSabrina Dubroca 
689c8d68e6bSJason Wang 	rtnl_lock();
69083c1f36fSSabrina Dubroca 	tun = rtnl_dereference(tfile->tun);
69183c1f36fSSabrina Dubroca 	dev = tun ? tun->dev : NULL;
692c8d68e6bSJason Wang 	__tun_detach(tfile, clean);
69383c1f36fSSabrina Dubroca 	if (dev)
69483c1f36fSSabrina Dubroca 		netdev_state_change(dev);
695c8d68e6bSJason Wang 	rtnl_unlock();
696c8d68e6bSJason Wang }
697c8d68e6bSJason Wang 
698c8d68e6bSJason Wang static void tun_detach_all(struct net_device *dev)
699c8d68e6bSJason Wang {
700c8d68e6bSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
7014008e97fSJason Wang 	struct tun_file *tfile, *tmp;
702c8d68e6bSJason Wang 	int i, n = tun->numqueues;
703c8d68e6bSJason Wang 
704c8d68e6bSJason Wang 	for (i = 0; i < n; i++) {
705b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
706c8d68e6bSJason Wang 		BUG_ON(!tfile);
70706e55addSEric Dumazet 		tun_napi_disable(tfile);
708addf8fc4SJason Wang 		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
7099e641bdcSXi Wang 		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
710c956674bSMonam Agarwal 		RCU_INIT_POINTER(tfile->tun, NULL);
711c8d68e6bSJason Wang 		--tun->numqueues;
712c8d68e6bSJason Wang 	}
7139e85722dSJason Wang 	list_for_each_entry(tfile, &tun->disabled, next) {
714addf8fc4SJason Wang 		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
7159e641bdcSXi Wang 		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
716c956674bSMonam Agarwal 		RCU_INIT_POINTER(tfile->tun, NULL);
7179e85722dSJason Wang 	}
718c8d68e6bSJason Wang 	BUG_ON(tun->numqueues != 0);
719c8d68e6bSJason Wang 
720c8d68e6bSJason Wang 	synchronize_net();
721c8d68e6bSJason Wang 	for (i = 0; i < n; i++) {
722b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
72306e55addSEric Dumazet 		tun_napi_del(tfile);
724c8d68e6bSJason Wang 		/* Drop read queue */
7254bfb0513SJason Wang 		tun_queue_purge(tfile);
726b196d88aSJason Wang 		xdp_rxq_info_unreg(&tfile->xdp_rxq);
727c8d68e6bSJason Wang 		sock_put(&tfile->sk);
728c8d68e6bSJason Wang 	}
7294008e97fSJason Wang 	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
7304008e97fSJason Wang 		tun_enable_queue(tfile);
7314bfb0513SJason Wang 		tun_queue_purge(tfile);
732b196d88aSJason Wang 		xdp_rxq_info_unreg(&tfile->xdp_rxq);
7334008e97fSJason Wang 		sock_put(&tfile->sk);
7344008e97fSJason Wang 	}
7354008e97fSJason Wang 	BUG_ON(tun->numdisabled != 0);
736dd38bd85SJason Wang 
73740630b82SMichael S. Tsirkin 	if (tun->flags & IFF_PERSIST)
738dd38bd85SJason Wang 		module_put(THIS_MODULE);
739c8d68e6bSJason Wang }
740c8d68e6bSJason Wang 
74194317099SPetar Penkov static int tun_attach(struct tun_struct *tun, struct file *file,
74277f22f92SYang Yingliang 		      bool skip_filter, bool napi, bool napi_frags,
74377f22f92SYang Yingliang 		      bool publish_tun)
744a7385ba2SEric W. Biederman {
745631ab46bSEric W. Biederman 	struct tun_file *tfile = file->private_data;
7461576d986SJason Wang 	struct net_device *dev = tun->dev;
74738231b7aSEric W. Biederman 	int err;
748a7385ba2SEric W. Biederman 
7495dbbaf2dSPaul Moore 	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
7505dbbaf2dSPaul Moore 	if (err < 0)
7515dbbaf2dSPaul Moore 		goto out;
7525dbbaf2dSPaul Moore 
75338231b7aSEric W. Biederman 	err = -EINVAL;
7549e85722dSJason Wang 	if (rtnl_dereference(tfile->tun) && !tfile->detached)
75538231b7aSEric W. Biederman 		goto out;
75638231b7aSEric W. Biederman 
75738231b7aSEric W. Biederman 	err = -EBUSY;
75840630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
759c8d68e6bSJason Wang 		goto out;
760c8d68e6bSJason Wang 
761c8d68e6bSJason Wang 	err = -E2BIG;
7624008e97fSJason Wang 	if (!tfile->detached &&
7634008e97fSJason Wang 	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
76438231b7aSEric W. Biederman 		goto out;
76538231b7aSEric W. Biederman 
76638231b7aSEric W. Biederman 	err = 0;
76754f968d6SJason Wang 
76892d4ea6eSstephen hemminger 	/* Re-attach the filter to the persistent device */
769849c9b6fSPavel Emelyanov 	if (!skip_filter && (tun->filter_attached == true)) {
7708ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
7718ced425eSHannes Frederic Sowa 		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
7728ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
77354f968d6SJason Wang 		if (!err)
77454f968d6SJason Wang 			goto out;
77554f968d6SJason Wang 	}
7761576d986SJason Wang 
7771576d986SJason Wang 	if (!tfile->detached &&
778b196d88aSJason Wang 	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
779b196d88aSJason Wang 			    GFP_KERNEL, tun_ptr_free)) {
7801576d986SJason Wang 		err = -ENOMEM;
7811576d986SJason Wang 		goto out;
7821576d986SJason Wang 	}
7831576d986SJason Wang 
784c8d68e6bSJason Wang 	tfile->queue_index = tun->numqueues;
785addf8fc4SJason Wang 	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
7868bf5c4eeSJesper Dangaard Brouer 
7878bf5c4eeSJesper Dangaard Brouer 	if (tfile->detached) {
7888bf5c4eeSJesper Dangaard Brouer 		/* Re-attach detached tfile, updating XDP queue_index */
7898bf5c4eeSJesper Dangaard Brouer 		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
7908bf5c4eeSJesper Dangaard Brouer 
7918bf5c4eeSJesper Dangaard Brouer 		if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
7928bf5c4eeSJesper Dangaard Brouer 			tfile->xdp_rxq.queue_index = tfile->queue_index;
7938bf5c4eeSJesper Dangaard Brouer 	} else {
7948bf5c4eeSJesper Dangaard Brouer 		/* Setup XDP RX-queue info, for new tfile getting attached */
7958bf5c4eeSJesper Dangaard Brouer 		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
796b02e5a0eSBjörn Töpel 				       tun->dev, tfile->queue_index, 0);
7978bf5c4eeSJesper Dangaard Brouer 		if (err < 0)
7988bf5c4eeSJesper Dangaard Brouer 			goto out;
7998d5d8852SJesper Dangaard Brouer 		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
8008d5d8852SJesper Dangaard Brouer 						 MEM_TYPE_PAGE_SHARED, NULL);
8018d5d8852SJesper Dangaard Brouer 		if (err < 0) {
8028d5d8852SJesper Dangaard Brouer 			xdp_rxq_info_unreg(&tfile->xdp_rxq);
8038d5d8852SJesper Dangaard Brouer 			goto out;
8048d5d8852SJesper Dangaard Brouer 		}
8058bf5c4eeSJesper Dangaard Brouer 		err = 0;
8068bf5c4eeSJesper Dangaard Brouer 	}
8078bf5c4eeSJesper Dangaard Brouer 
80894317099SPetar Penkov 	if (tfile->detached) {
8094008e97fSJason Wang 		tun_enable_queue(tfile);
81094317099SPetar Penkov 	} else {
8114008e97fSJason Wang 		sock_hold(&tfile->sk);
812af3fb24eSEric Dumazet 		tun_napi_init(tun, tfile, napi, napi_frags);
81394317099SPetar Penkov 	}
8144008e97fSJason Wang 
815e4a2a304SJason Wang 	if (rtnl_dereference(tun->xdp_prog))
816e4a2a304SJason Wang 		sock_set_flag(&tfile->sk, SOCK_XDP);
817e4a2a304SJason Wang 
818c8d68e6bSJason Wang 	/* device is allowed to go away first, so no need to hold extra
819c8d68e6bSJason Wang 	 * refcnt.
820c8d68e6bSJason Wang 	 */
821a7385ba2SEric W. Biederman 
8220b7959b6SStanislav Fomichev 	/* Publish tfile->tun and tun->tfiles only after we've fully
8230b7959b6SStanislav Fomichev 	 * initialized tfile; otherwise we risk using half-initialized
8240b7959b6SStanislav Fomichev 	 * object.
8250b7959b6SStanislav Fomichev 	 */
82677f22f92SYang Yingliang 	if (publish_tun)
8270b7959b6SStanislav Fomichev 		rcu_assign_pointer(tfile->tun, tun);
8280b7959b6SStanislav Fomichev 	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
8290b7959b6SStanislav Fomichev 	tun->numqueues++;
8303a03cb84SGeorge Amanakis 	tun_set_real_num_queues(tun);
83138231b7aSEric W. Biederman out:
83238231b7aSEric W. Biederman 	return err;
833a7385ba2SEric W. Biederman }
834a7385ba2SEric W. Biederman 
8359484dc74Syuan linyu static struct tun_struct *tun_get(struct tun_file *tfile)
836631ab46bSEric W. Biederman {
8376e914fc7SJason Wang 	struct tun_struct *tun;
838c70f1829SEric W. Biederman 
8396e914fc7SJason Wang 	rcu_read_lock();
8406e914fc7SJason Wang 	tun = rcu_dereference(tfile->tun);
8416e914fc7SJason Wang 	if (tun)
8426e914fc7SJason Wang 		dev_hold(tun->dev);
8436e914fc7SJason Wang 	rcu_read_unlock();
844c70f1829SEric W. Biederman 
845c70f1829SEric W. Biederman 	return tun;
846631ab46bSEric W. Biederman }
847631ab46bSEric W. Biederman 
848631ab46bSEric W. Biederman static void tun_put(struct tun_struct *tun)
849631ab46bSEric W. Biederman {
8506e914fc7SJason Wang 	dev_put(tun->dev);
851631ab46bSEric W. Biederman }
852631ab46bSEric W. Biederman 
8536b8a66eeSJoe Perches /* TAP filtering */
854f271b2ccSMax Krasnyansky static void addr_hash_set(u32 *mask, const u8 *addr)
855f271b2ccSMax Krasnyansky {
856f271b2ccSMax Krasnyansky 	int n = ether_crc(ETH_ALEN, addr) >> 26;
857f271b2ccSMax Krasnyansky 	mask[n >> 5] |= (1 << (n & 31));
858f271b2ccSMax Krasnyansky }
859f271b2ccSMax Krasnyansky 
860f271b2ccSMax Krasnyansky static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
861f271b2ccSMax Krasnyansky {
862f271b2ccSMax Krasnyansky 	int n = ether_crc(ETH_ALEN, addr) >> 26;
863f271b2ccSMax Krasnyansky 	return mask[n >> 5] & (1 << (n & 31));
864f271b2ccSMax Krasnyansky }
865f271b2ccSMax Krasnyansky 
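/* Editorial note: addr_hash_set() and addr_hash_test() above implement a
 * 64-bit hash bitmap. ether_crc(ETH_ALEN, addr) >> 26 keeps the top 6 bits
 * of the CRC, giving an index n in [0, 63]; the matching bit is bit (n & 31)
 * of mask[n >> 5]. Worked example with an illustrative value: n = 37 selects
 * bit 5 of mask[1], since 37 >> 5 == 1 and 37 & 31 == 5.
 */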
866f271b2ccSMax Krasnyansky static int update_filter(struct tap_filter *filter, void __user *arg)
867f271b2ccSMax Krasnyansky {
868f271b2ccSMax Krasnyansky 	struct { u8 u[ETH_ALEN]; } *addr;
869f271b2ccSMax Krasnyansky 	struct tun_filter uf;
870f271b2ccSMax Krasnyansky 	int err, alen, n, nexact;
871f271b2ccSMax Krasnyansky 
872f271b2ccSMax Krasnyansky 	if (copy_from_user(&uf, arg, sizeof(uf)))
873f271b2ccSMax Krasnyansky 		return -EFAULT;
874f271b2ccSMax Krasnyansky 
875f271b2ccSMax Krasnyansky 	if (!uf.count) {
876f271b2ccSMax Krasnyansky 		/* Disabled */
877f271b2ccSMax Krasnyansky 		filter->count = 0;
878f271b2ccSMax Krasnyansky 		return 0;
879f271b2ccSMax Krasnyansky 	}
880f271b2ccSMax Krasnyansky 
881f271b2ccSMax Krasnyansky 	alen = ETH_ALEN * uf.count;
88228e8190dSMarkus Elfring 	addr = memdup_user(arg + sizeof(uf), alen);
88328e8190dSMarkus Elfring 	if (IS_ERR(addr))
88428e8190dSMarkus Elfring 		return PTR_ERR(addr);
885f271b2ccSMax Krasnyansky 
886f271b2ccSMax Krasnyansky 	/* The filter is updated without holding any locks, which is
887f271b2ccSMax Krasnyansky 	 * perfectly safe. We disable it first, and in the worst
888f271b2ccSMax Krasnyansky 	 * case we'll accept a few undesired packets. */
889f271b2ccSMax Krasnyansky 	filter->count = 0;
890f271b2ccSMax Krasnyansky 	wmb();
891f271b2ccSMax Krasnyansky 
892f271b2ccSMax Krasnyansky 	/* Use first set of addresses as an exact filter */
893f271b2ccSMax Krasnyansky 	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
894f271b2ccSMax Krasnyansky 		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
895f271b2ccSMax Krasnyansky 
896f271b2ccSMax Krasnyansky 	nexact = n;
897f271b2ccSMax Krasnyansky 
898cfbf84fcSAlex Williamson 	/* Remaining multicast addresses are hashed; a unicast address
899cfbf84fcSAlex Williamson 	 * here leaves the filter disabled. */
900f271b2ccSMax Krasnyansky 	memset(filter->mask, 0, sizeof(filter->mask));
901cfbf84fcSAlex Williamson 	for (; n < uf.count; n++) {
902cfbf84fcSAlex Williamson 		if (!is_multicast_ether_addr(addr[n].u)) {
903cfbf84fcSAlex Williamson 			err = 0; /* no filter */
9043b8d2a69SMarkus Elfring 			goto free_addr;
905cfbf84fcSAlex Williamson 		}
906f271b2ccSMax Krasnyansky 		addr_hash_set(filter->mask, addr[n].u);
907cfbf84fcSAlex Williamson 	}
908f271b2ccSMax Krasnyansky 
909f271b2ccSMax Krasnyansky 	/* For ALLMULTI just set the mask to all ones.
910f271b2ccSMax Krasnyansky 	 * This overrides the mask populated above. */
911f271b2ccSMax Krasnyansky 	if ((uf.flags & TUN_FLT_ALLMULTI))
912f271b2ccSMax Krasnyansky 		memset(filter->mask, ~0, sizeof(filter->mask));
913f271b2ccSMax Krasnyansky 
914f271b2ccSMax Krasnyansky 	/* Now enable the filter */
915f271b2ccSMax Krasnyansky 	wmb();
916f271b2ccSMax Krasnyansky 	filter->count = nexact;
917f271b2ccSMax Krasnyansky 
918f271b2ccSMax Krasnyansky 	/* Return the number of exact filters */
919f271b2ccSMax Krasnyansky 	err = nexact;
9203b8d2a69SMarkus Elfring free_addr:
921f271b2ccSMax Krasnyansky 	kfree(addr);
922f271b2ccSMax Krasnyansky 	return err;
923f271b2ccSMax Krasnyansky }
924f271b2ccSMax Krasnyansky 
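/* Editorial note (illustrative, not part of the driver): update_filter()
 * above parses the variable-length buffer passed with TUNSETTXFILTER. A
 * minimal hedged sketch of programming one exact MAC address plus an
 * all-multicast match from userspace follows; "fd" and the address value
 * are assumptions made only for illustration.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_tun.h>
 *
 *	static int tap_set_filter(int fd)
 *	{
 *		static const unsigned char mac[ETH_ALEN] =
 *			{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *		unsigned char buf[sizeof(struct tun_filter) + ETH_ALEN];
 *		struct tun_filter *f = (struct tun_filter *)buf;
 *
 *		memset(buf, 0, sizeof(buf));
 *		f->flags = TUN_FLT_ALLMULTI;
 *		f->count = 1;
 *		memcpy(f->addr[0], mac, ETH_ALEN);
 *		return ioctl(fd, TUNSETTXFILTER, f);
 *	}
 */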
925f271b2ccSMax Krasnyansky /* Returns: 0 - drop, !=0 - accept */
926f271b2ccSMax Krasnyansky static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
927f271b2ccSMax Krasnyansky {
928f271b2ccSMax Krasnyansky 	/* Cannot use eth_hdr(skb) here because skb_mac_header() is incorrect
929f271b2ccSMax Krasnyansky 	 * at this point. */
930f271b2ccSMax Krasnyansky 	struct ethhdr *eh = (struct ethhdr *) skb->data;
931f271b2ccSMax Krasnyansky 	int i;
932f271b2ccSMax Krasnyansky 
933f271b2ccSMax Krasnyansky 	/* Exact match */
934f271b2ccSMax Krasnyansky 	for (i = 0; i < filter->count; i++)
9352e42e474SJoe Perches 		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
936f271b2ccSMax Krasnyansky 			return 1;
937f271b2ccSMax Krasnyansky 
938f271b2ccSMax Krasnyansky 	/* Inexact match (multicast only) */
939f271b2ccSMax Krasnyansky 	if (is_multicast_ether_addr(eh->h_dest))
940f271b2ccSMax Krasnyansky 		return addr_hash_test(filter->mask, eh->h_dest);
941f271b2ccSMax Krasnyansky 
942f271b2ccSMax Krasnyansky 	return 0;
943f271b2ccSMax Krasnyansky }
944f271b2ccSMax Krasnyansky 
945f271b2ccSMax Krasnyansky /*
946f271b2ccSMax Krasnyansky  * Checks whether the packet is accepted or not.
947f271b2ccSMax Krasnyansky  * Returns: 0 - drop, !=0 - accept
948f271b2ccSMax Krasnyansky  */
949f271b2ccSMax Krasnyansky static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
950f271b2ccSMax Krasnyansky {
951f271b2ccSMax Krasnyansky 	if (!filter->count)
952f271b2ccSMax Krasnyansky 		return 1;
953f271b2ccSMax Krasnyansky 
954f271b2ccSMax Krasnyansky 	return run_filter(filter, skb);
955f271b2ccSMax Krasnyansky }
956f271b2ccSMax Krasnyansky 
9571da177e4SLinus Torvalds /* Network device part of the driver */
9581da177e4SLinus Torvalds 
9591da177e4SLinus Torvalds static const struct ethtool_ops tun_ethtool_ops;
9601da177e4SLinus Torvalds 
961158b515fSGeorge Kennedy static int tun_net_init(struct net_device *dev)
962158b515fSGeorge Kennedy {
963158b515fSGeorge Kennedy 	struct tun_struct *tun = netdev_priv(dev);
964158b515fSGeorge Kennedy 	struct ifreq *ifr = tun->ifr;
965158b515fSGeorge Kennedy 	int err;
966158b515fSGeorge Kennedy 
967158b515fSGeorge Kennedy 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
968158b515fSGeorge Kennedy 	if (!dev->tstats)
969158b515fSGeorge Kennedy 		return -ENOMEM;
970158b515fSGeorge Kennedy 
971158b515fSGeorge Kennedy 	spin_lock_init(&tun->lock);
972158b515fSGeorge Kennedy 
973158b515fSGeorge Kennedy 	err = security_tun_dev_alloc_security(&tun->security);
974158b515fSGeorge Kennedy 	if (err < 0) {
975158b515fSGeorge Kennedy 		free_percpu(dev->tstats);
976158b515fSGeorge Kennedy 		return err;
977158b515fSGeorge Kennedy 	}
978158b515fSGeorge Kennedy 
979158b515fSGeorge Kennedy 	tun_flow_init(tun);
980158b515fSGeorge Kennedy 
981158b515fSGeorge Kennedy 	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
982158b515fSGeorge Kennedy 			   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
983158b515fSGeorge Kennedy 			   NETIF_F_HW_VLAN_STAG_TX;
984158b515fSGeorge Kennedy 	dev->features = dev->hw_features | NETIF_F_LLTX;
985158b515fSGeorge Kennedy 	dev->vlan_features = dev->features &
986158b515fSGeorge Kennedy 			     ~(NETIF_F_HW_VLAN_CTAG_TX |
987158b515fSGeorge Kennedy 			       NETIF_F_HW_VLAN_STAG_TX);
988158b515fSGeorge Kennedy 
989158b515fSGeorge Kennedy 	tun->flags = (tun->flags & ~TUN_FEATURES) |
990158b515fSGeorge Kennedy 		      (ifr->ifr_flags & TUN_FEATURES);
991158b515fSGeorge Kennedy 
992158b515fSGeorge Kennedy 	INIT_LIST_HEAD(&tun->disabled);
993158b515fSGeorge Kennedy 	err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
994158b515fSGeorge Kennedy 			 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
995158b515fSGeorge Kennedy 	if (err < 0) {
996158b515fSGeorge Kennedy 		tun_flow_uninit(tun);
997158b515fSGeorge Kennedy 		security_tun_dev_free_security(tun->security);
998158b515fSGeorge Kennedy 		free_percpu(dev->tstats);
999158b515fSGeorge Kennedy 		return err;
1000158b515fSGeorge Kennedy 	}
1001158b515fSGeorge Kennedy 	return 0;
1002158b515fSGeorge Kennedy }
1003158b515fSGeorge Kennedy 
1004c70f1829SEric W. Biederman /* Net device detach from fd. */
1005c70f1829SEric W. Biederman static void tun_net_uninit(struct net_device *dev)
1006c70f1829SEric W. Biederman {
1007c8d68e6bSJason Wang 	tun_detach_all(dev);
1008c70f1829SEric W. Biederman }
1009c70f1829SEric W. Biederman 
10101da177e4SLinus Torvalds /* Net device open. */
10111da177e4SLinus Torvalds static int tun_net_open(struct net_device *dev)
10121da177e4SLinus Torvalds {
1013c8d68e6bSJason Wang 	netif_tx_start_all_queues(dev);
1014b20e2d54SHannes Frederic Sowa 
10151da177e4SLinus Torvalds 	return 0;
10161da177e4SLinus Torvalds }
10171da177e4SLinus Torvalds 
10181da177e4SLinus Torvalds /* Net device close. */
10191da177e4SLinus Torvalds static int tun_net_close(struct net_device *dev)
10201da177e4SLinus Torvalds {
1021c8d68e6bSJason Wang 	netif_tx_stop_all_queues(dev);
10221da177e4SLinus Torvalds 	return 0;
10231da177e4SLinus Torvalds }
10241da177e4SLinus Torvalds 
10251da177e4SLinus Torvalds /* Net device start xmit */
102696f84061SJason Wang static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
10271da177e4SLinus Torvalds {
10283df97ba8SJason Wang #ifdef CONFIG_RPS
1029dc05360fSEric Dumazet 	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
10309bc88939STom Herbert 		/* Select queue was not called for the skbuff, so we extract the
10319bc88939STom Herbert 		 * RPS hash and save it into the flow_table here.
10329bc88939STom Herbert 		 */
10334b035271SWang Li 		struct tun_flow_entry *e;
10349bc88939STom Herbert 		__u32 rxhash;
10359bc88939STom Herbert 
1036feec084aSJason Wang 		rxhash = __skb_get_hash_symmetric(skb);
10374b035271SWang Li 		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
10389bc88939STom Herbert 		if (e)
10399bc88939STom Herbert 			tun_flow_save_rps_rxhash(e, rxhash);
10409bc88939STom Herbert 	}
10413df97ba8SJason Wang #endif
104296f84061SJason Wang }
104396f84061SJason Wang 
1044aff3d70aSJason Wang static unsigned int run_ebpf_filter(struct tun_struct *tun,
1045aff3d70aSJason Wang 				    struct sk_buff *skb,
1046aff3d70aSJason Wang 				    int len)
1047aff3d70aSJason Wang {
1048aff3d70aSJason Wang 	struct tun_prog *prog = rcu_dereference(tun->filter_prog);
1049aff3d70aSJason Wang 
1050aff3d70aSJason Wang 	if (prog)
1051aff3d70aSJason Wang 		len = bpf_prog_run_clear_cb(prog->prog, skb);
1052aff3d70aSJason Wang 
1053aff3d70aSJason Wang 	return len;
1054aff3d70aSJason Wang }
1055aff3d70aSJason Wang 
105696f84061SJason Wang /* Net device start xmit */
105796f84061SJason Wang static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
105896f84061SJason Wang {
105996f84061SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
10604b4f052eSDongli Zhang 	enum skb_drop_reason drop_reason;
106196f84061SJason Wang 	int txq = skb->queue_mapping;
1062a31d27fbSNicolas Dichtel 	struct netdev_queue *queue;
106396f84061SJason Wang 	struct tun_file *tfile;
1064aff3d70aSJason Wang 	int len = skb->len;
106596f84061SJason Wang 
106696f84061SJason Wang 	rcu_read_lock();
106796f84061SJason Wang 	tfile = rcu_dereference(tun->tfiles[txq]);
106896f84061SJason Wang 
106996f84061SJason Wang 	/* Drop packet if interface is not attached */
10704b4f052eSDongli Zhang 	if (!tfile) {
10714b4f052eSDongli Zhang 		drop_reason = SKB_DROP_REASON_DEV_READY;
107296f84061SJason Wang 		goto drop;
10734b4f052eSDongli Zhang 	}
107496f84061SJason Wang 
107596f84061SJason Wang 	if (!rcu_dereference(tun->steering_prog))
107696f84061SJason Wang 		tun_automq_xmit(tun, skb);
10779bc88939STom Herbert 
10783424170fSMichal Kubecek 	netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);
10796e914fc7SJason Wang 
1080f271b2ccSMax Krasnyansky 	/* Drop if the filter does not like it.
1081f271b2ccSMax Krasnyansky 	 * This is a noop if the filter is disabled.
1082f271b2ccSMax Krasnyansky 	 * Filter can be enabled only for the TAP devices. */
10834b4f052eSDongli Zhang 	if (!check_filter(&tun->txflt, skb)) {
10844b4f052eSDongli Zhang 		drop_reason = SKB_DROP_REASON_TAP_TXFILTER;
1085f271b2ccSMax Krasnyansky 		goto drop;
10864b4f052eSDongli Zhang 	}
1087f271b2ccSMax Krasnyansky 
108854f968d6SJason Wang 	if (tfile->socket.sk->sk_filter &&
10894b4f052eSDongli Zhang 	    sk_filter(tfile->socket.sk, skb)) {
10904b4f052eSDongli Zhang 		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
109199405162SMichael S. Tsirkin 		goto drop;
10924b4f052eSDongli Zhang 	}
109399405162SMichael S. Tsirkin 
1094aff3d70aSJason Wang 	len = run_ebpf_filter(tun, skb, len);
10954b4f052eSDongli Zhang 	if (len == 0) {
10964b4f052eSDongli Zhang 		drop_reason = SKB_DROP_REASON_TAP_FILTER;
109745a15d89SDongli Zhang 		goto drop;
10984b4f052eSDongli Zhang 	}
109945a15d89SDongli Zhang 
11004b4f052eSDongli Zhang 	if (pskb_trim(skb, len)) {
11014b4f052eSDongli Zhang 		drop_reason = SKB_DROP_REASON_NOMEM;
1102aff3d70aSJason Wang 		goto drop;
11034b4f052eSDongli Zhang 	}
1104aff3d70aSJason Wang 
11054b4f052eSDongli Zhang 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
11064b4f052eSDongli Zhang 		drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
11077bf66305SJason Wang 		goto drop;
11084b4f052eSDongli Zhang 	}
11097bf66305SJason Wang 
11107b996243SSoheil Hassas Yeganeh 	skb_tx_timestamp(skb);
1111eda29772SRichard Cochran 
11120110d6f2SMichael S. Tsirkin 	/* Orphan the skb - required as we might hang on to it
11137bf66305SJason Wang 	 * for indefinite time.
11147bf66305SJason Wang 	 */
11150110d6f2SMichael S. Tsirkin 	skb_orphan(skb);
11160110d6f2SMichael S. Tsirkin 
1117895b5c9fSFlorian Westphal 	nf_reset_ct(skb);
1118f8af75f3SEric Dumazet 
11194b4f052eSDongli Zhang 	if (ptr_ring_produce(&tfile->tx_ring, skb)) {
11204b4f052eSDongli Zhang 		drop_reason = SKB_DROP_REASON_FULL_RING;
11211576d986SJason Wang 		goto drop;
11224b4f052eSDongli Zhang 	}
11231da177e4SLinus Torvalds 
1124a31d27fbSNicolas Dichtel 	/* NETIF_F_LLTX requires us to do our own update of trans_start */
1125a31d27fbSNicolas Dichtel 	queue = netdev_get_tx_queue(dev, txq);
1126968a1a5dSAntoine Tenart 	txq_trans_cond_update(queue);
1127a31d27fbSNicolas Dichtel 
11281da177e4SLinus Torvalds 	/* Notify and wake up reader process */
112954f968d6SJason Wang 	if (tfile->flags & TUN_FASYNC)
113054f968d6SJason Wang 		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
11319e641bdcSXi Wang 	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
11326e914fc7SJason Wang 
11336e914fc7SJason Wang 	rcu_read_unlock();
11346ed10654SPatrick McHardy 	return NETDEV_TX_OK;
11351da177e4SLinus Torvalds 
11361da177e4SLinus Torvalds drop:
1137625788b5SEric Dumazet 	dev_core_stats_tx_dropped_inc(dev);
1138149d36f7SMichael S. Tsirkin 	skb_tx_error(skb);
11394b4f052eSDongli Zhang 	kfree_skb_reason(skb, drop_reason);
11406e914fc7SJason Wang 	rcu_read_unlock();
1141baeababbSJason Wang 	return NET_XMIT_DROP;
11421da177e4SLinus Torvalds }
11431da177e4SLinus Torvalds 
1144f271b2ccSMax Krasnyansky static void tun_net_mclist(struct net_device *dev)
11451da177e4SLinus Torvalds {
1146f271b2ccSMax Krasnyansky 	/*
1147f271b2ccSMax Krasnyansky 	 * This callback is supposed to deal with the mc filter in
1148f271b2ccSMax Krasnyansky 	 * the _rx_ path and has nothing to do with the _tx_ path.
1149f271b2ccSMax Krasnyansky 	 * In the rx path we always accept everything userspace gives us.
1150f271b2ccSMax Krasnyansky 	 */
11511da177e4SLinus Torvalds }
11521da177e4SLinus Torvalds 
1153c8f44affSMichał Mirosław static netdev_features_t tun_net_fix_features(struct net_device *dev,
1154c8f44affSMichał Mirosław 	netdev_features_t features)
115588255375SMichał Mirosław {
115688255375SMichał Mirosław 	struct tun_struct *tun = netdev_priv(dev);
115788255375SMichał Mirosław 
115888255375SMichał Mirosław 	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
115988255375SMichał Mirosław }
1160eaea34b2SPaolo Abeni 
1161eaea34b2SPaolo Abeni static void tun_set_headroom(struct net_device *dev, int new_hr)
1162eaea34b2SPaolo Abeni {
1163eaea34b2SPaolo Abeni 	struct tun_struct *tun = netdev_priv(dev);
1164eaea34b2SPaolo Abeni 
1165eaea34b2SPaolo Abeni 	if (new_hr < NET_SKB_PAD)
1166eaea34b2SPaolo Abeni 		new_hr = NET_SKB_PAD;
1167eaea34b2SPaolo Abeni 
1168eaea34b2SPaolo Abeni 	tun->align = new_hr;
1169eaea34b2SPaolo Abeni }
1170eaea34b2SPaolo Abeni 
1171bc1f4470Sstephen hemminger static void
1172608b9977SPaolo Abeni tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1173608b9977SPaolo Abeni {
1174608b9977SPaolo Abeni 	struct tun_struct *tun = netdev_priv(dev);
1175608b9977SPaolo Abeni 
1176497a5757SHeiner Kallweit 	dev_get_tstats64(dev, stats);
1177608b9977SPaolo Abeni 
1178497a5757SHeiner Kallweit 	stats->rx_frame_errors +=
1179497a5757SHeiner Kallweit 		(unsigned long)atomic_long_read(&tun->rx_frame_errors);
1180608b9977SPaolo Abeni }
1181608b9977SPaolo Abeni 
1182761876c8SJason Wang static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1183761876c8SJason Wang 		       struct netlink_ext_ack *extack)
1184761876c8SJason Wang {
1185761876c8SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1186e4a2a304SJason Wang 	struct tun_file *tfile;
1187761876c8SJason Wang 	struct bpf_prog *old_prog;
1188e4a2a304SJason Wang 	int i;
1189761876c8SJason Wang 
1190761876c8SJason Wang 	old_prog = rtnl_dereference(tun->xdp_prog);
1191761876c8SJason Wang 	rcu_assign_pointer(tun->xdp_prog, prog);
1192761876c8SJason Wang 	if (old_prog)
1193761876c8SJason Wang 		bpf_prog_put(old_prog);
1194761876c8SJason Wang 
1195e4a2a304SJason Wang 	for (i = 0; i < tun->numqueues; i++) {
1196e4a2a304SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
1197e4a2a304SJason Wang 		if (prog)
1198e4a2a304SJason Wang 			sock_set_flag(&tfile->sk, SOCK_XDP);
1199e4a2a304SJason Wang 		else
1200e4a2a304SJason Wang 			sock_reset_flag(&tfile->sk, SOCK_XDP);
1201e4a2a304SJason Wang 	}
1202e4a2a304SJason Wang 	list_for_each_entry(tfile, &tun->disabled, next) {
1203e4a2a304SJason Wang 		if (prog)
1204e4a2a304SJason Wang 			sock_set_flag(&tfile->sk, SOCK_XDP);
1205e4a2a304SJason Wang 		else
1206e4a2a304SJason Wang 			sock_reset_flag(&tfile->sk, SOCK_XDP);
1207e4a2a304SJason Wang 	}
1208e4a2a304SJason Wang 
1209761876c8SJason Wang 	return 0;
1210761876c8SJason Wang }
1211761876c8SJason Wang 
1212f4e63525SJakub Kicinski static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1213761876c8SJason Wang {
1214761876c8SJason Wang 	switch (xdp->command) {
1215761876c8SJason Wang 	case XDP_SETUP_PROG:
1216761876c8SJason Wang 		return tun_xdp_set(dev, xdp->prog, xdp->extack);
1217761876c8SJason Wang 	default:
1218761876c8SJason Wang 		return -EINVAL;
1219761876c8SJason Wang 	}
1220761876c8SJason Wang }
1221761876c8SJason Wang 
122226d31925SNicolas Dichtel static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
122326d31925SNicolas Dichtel {
122426d31925SNicolas Dichtel 	if (new_carrier) {
122526d31925SNicolas Dichtel 		struct tun_struct *tun = netdev_priv(dev);
122626d31925SNicolas Dichtel 
122726d31925SNicolas Dichtel 		if (!tun->numqueues)
122826d31925SNicolas Dichtel 			return -EPERM;
122926d31925SNicolas Dichtel 
123026d31925SNicolas Dichtel 		netif_carrier_on(dev);
123126d31925SNicolas Dichtel 	} else {
123226d31925SNicolas Dichtel 		netif_carrier_off(dev);
123326d31925SNicolas Dichtel 	}
123426d31925SNicolas Dichtel 	return 0;
123526d31925SNicolas Dichtel }
123626d31925SNicolas Dichtel 
1237758e43b7SStephen Hemminger static const struct net_device_ops tun_netdev_ops = {
1238158b515fSGeorge Kennedy 	.ndo_init		= tun_net_init,
1239c70f1829SEric W. Biederman 	.ndo_uninit		= tun_net_uninit,
1240758e43b7SStephen Hemminger 	.ndo_open		= tun_net_open,
1241758e43b7SStephen Hemminger 	.ndo_stop		= tun_net_close,
124200829823SStephen Hemminger 	.ndo_start_xmit		= tun_net_xmit,
124388255375SMichał Mirosław 	.ndo_fix_features	= tun_net_fix_features,
1244c8d68e6bSJason Wang 	.ndo_select_queue	= tun_select_queue,
1245eaea34b2SPaolo Abeni 	.ndo_set_rx_headroom	= tun_set_headroom,
1246608b9977SPaolo Abeni 	.ndo_get_stats64	= tun_net_get_stats64,
124726d31925SNicolas Dichtel 	.ndo_change_carrier	= tun_net_change_carrier,
1248758e43b7SStephen Hemminger };
1249758e43b7SStephen Hemminger 
12500c9d917bSJesper Dangaard Brouer static void __tun_xdp_flush_tfile(struct tun_file *tfile)
12510c9d917bSJesper Dangaard Brouer {
12520c9d917bSJesper Dangaard Brouer 	/* Notify and wake up reader process */
12530c9d917bSJesper Dangaard Brouer 	if (tfile->flags & TUN_FASYNC)
12540c9d917bSJesper Dangaard Brouer 		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
12550c9d917bSJesper Dangaard Brouer 	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
12560c9d917bSJesper Dangaard Brouer }
12570c9d917bSJesper Dangaard Brouer 
125842b33468SJesper Dangaard Brouer static int tun_xdp_xmit(struct net_device *dev, int n,
125942b33468SJesper Dangaard Brouer 			struct xdp_frame **frames, u32 flags)
1260fc72d1d5SJason Wang {
1261fc72d1d5SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
1262fc72d1d5SJason Wang 	struct tun_file *tfile;
1263fc72d1d5SJason Wang 	u32 numqueues;
1264fdc13979SLorenzo Bianconi 	int nxmit = 0;
1265735fc405SJesper Dangaard Brouer 	int i;
1266fc72d1d5SJason Wang 
12670c9d917bSJesper Dangaard Brouer 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
126842b33468SJesper Dangaard Brouer 		return -EINVAL;
126942b33468SJesper Dangaard Brouer 
1270fc72d1d5SJason Wang 	rcu_read_lock();
1271fc72d1d5SJason Wang 
12729871a9e4SJason Wang resample:
1273fc72d1d5SJason Wang 	numqueues = READ_ONCE(tun->numqueues);
1274fc72d1d5SJason Wang 	if (!numqueues) {
1275735fc405SJesper Dangaard Brouer 		rcu_read_unlock();
1276735fc405SJesper Dangaard Brouer 		return -ENXIO; /* Caller will free/return all frames */
1277fc72d1d5SJason Wang 	}
1278fc72d1d5SJason Wang 
1279fc72d1d5SJason Wang 	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1280fc72d1d5SJason Wang 					    numqueues]);
12819871a9e4SJason Wang 	if (unlikely(!tfile))
12829871a9e4SJason Wang 		goto resample;
1283735fc405SJesper Dangaard Brouer 
1284735fc405SJesper Dangaard Brouer 	spin_lock(&tfile->tx_ring.producer_lock);
1285735fc405SJesper Dangaard Brouer 	for (i = 0; i < n; i++) {
1286735fc405SJesper Dangaard Brouer 		struct xdp_frame *xdp = frames[i];
1287fc72d1d5SJason Wang 		/* Encode the XDP flag into the lowest pointer bit so the
1288fc72d1d5SJason Wang 		 * consumer can distinguish an XDP frame from an sk_buff.
1289fc72d1d5SJason Wang 		 */
1290735fc405SJesper Dangaard Brouer 		void *frame = tun_xdp_to_ptr(xdp);
1291fc72d1d5SJason Wang 
1292735fc405SJesper Dangaard Brouer 		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
1293625788b5SEric Dumazet 			dev_core_stats_tx_dropped_inc(dev);
1294fdc13979SLorenzo Bianconi 			break;
1295735fc405SJesper Dangaard Brouer 		}
1296fdc13979SLorenzo Bianconi 		nxmit++;
1297735fc405SJesper Dangaard Brouer 	}
1298735fc405SJesper Dangaard Brouer 	spin_unlock(&tfile->tx_ring.producer_lock);
1299735fc405SJesper Dangaard Brouer 
13000c9d917bSJesper Dangaard Brouer 	if (flags & XDP_XMIT_FLUSH)
13010c9d917bSJesper Dangaard Brouer 		__tun_xdp_flush_tfile(tfile);
13020c9d917bSJesper Dangaard Brouer 
1303fc72d1d5SJason Wang 	rcu_read_unlock();
1304fdc13979SLorenzo Bianconi 	return nxmit;
1305fc72d1d5SJason Wang }
1306fc72d1d5SJason Wang 
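/* Editor's note (not part of the original source): tx_ring is a ptr_ring
 * that carries both sk_buff and xdp_frame pointers.  Since both objects are
 * at least word aligned, bit 0 of the pointer is free and is used as a tag:
 * tun_xdp_xmit() above stores tagged pointers via tun_xdp_to_ptr(), and the
 * read side checks and clears the tag with tun_is_xdp_frame() and
 * tun_ptr_to_xdp() (see tun_do_read() below).  A rough sketch of the
 * scheme, matching the helpers defined earlier in this file:
 *
 *	#define TUN_XDP_FLAG 0x1UL
 *
 *	void *tun_xdp_to_ptr(struct xdp_frame *xdp)
 *	{
 *		return (void *)((unsigned long)xdp | TUN_XDP_FLAG);
 *	}
 *
 *	bool tun_is_xdp_frame(void *ptr)
 *	{
 *		return (unsigned long)ptr & TUN_XDP_FLAG;
 *	}
 *
 *	void *tun_ptr_to_xdp(void *ptr)
 *	{
 *		return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
 *	}
 */
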
130744fa2dbdSJesper Dangaard Brouer static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
130844fa2dbdSJesper Dangaard Brouer {
13091b698fa5SLorenzo Bianconi 	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
1310fdc13979SLorenzo Bianconi 	int nxmit;
131144fa2dbdSJesper Dangaard Brouer 
131244fa2dbdSJesper Dangaard Brouer 	if (unlikely(!frame))
131344fa2dbdSJesper Dangaard Brouer 		return -EOVERFLOW;
131444fa2dbdSJesper Dangaard Brouer 
1315fdc13979SLorenzo Bianconi 	nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
1316fdc13979SLorenzo Bianconi 	if (!nxmit)
1317fdc13979SLorenzo Bianconi 		xdp_return_frame_rx_napi(frame);
1318fdc13979SLorenzo Bianconi 	return nxmit;
1319fc72d1d5SJason Wang }
1320fc72d1d5SJason Wang 
1321758e43b7SStephen Hemminger static const struct net_device_ops tap_netdev_ops = {
1322158b515fSGeorge Kennedy 	.ndo_init		= tun_net_init,
1323c70f1829SEric W. Biederman 	.ndo_uninit		= tun_net_uninit,
1324758e43b7SStephen Hemminger 	.ndo_open		= tun_net_open,
1325758e43b7SStephen Hemminger 	.ndo_stop		= tun_net_close,
132600829823SStephen Hemminger 	.ndo_start_xmit		= tun_net_xmit,
132788255375SMichał Mirosław 	.ndo_fix_features	= tun_net_fix_features,
1328afc4b13dSJiri Pirko 	.ndo_set_rx_mode	= tun_net_mclist,
1329758e43b7SStephen Hemminger 	.ndo_set_mac_address	= eth_mac_addr,
1330758e43b7SStephen Hemminger 	.ndo_validate_addr	= eth_validate_addr,
1331c8d68e6bSJason Wang 	.ndo_select_queue	= tun_select_queue,
13325e52796aSToshiaki Makita 	.ndo_features_check	= passthru_features_check,
1333eaea34b2SPaolo Abeni 	.ndo_set_rx_headroom	= tun_set_headroom,
1334497a5757SHeiner Kallweit 	.ndo_get_stats64	= dev_get_tstats64,
1335f4e63525SJakub Kicinski 	.ndo_bpf		= tun_xdp,
1336fc72d1d5SJason Wang 	.ndo_xdp_xmit		= tun_xdp_xmit,
133726d31925SNicolas Dichtel 	.ndo_change_carrier	= tun_net_change_carrier,
1338758e43b7SStephen Hemminger };
1339758e43b7SStephen Hemminger 
1340944a1376SPavel Emelyanov static void tun_flow_init(struct tun_struct *tun)
134196442e42SJason Wang {
134296442e42SJason Wang 	int i;
134396442e42SJason Wang 
134496442e42SJason Wang 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
134596442e42SJason Wang 		INIT_HLIST_HEAD(&tun->flows[i]);
134696442e42SJason Wang 
134796442e42SJason Wang 	tun->ageing_time = TUN_FLOW_EXPIRE;
1348e99e88a9SKees Cook 	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1349e99e88a9SKees Cook 	mod_timer(&tun->flow_gc_timer,
1350e99e88a9SKees Cook 		  round_jiffies_up(jiffies + tun->ageing_time));
135196442e42SJason Wang }
135296442e42SJason Wang 
135396442e42SJason Wang static void tun_flow_uninit(struct tun_struct *tun)
135496442e42SJason Wang {
135596442e42SJason Wang 	del_timer_sync(&tun->flow_gc_timer);
135696442e42SJason Wang 	tun_flow_flush(tun);
135796442e42SJason Wang }
135896442e42SJason Wang 
135991572088SJarod Wilson #define MIN_MTU 68
136091572088SJarod Wilson #define MAX_MTU 65535
136191572088SJarod Wilson 
13621da177e4SLinus Torvalds /* Initialize net device. */
1363158b515fSGeorge Kennedy static void tun_net_initialize(struct net_device *dev)
13641da177e4SLinus Torvalds {
13651da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
13661da177e4SLinus Torvalds 
13671da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
136840630b82SMichael S. Tsirkin 	case IFF_TUN:
1369758e43b7SStephen Hemminger 		dev->netdev_ops = &tun_netdev_ops;
1370b9815eb1SJason A. Donenfeld 		dev->header_ops = &ip_tunnel_header_ops;
1371758e43b7SStephen Hemminger 
13721da177e4SLinus Torvalds 		/* Point-to-Point TUN Device */
13731da177e4SLinus Torvalds 		dev->hard_header_len = 0;
13741da177e4SLinus Torvalds 		dev->addr_len = 0;
13751da177e4SLinus Torvalds 		dev->mtu = 1500;
13761da177e4SLinus Torvalds 
13771da177e4SLinus Torvalds 		/* Zero header length */
13781da177e4SLinus Torvalds 		dev->type = ARPHRD_NONE;
13791da177e4SLinus Torvalds 		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
13801da177e4SLinus Torvalds 		break;
13811da177e4SLinus Torvalds 
138240630b82SMichael S. Tsirkin 	case IFF_TAP:
13837a0a9608SKusanagi Kouichi 		dev->netdev_ops = &tap_netdev_ops;
13841da177e4SLinus Torvalds 		/* Ethernet TAP Device */
13851da177e4SLinus Torvalds 		ether_setup(dev);
1386550fd08cSNeil Horman 		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1387a676847bSstephen hemminger 		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
138836226a8dSBrian Braunstein 
1389f2cedb63SDanny Kukawka 		eth_hw_addr_random(dev);
139036226a8dSBrian Braunstein 
13911da177e4SLinus Torvalds 		break;
13921da177e4SLinus Torvalds 	}
139391572088SJarod Wilson 
139491572088SJarod Wilson 	dev->min_mtu = MIN_MTU;
139591572088SJarod Wilson 	dev->max_mtu = MAX_MTU - dev->hard_header_len;
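	/* Editor's note: with the constants above, the usable MTU range is
	 * [68, 65535] for IFF_TUN (hard_header_len is 0) and [68, 65521]
	 * for IFF_TAP, where ether_setup() sets hard_header_len to
	 * ETH_HLEN (14).
	 */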
13961da177e4SLinus Torvalds }
13971da177e4SLinus Torvalds 
13982f3ab622SJason Wang static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
13992f3ab622SJason Wang {
14002f3ab622SJason Wang 	struct sock *sk = tfile->socket.sk;
14012f3ab622SJason Wang 
14022f3ab622SJason Wang 	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
14032f3ab622SJason Wang }
14042f3ab622SJason Wang 
14051da177e4SLinus Torvalds /* Character device part */
14061da177e4SLinus Torvalds 
14071da177e4SLinus Torvalds /* Poll */
1408afc9a42bSAl Viro static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
14091da177e4SLinus Torvalds {
1410b2430de3SEric W. Biederman 	struct tun_file *tfile = file->private_data;
14119484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
14123c8a9c63SMariusz Kozlowski 	struct sock *sk;
1413afc9a42bSAl Viro 	__poll_t mask = 0;
14141da177e4SLinus Torvalds 
14151da177e4SLinus Torvalds 	if (!tun)
1416a9a08845SLinus Torvalds 		return EPOLLERR;
14171da177e4SLinus Torvalds 
141854f968d6SJason Wang 	sk = tfile->socket.sk;
14193c8a9c63SMariusz Kozlowski 
14209e641bdcSXi Wang 	poll_wait(file, sk_sleep(sk), wait);
14211da177e4SLinus Torvalds 
14225990a305SJason Wang 	if (!ptr_ring_empty(&tfile->tx_ring))
1423a9a08845SLinus Torvalds 		mask |= EPOLLIN | EPOLLRDNORM;
14241da177e4SLinus Torvalds 
14252f3ab622SJason Wang 	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if the socket is not
14262f3ab622SJason Wang 	 * writable, to guarantee that EPOLLOUT is raised either here or
14272f3ab622SJason Wang 	 * by tun_sock_write_space(). The process can then get a
14282f3ab622SJason Wang 	 * notification after it writes to a down device and meets -EIO.
14292f3ab622SJason Wang 	 */
14302f3ab622SJason Wang 	if (tun_sock_writeable(tun, tfile) ||
14319cd3e072SEric Dumazet 	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
14322f3ab622SJason Wang 	     tun_sock_writeable(tun, tfile)))
1433a9a08845SLinus Torvalds 		mask |= EPOLLOUT | EPOLLWRNORM;
143433dccbb0SHerbert Xu 
1435c70f1829SEric W. Biederman 	if (tun->dev->reg_state != NETREG_REGISTERED)
1436a9a08845SLinus Torvalds 		mask = EPOLLERR;
1437c70f1829SEric W. Biederman 
1438631ab46bSEric W. Biederman 	tun_put(tun);
14391da177e4SLinus Torvalds 	return mask;
14401da177e4SLinus Torvalds }
14411da177e4SLinus Torvalds 
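/* Editor's sketch (illustrative only, not part of the driver): how the poll
 * semantics above are typically consumed from user space.  tun_fd is assumed
 * to be a descriptor already configured with TUNSETIFF; error handling is
 * omitted.
 *
 *	struct pollfd pfd = { .fd = tun_fd, .events = POLLIN };
 *	char buf[2048];
 *
 *	for (;;) {
 *		poll(&pfd, 1, -1);
 *		if (pfd.revents & POLLIN) {
 *			ssize_t n = read(tun_fd, buf, sizeof(buf));
 *			// each read() returns at most one packet
 *		}
 *	}
 */
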
144290e33d45SPetar Penkov static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
144390e33d45SPetar Penkov 					    size_t len,
144490e33d45SPetar Penkov 					    const struct iov_iter *it)
144590e33d45SPetar Penkov {
144690e33d45SPetar Penkov 	struct sk_buff *skb;
144790e33d45SPetar Penkov 	size_t linear;
144890e33d45SPetar Penkov 	int err;
144990e33d45SPetar Penkov 	int i;
145090e33d45SPetar Penkov 
145190e33d45SPetar Penkov 	if (it->nr_segs > MAX_SKB_FRAGS + 1)
1452950271d7SYunjian Wang 		return ERR_PTR(-EMSGSIZE);
145390e33d45SPetar Penkov 
145490e33d45SPetar Penkov 	local_bh_disable();
145590e33d45SPetar Penkov 	skb = napi_get_frags(&tfile->napi);
145690e33d45SPetar Penkov 	local_bh_enable();
145790e33d45SPetar Penkov 	if (!skb)
145890e33d45SPetar Penkov 		return ERR_PTR(-ENOMEM);
145990e33d45SPetar Penkov 
146090e33d45SPetar Penkov 	linear = iov_iter_single_seg_count(it);
146190e33d45SPetar Penkov 	err = __skb_grow(skb, linear);
146290e33d45SPetar Penkov 	if (err)
146390e33d45SPetar Penkov 		goto free;
146490e33d45SPetar Penkov 
146590e33d45SPetar Penkov 	skb->len = len;
146690e33d45SPetar Penkov 	skb->data_len = len - linear;
146790e33d45SPetar Penkov 	skb->truesize += skb->data_len;
146890e33d45SPetar Penkov 
146990e33d45SPetar Penkov 	for (i = 1; i < it->nr_segs; i++) {
147090e33d45SPetar Penkov 		size_t fragsz = it->iov[i].iov_len;
1471aa6daacaSEric Dumazet 		struct page *page;
1472aa6daacaSEric Dumazet 		void *frag;
147390e33d45SPetar Penkov 
147490e33d45SPetar Penkov 		if (fragsz == 0 || fragsz > PAGE_SIZE) {
147590e33d45SPetar Penkov 			err = -EINVAL;
147690e33d45SPetar Penkov 			goto free;
147790e33d45SPetar Penkov 		}
1478aa6daacaSEric Dumazet 		frag = netdev_alloc_frag(fragsz);
1479aa6daacaSEric Dumazet 		if (!frag) {
148090e33d45SPetar Penkov 			err = -ENOMEM;
148190e33d45SPetar Penkov 			goto free;
148290e33d45SPetar Penkov 		}
1483aa6daacaSEric Dumazet 		page = virt_to_head_page(frag);
1484aa6daacaSEric Dumazet 		skb_fill_page_desc(skb, i - 1, page,
1485aa6daacaSEric Dumazet 				   frag - page_address(page), fragsz);
148690e33d45SPetar Penkov 	}
148790e33d45SPetar Penkov 
148890e33d45SPetar Penkov 	return skb;
148990e33d45SPetar Penkov free:
149090e33d45SPetar Penkov 	/* frees skb and all frags allocated with napi_alloc_frag() */
149190e33d45SPetar Penkov 	napi_free_frags(&tfile->napi);
149290e33d45SPetar Penkov 	return ERR_PTR(err);
149390e33d45SPetar Penkov }
149490e33d45SPetar Penkov 
1495f42157cbSRusty Russell /* prepad is the amount to reserve at the front.  len is the length after
1496f42157cbSRusty Russell  * that.  linear is a hint as to how much to copy (usually headers). */
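/* Editor's note: in the non-zerocopy path of tun_get_user(), prepad is the
 * configured alignment (tun->align, plus NET_IP_ALIGN for TAP), len is the
 * full payload length, and linear is the virtio-net hdr_len clamped to
 * good_linear, so protocol headers land in the linear area and the rest of
 * the payload ends up as paged data (small packets are made fully linear
 * below).
 */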
149754f968d6SJason Wang static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
149833dccbb0SHerbert Xu 				     size_t prepad, size_t len,
149933dccbb0SHerbert Xu 				     size_t linear, int noblock)
1500f42157cbSRusty Russell {
150154f968d6SJason Wang 	struct sock *sk = tfile->socket.sk;
1502f42157cbSRusty Russell 	struct sk_buff *skb;
150333dccbb0SHerbert Xu 	int err;
1504f42157cbSRusty Russell 
1505f42157cbSRusty Russell 	/* Under a page?  Don't bother with paged skb. */
15060eca93bcSHerbert Xu 	if (prepad + len < PAGE_SIZE || !linear)
150733dccbb0SHerbert Xu 		linear = len;
1508f42157cbSRusty Russell 
150933dccbb0SHerbert Xu 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
151028d64271SEric Dumazet 				   &err, 0);
1511f42157cbSRusty Russell 	if (!skb)
151233dccbb0SHerbert Xu 		return ERR_PTR(err);
1513f42157cbSRusty Russell 
1514f42157cbSRusty Russell 	skb_reserve(skb, prepad);
1515f42157cbSRusty Russell 	skb_put(skb, linear);
151633dccbb0SHerbert Xu 	skb->data_len = len - linear;
151733dccbb0SHerbert Xu 	skb->len += len - linear;
1518f42157cbSRusty Russell 
1519f42157cbSRusty Russell 	return skb;
1520f42157cbSRusty Russell }
1521f42157cbSRusty Russell 
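/* Editor's note: tun_rx_batched() implements a small receive-side batch.
 * Packets are parked on the socket's sk_write_queue until either the writer
 * signals that no more data follows (!more) or tun->rx_batched packets have
 * accumulated; the whole batch is then spliced off under the queue lock and
 * fed to netif_receive_skb() with bottom halves disabled.
 */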
15225503fcecSJason Wang static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
15235503fcecSJason Wang 			   struct sk_buff *skb, int more)
15245503fcecSJason Wang {
15255503fcecSJason Wang 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
15265503fcecSJason Wang 	struct sk_buff_head process_queue;
15275503fcecSJason Wang 	u32 rx_batched = tun->rx_batched;
15285503fcecSJason Wang 	bool rcv = false;
15295503fcecSJason Wang 
15305503fcecSJason Wang 	if (!rx_batched || (!more && skb_queue_empty(queue))) {
15315503fcecSJason Wang 		local_bh_disable();
15328ebebcbaSMatthew Cover 		skb_record_rx_queue(skb, tfile->queue_index);
15335503fcecSJason Wang 		netif_receive_skb(skb);
15345503fcecSJason Wang 		local_bh_enable();
15355503fcecSJason Wang 		return;
15365503fcecSJason Wang 	}
15375503fcecSJason Wang 
15385503fcecSJason Wang 	spin_lock(&queue->lock);
15395503fcecSJason Wang 	if (!more || skb_queue_len(queue) == rx_batched) {
15405503fcecSJason Wang 		__skb_queue_head_init(&process_queue);
15415503fcecSJason Wang 		skb_queue_splice_tail_init(queue, &process_queue);
15425503fcecSJason Wang 		rcv = true;
15435503fcecSJason Wang 	} else {
15445503fcecSJason Wang 		__skb_queue_tail(queue, skb);
15455503fcecSJason Wang 	}
15465503fcecSJason Wang 	spin_unlock(&queue->lock);
15475503fcecSJason Wang 
15485503fcecSJason Wang 	if (rcv) {
15495503fcecSJason Wang 		struct sk_buff *nskb;
15505503fcecSJason Wang 
15515503fcecSJason Wang 		local_bh_disable();
15528ebebcbaSMatthew Cover 		while ((nskb = __skb_dequeue(&process_queue))) {
15538ebebcbaSMatthew Cover 			skb_record_rx_queue(nskb, tfile->queue_index);
15545503fcecSJason Wang 			netif_receive_skb(nskb);
15558ebebcbaSMatthew Cover 		}
15568ebebcbaSMatthew Cover 		skb_record_rx_queue(skb, tfile->queue_index);
15575503fcecSJason Wang 		netif_receive_skb(skb);
15585503fcecSJason Wang 		local_bh_enable();
15595503fcecSJason Wang 	}
15605503fcecSJason Wang }
15615503fcecSJason Wang 
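/* Editor's note: tun_can_build_skb() gates the page-frag fast path of
 * tun_build_skb() below.  It is taken only for TAP queues with the default
 * (INT_MAX) sndbuf, non-blocking writes and no zerocopy, and only when the
 * frame plus TUN_RX_PAD headroom and the shared-info tail still fit in a
 * single page.
 */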
156266ccbc9cSJason Wang static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
156366ccbc9cSJason Wang 			      int len, int noblock, bool zerocopy)
156466ccbc9cSJason Wang {
156566ccbc9cSJason Wang 	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
156666ccbc9cSJason Wang 		return false;
156766ccbc9cSJason Wang 
156866ccbc9cSJason Wang 	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
156966ccbc9cSJason Wang 		return false;
157066ccbc9cSJason Wang 
157166ccbc9cSJason Wang 	if (!noblock)
157266ccbc9cSJason Wang 		return false;
157366ccbc9cSJason Wang 
157466ccbc9cSJason Wang 	if (zerocopy)
157566ccbc9cSJason Wang 		return false;
157666ccbc9cSJason Wang 
157766ccbc9cSJason Wang 	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
157866ccbc9cSJason Wang 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
157966ccbc9cSJason Wang 		return false;
158066ccbc9cSJason Wang 
158166ccbc9cSJason Wang 	return true;
158266ccbc9cSJason Wang }
158366ccbc9cSJason Wang 
15844b663366SAlexis Bauvin static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
15854b663366SAlexis Bauvin 				       struct page_frag *alloc_frag, char *buf,
15868ae1aff0SJason Wang 				       int buflen, int len, int pad)
1587ac1f1f6cSJason Wang {
1588ac1f1f6cSJason Wang 	struct sk_buff *skb = build_skb(buf, buflen);
1589ac1f1f6cSJason Wang 
1590ac1f1f6cSJason Wang 	if (!skb)
1591ac1f1f6cSJason Wang 		return ERR_PTR(-ENOMEM);
1592ac1f1f6cSJason Wang 
15938ae1aff0SJason Wang 	skb_reserve(skb, pad);
1594ac1f1f6cSJason Wang 	skb_put(skb, len);
15954b663366SAlexis Bauvin 	skb_set_owner_w(skb, tfile->socket.sk);
1596ac1f1f6cSJason Wang 
1597ac1f1f6cSJason Wang 	get_page(alloc_frag->page);
1598ac1f1f6cSJason Wang 	alloc_frag->offset += buflen;
1599ac1f1f6cSJason Wang 
1600ac1f1f6cSJason Wang 	return skb;
1601ac1f1f6cSJason Wang }
1602ac1f1f6cSJason Wang 
16038ae1aff0SJason Wang static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
16048ae1aff0SJason Wang 		       struct xdp_buff *xdp, u32 act)
16058ae1aff0SJason Wang {
16068ae1aff0SJason Wang 	int err;
16078ae1aff0SJason Wang 
16088ae1aff0SJason Wang 	switch (act) {
16098ae1aff0SJason Wang 	case XDP_REDIRECT:
16108ae1aff0SJason Wang 		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
16118ae1aff0SJason Wang 		if (err)
16128ae1aff0SJason Wang 			return err;
16138ae1aff0SJason Wang 		break;
16148ae1aff0SJason Wang 	case XDP_TX:
16158ae1aff0SJason Wang 		err = tun_xdp_tx(tun->dev, xdp);
16168ae1aff0SJason Wang 		if (err < 0)
16178ae1aff0SJason Wang 			return err;
16188ae1aff0SJason Wang 		break;
16198ae1aff0SJason Wang 	case XDP_PASS:
16208ae1aff0SJason Wang 		break;
16218ae1aff0SJason Wang 	default:
1622c8064e5bSPaolo Abeni 		bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act);
1623df561f66SGustavo A. R. Silva 		fallthrough;
16248ae1aff0SJason Wang 	case XDP_ABORTED:
16258ae1aff0SJason Wang 		trace_xdp_exception(tun->dev, xdp_prog, act);
1626df561f66SGustavo A. R. Silva 		fallthrough;
16278ae1aff0SJason Wang 	case XDP_DROP:
1628625788b5SEric Dumazet 		dev_core_stats_rx_dropped_inc(tun->dev);
16298ae1aff0SJason Wang 		break;
16308ae1aff0SJason Wang 	}
16318ae1aff0SJason Wang 
16328ae1aff0SJason Wang 	return act;
16338ae1aff0SJason Wang }
16348ae1aff0SJason Wang 
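/* Editor's note: tun_build_skb() copies the frame into a per-task page
 * fragment, runs the attached XDP program (if any) directly on that buffer,
 * and wraps whatever survives in an skb via __tun_build_skb().  *skb_xdp is
 * set when XDP was not run here (GSO packets, or a program that appeared
 * only after the headroom check), so the caller falls back to generic XDP
 * on the finished skb.
 */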
1635761876c8SJason Wang static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1636761876c8SJason Wang 				     struct tun_file *tfile,
163766ccbc9cSJason Wang 				     struct iov_iter *from,
1638761876c8SJason Wang 				     struct virtio_net_hdr *hdr,
16391cfe6e93SJason Wang 				     int len, int *skb_xdp)
164066ccbc9cSJason Wang {
16410bbd7dadSEric Dumazet 	struct page_frag *alloc_frag = &current->task_frag;
1642761876c8SJason Wang 	struct bpf_prog *xdp_prog;
16437df13219SJason Wang 	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
164466ccbc9cSJason Wang 	char *buf;
164566ccbc9cSJason Wang 	size_t copied;
16468ae1aff0SJason Wang 	int pad = TUN_RX_PAD;
16478ae1aff0SJason Wang 	int err = 0;
16487df13219SJason Wang 
16497df13219SJason Wang 	rcu_read_lock();
16507df13219SJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
16517df13219SJason Wang 	if (xdp_prog)
16524f23aff8SJason Wang 		pad += XDP_PACKET_HEADROOM;
16537df13219SJason Wang 	buflen += SKB_DATA_ALIGN(len + pad);
16547df13219SJason Wang 	rcu_read_unlock();
165566ccbc9cSJason Wang 
165663b9ab65SJason Wang 	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
165766ccbc9cSJason Wang 	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
165866ccbc9cSJason Wang 		return ERR_PTR(-ENOMEM);
165966ccbc9cSJason Wang 
166066ccbc9cSJason Wang 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
166166ccbc9cSJason Wang 	copied = copy_page_from_iter(alloc_frag->page,
16627df13219SJason Wang 				     alloc_frag->offset + pad,
166366ccbc9cSJason Wang 				     len, from);
166466ccbc9cSJason Wang 	if (copied != len)
166566ccbc9cSJason Wang 		return ERR_PTR(-EFAULT);
166666ccbc9cSJason Wang 
16677df13219SJason Wang 	/* There's a small window in which XDP may be set after the check
16687df13219SJason Wang 	 * of xdp_prog above; this should be rare, and for simplicity
16697df13219SJason Wang 	 * we do XDP on the skb in case the headroom is not enough.
16707df13219SJason Wang 	 */
1671ac1f1f6cSJason Wang 	if (hdr->gso_type || !xdp_prog) {
16721cfe6e93SJason Wang 		*skb_xdp = 1;
16734b663366SAlexis Bauvin 		return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
16744b663366SAlexis Bauvin 				       pad);
1675ac1f1f6cSJason Wang 	}
1676ac1f1f6cSJason Wang 
16771cfe6e93SJason Wang 	*skb_xdp = 0;
167866ccbc9cSJason Wang 
16796547e387SToshiaki Makita 	local_bh_disable();
1680761876c8SJason Wang 	rcu_read_lock();
1681761876c8SJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
16828ae1aff0SJason Wang 	if (xdp_prog) {
1683761876c8SJason Wang 		struct xdp_buff xdp;
1684761876c8SJason Wang 		u32 act;
1685761876c8SJason Wang 
168643b5169dSLorenzo Bianconi 		xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
1687be9df4afSLorenzo Bianconi 		xdp_prepare_buff(&xdp, buf, pad, len, false);
1688761876c8SJason Wang 
16898ae1aff0SJason Wang 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
16908ae1aff0SJason Wang 		if (act == XDP_REDIRECT || act == XDP_TX) {
1691761876c8SJason Wang 			get_page(alloc_frag->page);
1692761876c8SJason Wang 			alloc_frag->offset += buflen;
1693761876c8SJason Wang 		}
16948ae1aff0SJason Wang 		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
1695bee34890SWill Deacon 		if (err < 0) {
1696bee34890SWill Deacon 			if (act == XDP_REDIRECT || act == XDP_TX)
1697bee34890SWill Deacon 				put_page(alloc_frag->page);
1698bee34890SWill Deacon 			goto out;
1699bee34890SWill Deacon 		}
1700bee34890SWill Deacon 
17011a097910SJason Wang 		if (err == XDP_REDIRECT)
17021d233886SToke Høiland-Jørgensen 			xdp_do_flush();
17038ae1aff0SJason Wang 		if (err != XDP_PASS)
17048ae1aff0SJason Wang 			goto out;
17058ae1aff0SJason Wang 
17068ae1aff0SJason Wang 		pad = xdp.data - xdp.data_hard_start;
17078ae1aff0SJason Wang 		len = xdp.data_end - xdp.data;
1708761876c8SJason Wang 	}
1709761876c8SJason Wang 	rcu_read_unlock();
17106547e387SToshiaki Makita 	local_bh_enable();
1711291aeb2bSJason Wang 
17124b663366SAlexis Bauvin 	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
1713761876c8SJason Wang 
1714f7053b6cSJason Wang out:
1715761876c8SJason Wang 	rcu_read_unlock();
17166547e387SToshiaki Makita 	local_bh_enable();
1717761876c8SJason Wang 	return NULL;
171866ccbc9cSJason Wang }
171966ccbc9cSJason Wang 
17201da177e4SLinus Torvalds /* Get a packet from the user space buffer */
172154f968d6SJason Wang static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1722f5ff53b4SAl Viro 			    void *msg_control, struct iov_iter *from,
17235503fcecSJason Wang 			    int noblock, bool more)
17241da177e4SLinus Torvalds {
172509640e63SHarvey Harrison 	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
17261da177e4SLinus Torvalds 	struct sk_buff *skb;
1727f5ff53b4SAl Viro 	size_t total_len = iov_iter_count(from);
1728eaea34b2SPaolo Abeni 	size_t len = total_len, align = tun->align, linear;
1729f43798c2SRusty Russell 	struct virtio_net_hdr gso = { 0 };
173096f8d9ecSJason Wang 	int good_linear;
17310690899bSMichael S. Tsirkin 	int copylen;
17320690899bSMichael S. Tsirkin 	bool zerocopy = false;
17330690899bSMichael S. Tsirkin 	int err;
173496f84061SJason Wang 	u32 rxhash = 0;
17351cfe6e93SJason Wang 	int skb_xdp = 1;
1736af3fb24eSEric Dumazet 	bool frags = tun_napi_frags_enabled(tfile);
17374b4f052eSDongli Zhang 	enum skb_drop_reason drop_reason;
17381da177e4SLinus Torvalds 
173940630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_NO_PI)) {
174015718ea0SDan Carpenter 		if (len < sizeof(pi))
17411da177e4SLinus Torvalds 			return -EINVAL;
174215718ea0SDan Carpenter 		len -= sizeof(pi);
17431da177e4SLinus Torvalds 
1744cbbd26b8SAl Viro 		if (!copy_from_iter_full(&pi, sizeof(pi), from))
17451da177e4SLinus Torvalds 			return -EFAULT;
17461da177e4SLinus Torvalds 	}
17471da177e4SLinus Torvalds 
174840630b82SMichael S. Tsirkin 	if (tun->flags & IFF_VNET_HDR) {
1749e1edab87SWillem de Bruijn 		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1750e1edab87SWillem de Bruijn 
1751e1edab87SWillem de Bruijn 		if (len < vnet_hdr_sz)
1752f43798c2SRusty Russell 			return -EINVAL;
1753e1edab87SWillem de Bruijn 		len -= vnet_hdr_sz;
1754f43798c2SRusty Russell 
1755cbbd26b8SAl Viro 		if (!copy_from_iter_full(&gso, sizeof(gso), from))
1756f43798c2SRusty Russell 			return -EFAULT;
1757f43798c2SRusty Russell 
17584909122fSHerbert Xu 		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
175956f0dcc5SMichael S. Tsirkin 		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
176056f0dcc5SMichael S. Tsirkin 			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
17614909122fSHerbert Xu 
176256f0dcc5SMichael S. Tsirkin 		if (tun16_to_cpu(tun, gso.hdr_len) > len)
1763f43798c2SRusty Russell 			return -EINVAL;
1764e1edab87SWillem de Bruijn 		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1765f43798c2SRusty Russell 	}
1766f43798c2SRusty Russell 
176740630b82SMichael S. Tsirkin 	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1768a504b86eSstephen hemminger 		align += NET_IP_ALIGN;
17690eca93bcSHerbert Xu 		if (unlikely(len < ETH_HLEN ||
177056f0dcc5SMichael S. Tsirkin 			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1771e01bf1c8SRusty Russell 			return -EINVAL;
1772e01bf1c8SRusty Russell 	}
17731da177e4SLinus Torvalds 
177496f8d9ecSJason Wang 	good_linear = SKB_MAX_HEAD(align);
177596f8d9ecSJason Wang 
177688529176SJason Wang 	if (msg_control) {
1777f5ff53b4SAl Viro 		struct iov_iter i = *from;
1778f5ff53b4SAl Viro 
177988529176SJason Wang 		/* There are 256 bytes to be copied into the skb, so there is
178088529176SJason Wang 		 * enough room to expand the skb head in case it is needed.
17810690899bSMichael S. Tsirkin 		 * The rest of the buffer is mapped from userspace.
17820690899bSMichael S. Tsirkin 		 */
178356f0dcc5SMichael S. Tsirkin 		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
178496f8d9ecSJason Wang 		if (copylen > good_linear)
178596f8d9ecSJason Wang 			copylen = good_linear;
17863dd5c330SJason Wang 		linear = copylen;
1787f5ff53b4SAl Viro 		iov_iter_advance(&i, copylen);
1788f5ff53b4SAl Viro 		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
178988529176SJason Wang 			zerocopy = true;
179088529176SJason Wang 	}
179188529176SJason Wang 
179290e33d45SPetar Penkov 	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
17931cfe6e93SJason Wang 		/* For packets that are not easy to process here
17941cfe6e93SJason Wang 		 * (e.g. GSO or jumbo packets), we do it later with the
17951cfe6e93SJason Wang 		 * generic XDP routine, after the skb has been created.
17961cfe6e93SJason Wang 		 */
17971cfe6e93SJason Wang 		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
179866ccbc9cSJason Wang 		if (IS_ERR(skb)) {
1799625788b5SEric Dumazet 			dev_core_stats_rx_dropped_inc(tun->dev);
180066ccbc9cSJason Wang 			return PTR_ERR(skb);
180166ccbc9cSJason Wang 		}
1802761876c8SJason Wang 		if (!skb)
1803761876c8SJason Wang 			return total_len;
180466ccbc9cSJason Wang 	} else {
180588529176SJason Wang 		if (!zerocopy) {
18060690899bSMichael S. Tsirkin 			copylen = len;
180756f0dcc5SMichael S. Tsirkin 			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
180896f8d9ecSJason Wang 				linear = good_linear;
180996f8d9ecSJason Wang 			else
181056f0dcc5SMichael S. Tsirkin 				linear = tun16_to_cpu(tun, gso.hdr_len);
18113dd5c330SJason Wang 		}
18120690899bSMichael S. Tsirkin 
181390e33d45SPetar Penkov 		if (frags) {
181490e33d45SPetar Penkov 			mutex_lock(&tfile->napi_mutex);
181590e33d45SPetar Penkov 			skb = tun_napi_alloc_frags(tfile, copylen, from);
181690e33d45SPetar Penkov 			/* tun_napi_alloc_frags() enforces a layout for the skb.
181790e33d45SPetar Penkov 			 * If zerocopy is enabled, then this layout will be
181890e33d45SPetar Penkov 			 * overwritten by zerocopy_sg_from_iter().
181990e33d45SPetar Penkov 			 */
182090e33d45SPetar Penkov 			zerocopy = false;
182190e33d45SPetar Penkov 		} else {
182290e33d45SPetar Penkov 			skb = tun_alloc_skb(tfile, align, copylen, linear,
182390e33d45SPetar Penkov 					    noblock);
182490e33d45SPetar Penkov 		}
182590e33d45SPetar Penkov 
182633dccbb0SHerbert Xu 		if (IS_ERR(skb)) {
182733dccbb0SHerbert Xu 			if (PTR_ERR(skb) != -EAGAIN)
1828625788b5SEric Dumazet 				dev_core_stats_rx_dropped_inc(tun->dev);
182990e33d45SPetar Penkov 			if (frags)
183090e33d45SPetar Penkov 				mutex_unlock(&tfile->napi_mutex);
183133dccbb0SHerbert Xu 			return PTR_ERR(skb);
18321da177e4SLinus Torvalds 		}
18331da177e4SLinus Torvalds 
18340690899bSMichael S. Tsirkin 		if (zerocopy)
1835f5ff53b4SAl Viro 			err = zerocopy_sg_from_iter(skb, from);
1836af1cc7a2SJason Wang 		else
1837f5ff53b4SAl Viro 			err = skb_copy_datagram_from_iter(skb, 0, from, len);
18380690899bSMichael S. Tsirkin 
18390690899bSMichael S. Tsirkin 		if (err) {
18404477138fSEric Dumazet 			err = -EFAULT;
18414b4f052eSDongli Zhang 			drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
18424477138fSEric Dumazet drop:
1843625788b5SEric Dumazet 			dev_core_stats_rx_dropped_inc(tun->dev);
18444b4f052eSDongli Zhang 			kfree_skb_reason(skb, drop_reason);
184590e33d45SPetar Penkov 			if (frags) {
184690e33d45SPetar Penkov 				tfile->napi.skb = NULL;
184790e33d45SPetar Penkov 				mutex_unlock(&tfile->napi_mutex);
184890e33d45SPetar Penkov 			}
184990e33d45SPetar Penkov 
18504477138fSEric Dumazet 			return err;
18518f22757eSDave Jones 		}
185266ccbc9cSJason Wang 	}
18531da177e4SLinus Torvalds 
18543e9e40e7SJarno Rajahalme 	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1855497a5757SHeiner Kallweit 		atomic_long_inc(&tun->rx_frame_errors);
1856df10db98SPaolo Abeni 		kfree_skb(skb);
185790e33d45SPetar Penkov 		if (frags) {
185890e33d45SPetar Penkov 			tfile->napi.skb = NULL;
185990e33d45SPetar Penkov 			mutex_unlock(&tfile->napi_mutex);
186090e33d45SPetar Penkov 		}
186190e33d45SPetar Penkov 
1862df10db98SPaolo Abeni 		return -EINVAL;
1863df10db98SPaolo Abeni 	}
1864df10db98SPaolo Abeni 
18651da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
186640630b82SMichael S. Tsirkin 	case IFF_TUN:
186740630b82SMichael S. Tsirkin 		if (tun->flags & IFF_NO_PI) {
18682580c4c1SAlexander Potapenko 			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
18692580c4c1SAlexander Potapenko 
18702580c4c1SAlexander Potapenko 			switch (ip_version) {
18712580c4c1SAlexander Potapenko 			case 4:
1872f09f7ee2SAng Way Chuang 				pi.proto = htons(ETH_P_IP);
1873f09f7ee2SAng Way Chuang 				break;
18742580c4c1SAlexander Potapenko 			case 6:
1875f09f7ee2SAng Way Chuang 				pi.proto = htons(ETH_P_IPV6);
1876f09f7ee2SAng Way Chuang 				break;
1877f09f7ee2SAng Way Chuang 			default:
1878625788b5SEric Dumazet 				dev_core_stats_rx_dropped_inc(tun->dev);
1879f09f7ee2SAng Way Chuang 				kfree_skb(skb);
1880f09f7ee2SAng Way Chuang 				return -EINVAL;
1881f09f7ee2SAng Way Chuang 			}
1882f09f7ee2SAng Way Chuang 		}
1883f09f7ee2SAng Way Chuang 
1884459a98edSArnaldo Carvalho de Melo 		skb_reset_mac_header(skb);
18851da177e4SLinus Torvalds 		skb->protocol = pi.proto;
18864c13eb66SArnaldo Carvalho de Melo 		skb->dev = tun->dev;
18871da177e4SLinus Torvalds 		break;
188840630b82SMichael S. Tsirkin 	case IFF_TAP:
188996aa1b22SWillem de Bruijn 		if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
189096aa1b22SWillem de Bruijn 			err = -ENOMEM;
18914b4f052eSDongli Zhang 			drop_reason = SKB_DROP_REASON_HDR_TRUNC;
189296aa1b22SWillem de Bruijn 			goto drop;
189396aa1b22SWillem de Bruijn 		}
18941da177e4SLinus Torvalds 		skb->protocol = eth_type_trans(skb, tun->dev);
18951da177e4SLinus Torvalds 		break;
18966403eab1SJoe Perches 	}
18971da177e4SLinus Torvalds 
18980690899bSMichael S. Tsirkin 	/* copy skb_ubuf_info for callback when skb has no error */
18990690899bSMichael S. Tsirkin 	if (zerocopy) {
19009ee5e5adSJonathan Lemon 		skb_zcopy_init(skb, msg_control);
1901af1cc7a2SJason Wang 	} else if (msg_control) {
1902af1cc7a2SJason Wang 		struct ubuf_info *uarg = msg_control;
190336177832SJonathan Lemon 		uarg->callback(NULL, uarg, false);
19040690899bSMichael S. Tsirkin 	}
19050690899bSMichael S. Tsirkin 
190672f65107SVlad Yasevich 	skb_reset_network_header(skb);
1907d2aa125dSMaxim Mikityanskiy 	skb_probe_transport_header(skb);
19083fe260e0SGilberto Bertin 	skb_record_rx_queue(skb, tfile->queue_index);
190938502af7SJason Wang 
19101cfe6e93SJason Wang 	if (skb_xdp) {
1911761876c8SJason Wang 		struct bpf_prog *xdp_prog;
1912761876c8SJason Wang 		int ret;
1913761876c8SJason Wang 
19146547e387SToshiaki Makita 		local_bh_disable();
1915761876c8SJason Wang 		rcu_read_lock();
1916761876c8SJason Wang 		xdp_prog = rcu_dereference(tun->xdp_prog);
1917761876c8SJason Wang 		if (xdp_prog) {
1918761876c8SJason Wang 			ret = do_xdp_generic(xdp_prog, skb);
1919761876c8SJason Wang 			if (ret != XDP_PASS) {
1920761876c8SJason Wang 				rcu_read_unlock();
19216547e387SToshiaki Makita 				local_bh_enable();
19221efba987SEric Dumazet 				if (frags) {
19231efba987SEric Dumazet 					tfile->napi.skb = NULL;
19241efba987SEric Dumazet 					mutex_unlock(&tfile->napi_mutex);
19251efba987SEric Dumazet 				}
1926761876c8SJason Wang 				return total_len;
1927761876c8SJason Wang 			}
1928761876c8SJason Wang 		}
1929761876c8SJason Wang 		rcu_read_unlock();
19306547e387SToshiaki Makita 		local_bh_enable();
1931761876c8SJason Wang 	}
1932761876c8SJason Wang 
1933cf1a1e07SPaolo Abeni 	/* Compute the costly rx hash only if needed for flow updates.
1934cf1a1e07SPaolo Abeni 	 * There is a small possibility of out-of-order delivery during
1935cf1a1e07SPaolo Abeni 	 * queue switching, which is not worth optimizing for.
1936cf1a1e07SPaolo Abeni 	 */
1937cf1a1e07SPaolo Abeni 	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1938cf1a1e07SPaolo Abeni 	    !tfile->detached)
1939feec084aSJason Wang 		rxhash = __skb_get_hash_symmetric(skb);
194094317099SPetar Penkov 
19414477138fSEric Dumazet 	rcu_read_lock();
19424477138fSEric Dumazet 	if (unlikely(!(tun->dev->flags & IFF_UP))) {
19434477138fSEric Dumazet 		err = -EIO;
19449180bb4fSEric Dumazet 		rcu_read_unlock();
19454b4f052eSDongli Zhang 		drop_reason = SKB_DROP_REASON_DEV_READY;
19464477138fSEric Dumazet 		goto drop;
19474477138fSEric Dumazet 	}
19484477138fSEric Dumazet 
194990e33d45SPetar Penkov 	if (frags) {
195096aa1b22SWillem de Bruijn 		u32 headlen;
195196aa1b22SWillem de Bruijn 
195290e33d45SPetar Penkov 		/* Exercise flow dissector code path. */
195396aa1b22SWillem de Bruijn 		skb_push(skb, ETH_HLEN);
195496aa1b22SWillem de Bruijn 		headlen = eth_get_headlen(tun->dev, skb->data,
1955c43f1255SStanislav Fomichev 					  skb_headlen(skb));
195690e33d45SPetar Penkov 
1957010f245bSEric Dumazet 		if (unlikely(headlen > skb_headlen(skb))) {
1958625788b5SEric Dumazet 			dev_core_stats_rx_dropped_inc(tun->dev);
195990e33d45SPetar Penkov 			napi_free_frags(&tfile->napi);
19604477138fSEric Dumazet 			rcu_read_unlock();
196190e33d45SPetar Penkov 			mutex_unlock(&tfile->napi_mutex);
196290e33d45SPetar Penkov 			WARN_ON(1);
196390e33d45SPetar Penkov 			return -ENOMEM;
196490e33d45SPetar Penkov 		}
196590e33d45SPetar Penkov 
196690e33d45SPetar Penkov 		local_bh_disable();
196790e33d45SPetar Penkov 		napi_gro_frags(&tfile->napi);
196890e33d45SPetar Penkov 		local_bh_enable();
196990e33d45SPetar Penkov 		mutex_unlock(&tfile->napi_mutex);
1970aec72f33SEric Dumazet 	} else if (tfile->napi_enabled) {
197194317099SPetar Penkov 		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
197294317099SPetar Penkov 		int queue_len;
197394317099SPetar Penkov 
197494317099SPetar Penkov 		spin_lock_bh(&queue->lock);
197594317099SPetar Penkov 		__skb_queue_tail(queue, skb);
197694317099SPetar Penkov 		queue_len = skb_queue_len(queue);
197794317099SPetar Penkov 		spin_unlock(&queue->lock);
197894317099SPetar Penkov 
197994317099SPetar Penkov 		if (!more || queue_len > NAPI_POLL_WEIGHT)
198094317099SPetar Penkov 			napi_schedule(&tfile->napi);
198194317099SPetar Penkov 
198294317099SPetar Penkov 		local_bh_enable();
198394317099SPetar Penkov 	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
19845503fcecSJason Wang 		tun_rx_batched(tun, tfile, skb, more);
198594317099SPetar Penkov 	} else {
19863d391f65SSebastian Andrzej Siewior 		netif_rx(skb);
198794317099SPetar Penkov 	}
19884477138fSEric Dumazet 	rcu_read_unlock();
19891da177e4SLinus Torvalds 
1990497a5757SHeiner Kallweit 	preempt_disable();
1991497a5757SHeiner Kallweit 	dev_sw_netstats_rx_add(tun->dev, len);
1992497a5757SHeiner Kallweit 	preempt_enable();
19931da177e4SLinus Torvalds 
199496f84061SJason Wang 	if (rxhash)
19959e85722dSJason Wang 		tun_flow_update(tun, rxhash, tfile);
199696f84061SJason Wang 
19970690899bSMichael S. Tsirkin 	return total_len;
19981da177e4SLinus Torvalds }
19991da177e4SLinus Torvalds 
2000f5ff53b4SAl Viro static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
20011da177e4SLinus Torvalds {
200233dccbb0SHerbert Xu 	struct file *file = iocb->ki_filp;
200354f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
20049484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
2005631ab46bSEric W. Biederman 	ssize_t result;
20065aac0390SJens Axboe 	int noblock = 0;
20071da177e4SLinus Torvalds 
20081da177e4SLinus Torvalds 	if (!tun)
20091da177e4SLinus Torvalds 		return -EBADFD;
20101da177e4SLinus Torvalds 
20115aac0390SJens Axboe 	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
20125aac0390SJens Axboe 		noblock = 1;
20135aac0390SJens Axboe 
20145aac0390SJens Axboe 	result = tun_get_user(tun, tfile, NULL, from, noblock, false);
2015631ab46bSEric W. Biederman 
2016631ab46bSEric W. Biederman 	tun_put(tun);
2017631ab46bSEric W. Biederman 	return result;
20181da177e4SLinus Torvalds }
20191da177e4SLinus Torvalds 
2020fc72d1d5SJason Wang static ssize_t tun_put_user_xdp(struct tun_struct *tun,
2021fc72d1d5SJason Wang 				struct tun_file *tfile,
20221ffcbc85SJesper Dangaard Brouer 				struct xdp_frame *xdp_frame,
2023fc72d1d5SJason Wang 				struct iov_iter *iter)
2024fc72d1d5SJason Wang {
2025fc72d1d5SJason Wang 	int vnet_hdr_sz = 0;
20261ffcbc85SJesper Dangaard Brouer 	size_t size = xdp_frame->len;
2027fc72d1d5SJason Wang 	size_t ret;
2028fc72d1d5SJason Wang 
2029fc72d1d5SJason Wang 	if (tun->flags & IFF_VNET_HDR) {
2030fc72d1d5SJason Wang 		struct virtio_net_hdr gso = { 0 };
2031fc72d1d5SJason Wang 
2032fc72d1d5SJason Wang 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2033fc72d1d5SJason Wang 		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
2034fc72d1d5SJason Wang 			return -EINVAL;
2035fc72d1d5SJason Wang 		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
2036fc72d1d5SJason Wang 			     sizeof(gso)))
2037fc72d1d5SJason Wang 			return -EFAULT;
2038fc72d1d5SJason Wang 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2039fc72d1d5SJason Wang 	}
2040fc72d1d5SJason Wang 
20411ffcbc85SJesper Dangaard Brouer 	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
2042fc72d1d5SJason Wang 
2043497a5757SHeiner Kallweit 	preempt_disable();
2044497a5757SHeiner Kallweit 	dev_sw_netstats_tx_add(tun->dev, 1, ret);
2045497a5757SHeiner Kallweit 	preempt_enable();
2046fc72d1d5SJason Wang 
2047fc72d1d5SJason Wang 	return ret;
2048fc72d1d5SJason Wang }
2049fc72d1d5SJason Wang 
20501da177e4SLinus Torvalds /* Put a packet into the user space buffer */
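/* Editor's note: the data handed to user space by tun_put_user() is laid
 * out as
 *
 *	[ struct tun_pi ]	unless IFF_NO_PI is set
 *	[ virtio_net_hdr ]	only with IFF_VNET_HDR (vnet_hdr_sz bytes)
 *	[ frame data ]		with any offloaded VLAN tag re-inserted
 *				right after the MAC addresses
 *
 * TUN_PKT_STRIP is set in tun_pi.flags when the reader's buffer is too
 * small and the packet had to be truncated.
 */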
20516f7c156cSstephen hemminger static ssize_t tun_put_user(struct tun_struct *tun,
205254f968d6SJason Wang 			    struct tun_file *tfile,
20531da177e4SLinus Torvalds 			    struct sk_buff *skb,
2054e0b46d0eSHerbert Xu 			    struct iov_iter *iter)
20551da177e4SLinus Torvalds {
20561da177e4SLinus Torvalds 	struct tun_pi pi = { 0, skb->protocol };
2057e0b46d0eSHerbert Xu 	ssize_t total;
20588c847d25SJason Wang 	int vlan_offset = 0;
2059a8f9bfdfSHerbert Xu 	int vlan_hlen = 0;
20602eb783c4SHerbert Xu 	int vnet_hdr_sz = 0;
2061a8f9bfdfSHerbert Xu 
2062df8a39deSJiri Pirko 	if (skb_vlan_tag_present(skb))
2063a8f9bfdfSHerbert Xu 		vlan_hlen = VLAN_HLEN;
20641da177e4SLinus Torvalds 
206540630b82SMichael S. Tsirkin 	if (tun->flags & IFF_VNET_HDR)
2066e1edab87SWillem de Bruijn 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
20671da177e4SLinus Torvalds 
2068e0b46d0eSHerbert Xu 	total = skb->len + vlan_hlen + vnet_hdr_sz;
2069e0b46d0eSHerbert Xu 
207040630b82SMichael S. Tsirkin 	if (!(tun->flags & IFF_NO_PI)) {
2071e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < sizeof(pi))
20721da177e4SLinus Torvalds 			return -EINVAL;
20731da177e4SLinus Torvalds 
2074e0b46d0eSHerbert Xu 		total += sizeof(pi);
2075e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < total) {
20761da177e4SLinus Torvalds 			/* Packet will be stripped */
20771da177e4SLinus Torvalds 			pi.flags |= TUN_PKT_STRIP;
20781da177e4SLinus Torvalds 		}
20791da177e4SLinus Torvalds 
2080e0b46d0eSHerbert Xu 		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
20811da177e4SLinus Torvalds 			return -EFAULT;
20821da177e4SLinus Torvalds 	}
20831da177e4SLinus Torvalds 
20842eb783c4SHerbert Xu 	if (vnet_hdr_sz) {
20859403cd7cSJarno Rajahalme 		struct virtio_net_hdr gso;
208634166093SMike Rapoport 
2087e0b46d0eSHerbert Xu 		if (iov_iter_count(iter) < vnet_hdr_sz)
2088f43798c2SRusty Russell 			return -EINVAL;
2089f43798c2SRusty Russell 
20903e9e40e7SJarno Rajahalme 		if (virtio_net_hdr_from_skb(skb, &gso,
2091fd3a8862SWillem de Bruijn 					    tun_is_little_endian(tun), true,
2092fd3a8862SWillem de Bruijn 					    vlan_hlen)) {
2093f43798c2SRusty Russell 			struct skb_shared_info *sinfo = skb_shinfo(skb);
20946b8a66eeSJoe Perches 			pr_err("unexpected GSO type: "
2095ef3db4a5SMichael S. Tsirkin 			       "0x%x, gso_size %d, hdr_len %d\n",
209656f0dcc5SMichael S. Tsirkin 			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
209756f0dcc5SMichael S. Tsirkin 			       tun16_to_cpu(tun, gso.hdr_len));
2098ef3db4a5SMichael S. Tsirkin 			print_hex_dump(KERN_ERR, "tun: ",
2099ef3db4a5SMichael S. Tsirkin 				       DUMP_PREFIX_NONE,
2100ef3db4a5SMichael S. Tsirkin 				       16, 1, skb->head,
210156f0dcc5SMichael S. Tsirkin 				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2102ef3db4a5SMichael S. Tsirkin 			WARN_ON_ONCE(1);
2103ef3db4a5SMichael S. Tsirkin 			return -EINVAL;
2104ef3db4a5SMichael S. Tsirkin 		}
2105f43798c2SRusty Russell 
2106e0b46d0eSHerbert Xu 		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2107f43798c2SRusty Russell 			return -EFAULT;
21088c847d25SJason Wang 
21098c847d25SJason Wang 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2110f43798c2SRusty Russell 	}
2111f43798c2SRusty Russell 
2112a8f9bfdfSHerbert Xu 	if (vlan_hlen) {
2113e0b46d0eSHerbert Xu 		int ret;
2114aff3d70aSJason Wang 		struct veth veth;
21151da177e4SLinus Torvalds 
21166680ec68SJason Wang 		veth.h_vlan_proto = skb->vlan_proto;
2117df8a39deSJiri Pirko 		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
21181da177e4SLinus Torvalds 
21196680ec68SJason Wang 		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
21206680ec68SJason Wang 
2121e0b46d0eSHerbert Xu 		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2122e0b46d0eSHerbert Xu 		if (ret || !iov_iter_count(iter))
21236680ec68SJason Wang 			goto done;
21246680ec68SJason Wang 
2125e0b46d0eSHerbert Xu 		ret = copy_to_iter(&veth, sizeof(veth), iter);
2126e0b46d0eSHerbert Xu 		if (ret != sizeof(veth) || !iov_iter_count(iter))
21276680ec68SJason Wang 			goto done;
21286680ec68SJason Wang 	}
21296680ec68SJason Wang 
2130e0b46d0eSHerbert Xu 	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
21316680ec68SJason Wang 
21326680ec68SJason Wang done:
2133608b9977SPaolo Abeni 	/* caller is in process context */
2134497a5757SHeiner Kallweit 	preempt_disable();
2135497a5757SHeiner Kallweit 	dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen);
2136497a5757SHeiner Kallweit 	preempt_enable();
21371da177e4SLinus Torvalds 
21381da177e4SLinus Torvalds 	return total;
21391da177e4SLinus Torvalds }
21401da177e4SLinus Torvalds 
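/* Editor's note: tun_ring_recv() is an open-coded blocking consume on the
 * ptr_ring.  It first tries a plain ptr_ring_consume(); if that fails and
 * blocking is allowed, it joins the socket wait queue, loops in
 * TASK_INTERRUPTIBLE re-checking the ring, and bails out on a pending
 * signal or on RCV_SHUTDOWN.  The producer side wakes it through
 * sk_data_ready() in tun_net_xmit() and __tun_xdp_flush_tfile().
 */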
2141fc72d1d5SJason Wang static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
21421576d986SJason Wang {
21431576d986SJason Wang 	DECLARE_WAITQUEUE(wait, current);
2144fc72d1d5SJason Wang 	void *ptr = NULL;
2145f48cc6b2SJason Wang 	int error = 0;
21461576d986SJason Wang 
2147fc72d1d5SJason Wang 	ptr = ptr_ring_consume(&tfile->tx_ring);
2148fc72d1d5SJason Wang 	if (ptr)
21491576d986SJason Wang 		goto out;
21501576d986SJason Wang 	if (noblock) {
2151f48cc6b2SJason Wang 		error = -EAGAIN;
21521576d986SJason Wang 		goto out;
21531576d986SJason Wang 	}
21541576d986SJason Wang 
2155333f7909SAl Viro 	add_wait_queue(&tfile->socket.wq.wait, &wait);
21561576d986SJason Wang 
21571576d986SJason Wang 	while (1) {
215871828b22STimur Celik 		set_current_state(TASK_INTERRUPTIBLE);
2159fc72d1d5SJason Wang 		ptr = ptr_ring_consume(&tfile->tx_ring);
2160fc72d1d5SJason Wang 		if (ptr)
21611576d986SJason Wang 			break;
21621576d986SJason Wang 		if (signal_pending(current)) {
2163f48cc6b2SJason Wang 			error = -ERESTARTSYS;
21641576d986SJason Wang 			break;
21651576d986SJason Wang 		}
21661576d986SJason Wang 		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2167f48cc6b2SJason Wang 			error = -EFAULT;
21681576d986SJason Wang 			break;
21691576d986SJason Wang 		}
21701576d986SJason Wang 
21711576d986SJason Wang 		schedule();
21721576d986SJason Wang 	}
21731576d986SJason Wang 
2174ecef67cbSTimur Celik 	__set_current_state(TASK_RUNNING);
2175333f7909SAl Viro 	remove_wait_queue(&tfile->socket.wq.wait, &wait);
21761576d986SJason Wang 
21771576d986SJason Wang out:
2178f48cc6b2SJason Wang 	*err = error;
2179fc72d1d5SJason Wang 	return ptr;
21801576d986SJason Wang }
21811576d986SJason Wang 
218254f968d6SJason Wang static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
21839b067034SAl Viro 			   struct iov_iter *to,
2184fc72d1d5SJason Wang 			   int noblock, void *ptr)
21851da177e4SLinus Torvalds {
21869b067034SAl Viro 	ssize_t ret;
21871576d986SJason Wang 	int err;
21881da177e4SLinus Torvalds 
2189c33ee15bSWei Xu 	if (!iov_iter_count(to)) {
2190fc72d1d5SJason Wang 		tun_ptr_free(ptr);
21919b067034SAl Viro 		return 0;
2192c33ee15bSWei Xu 	}
21931da177e4SLinus Torvalds 
2194fc72d1d5SJason Wang 	if (!ptr) {
21951576d986SJason Wang 		/* Read frames from ring */
2196fc72d1d5SJason Wang 		ptr = tun_ring_recv(tfile, noblock, &err);
2197fc72d1d5SJason Wang 		if (!ptr)
2198957f094fSAlex Gartrell 			return err;
2199ac77cfd4SJason Wang 	}
2200e0b46d0eSHerbert Xu 
22011ffcbc85SJesper Dangaard Brouer 	if (tun_is_xdp_frame(ptr)) {
22021ffcbc85SJesper Dangaard Brouer 		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2203fc72d1d5SJason Wang 
22041ffcbc85SJesper Dangaard Brouer 		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
220503993094SJesper Dangaard Brouer 		xdp_return_frame(xdpf);
2206fc72d1d5SJason Wang 	} else {
2207fc72d1d5SJason Wang 		struct sk_buff *skb = ptr;
2208fc72d1d5SJason Wang 
22099b067034SAl Viro 		ret = tun_put_user(tun, tfile, skb, to);
2210f51a5e82SJason Wang 		if (unlikely(ret < 0))
22111da177e4SLinus Torvalds 			kfree_skb(skb);
2212f51a5e82SJason Wang 		else
2213f51a5e82SJason Wang 			consume_skb(skb);
2214fc72d1d5SJason Wang 	}
22151da177e4SLinus Torvalds 
221605c2828cSMichael S. Tsirkin 	return ret;
221705c2828cSMichael S. Tsirkin }
221805c2828cSMichael S. Tsirkin 
22199b067034SAl Viro static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
222005c2828cSMichael S. Tsirkin {
222105c2828cSMichael S. Tsirkin 	struct file *file = iocb->ki_filp;
222205c2828cSMichael S. Tsirkin 	struct tun_file *tfile = file->private_data;
22239484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
22249b067034SAl Viro 	ssize_t len = iov_iter_count(to), ret;
22255aac0390SJens Axboe 	int noblock = 0;
222605c2828cSMichael S. Tsirkin 
222705c2828cSMichael S. Tsirkin 	if (!tun)
222805c2828cSMichael S. Tsirkin 		return -EBADFD;
22295aac0390SJens Axboe 
22305aac0390SJens Axboe 	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
22315aac0390SJens Axboe 		noblock = 1;
22325aac0390SJens Axboe 
22335aac0390SJens Axboe 	ret = tun_do_read(tun, tfile, to, noblock, NULL);
223442404c09SDavid S. Miller 	ret = min_t(ssize_t, ret, len);
2235d0b7da8aSZhi Yong Wu 	if (ret > 0)
2236d0b7da8aSZhi Yong Wu 		iocb->ki_pos = ret;
2237631ab46bSEric W. Biederman 	tun_put(tun);
22381da177e4SLinus Torvalds 	return ret;
22391da177e4SLinus Torvalds }
22401da177e4SLinus Torvalds 
2241cd5681d7SJason Wang static void tun_prog_free(struct rcu_head *rcu)
224296f84061SJason Wang {
2243cd5681d7SJason Wang 	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
224496f84061SJason Wang 
224596f84061SJason Wang 	bpf_prog_destroy(prog->prog);
224696f84061SJason Wang 	kfree(prog);
224796f84061SJason Wang }
224896f84061SJason Wang 
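/* Editor's note: __tun_set_ebpf() follows the usual RCU publish/retire
 * pattern: the new program is published with rcu_assign_pointer() under
 * tun->lock, readers dereference it under rcu_read_lock() (the steering and
 * filter programs are read in the xmit path above), and the old program is
 * only freed after a grace period via call_rcu() -> tun_prog_free().
 */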
22499d6474e4SJason Wang static int __tun_set_ebpf(struct tun_struct *tun,
22509d6474e4SJason Wang 			  struct tun_prog __rcu **prog_p,
225196f84061SJason Wang 			  struct bpf_prog *prog)
225296f84061SJason Wang {
2253cd5681d7SJason Wang 	struct tun_prog *old, *new = NULL;
225496f84061SJason Wang 
225596f84061SJason Wang 	if (prog) {
225696f84061SJason Wang 		new = kmalloc(sizeof(*new), GFP_KERNEL);
225796f84061SJason Wang 		if (!new)
225896f84061SJason Wang 			return -ENOMEM;
225996f84061SJason Wang 		new->prog = prog;
226096f84061SJason Wang 	}
226196f84061SJason Wang 
2262124da8f6SJason Wang 	spin_lock_bh(&tun->lock);
2263cd5681d7SJason Wang 	old = rcu_dereference_protected(*prog_p,
2264124da8f6SJason Wang 					lockdep_is_held(&tun->lock));
2265cd5681d7SJason Wang 	rcu_assign_pointer(*prog_p, new);
2266124da8f6SJason Wang 	spin_unlock_bh(&tun->lock);
226796f84061SJason Wang 
226896f84061SJason Wang 	if (old)
2269cd5681d7SJason Wang 		call_rcu(&old->rcu, tun_prog_free);
227096f84061SJason Wang 
227196f84061SJason Wang 	return 0;
227296f84061SJason Wang }
227396f84061SJason Wang 
227496442e42SJason Wang static void tun_free_netdev(struct net_device *dev)
227596442e42SJason Wang {
227696442e42SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
227796442e42SJason Wang 
22784008e97fSJason Wang 	BUG_ON(!(list_empty(&tun->disabled)));
227911fc7d5aSEric Dumazet 
2280497a5757SHeiner Kallweit 	free_percpu(dev->tstats);
228196442e42SJason Wang 	tun_flow_uninit(tun);
22825dbbaf2dSPaul Moore 	security_tun_dev_free_security(tun->security);
2283cd5681d7SJason Wang 	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
2284aff3d70aSJason Wang 	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
228596442e42SJason Wang }
228696442e42SJason Wang 
22871da177e4SLinus Torvalds static void tun_setup(struct net_device *dev)
22881da177e4SLinus Torvalds {
22891da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
22901da177e4SLinus Torvalds 
22910625c883SEric W. Biederman 	tun->owner = INVALID_UID;
22920625c883SEric W. Biederman 	tun->group = INVALID_GID;
22934e24f2ddSChas Williams 	tun_default_link_ksettings(dev, &tun->link_ksettings);
22941da177e4SLinus Torvalds 
22951da177e4SLinus Torvalds 	dev->ethtool_ops = &tun_ethtool_ops;
2296cf124db5SDavid S. Miller 	dev->needs_free_netdev = true;
2297cf124db5SDavid S. Miller 	dev->priv_destructor = tun_free_netdev;
2298016adb72SJason Wang 	/* We prefer our own queue length */
2299016adb72SJason Wang 	dev->tx_queue_len = TUN_READQ_SIZE;
23001da177e4SLinus Torvalds }
23011da177e4SLinus Torvalds 
2302f019a7a5SEric W. Biederman /* Trivial set of netlink ops to allow deleting a tun or tap
2303f019a7a5SEric W. Biederman  * device with netlink.
2304f019a7a5SEric W. Biederman  */
2305a8b8a889SMatthias Schiffer static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2306a8b8a889SMatthias Schiffer 			struct netlink_ext_ack *extack)
2307f019a7a5SEric W. Biederman {
230835b827b6SNicolas Dichtel 	NL_SET_ERR_MSG(extack,
230935b827b6SNicolas Dichtel 		       "tun/tap creation via rtnetlink is not supported.");
231035b827b6SNicolas Dichtel 	return -EOPNOTSUPP;
2311f019a7a5SEric W. Biederman }
2312f019a7a5SEric W. Biederman 
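/* Illustrative note, not part of the upstream driver: since rtnetlink
 * creation is rejected above, tun/tap interfaces are instead created through
 * the character device.  A minimal userspace sketch (error handling omitted,
 * "tap0" is only an example name):
 *
 *	struct ifreq ifr;
 *	int fd = open("/dev/net/tun", O_RDWR);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
 *	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ);
 *	ioctl(fd, TUNSETIFF, &ifr);
 */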
23131ec010e7SSabrina Dubroca static size_t tun_get_size(const struct net_device *dev)
23141ec010e7SSabrina Dubroca {
23151ec010e7SSabrina Dubroca 	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
23161ec010e7SSabrina Dubroca 	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
23171ec010e7SSabrina Dubroca 
23181ec010e7SSabrina Dubroca 	return nla_total_size(sizeof(uid_t)) + /* OWNER */
23191ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(gid_t)) + /* GROUP */
23201ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* TYPE */
23211ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* PI */
23221ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* VNET_HDR */
23231ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* PERSIST */
23241ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
23251ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
23261ec010e7SSabrina Dubroca 	       nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
23271ec010e7SSabrina Dubroca 	       0;
23281ec010e7SSabrina Dubroca }
23291ec010e7SSabrina Dubroca 
23301ec010e7SSabrina Dubroca static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
23311ec010e7SSabrina Dubroca {
23321ec010e7SSabrina Dubroca 	struct tun_struct *tun = netdev_priv(dev);
23331ec010e7SSabrina Dubroca 
23341ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
23351ec010e7SSabrina Dubroca 		goto nla_put_failure;
23361ec010e7SSabrina Dubroca 	if (uid_valid(tun->owner) &&
23371ec010e7SSabrina Dubroca 	    nla_put_u32(skb, IFLA_TUN_OWNER,
23381ec010e7SSabrina Dubroca 			from_kuid_munged(current_user_ns(), tun->owner)))
23391ec010e7SSabrina Dubroca 		goto nla_put_failure;
23401ec010e7SSabrina Dubroca 	if (gid_valid(tun->group) &&
23411ec010e7SSabrina Dubroca 	    nla_put_u32(skb, IFLA_TUN_GROUP,
23421ec010e7SSabrina Dubroca 			from_kgid_munged(current_user_ns(), tun->group)))
23431ec010e7SSabrina Dubroca 		goto nla_put_failure;
23441ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
23451ec010e7SSabrina Dubroca 		goto nla_put_failure;
23461ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
23471ec010e7SSabrina Dubroca 		goto nla_put_failure;
23481ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
23491ec010e7SSabrina Dubroca 		goto nla_put_failure;
23501ec010e7SSabrina Dubroca 	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
23511ec010e7SSabrina Dubroca 		       !!(tun->flags & IFF_MULTI_QUEUE)))
23521ec010e7SSabrina Dubroca 		goto nla_put_failure;
23531ec010e7SSabrina Dubroca 	if (tun->flags & IFF_MULTI_QUEUE) {
23541ec010e7SSabrina Dubroca 		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
23551ec010e7SSabrina Dubroca 			goto nla_put_failure;
23561ec010e7SSabrina Dubroca 		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
23571ec010e7SSabrina Dubroca 				tun->numdisabled))
23581ec010e7SSabrina Dubroca 			goto nla_put_failure;
23591ec010e7SSabrina Dubroca 	}
23601ec010e7SSabrina Dubroca 
23611ec010e7SSabrina Dubroca 	return 0;
23621ec010e7SSabrina Dubroca 
23631ec010e7SSabrina Dubroca nla_put_failure:
23641ec010e7SSabrina Dubroca 	return -EMSGSIZE;
23651ec010e7SSabrina Dubroca }
23661ec010e7SSabrina Dubroca 
2367f019a7a5SEric W. Biederman static struct rtnl_link_ops tun_link_ops __read_mostly = {
2368f019a7a5SEric W. Biederman 	.kind		= DRV_NAME,
2369f019a7a5SEric W. Biederman 	.priv_size	= sizeof(struct tun_struct),
2370f019a7a5SEric W. Biederman 	.setup		= tun_setup,
2371f019a7a5SEric W. Biederman 	.validate	= tun_validate,
23721ec010e7SSabrina Dubroca 	.get_size       = tun_get_size,
23731ec010e7SSabrina Dubroca 	.fill_info      = tun_fill_info,
2374f019a7a5SEric W. Biederman };
2375f019a7a5SEric W. Biederman 
237633dccbb0SHerbert Xu static void tun_sock_write_space(struct sock *sk)
237733dccbb0SHerbert Xu {
237854f968d6SJason Wang 	struct tun_file *tfile;
237943815482SEric Dumazet 	wait_queue_head_t *wqueue;
238033dccbb0SHerbert Xu 
238133dccbb0SHerbert Xu 	if (!sock_writeable(sk))
238233dccbb0SHerbert Xu 		return;
238333dccbb0SHerbert Xu 
23849cd3e072SEric Dumazet 	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
238533dccbb0SHerbert Xu 		return;
238633dccbb0SHerbert Xu 
238743815482SEric Dumazet 	wqueue = sk_sleep(sk);
238843815482SEric Dumazet 	if (wqueue && waitqueue_active(wqueue))
2389a9a08845SLinus Torvalds 		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2390a9a08845SLinus Torvalds 						EPOLLWRNORM | EPOLLWRBAND);
2391c722c625SHerbert Xu 
239254f968d6SJason Wang 	tfile = container_of(sk, struct tun_file, sk);
239354f968d6SJason Wang 	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
239433dccbb0SHerbert Xu }
239533dccbb0SHerbert Xu 
2396f9e06c45SJason Wang static void tun_put_page(struct tun_page *tpage)
2397f9e06c45SJason Wang {
2398f9e06c45SJason Wang 	if (tpage->page)
2399f9e06c45SJason Wang 		__page_frag_cache_drain(tpage->page, tpage->count);
2400f9e06c45SJason Wang }
2401f9e06c45SJason Wang 
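/* Editorial summary (not in the upstream source): tun_xdp_one() handles a
 * single xdp_buff from a TUN_MSG_PTR sendmsg batch.  If an XDP program is
 * attached and the buffer carries no GSO metadata, the program runs on the
 * raw buffer (REDIRECT/TX/drop results never reach the stack); otherwise an
 * skb is built, optionally run through generic XDP, and then either queued
 * for this queue's NAPI poll or delivered via netif_receive_skb().
 */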
2402043d222fSJason Wang static int tun_xdp_one(struct tun_struct *tun,
2403043d222fSJason Wang 		       struct tun_file *tfile,
2404f9e06c45SJason Wang 		       struct xdp_buff *xdp, int *flush,
2405f9e06c45SJason Wang 		       struct tun_page *tpage)
2406043d222fSJason Wang {
24074e4b08e5SPrashant Bhole 	unsigned int datasize = xdp->data_end - xdp->data;
2408043d222fSJason Wang 	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
2409043d222fSJason Wang 	struct virtio_net_hdr *gso = &hdr->gso;
2410043d222fSJason Wang 	struct bpf_prog *xdp_prog;
2411043d222fSJason Wang 	struct sk_buff *skb = NULL;
2412fb3f9037SHarold Huang 	struct sk_buff_head *queue;
2413043d222fSJason Wang 	u32 rxhash = 0, act;
2414043d222fSJason Wang 	int buflen = hdr->buflen;
2415fb3f9037SHarold Huang 	int ret = 0;
2416043d222fSJason Wang 	bool skb_xdp = false;
2417f9e06c45SJason Wang 	struct page *page;
2418043d222fSJason Wang 
2419043d222fSJason Wang 	xdp_prog = rcu_dereference(tun->xdp_prog);
2420043d222fSJason Wang 	if (xdp_prog) {
2421043d222fSJason Wang 		if (gso->gso_type) {
2422043d222fSJason Wang 			skb_xdp = true;
2423043d222fSJason Wang 			goto build;
2424043d222fSJason Wang 		}
242543b5169dSLorenzo Bianconi 
242643b5169dSLorenzo Bianconi 		xdp_init_buff(xdp, buflen, &tfile->xdp_rxq);
2427043d222fSJason Wang 		xdp_set_data_meta_invalid(xdp);
2428043d222fSJason Wang 
2429043d222fSJason Wang 		act = bpf_prog_run_xdp(xdp_prog, xdp);
2430fb3f9037SHarold Huang 		ret = tun_xdp_act(tun, xdp_prog, xdp, act);
2431fb3f9037SHarold Huang 		if (ret < 0) {
2432043d222fSJason Wang 			put_page(virt_to_head_page(xdp->data));
2433fb3f9037SHarold Huang 			return ret;
2434043d222fSJason Wang 		}
2435043d222fSJason Wang 
2436fb3f9037SHarold Huang 		switch (ret) {
2437043d222fSJason Wang 		case XDP_REDIRECT:
2438043d222fSJason Wang 			*flush = true;
2439df561f66SGustavo A. R. Silva 			fallthrough;
2440043d222fSJason Wang 		case XDP_TX:
2441043d222fSJason Wang 			return 0;
2442043d222fSJason Wang 		case XDP_PASS:
2443043d222fSJason Wang 			break;
2444043d222fSJason Wang 		default:
2445f9e06c45SJason Wang 			page = virt_to_head_page(xdp->data);
2446f9e06c45SJason Wang 			if (tpage->page == page) {
2447f9e06c45SJason Wang 				++tpage->count;
2448f9e06c45SJason Wang 			} else {
2449f9e06c45SJason Wang 				tun_put_page(tpage);
2450f9e06c45SJason Wang 				tpage->page = page;
2451f9e06c45SJason Wang 				tpage->count = 1;
2452f9e06c45SJason Wang 			}
2453043d222fSJason Wang 			return 0;
2454043d222fSJason Wang 		}
2455043d222fSJason Wang 	}
2456043d222fSJason Wang 
2457043d222fSJason Wang build:
2458043d222fSJason Wang 	skb = build_skb(xdp->data_hard_start, buflen);
2459043d222fSJason Wang 	if (!skb) {
2460fb3f9037SHarold Huang 		ret = -ENOMEM;
2461043d222fSJason Wang 		goto out;
2462043d222fSJason Wang 	}
2463043d222fSJason Wang 
2464043d222fSJason Wang 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
2465043d222fSJason Wang 	skb_put(skb, xdp->data_end - xdp->data);
2466043d222fSJason Wang 
2467043d222fSJason Wang 	if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2468497a5757SHeiner Kallweit 		atomic_long_inc(&tun->rx_frame_errors);
2469043d222fSJason Wang 		kfree_skb(skb);
2470fb3f9037SHarold Huang 		ret = -EINVAL;
2471043d222fSJason Wang 		goto out;
2472043d222fSJason Wang 	}
2473043d222fSJason Wang 
2474043d222fSJason Wang 	skb->protocol = eth_type_trans(skb, tun->dev);
2475043d222fSJason Wang 	skb_reset_network_header(skb);
2476d2aa125dSMaxim Mikityanskiy 	skb_probe_transport_header(skb);
24773fe260e0SGilberto Bertin 	skb_record_rx_queue(skb, tfile->queue_index);
2478043d222fSJason Wang 
2479043d222fSJason Wang 	if (skb_xdp) {
2480fb3f9037SHarold Huang 		ret = do_xdp_generic(xdp_prog, skb);
2481fb3f9037SHarold Huang 		if (ret != XDP_PASS) {
2482fb3f9037SHarold Huang 			ret = 0;
2483043d222fSJason Wang 			goto out;
2484043d222fSJason Wang 		}
2485fb3f9037SHarold Huang 	}
2486043d222fSJason Wang 
2487f29eb2a9SPaolo Abeni 	if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2488f29eb2a9SPaolo Abeni 	    !tfile->detached)
2489043d222fSJason Wang 		rxhash = __skb_get_hash_symmetric(skb);
2490043d222fSJason Wang 
2491fb3f9037SHarold Huang 	if (tfile->napi_enabled) {
2492fb3f9037SHarold Huang 		queue = &tfile->sk.sk_write_queue;
2493fb3f9037SHarold Huang 		spin_lock(&queue->lock);
2494fb3f9037SHarold Huang 		__skb_queue_tail(queue, skb);
2495fb3f9037SHarold Huang 		spin_unlock(&queue->lock);
2496fb3f9037SHarold Huang 		ret = 1;
2497fb3f9037SHarold Huang 	} else {
2498043d222fSJason Wang 		netif_receive_skb(skb);
2499fb3f9037SHarold Huang 		ret = 0;
2500fb3f9037SHarold Huang 	}
2501043d222fSJason Wang 
2502497a5757SHeiner Kallweit 	/* No need to disable preemption here since this function is
25036342ca64SPrashant Bhole 	 * always called with bh disabled
25046342ca64SPrashant Bhole 	 */
2505497a5757SHeiner Kallweit 	dev_sw_netstats_rx_add(tun->dev, datasize);
2506043d222fSJason Wang 
2507043d222fSJason Wang 	if (rxhash)
2508043d222fSJason Wang 		tun_flow_update(tun, rxhash, tfile);
2509043d222fSJason Wang 
2510043d222fSJason Wang out:
2511fb3f9037SHarold Huang 	return ret;
2512043d222fSJason Wang }
2513043d222fSJason Wang 
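/* Editorial note (not in the upstream source): the TUN_MSG_PTR control block
 * handled below lets an in-kernel caller (in practice vhost-net) hand over a
 * whole array of xdp_buff pointers in one sendmsg() call; everything else
 * falls back to the regular tun_get_user() copy path.
 */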
25141b784140SYing Xue static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
251505c2828cSMichael S. Tsirkin {
2516043d222fSJason Wang 	int ret, i;
251754f968d6SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
25189484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
2519fe8dd45bSJason Wang 	struct tun_msg_ctl *ctl = m->msg_control;
2520043d222fSJason Wang 	struct xdp_buff *xdp;
252154f968d6SJason Wang 
252254f968d6SJason Wang 	if (!tun)
252354f968d6SJason Wang 		return -EBADFD;
2524f5ff53b4SAl Viro 
252574a335a0SHarold Huang 	if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
252674a335a0SHarold Huang 	    ctl && ctl->type == TUN_MSG_PTR) {
25276f0271d9SDavid S. Miller 		struct tun_page tpage;
2528043d222fSJason Wang 		int n = ctl->num;
2529fb3f9037SHarold Huang 		int flush = 0, queued = 0;
2530043d222fSJason Wang 
25316f0271d9SDavid S. Miller 		memset(&tpage, 0, sizeof(tpage));
25326f0271d9SDavid S. Miller 
2533043d222fSJason Wang 		local_bh_disable();
2534043d222fSJason Wang 		rcu_read_lock();
2535043d222fSJason Wang 
2536043d222fSJason Wang 		for (i = 0; i < n; i++) {
2537043d222fSJason Wang 			xdp = &((struct xdp_buff *)ctl->ptr)[i];
2538fb3f9037SHarold Huang 			ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
2539fb3f9037SHarold Huang 			if (ret > 0)
2540fb3f9037SHarold Huang 				queued += ret;
2541043d222fSJason Wang 		}
2542043d222fSJason Wang 
2543043d222fSJason Wang 		if (flush)
25441d233886SToke Høiland-Jørgensen 			xdp_do_flush();
2545043d222fSJason Wang 
2546fb3f9037SHarold Huang 		if (tfile->napi_enabled && queued > 0)
2547fb3f9037SHarold Huang 			napi_schedule(&tfile->napi);
2548fb3f9037SHarold Huang 
2549043d222fSJason Wang 		rcu_read_unlock();
2550043d222fSJason Wang 		local_bh_enable();
2551043d222fSJason Wang 
2552f9e06c45SJason Wang 		tun_put_page(&tpage);
2553f9e06c45SJason Wang 
2554043d222fSJason Wang 		ret = total_len;
2555043d222fSJason Wang 		goto out;
2556043d222fSJason Wang 	}
2557fe8dd45bSJason Wang 
2558fe8dd45bSJason Wang 	ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
25595503fcecSJason Wang 			   m->msg_flags & MSG_DONTWAIT,
25605503fcecSJason Wang 			   m->msg_flags & MSG_MORE);
2561043d222fSJason Wang out:
256254f968d6SJason Wang 	tun_put(tun);
256354f968d6SJason Wang 	return ret;
256405c2828cSMichael S. Tsirkin }
256505c2828cSMichael S. Tsirkin 
25661b784140SYing Xue static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
256705c2828cSMichael S. Tsirkin 		       int flags)
256805c2828cSMichael S. Tsirkin {
256954f968d6SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
25709484dc74Syuan linyu 	struct tun_struct *tun = tun_get(tfile);
2571fc72d1d5SJason Wang 	void *ptr = m->msg_control;
257205c2828cSMichael S. Tsirkin 	int ret;
257354f968d6SJason Wang 
2574c33ee15bSWei Xu 	if (!tun) {
2575c33ee15bSWei Xu 		ret = -EBADFD;
2576fc72d1d5SJason Wang 		goto out_free;
2577c33ee15bSWei Xu 	}
257854f968d6SJason Wang 
2579eda29772SRichard Cochran 	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
25803811ae76SGao feng 		ret = -EINVAL;
2581c33ee15bSWei Xu 		goto out_put_tun;
25823811ae76SGao feng 	}
2583eda29772SRichard Cochran 	if (flags & MSG_ERRQUEUE) {
2584eda29772SRichard Cochran 		ret = sock_recv_errqueue(sock->sk, m, total_len,
2585eda29772SRichard Cochran 					 SOL_PACKET, TUN_TX_TIMESTAMP);
2586eda29772SRichard Cochran 		goto out;
2587eda29772SRichard Cochran 	}
2588fc72d1d5SJason Wang 	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
258987897931SAlex Gartrell 	if (ret > (ssize_t)total_len) {
259042404c09SDavid S. Miller 		m->msg_flags |= MSG_TRUNC;
259142404c09SDavid S. Miller 		ret = flags & MSG_TRUNC ? ret : total_len;
259242404c09SDavid S. Miller 	}
25933811ae76SGao feng out:
259454f968d6SJason Wang 	tun_put(tun);
259505c2828cSMichael S. Tsirkin 	return ret;
2596c33ee15bSWei Xu 
2597c33ee15bSWei Xu out_put_tun:
2598c33ee15bSWei Xu 	tun_put(tun);
2599fc72d1d5SJason Wang out_free:
2600fc72d1d5SJason Wang 	tun_ptr_free(ptr);
2601c33ee15bSWei Xu 	return ret;
260205c2828cSMichael S. Tsirkin }
260305c2828cSMichael S. Tsirkin 
2604fc72d1d5SJason Wang static int tun_ptr_peek_len(void *ptr)
2605fc72d1d5SJason Wang {
2606fc72d1d5SJason Wang 	if (likely(ptr)) {
26071ffcbc85SJesper Dangaard Brouer 		if (tun_is_xdp_frame(ptr)) {
26081ffcbc85SJesper Dangaard Brouer 			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2609fc72d1d5SJason Wang 
26101ffcbc85SJesper Dangaard Brouer 			return xdpf->len;
2611fc72d1d5SJason Wang 		}
2612fc72d1d5SJason Wang 		return __skb_array_len_with_tag(ptr);
2613fc72d1d5SJason Wang 	} else {
2614fc72d1d5SJason Wang 		return 0;
2615fc72d1d5SJason Wang 	}
2616fc72d1d5SJason Wang }
2617fc72d1d5SJason Wang 
26181576d986SJason Wang static int tun_peek_len(struct socket *sock)
26191576d986SJason Wang {
26201576d986SJason Wang 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
26211576d986SJason Wang 	struct tun_struct *tun;
26221576d986SJason Wang 	int ret = 0;
26231576d986SJason Wang 
26249484dc74Syuan linyu 	tun = tun_get(tfile);
26251576d986SJason Wang 	if (!tun)
26261576d986SJason Wang 		return 0;
26271576d986SJason Wang 
2628fc72d1d5SJason Wang 	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
26291576d986SJason Wang 	tun_put(tun);
26301576d986SJason Wang 
26311576d986SJason Wang 	return ret;
26321576d986SJason Wang }
26331576d986SJason Wang 
263405c2828cSMichael S. Tsirkin /* Ops structure to mimic raw sockets with tun */
263505c2828cSMichael S. Tsirkin static const struct proto_ops tun_socket_ops = {
26361576d986SJason Wang 	.peek_len = tun_peek_len,
263705c2828cSMichael S. Tsirkin 	.sendmsg = tun_sendmsg,
263805c2828cSMichael S. Tsirkin 	.recvmsg = tun_recvmsg,
263905c2828cSMichael S. Tsirkin };
264005c2828cSMichael S. Tsirkin 
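/* Editorial note (not in the upstream source): these proto_ops are not
 * exposed as an ordinary socket; in-kernel users such as vhost-net look up
 * the tun socket (via the exported tun_get_socket() helper) and drive the
 * queue with sendmsg()/recvmsg() directly.
 */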
264133dccbb0SHerbert Xu static struct proto tun_proto = {
264233dccbb0SHerbert Xu 	.name		= "tun",
264333dccbb0SHerbert Xu 	.owner		= THIS_MODULE,
264454f968d6SJason Wang 	.obj_size	= sizeof(struct tun_file),
264533dccbb0SHerbert Xu };
2646f019a7a5SEric W. Biederman 
2647980c9e8cSDavid Woodhouse static int tun_flags(struct tun_struct *tun)
2648980c9e8cSDavid Woodhouse {
2649031f5e03SMichael S. Tsirkin 	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2650980c9e8cSDavid Woodhouse }
2651980c9e8cSDavid Woodhouse 
2652bc6d076dSYueHaibing static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr,
2653980c9e8cSDavid Woodhouse 			      char *buf)
2654980c9e8cSDavid Woodhouse {
2655980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2656980c9e8cSDavid Woodhouse 	return sprintf(buf, "0x%x\n", tun_flags(tun));
2657980c9e8cSDavid Woodhouse }
2658980c9e8cSDavid Woodhouse 
2659bc6d076dSYueHaibing static ssize_t owner_show(struct device *dev, struct device_attribute *attr,
2660980c9e8cSDavid Woodhouse 			  char *buf)
2661980c9e8cSDavid Woodhouse {
2662980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
26630625c883SEric W. Biederman 	return uid_valid(tun->owner)?
26640625c883SEric W. Biederman 		sprintf(buf, "%u\n",
26650625c883SEric W. Biederman 			from_kuid_munged(current_user_ns(), tun->owner)):
26660625c883SEric W. Biederman 		sprintf(buf, "-1\n");
2667980c9e8cSDavid Woodhouse }
2668980c9e8cSDavid Woodhouse 
2669bc6d076dSYueHaibing static ssize_t group_show(struct device *dev, struct device_attribute *attr,
2670980c9e8cSDavid Woodhouse 			  char *buf)
2671980c9e8cSDavid Woodhouse {
2672980c9e8cSDavid Woodhouse 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
26730625c883SEric W. Biederman 	return gid_valid(tun->group) ?
26740625c883SEric W. Biederman 		sprintf(buf, "%u\n",
26750625c883SEric W. Biederman 			from_kgid_munged(current_user_ns(), tun->group)):
26760625c883SEric W. Biederman 		sprintf(buf, "-1\n");
2677980c9e8cSDavid Woodhouse }
2678980c9e8cSDavid Woodhouse 
2679bc6d076dSYueHaibing static DEVICE_ATTR_RO(tun_flags);
2680bc6d076dSYueHaibing static DEVICE_ATTR_RO(owner);
2681bc6d076dSYueHaibing static DEVICE_ATTR_RO(group);
2682980c9e8cSDavid Woodhouse 
2683c4d33e24STakashi Iwai static struct attribute *tun_dev_attrs[] = {
2684c4d33e24STakashi Iwai 	&dev_attr_tun_flags.attr,
2685c4d33e24STakashi Iwai 	&dev_attr_owner.attr,
2686c4d33e24STakashi Iwai 	&dev_attr_group.attr,
2687c4d33e24STakashi Iwai 	NULL
2688c4d33e24STakashi Iwai };
2689c4d33e24STakashi Iwai 
2690c4d33e24STakashi Iwai static const struct attribute_group tun_attr_group = {
2691c4d33e24STakashi Iwai 	.attrs = tun_dev_attrs
2692c4d33e24STakashi Iwai };
2693c4d33e24STakashi Iwai 
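/* Editorial note (not in the upstream source): the attribute group above is
 * attached to the netdev's sysfs directory, so for an interface named e.g.
 * "tun0" the flags, owner and group are readable from
 * /sys/class/net/tun0/tun_flags, .../owner and .../group.
 */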
2694d647a591SPavel Emelyanov static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
26951da177e4SLinus Torvalds {
26961da177e4SLinus Torvalds 	struct tun_struct *tun;
269754f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
26981da177e4SLinus Torvalds 	struct net_device *dev;
26991da177e4SLinus Torvalds 	int err;
27001da177e4SLinus Torvalds 
27017c0c3b1aSJason Wang 	if (tfile->detached)
27027c0c3b1aSJason Wang 		return -EINVAL;
27037c0c3b1aSJason Wang 
270490e33d45SPetar Penkov 	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
270590e33d45SPetar Penkov 		if (!capable(CAP_NET_ADMIN))
270690e33d45SPetar Penkov 			return -EPERM;
270790e33d45SPetar Penkov 
270890e33d45SPetar Penkov 		if (!(ifr->ifr_flags & IFF_NAPI) ||
270990e33d45SPetar Penkov 		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
271090e33d45SPetar Penkov 			return -EINVAL;
271190e33d45SPetar Penkov 	}
271290e33d45SPetar Penkov 
271374a3e5a7SEric W. Biederman 	dev = __dev_get_by_name(net, ifr->ifr_name);
271474a3e5a7SEric W. Biederman 	if (dev) {
2715f85ba780SDavid Woodhouse 		if (ifr->ifr_flags & IFF_TUN_EXCL)
2716f85ba780SDavid Woodhouse 			return -EBUSY;
271774a3e5a7SEric W. Biederman 		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
271874a3e5a7SEric W. Biederman 			tun = netdev_priv(dev);
271974a3e5a7SEric W. Biederman 		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
272074a3e5a7SEric W. Biederman 			tun = netdev_priv(dev);
272174a3e5a7SEric W. Biederman 		else
272274a3e5a7SEric W. Biederman 			return -EINVAL;
272374a3e5a7SEric W. Biederman 
27248e6d91aeSJason Wang 		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
272540630b82SMichael S. Tsirkin 		    !!(tun->flags & IFF_MULTI_QUEUE))
27268e6d91aeSJason Wang 			return -EINVAL;
27278e6d91aeSJason Wang 
2728cde8b15fSJason Wang 		if (tun_not_capable(tun))
27292b980dbdSPaul Moore 			return -EPERM;
27305dbbaf2dSPaul Moore 		err = security_tun_dev_open(tun->security);
27312b980dbdSPaul Moore 		if (err < 0)
27322b980dbdSPaul Moore 			return err;
27332b980dbdSPaul Moore 
273494317099SPetar Penkov 		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2735af3fb24eSEric Dumazet 				 ifr->ifr_flags & IFF_NAPI,
273677f22f92SYang Yingliang 				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
2737a7385ba2SEric W. Biederman 		if (err < 0)
2738a7385ba2SEric W. Biederman 			return err;
27394008e97fSJason Wang 
274040630b82SMichael S. Tsirkin 		if (tun->flags & IFF_MULTI_QUEUE &&
2741e8dbad66SJason Wang 		    (tun->numqueues + tun->numdisabled > 1)) {
2742e8dbad66SJason Wang 			/* One or more queues have already been attached; no need
2743e8dbad66SJason Wang 			 * to initialize the device again.
2744e8dbad66SJason Wang 			 */
274583c1f36fSSabrina Dubroca 			netdev_state_change(dev);
2746e8dbad66SJason Wang 			return 0;
2747e8dbad66SJason Wang 		}
27489fffc5c6SSabrina Dubroca 
27499fffc5c6SSabrina Dubroca 		tun->flags = (tun->flags & ~TUN_FEATURES) |
27509fffc5c6SSabrina Dubroca 			      (ifr->ifr_flags & TUN_FEATURES);
275183c1f36fSSabrina Dubroca 
275283c1f36fSSabrina Dubroca 		netdev_state_change(dev);
275383c1f36fSSabrina Dubroca 	} else {
27541da177e4SLinus Torvalds 		char *name;
27551da177e4SLinus Torvalds 		unsigned long flags = 0;
2756edfb6a14SJason Wang 		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2757edfb6a14SJason Wang 			     MAX_TAP_QUEUES : 1;
27581da177e4SLinus Torvalds 
2759c260b772SEric W. Biederman 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2760ca6bb5d7SDavid Woodhouse 			return -EPERM;
27612b980dbdSPaul Moore 		err = security_tun_dev_create();
27622b980dbdSPaul Moore 		if (err < 0)
27632b980dbdSPaul Moore 			return err;
2764ca6bb5d7SDavid Woodhouse 
27651da177e4SLinus Torvalds 		/* Set dev type */
27661da177e4SLinus Torvalds 		if (ifr->ifr_flags & IFF_TUN) {
27671da177e4SLinus Torvalds 			/* TUN device */
276840630b82SMichael S. Tsirkin 			flags |= IFF_TUN;
27691da177e4SLinus Torvalds 			name = "tun%d";
27701da177e4SLinus Torvalds 		} else if (ifr->ifr_flags & IFF_TAP) {
27711da177e4SLinus Torvalds 			/* TAP device */
277240630b82SMichael S. Tsirkin 			flags |= IFF_TAP;
27731da177e4SLinus Torvalds 			name = "tap%d";
27741da177e4SLinus Torvalds 		} else
277536989b90SKusanagi Kouichi 			return -EINVAL;
27761da177e4SLinus Torvalds 
27771da177e4SLinus Torvalds 		if (*ifr->ifr_name)
27781da177e4SLinus Torvalds 			name = ifr->ifr_name;
27791da177e4SLinus Torvalds 
2780c8d68e6bSJason Wang 		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2781c835a677STom Gundersen 				       NET_NAME_UNKNOWN, tun_setup, queues,
2782c835a677STom Gundersen 				       queues);
2783edfb6a14SJason Wang 
27841da177e4SLinus Torvalds 		if (!dev)
27851da177e4SLinus Torvalds 			return -ENOMEM;
27861da177e4SLinus Torvalds 
2787fc54c658SPavel Emelyanov 		dev_net_set(dev, net);
2788f019a7a5SEric W. Biederman 		dev->rtnl_link_ops = &tun_link_ops;
2789fb7589a1SPavel Emelyanov 		dev->ifindex = tfile->ifindex;
2790c4d33e24STakashi Iwai 		dev->sysfs_groups[0] = &tun_attr_group;
2791758e43b7SStephen Hemminger 
27921da177e4SLinus Torvalds 		tun = netdev_priv(dev);
27931da177e4SLinus Torvalds 		tun->dev = dev;
27941da177e4SLinus Torvalds 		tun->flags = flags;
2795f271b2ccSMax Krasnyansky 		tun->txflt.count = 0;
2796d9d52b51SMichael S. Tsirkin 		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
27971da177e4SLinus Torvalds 
2798eaea34b2SPaolo Abeni 		tun->align = NET_SKB_PAD;
279954f968d6SJason Wang 		tun->filter_attached = false;
280054f968d6SJason Wang 		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
28015503fcecSJason Wang 		tun->rx_batched = 0;
280296f84061SJason Wang 		RCU_INIT_POINTER(tun->steering_prog, NULL);
280333dccbb0SHerbert Xu 
2804158b515fSGeorge Kennedy 		tun->ifr = ifr;
2805158b515fSGeorge Kennedy 		tun->file = file;
2806608b9977SPaolo Abeni 
2807158b515fSGeorge Kennedy 		tun_net_initialize(dev);
2808eb0fb363SJason Wang 
28091da177e4SLinus Torvalds 		err = register_netdevice(tun->dev);
2810158b515fSGeorge Kennedy 		if (err < 0) {
2811158b515fSGeorge Kennedy 			free_netdev(dev);
2812158b515fSGeorge Kennedy 			return err;
2813158b515fSGeorge Kennedy 		}
2814c2e315b8SMenglong Dong 		/* free_netdev() won't check the refcnt; to avoid a race
281577f22f92SYang Yingliang 		 * with dev_put() we need to publish tun after registration.
281677f22f92SYang Yingliang 		 */
281777f22f92SYang Yingliang 		rcu_assign_pointer(tfile->tun, tun);
2818af668b3cSMichael S. Tsirkin 	}
2819980c9e8cSDavid Woodhouse 
2820eb0fb363SJason Wang 	netif_carrier_on(tun->dev);
28211da177e4SLinus Torvalds 
2822e35259a9SMax Krasnyansky 	/* Make sure persistent devices do not get stuck in
2823e35259a9SMax Krasnyansky 	 * xoff state.
2824e35259a9SMax Krasnyansky 	 */
2825e35259a9SMax Krasnyansky 	if (netif_running(tun->dev))
2826c8d68e6bSJason Wang 		netif_tx_wake_all_queues(tun->dev);
2827e35259a9SMax Krasnyansky 
28281da177e4SLinus Torvalds 	strcpy(ifr->ifr_name, tun->dev->name);
28291da177e4SLinus Torvalds 	return 0;
28301da177e4SLinus Torvalds }
28311da177e4SLinus Torvalds 
283212132768SKirill Tkhai static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
2833e3b99556SMark McLoughlin {
2834e3b99556SMark McLoughlin 	strcpy(ifr->ifr_name, tun->dev->name);
2835e3b99556SMark McLoughlin 
2836980c9e8cSDavid Woodhouse 	ifr->ifr_flags = tun_flags(tun);
2837e3b99556SMark McLoughlin 
2838e3b99556SMark McLoughlin }
2839e3b99556SMark McLoughlin 
28405228ddc9SRusty Russell /* This is like a cut-down ethtool ops, except done via tun fd so no
28415228ddc9SRusty Russell  * privs required. */
284288255375SMichał Mirosław static int set_offload(struct tun_struct *tun, unsigned long arg)
28435228ddc9SRusty Russell {
2844c8f44affSMichał Mirosław 	netdev_features_t features = 0;
28455228ddc9SRusty Russell 
28465228ddc9SRusty Russell 	if (arg & TUN_F_CSUM) {
284788255375SMichał Mirosław 		features |= NETIF_F_HW_CSUM;
28485228ddc9SRusty Russell 		arg &= ~TUN_F_CSUM;
28495228ddc9SRusty Russell 
28505228ddc9SRusty Russell 		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
28515228ddc9SRusty Russell 			if (arg & TUN_F_TSO_ECN) {
28525228ddc9SRusty Russell 				features |= NETIF_F_TSO_ECN;
28535228ddc9SRusty Russell 				arg &= ~TUN_F_TSO_ECN;
28545228ddc9SRusty Russell 			}
28555228ddc9SRusty Russell 			if (arg & TUN_F_TSO4)
28565228ddc9SRusty Russell 				features |= NETIF_F_TSO;
28575228ddc9SRusty Russell 			if (arg & TUN_F_TSO6)
28585228ddc9SRusty Russell 				features |= NETIF_F_TSO6;
28595228ddc9SRusty Russell 			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
28605228ddc9SRusty Russell 		}
28610c19f846SWillem de Bruijn 
28620c19f846SWillem de Bruijn 		arg &= ~TUN_F_UFO;
28635228ddc9SRusty Russell 	}
28645228ddc9SRusty Russell 
28655228ddc9SRusty Russell 	/* This gives the user a way to test for new features in the future
28665228ddc9SRusty Russell 	 * by trying to set them. */
28675228ddc9SRusty Russell 	if (arg)
28685228ddc9SRusty Russell 		return -EINVAL;
28695228ddc9SRusty Russell 
287088255375SMichał Mirosław 	tun->set_features = features;
287109050957SYaroslav Isakov 	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
287209050957SYaroslav Isakov 	tun->dev->wanted_features |= features;
287388255375SMichał Mirosław 	netdev_update_features(tun->dev);
28745228ddc9SRusty Russell 
28755228ddc9SRusty Russell 	return 0;
28765228ddc9SRusty Russell }
28775228ddc9SRusty Russell 
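/* Illustrative sketch, not part of the upstream source: set_offload() backs
 * the TUNSETOFFLOAD ioctl, whose argument is passed by value rather than by
 * pointer, e.g.:
 *
 *	ioctl(fd, TUNSETOFFLOAD, TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6);
 *
 * Any unknown bit makes the call fail with -EINVAL, which is how userspace
 * can probe for offload flags this kernel does not support.
 */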
2878c8d68e6bSJason Wang static void tun_detach_filter(struct tun_struct *tun, int n)
2879c8d68e6bSJason Wang {
2880c8d68e6bSJason Wang 	int i;
2881c8d68e6bSJason Wang 	struct tun_file *tfile;
2882c8d68e6bSJason Wang 
2883c8d68e6bSJason Wang 	for (i = 0; i < n; i++) {
2884b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
28858ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
28868ced425eSHannes Frederic Sowa 		sk_detach_filter(tfile->socket.sk);
28878ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
2888c8d68e6bSJason Wang 	}
2889c8d68e6bSJason Wang 
2890c8d68e6bSJason Wang 	tun->filter_attached = false;
2891c8d68e6bSJason Wang }
2892c8d68e6bSJason Wang 
2893c8d68e6bSJason Wang static int tun_attach_filter(struct tun_struct *tun)
2894c8d68e6bSJason Wang {
2895c8d68e6bSJason Wang 	int i, ret = 0;
2896c8d68e6bSJason Wang 	struct tun_file *tfile;
2897c8d68e6bSJason Wang 
2898c8d68e6bSJason Wang 	for (i = 0; i < tun->numqueues; i++) {
2899b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
29008ced425eSHannes Frederic Sowa 		lock_sock(tfile->socket.sk);
29018ced425eSHannes Frederic Sowa 		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
29028ced425eSHannes Frederic Sowa 		release_sock(tfile->socket.sk);
2903c8d68e6bSJason Wang 		if (ret) {
2904c8d68e6bSJason Wang 			tun_detach_filter(tun, i);
2905c8d68e6bSJason Wang 			return ret;
2906c8d68e6bSJason Wang 		}
2907c8d68e6bSJason Wang 	}
2908c8d68e6bSJason Wang 
2909c8d68e6bSJason Wang 	tun->filter_attached = true;
2910c8d68e6bSJason Wang 	return ret;
2911c8d68e6bSJason Wang }
2912c8d68e6bSJason Wang 
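/* Illustrative sketch, not part of the upstream source: the filter helpers
 * above back TUNATTACHFILTER/TUNDETACHFILTER (TAP only).  Assuming "insns"
 * is a valid classic-BPF program of "len" instructions:
 *
 *	struct sock_fprog fprog = { .len = len, .filter = insns };
 *
 *	ioctl(fd, TUNATTACHFILTER, &fprog);
 */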
2913c8d68e6bSJason Wang static void tun_set_sndbuf(struct tun_struct *tun)
2914c8d68e6bSJason Wang {
2915c8d68e6bSJason Wang 	struct tun_file *tfile;
2916c8d68e6bSJason Wang 	int i;
2917c8d68e6bSJason Wang 
2918c8d68e6bSJason Wang 	for (i = 0; i < tun->numqueues; i++) {
2919b8deabd3SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
2920c8d68e6bSJason Wang 		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2921c8d68e6bSJason Wang 	}
2922c8d68e6bSJason Wang }
2923c8d68e6bSJason Wang 
2924cde8b15fSJason Wang static int tun_set_queue(struct file *file, struct ifreq *ifr)
2925cde8b15fSJason Wang {
2926cde8b15fSJason Wang 	struct tun_file *tfile = file->private_data;
2927cde8b15fSJason Wang 	struct tun_struct *tun;
2928cde8b15fSJason Wang 	int ret = 0;
2929cde8b15fSJason Wang 
2930cde8b15fSJason Wang 	rtnl_lock();
2931cde8b15fSJason Wang 
2932cde8b15fSJason Wang 	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
29334008e97fSJason Wang 		tun = tfile->detached;
29345dbbaf2dSPaul Moore 		if (!tun) {
2935cde8b15fSJason Wang 			ret = -EINVAL;
29365dbbaf2dSPaul Moore 			goto unlock;
29375dbbaf2dSPaul Moore 		}
29385dbbaf2dSPaul Moore 		ret = security_tun_dev_attach_queue(tun->security);
29395dbbaf2dSPaul Moore 		if (ret < 0)
29405dbbaf2dSPaul Moore 			goto unlock;
2941af3fb24eSEric Dumazet 		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
294277f22f92SYang Yingliang 				 tun->flags & IFF_NAPI_FRAGS, true);
29434008e97fSJason Wang 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2944b8deabd3SJason Wang 		tun = rtnl_dereference(tfile->tun);
294540630b82SMichael S. Tsirkin 		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
29464008e97fSJason Wang 			ret = -EINVAL;
2947cde8b15fSJason Wang 		else
29484008e97fSJason Wang 			__tun_detach(tfile, false);
29494008e97fSJason Wang 	} else
2950cde8b15fSJason Wang 		ret = -EINVAL;
2951cde8b15fSJason Wang 
295283c1f36fSSabrina Dubroca 	if (ret >= 0)
295383c1f36fSSabrina Dubroca 		netdev_state_change(tun->dev);
295483c1f36fSSabrina Dubroca 
29555dbbaf2dSPaul Moore unlock:
2956cde8b15fSJason Wang 	rtnl_unlock();
2957cde8b15fSJason Wang 	return ret;
2958cde8b15fSJason Wang }
2959cde8b15fSJason Wang 
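/* Illustrative sketch, not part of the upstream source: tun_set_queue()
 * implements TUNSETQUEUE.  On a multi-queue device a queue file descriptor
 * can be taken out of service and later re-attached roughly like this:
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);
 */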
29608f3f330dSJason Wang static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
2961cd5681d7SJason Wang 			void __user *data)
296296f84061SJason Wang {
296396f84061SJason Wang 	struct bpf_prog *prog;
296496f84061SJason Wang 	int fd;
296596f84061SJason Wang 
296696f84061SJason Wang 	if (copy_from_user(&fd, data, sizeof(fd)))
296796f84061SJason Wang 		return -EFAULT;
296896f84061SJason Wang 
296996f84061SJason Wang 	if (fd == -1) {
297096f84061SJason Wang 		prog = NULL;
297196f84061SJason Wang 	} else {
297296f84061SJason Wang 		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
297396f84061SJason Wang 		if (IS_ERR(prog))
297496f84061SJason Wang 			return PTR_ERR(prog);
297596f84061SJason Wang 	}
297696f84061SJason Wang 
2977cd5681d7SJason Wang 	return __tun_set_ebpf(tun, prog_p, prog);
297896f84061SJason Wang }
297996f84061SJason Wang 
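/* Illustrative sketch, not part of the upstream source: tun_set_ebpf()
 * serves TUNSETSTEERINGEBPF and TUNSETFILTEREBPF.  Userspace passes a
 * pointer to a BPF_PROG_TYPE_SOCKET_FILTER program fd, or -1 to detach the
 * current program:
 *
 *	int prog_fd = -1;	(or an fd returned by bpf(BPF_PROG_LOAD, ...))
 *	ioctl(fd, TUNSETSTEERINGEBPF, &prog_fd);
 */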
2980cca8ea3bSPhillip Potter /* Return correct value for tun->dev->addr_len based on tun->dev->type. */
2981cca8ea3bSPhillip Potter static unsigned char tun_get_addr_len(unsigned short type)
2982cca8ea3bSPhillip Potter {
2983cca8ea3bSPhillip Potter 	switch (type) {
2984cca8ea3bSPhillip Potter 	case ARPHRD_IP6GRE:
2985cca8ea3bSPhillip Potter 	case ARPHRD_TUNNEL6:
2986cca8ea3bSPhillip Potter 		return sizeof(struct in6_addr);
2987cca8ea3bSPhillip Potter 	case ARPHRD_IPGRE:
2988cca8ea3bSPhillip Potter 	case ARPHRD_TUNNEL:
2989cca8ea3bSPhillip Potter 	case ARPHRD_SIT:
2990cca8ea3bSPhillip Potter 		return 4;
2991cca8ea3bSPhillip Potter 	case ARPHRD_ETHER:
2992cca8ea3bSPhillip Potter 		return ETH_ALEN;
2993cca8ea3bSPhillip Potter 	case ARPHRD_IEEE802154:
2994cca8ea3bSPhillip Potter 	case ARPHRD_IEEE802154_MONITOR:
2995cca8ea3bSPhillip Potter 		return IEEE802154_EXTENDED_ADDR_LEN;
2996cca8ea3bSPhillip Potter 	case ARPHRD_PHONET_PIPE:
2997cca8ea3bSPhillip Potter 	case ARPHRD_PPP:
2998cca8ea3bSPhillip Potter 	case ARPHRD_NONE:
2999cca8ea3bSPhillip Potter 		return 0;
3000cca8ea3bSPhillip Potter 	case ARPHRD_6LOWPAN:
3001cca8ea3bSPhillip Potter 		return EUI64_ADDR_LEN;
3002cca8ea3bSPhillip Potter 	case ARPHRD_FDDI:
3003cca8ea3bSPhillip Potter 		return FDDI_K_ALEN;
3004cca8ea3bSPhillip Potter 	case ARPHRD_HIPPI:
3005cca8ea3bSPhillip Potter 		return HIPPI_ALEN;
3006cca8ea3bSPhillip Potter 	case ARPHRD_IEEE802:
3007cca8ea3bSPhillip Potter 		return FC_ALEN;
3008cca8ea3bSPhillip Potter 	case ARPHRD_ROSE:
3009cca8ea3bSPhillip Potter 		return ROSE_ADDR_LEN;
3010cca8ea3bSPhillip Potter 	case ARPHRD_NETROM:
3011cca8ea3bSPhillip Potter 		return AX25_ADDR_LEN;
3012cca8ea3bSPhillip Potter 	case ARPHRD_LOCALTLK:
3013cca8ea3bSPhillip Potter 		return LTALK_ALEN;
3014cca8ea3bSPhillip Potter 	default:
3015cca8ea3bSPhillip Potter 		return 0;
3016cca8ea3bSPhillip Potter 	}
3017cca8ea3bSPhillip Potter }
3018cca8ea3bSPhillip Potter 
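/* Editorial note (not in the upstream source): this helper is used by the
 * TUNSETLINK handler below so that dev->addr_len tracks the new ARPHRD_*
 * type.  TUNSETLINK only succeeds while the interface is down, e.g.
 * ioctl(fd, TUNSETLINK, ARPHRD_PPP) on an interface that has not yet been
 * brought up.
 */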
301950857e2aSArnd Bergmann static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
302050857e2aSArnd Bergmann 			    unsigned long arg, int ifreq_len)
30211da177e4SLinus Torvalds {
302236b50babSEric W. Biederman 	struct tun_file *tfile = file->private_data;
3023f663706aSKirill Tkhai 	struct net *net = sock_net(&tfile->sk);
3024631ab46bSEric W. Biederman 	struct tun_struct *tun;
30251da177e4SLinus Torvalds 	void __user* argp = (void __user*)arg;
302626d31925SNicolas Dichtel 	unsigned int ifindex, carrier;
30271da177e4SLinus Torvalds 	struct ifreq ifr;
30280625c883SEric W. Biederman 	kuid_t owner;
30290625c883SEric W. Biederman 	kgid_t group;
303033dccbb0SHerbert Xu 	int sndbuf;
3031d9d52b51SMichael S. Tsirkin 	int vnet_hdr_sz;
30321cf8e410SMichael S. Tsirkin 	int le;
3033f271b2ccSMax Krasnyansky 	int ret;
303483c1f36fSSabrina Dubroca 	bool do_notify = false;
30351da177e4SLinus Torvalds 
3036f2780d6dSKirill Tkhai 	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
3037f2780d6dSKirill Tkhai 	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
303850857e2aSArnd Bergmann 		if (copy_from_user(&ifr, argp, ifreq_len))
30391da177e4SLinus Torvalds 			return -EFAULT;
30408bbb1813SDavid S. Miller 	} else {
3041a117dacdSMathias Krause 		memset(&ifr, 0, sizeof(ifr));
30428bbb1813SDavid S. Miller 	}
3043631ab46bSEric W. Biederman 	if (cmd == TUNGETFEATURES) {
3044631ab46bSEric W. Biederman 		/* Currently this just means: "what IFF flags are valid?".
3045631ab46bSEric W. Biederman 		 * This is needed because we never checked for invalid flags on
3046031f5e03SMichael S. Tsirkin 		 * TUNSETIFF.
3047031f5e03SMichael S. Tsirkin 		 */
3048031f5e03SMichael S. Tsirkin 		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
3049631ab46bSEric W. Biederman 				(unsigned int __user*)argp);
3050f663706aSKirill Tkhai 	} else if (cmd == TUNSETQUEUE) {
3051cde8b15fSJason Wang 		return tun_set_queue(file, &ifr);
3052f663706aSKirill Tkhai 	} else if (cmd == SIOCGSKNS) {
3053f663706aSKirill Tkhai 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3054f663706aSKirill Tkhai 			return -EPERM;
3055f663706aSKirill Tkhai 		return open_related_ns(&net->ns, get_net_ns);
3056f663706aSKirill Tkhai 	}
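	/* Editorial note (not in the upstream source): a userspace probe for
	 * the flags accepted by TUNSETIFF therefore looks roughly like:
	 *
	 *	unsigned int features;
	 *
	 *	ioctl(fd, TUNGETFEATURES, &features);
	 *	if (features & IFF_MULTI_QUEUE)
	 *		(multi-queue tun/tap is available)
	 */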
3057631ab46bSEric W. Biederman 
3058876bfd4dSHerbert Xu 	rtnl_lock();
3059876bfd4dSHerbert Xu 
30609484dc74Syuan linyu 	tun = tun_get(tfile);
30610f16bc13SGao Feng 	if (cmd == TUNSETIFF) {
30620f16bc13SGao Feng 		ret = -EEXIST;
30630f16bc13SGao Feng 		if (tun)
30640f16bc13SGao Feng 			goto unlock;
30650f16bc13SGao Feng 
30661da177e4SLinus Torvalds 		ifr.ifr_name[IFNAMSIZ-1] = '\0';
30671da177e4SLinus Torvalds 
3068f2780d6dSKirill Tkhai 		ret = tun_set_iff(net, file, &ifr);
30691da177e4SLinus Torvalds 
3070876bfd4dSHerbert Xu 		if (ret)
3071876bfd4dSHerbert Xu 			goto unlock;
30721da177e4SLinus Torvalds 
307350857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
3074876bfd4dSHerbert Xu 			ret = -EFAULT;
3075876bfd4dSHerbert Xu 		goto unlock;
30761da177e4SLinus Torvalds 	}
3077fb7589a1SPavel Emelyanov 	if (cmd == TUNSETIFINDEX) {
3078fb7589a1SPavel Emelyanov 		ret = -EPERM;
3079fb7589a1SPavel Emelyanov 		if (tun)
3080fb7589a1SPavel Emelyanov 			goto unlock;
3081fb7589a1SPavel Emelyanov 
3082fb7589a1SPavel Emelyanov 		ret = -EFAULT;
3083fb7589a1SPavel Emelyanov 		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
3084fb7589a1SPavel Emelyanov 			goto unlock;
3085fb7589a1SPavel Emelyanov 
3086fb7589a1SPavel Emelyanov 		ret = 0;
3087fb7589a1SPavel Emelyanov 		tfile->ifindex = ifindex;
3088fb7589a1SPavel Emelyanov 		goto unlock;
3089fb7589a1SPavel Emelyanov 	}
30901da177e4SLinus Torvalds 
3091876bfd4dSHerbert Xu 	ret = -EBADFD;
30921da177e4SLinus Torvalds 	if (!tun)
3093876bfd4dSHerbert Xu 		goto unlock;
30941da177e4SLinus Torvalds 
30953424170fSMichal Kubecek 	netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);
30961da177e4SLinus Torvalds 
30970c3e0e3bSKirill Tkhai 	net = dev_net(tun->dev);
3098631ab46bSEric W. Biederman 	ret = 0;
30991da177e4SLinus Torvalds 	switch (cmd) {
3100e3b99556SMark McLoughlin 	case TUNGETIFF:
310112132768SKirill Tkhai 		tun_get_iff(tun, &ifr);
3102e3b99556SMark McLoughlin 
31033d407a80SPavel Emelyanov 		if (tfile->detached)
31043d407a80SPavel Emelyanov 			ifr.ifr_flags |= IFF_DETACH_QUEUE;
3105849c9b6fSPavel Emelyanov 		if (!tfile->socket.sk->sk_filter)
3106849c9b6fSPavel Emelyanov 			ifr.ifr_flags |= IFF_NOFILTER;
31073d407a80SPavel Emelyanov 
310850857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
3109631ab46bSEric W. Biederman 			ret = -EFAULT;
3110e3b99556SMark McLoughlin 		break;
3111e3b99556SMark McLoughlin 
31121da177e4SLinus Torvalds 	case TUNSETNOCSUM:
31131da177e4SLinus Torvalds 		/* Disable/Enable checksum */
31141da177e4SLinus Torvalds 
311588255375SMichał Mirosław 		/* [unimplemented] */
31163424170fSMichal Kubecek 		netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
31176b8a66eeSJoe Perches 			   arg ? "disabled" : "enabled");
31181da177e4SLinus Torvalds 		break;
31191da177e4SLinus Torvalds 
31201da177e4SLinus Torvalds 	case TUNSETPERSIST:
312154f968d6SJason Wang 		/* Disable/Enable persist mode. Keep an extra reference to the
312254f968d6SJason Wang 		 * module to prevent the module from being unloaded.
312354f968d6SJason Wang 		 */
312440630b82SMichael S. Tsirkin 		if (arg && !(tun->flags & IFF_PERSIST)) {
312540630b82SMichael S. Tsirkin 			tun->flags |= IFF_PERSIST;
312654f968d6SJason Wang 			__module_get(THIS_MODULE);
312783c1f36fSSabrina Dubroca 			do_notify = true;
3128dd38bd85SJason Wang 		}
312940630b82SMichael S. Tsirkin 		if (!arg && (tun->flags & IFF_PERSIST)) {
313040630b82SMichael S. Tsirkin 			tun->flags &= ~IFF_PERSIST;
313154f968d6SJason Wang 			module_put(THIS_MODULE);
313283c1f36fSSabrina Dubroca 			do_notify = true;
313354f968d6SJason Wang 		}
31341da177e4SLinus Torvalds 
31353424170fSMichal Kubecek 		netif_info(tun, drv, tun->dev, "persist %s\n",
31366b8a66eeSJoe Perches 			   arg ? "enabled" : "disabled");
31371da177e4SLinus Torvalds 		break;
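		/* Editorial note (not in the upstream source): after
		 * ioctl(fd, TUNSETPERSIST, 1) the interface survives
		 * close(fd); "ip tuntap add/del" manages long-lived devices
		 * this way.
		 */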
31381da177e4SLinus Torvalds 
31391da177e4SLinus Torvalds 	case TUNSETOWNER:
31401da177e4SLinus Torvalds 		/* Set owner of the device */
31410625c883SEric W. Biederman 		owner = make_kuid(current_user_ns(), arg);
31420625c883SEric W. Biederman 		if (!uid_valid(owner)) {
31430625c883SEric W. Biederman 			ret = -EINVAL;
31440625c883SEric W. Biederman 			break;
31450625c883SEric W. Biederman 		}
31460625c883SEric W. Biederman 		tun->owner = owner;
314783c1f36fSSabrina Dubroca 		do_notify = true;
31483424170fSMichal Kubecek 		netif_info(tun, drv, tun->dev, "owner set to %u\n",
31490625c883SEric W. Biederman 			   from_kuid(&init_user_ns, tun->owner));
31501da177e4SLinus Torvalds 		break;
31511da177e4SLinus Torvalds 
31528c644623SGuido Guenther 	case TUNSETGROUP:
31538c644623SGuido Guenther 		/* Set group of the device */
31540625c883SEric W. Biederman 		group = make_kgid(current_user_ns(), arg);
31550625c883SEric W. Biederman 		if (!gid_valid(group)) {
31560625c883SEric W. Biederman 			ret = -EINVAL;
31570625c883SEric W. Biederman 			break;
31580625c883SEric W. Biederman 		}
31590625c883SEric W. Biederman 		tun->group = group;
316083c1f36fSSabrina Dubroca 		do_notify = true;
31613424170fSMichal Kubecek 		netif_info(tun, drv, tun->dev, "group set to %u\n",
31620625c883SEric W. Biederman 			   from_kgid(&init_user_ns, tun->group));
31638c644623SGuido Guenther 		break;
31648c644623SGuido Guenther 
3165ff4cc3acSMike Kershaw 	case TUNSETLINK:
3166ff4cc3acSMike Kershaw 		/* Only allow setting the type when the interface is down */
3167ff4cc3acSMike Kershaw 		if (tun->dev->flags & IFF_UP) {
31683424170fSMichal Kubecek 			netif_info(tun, drv, tun->dev,
31696b8a66eeSJoe Perches 				   "Linktype set failed because interface is up\n");
317048abfe05SDavid S. Miller 			ret = -EBUSY;
3171ff4cc3acSMike Kershaw 		} else {
31728e1e33ffSMartin Schiller 			ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
31738e1e33ffSMartin Schiller 						       tun->dev);
31748e1e33ffSMartin Schiller 			ret = notifier_to_errno(ret);
31758e1e33ffSMartin Schiller 			if (ret) {
31768e1e33ffSMartin Schiller 				netif_info(tun, drv, tun->dev,
31778e1e33ffSMartin Schiller 					   "Refused to change device type\n");
31788e1e33ffSMartin Schiller 				break;
31798e1e33ffSMartin Schiller 			}
3180ff4cc3acSMike Kershaw 			tun->dev->type = (int) arg;
3181cca8ea3bSPhillip Potter 			tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
31823424170fSMichal Kubecek 			netif_info(tun, drv, tun->dev, "linktype set to %d\n",
31836b8a66eeSJoe Perches 				   tun->dev->type);
31848e1e33ffSMartin Schiller 			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
31858e1e33ffSMartin Schiller 						 tun->dev);
3186ff4cc3acSMike Kershaw 		}
3187631ab46bSEric W. Biederman 		break;
3188ff4cc3acSMike Kershaw 
31891da177e4SLinus Torvalds 	case TUNSETDEBUG:
31903424170fSMichal Kubecek 		tun->msg_enable = (u32)arg;
31911da177e4SLinus Torvalds 		break;
31923424170fSMichal Kubecek 
31935228ddc9SRusty Russell 	case TUNSETOFFLOAD:
319488255375SMichał Mirosław 		ret = set_offload(tun, arg);
3195631ab46bSEric W. Biederman 		break;
31965228ddc9SRusty Russell 
3197f271b2ccSMax Krasnyansky 	case TUNSETTXFILTER:
3198f271b2ccSMax Krasnyansky 		/* Can be set only for TAPs */
3199631ab46bSEric W. Biederman 		ret = -EINVAL;
320040630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3201631ab46bSEric W. Biederman 			break;
3202c0e5a8c2SHarvey Harrison 		ret = update_filter(&tun->txflt, (void __user *)arg);
3203631ab46bSEric W. Biederman 		break;
32041da177e4SLinus Torvalds 
32051da177e4SLinus Torvalds 	case SIOCGIFHWADDR:
3206b595076aSUwe Kleine-König 		/* Get hw address */
32073b23a32aSCong Wang 		dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name);
320850857e2aSArnd Bergmann 		if (copy_to_user(argp, &ifr, ifreq_len))
3209631ab46bSEric W. Biederman 			ret = -EFAULT;
3210631ab46bSEric W. Biederman 		break;
32111da177e4SLinus Torvalds 
32121da177e4SLinus Torvalds 	case SIOCSIFHWADDR:
3213f271b2ccSMax Krasnyansky 		/* Set hw address */
32143b23a32aSCong Wang 		ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
3215631ab46bSEric W. Biederman 		break;
321633dccbb0SHerbert Xu 
321733dccbb0SHerbert Xu 	case TUNGETSNDBUF:
321854f968d6SJason Wang 		sndbuf = tfile->socket.sk->sk_sndbuf;
321933dccbb0SHerbert Xu 		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
322033dccbb0SHerbert Xu 			ret = -EFAULT;
322133dccbb0SHerbert Xu 		break;
322233dccbb0SHerbert Xu 
322333dccbb0SHerbert Xu 	case TUNSETSNDBUF:
322433dccbb0SHerbert Xu 		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
322533dccbb0SHerbert Xu 			ret = -EFAULT;
322633dccbb0SHerbert Xu 			break;
322733dccbb0SHerbert Xu 		}
322893161922SCraig Gallek 		if (sndbuf <= 0) {
322993161922SCraig Gallek 			ret = -EINVAL;
323093161922SCraig Gallek 			break;
323193161922SCraig Gallek 		}
323233dccbb0SHerbert Xu 
3233c8d68e6bSJason Wang 		tun->sndbuf = sndbuf;
3234c8d68e6bSJason Wang 		tun_set_sndbuf(tun);
323533dccbb0SHerbert Xu 		break;
323633dccbb0SHerbert Xu 
3237d9d52b51SMichael S. Tsirkin 	case TUNGETVNETHDRSZ:
3238d9d52b51SMichael S. Tsirkin 		vnet_hdr_sz = tun->vnet_hdr_sz;
3239d9d52b51SMichael S. Tsirkin 		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
3240d9d52b51SMichael S. Tsirkin 			ret = -EFAULT;
3241d9d52b51SMichael S. Tsirkin 		break;
3242d9d52b51SMichael S. Tsirkin 
3243d9d52b51SMichael S. Tsirkin 	case TUNSETVNETHDRSZ:
3244d9d52b51SMichael S. Tsirkin 		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
3245d9d52b51SMichael S. Tsirkin 			ret = -EFAULT;
3246d9d52b51SMichael S. Tsirkin 			break;
3247d9d52b51SMichael S. Tsirkin 		}
3248d9d52b51SMichael S. Tsirkin 		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
3249d9d52b51SMichael S. Tsirkin 			ret = -EINVAL;
3250d9d52b51SMichael S. Tsirkin 			break;
3251d9d52b51SMichael S. Tsirkin 		}
3252d9d52b51SMichael S. Tsirkin 
3253d9d52b51SMichael S. Tsirkin 		tun->vnet_hdr_sz = vnet_hdr_sz;
3254d9d52b51SMichael S. Tsirkin 		break;
3255d9d52b51SMichael S. Tsirkin 
32561cf8e410SMichael S. Tsirkin 	case TUNGETVNETLE:
32571cf8e410SMichael S. Tsirkin 		le = !!(tun->flags & TUN_VNET_LE);
32581cf8e410SMichael S. Tsirkin 		if (put_user(le, (int __user *)argp))
32591cf8e410SMichael S. Tsirkin 			ret = -EFAULT;
32601cf8e410SMichael S. Tsirkin 		break;
32611cf8e410SMichael S. Tsirkin 
32621cf8e410SMichael S. Tsirkin 	case TUNSETVNETLE:
32631cf8e410SMichael S. Tsirkin 		if (get_user(le, (int __user *)argp)) {
32641cf8e410SMichael S. Tsirkin 			ret = -EFAULT;
32651cf8e410SMichael S. Tsirkin 			break;
32661cf8e410SMichael S. Tsirkin 		}
32671cf8e410SMichael S. Tsirkin 		if (le)
32681cf8e410SMichael S. Tsirkin 			tun->flags |= TUN_VNET_LE;
32691cf8e410SMichael S. Tsirkin 		else
32701cf8e410SMichael S. Tsirkin 			tun->flags &= ~TUN_VNET_LE;
32711cf8e410SMichael S. Tsirkin 		break;
32721cf8e410SMichael S. Tsirkin 
32738b8e658bSGreg Kurz 	case TUNGETVNETBE:
32748b8e658bSGreg Kurz 		ret = tun_get_vnet_be(tun, argp);
32758b8e658bSGreg Kurz 		break;
32768b8e658bSGreg Kurz 
32778b8e658bSGreg Kurz 	case TUNSETVNETBE:
32788b8e658bSGreg Kurz 		ret = tun_set_vnet_be(tun, argp);
32798b8e658bSGreg Kurz 		break;
32808b8e658bSGreg Kurz 
328199405162SMichael S. Tsirkin 	case TUNATTACHFILTER:
328299405162SMichael S. Tsirkin 		/* Can be set only for TAPs */
328399405162SMichael S. Tsirkin 		ret = -EINVAL;
328440630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
328599405162SMichael S. Tsirkin 			break;
328699405162SMichael S. Tsirkin 		ret = -EFAULT;
328754f968d6SJason Wang 		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
328899405162SMichael S. Tsirkin 			break;
328999405162SMichael S. Tsirkin 
3290c8d68e6bSJason Wang 		ret = tun_attach_filter(tun);
329199405162SMichael S. Tsirkin 		break;
329299405162SMichael S. Tsirkin 
329399405162SMichael S. Tsirkin 	case TUNDETACHFILTER:
329499405162SMichael S. Tsirkin 		/* Can be set only for TAPs */
329599405162SMichael S. Tsirkin 		ret = -EINVAL;
329640630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
329799405162SMichael S. Tsirkin 			break;
3298c8d68e6bSJason Wang 		ret = 0;
3299c8d68e6bSJason Wang 		tun_detach_filter(tun, tun->numqueues);
330099405162SMichael S. Tsirkin 		break;
330199405162SMichael S. Tsirkin 
330276975e9cSPavel Emelyanov 	case TUNGETFILTER:
330376975e9cSPavel Emelyanov 		ret = -EINVAL;
330440630b82SMichael S. Tsirkin 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
330576975e9cSPavel Emelyanov 			break;
330676975e9cSPavel Emelyanov 		ret = -EFAULT;
330776975e9cSPavel Emelyanov 		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
330876975e9cSPavel Emelyanov 			break;
330976975e9cSPavel Emelyanov 		ret = 0;
331076975e9cSPavel Emelyanov 		break;
331176975e9cSPavel Emelyanov 
331296f84061SJason Wang 	case TUNSETSTEERINGEBPF:
3313cd5681d7SJason Wang 		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
331496f84061SJason Wang 		break;
331596f84061SJason Wang 
3316aff3d70aSJason Wang 	case TUNSETFILTEREBPF:
3317aff3d70aSJason Wang 		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3318aff3d70aSJason Wang 		break;
3319aff3d70aSJason Wang 
332026d31925SNicolas Dichtel 	case TUNSETCARRIER:
332126d31925SNicolas Dichtel 		ret = -EFAULT;
332226d31925SNicolas Dichtel 		if (copy_from_user(&carrier, argp, sizeof(carrier)))
332326d31925SNicolas Dichtel 			goto unlock;
332426d31925SNicolas Dichtel 
332526d31925SNicolas Dichtel 		ret = tun_net_change_carrier(tun->dev, (bool)carrier);
332626d31925SNicolas Dichtel 		break;
332726d31925SNicolas Dichtel 
33280c3e0e3bSKirill Tkhai 	case TUNGETDEVNETNS:
33290c3e0e3bSKirill Tkhai 		ret = -EPERM;
33300c3e0e3bSKirill Tkhai 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
33310c3e0e3bSKirill Tkhai 			goto unlock;
33320c3e0e3bSKirill Tkhai 		ret = open_related_ns(&net->ns, get_net_ns);
33330c3e0e3bSKirill Tkhai 		break;
33340c3e0e3bSKirill Tkhai 
33351da177e4SLinus Torvalds 	default:
3336631ab46bSEric W. Biederman 		ret = -EINVAL;
3337631ab46bSEric W. Biederman 		break;
3338ee289b64SJoe Perches 	}
33391da177e4SLinus Torvalds 
334083c1f36fSSabrina Dubroca 	if (do_notify)
334183c1f36fSSabrina Dubroca 		netdev_state_change(tun->dev);
334283c1f36fSSabrina Dubroca 
3343876bfd4dSHerbert Xu unlock:
3344876bfd4dSHerbert Xu 	rtnl_unlock();
3345876bfd4dSHerbert Xu 	if (tun)
3346631ab46bSEric W. Biederman 		tun_put(tun);
3347631ab46bSEric W. Biederman 	return ret;
33481da177e4SLinus Torvalds }
33491da177e4SLinus Torvalds 
335050857e2aSArnd Bergmann static long tun_chr_ioctl(struct file *file,
335150857e2aSArnd Bergmann 			  unsigned int cmd, unsigned long arg)
335250857e2aSArnd Bergmann {
335350857e2aSArnd Bergmann 	return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
335450857e2aSArnd Bergmann }
335550857e2aSArnd Bergmann 
335650857e2aSArnd Bergmann #ifdef CONFIG_COMPAT
335750857e2aSArnd Bergmann static long tun_chr_compat_ioctl(struct file *file,
335850857e2aSArnd Bergmann 			 unsigned int cmd, unsigned long arg)
335950857e2aSArnd Bergmann {
336050857e2aSArnd Bergmann 	switch (cmd) {
336150857e2aSArnd Bergmann 	case TUNSETIFF:
336250857e2aSArnd Bergmann 	case TUNGETIFF:
336350857e2aSArnd Bergmann 	case TUNSETTXFILTER:
336450857e2aSArnd Bergmann 	case TUNGETSNDBUF:
336550857e2aSArnd Bergmann 	case TUNSETSNDBUF:
336650857e2aSArnd Bergmann 	case SIOCGIFHWADDR:
336750857e2aSArnd Bergmann 	case SIOCSIFHWADDR:
336850857e2aSArnd Bergmann 		arg = (unsigned long)compat_ptr(arg);
336950857e2aSArnd Bergmann 		break;
337050857e2aSArnd Bergmann 	default:
337150857e2aSArnd Bergmann 		arg = (compat_ulong_t)arg;
337250857e2aSArnd Bergmann 		break;
337350857e2aSArnd Bergmann 	}
337450857e2aSArnd Bergmann 
337550857e2aSArnd Bergmann 	/*
337650857e2aSArnd Bergmann 	 * compat_ifreq is shorter than ifreq, so we must not access beyond
337750857e2aSArnd Bergmann 	 * the end of that structure. All fields that are used in this
337850857e2aSArnd Bergmann 	 * driver are compatible though, we don't need to convert the
337950857e2aSArnd Bergmann 	 * contents.
338050857e2aSArnd Bergmann 	 */
338150857e2aSArnd Bergmann 	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
338250857e2aSArnd Bergmann }
338350857e2aSArnd Bergmann #endif /* CONFIG_COMPAT */
338450857e2aSArnd Bergmann 
33851da177e4SLinus Torvalds static int tun_chr_fasync(int fd, struct file *file, int on)
33861da177e4SLinus Torvalds {
338754f968d6SJason Wang 	struct tun_file *tfile = file->private_data;
33881da177e4SLinus Torvalds 	int ret;
33891da177e4SLinus Torvalds 
339054f968d6SJason Wang 	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
33919d319522SJonathan Corbet 		goto out;
33921da177e4SLinus Torvalds 
33931da177e4SLinus Torvalds 	if (on) {
339401919134SEric W. Biederman 		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
339554f968d6SJason Wang 		tfile->flags |= TUN_FASYNC;
33961da177e4SLinus Torvalds 	} else
339754f968d6SJason Wang 		tfile->flags &= ~TUN_FASYNC;
33989d319522SJonathan Corbet 	ret = 0;
33999d319522SJonathan Corbet out:
34009d319522SJonathan Corbet 	return ret;
34011da177e4SLinus Torvalds }
34021da177e4SLinus Torvalds 
34031da177e4SLinus Torvalds static int tun_chr_open(struct inode *inode, struct file * file)
34041da177e4SLinus Torvalds {
3405140e807dSEric W. Biederman 	struct net *net = current->nsproxy->net_ns;
3406631ab46bSEric W. Biederman 	struct tun_file *tfile;
3407deed49fbSThomas Gleixner 
3408140e807dSEric W. Biederman 	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
340911aa9c28SEric W. Biederman 					    &tun_proto, 0);
3410631ab46bSEric W. Biederman 	if (!tfile)
3411631ab46bSEric W. Biederman 		return -ENOMEM;
3412b196d88aSJason Wang 	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
3413b196d88aSJason Wang 		sk_free(&tfile->sk);
3414b196d88aSJason Wang 		return -ENOMEM;
3415b196d88aSJason Wang 	}
3416b196d88aSJason Wang 
3417c7256f57SEric Dumazet 	mutex_init(&tfile->napi_mutex);
3418c956674bSMonam Agarwal 	RCU_INIT_POINTER(tfile->tun, NULL);
341954f968d6SJason Wang 	tfile->flags = 0;
3420fb7589a1SPavel Emelyanov 	tfile->ifindex = 0;
342154f968d6SJason Wang 
3422333f7909SAl Viro 	init_waitqueue_head(&tfile->socket.wq.wait);
342354f968d6SJason Wang 
342454f968d6SJason Wang 	tfile->socket.file = file;
342554f968d6SJason Wang 	tfile->socket.ops = &tun_socket_ops;
342654f968d6SJason Wang 
342754f968d6SJason Wang 	sock_init_data(&tfile->socket, &tfile->sk);
342854f968d6SJason Wang 
342954f968d6SJason Wang 	tfile->sk.sk_write_space = tun_sock_write_space;
343054f968d6SJason Wang 	tfile->sk.sk_sndbuf = INT_MAX;
343154f968d6SJason Wang 
3432631ab46bSEric W. Biederman 	file->private_data = tfile;
34334008e97fSJason Wang 	INIT_LIST_HEAD(&tfile->next);
343454f968d6SJason Wang 
343519a6afb2SJason Wang 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
343619a6afb2SJason Wang 
34371da177e4SLinus Torvalds 	return 0;
34381da177e4SLinus Torvalds }
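
/*
 * Editor-added illustrative sketch (not part of the driver, compiled out):
 * typical userspace bring-up of a tap interface.  open("/dev/net/tun") lands
 * in tun_chr_open() above, which only allocates the per-queue tun_file; the
 * fd is bound to an actual netdev later by the TUNSETIFF ioctl.  "tap0" and
 * the helper name are hypothetical.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int example_open_tap(const char *name)	/* e.g. "tap0" */
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif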
34391da177e4SLinus Torvalds 
34401da177e4SLinus Torvalds static int tun_chr_close(struct inode *inode, struct file *file)
34411da177e4SLinus Torvalds {
3442631ab46bSEric W. Biederman 	struct tun_file *tfile = file->private_data;
34431da177e4SLinus Torvalds 
3444c8d68e6bSJason Wang 	tun_detach(tfile, true);
34451da177e4SLinus Torvalds 
34461da177e4SLinus Torvalds 	return 0;
34471da177e4SLinus Torvalds }
34481da177e4SLinus Torvalds 
344993e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS
34509484dc74Syuan linyu static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
345193e14b6dSMasatake YAMATO {
34529484dc74Syuan linyu 	struct tun_file *tfile = file->private_data;
345393e14b6dSMasatake YAMATO 	struct tun_struct *tun;
345493e14b6dSMasatake YAMATO 	struct ifreq ifr;
345593e14b6dSMasatake YAMATO 
345693e14b6dSMasatake YAMATO 	memset(&ifr, 0, sizeof(ifr));
345793e14b6dSMasatake YAMATO 
345893e14b6dSMasatake YAMATO 	rtnl_lock();
34599484dc74Syuan linyu 	tun = tun_get(tfile);
346093e14b6dSMasatake YAMATO 	if (tun)
346112132768SKirill Tkhai 		tun_get_iff(tun, &ifr);
346293e14b6dSMasatake YAMATO 	rtnl_unlock();
346393e14b6dSMasatake YAMATO 
346493e14b6dSMasatake YAMATO 	if (tun)
346593e14b6dSMasatake YAMATO 		tun_put(tun);
346693e14b6dSMasatake YAMATO 
3467a3816ab0SJoe Perches 	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
346893e14b6dSMasatake YAMATO }
346993e14b6dSMasatake YAMATO #endif
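
/*
 * Editor's note (illustrative): with CONFIG_PROC_FS the hook above adds one
 * line to /proc/<pid>/fdinfo/<fd> for a tun fd, e.g.
 *
 *	iff:	tun0
 *
 * where "tun0" is a hypothetical interface name; an unattached fd shows an
 * empty name.
 */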
347093e14b6dSMasatake YAMATO 
3471d54b1fdbSArjan van de Ven static const struct file_operations tun_fops = {
34721da177e4SLinus Torvalds 	.owner	= THIS_MODULE,
34731da177e4SLinus Torvalds 	.llseek = no_llseek,
34749b067034SAl Viro 	.read_iter  = tun_chr_read_iter,
3475f5ff53b4SAl Viro 	.write_iter = tun_chr_write_iter,
34761da177e4SLinus Torvalds 	.poll	= tun_chr_poll,
3477876bfd4dSHerbert Xu 	.unlocked_ioctl	= tun_chr_ioctl,
347850857e2aSArnd Bergmann #ifdef CONFIG_COMPAT
347950857e2aSArnd Bergmann 	.compat_ioctl = tun_chr_compat_ioctl,
348050857e2aSArnd Bergmann #endif
34811da177e4SLinus Torvalds 	.open	= tun_chr_open,
34821da177e4SLinus Torvalds 	.release = tun_chr_close,
348393e14b6dSMasatake YAMATO 	.fasync = tun_chr_fasync,
348493e14b6dSMasatake YAMATO #ifdef CONFIG_PROC_FS
348593e14b6dSMasatake YAMATO 	.show_fdinfo = tun_chr_show_fdinfo,
348693e14b6dSMasatake YAMATO #endif
34871da177e4SLinus Torvalds };
34881da177e4SLinus Torvalds 
34891da177e4SLinus Torvalds static struct miscdevice tun_miscdev = {
34901da177e4SLinus Torvalds 	.minor = TUN_MINOR,
34911da177e4SLinus Torvalds 	.name = "tun",
3492e454cea2SKay Sievers 	.nodename = "net/tun",
34931da177e4SLinus Torvalds 	.fops = &tun_fops,
34941da177e4SLinus Torvalds };
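
/*
 * Editor's note (illustrative): registering tun_miscdev exposes the character
 * device as /dev/net/tun (misc major 10, minor TUN_MINOR); the .nodename
 * places the node under the net/ subdirectory when devtmpfs/udev create it.
 * On a minimal system it can be created by hand, e.g.
 * "mknod /dev/net/tun c 10 200", assuming TUN_MINOR is still 200.
 */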
34951da177e4SLinus Torvalds 
34961da177e4SLinus Torvalds /* ethtool interface */
34971da177e4SLinus Torvalds 
34984e24f2ddSChas Williams static void tun_default_link_ksettings(struct net_device *dev,
349929ccc49dSPhilippe Reynes 				       struct ethtool_link_ksettings *cmd)
35001da177e4SLinus Torvalds {
350129ccc49dSPhilippe Reynes 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
350229ccc49dSPhilippe Reynes 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
350329ccc49dSPhilippe Reynes 	cmd->base.speed		= SPEED_10;
350429ccc49dSPhilippe Reynes 	cmd->base.duplex	= DUPLEX_FULL;
350529ccc49dSPhilippe Reynes 	cmd->base.port		= PORT_TP;
350629ccc49dSPhilippe Reynes 	cmd->base.phy_address	= 0;
350729ccc49dSPhilippe Reynes 	cmd->base.autoneg	= AUTONEG_DISABLE;
35084e24f2ddSChas Williams }
35094e24f2ddSChas Williams 
35104e24f2ddSChas Williams static int tun_get_link_ksettings(struct net_device *dev,
35114e24f2ddSChas Williams 				  struct ethtool_link_ksettings *cmd)
35124e24f2ddSChas Williams {
35134e24f2ddSChas Williams 	struct tun_struct *tun = netdev_priv(dev);
35144e24f2ddSChas Williams 
35154e24f2ddSChas Williams 	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
35164e24f2ddSChas Williams 	return 0;
35174e24f2ddSChas Williams }
35184e24f2ddSChas Williams 
35194e24f2ddSChas Williams static int tun_set_link_ksettings(struct net_device *dev,
35204e24f2ddSChas Williams 				  const struct ethtool_link_ksettings *cmd)
35214e24f2ddSChas Williams {
35224e24f2ddSChas Williams 	struct tun_struct *tun = netdev_priv(dev);
35234e24f2ddSChas Williams 
35244e24f2ddSChas Williams 	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
35251da177e4SLinus Torvalds 	return 0;
35261da177e4SLinus Torvalds }
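
/*
 * Editor's note (illustrative): tun has no real PHY, so the defaults filled
 * in by tun_default_link_ksettings() make "ethtool tun0" report 10Mb/s, full
 * duplex.  tun_set_link_ksettings() stores whatever userspace provides
 * without validation, so e.g. "ethtool -s tun0 speed 1000 duplex full" simply
 * overwrites the reported values ("tun0" is a hypothetical interface name).
 */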
35271da177e4SLinus Torvalds 
35281da177e4SLinus Torvalds static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
35291da177e4SLinus Torvalds {
35301da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
35311da177e4SLinus Torvalds 
353233a5ba14SRick Jones 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
353333a5ba14SRick Jones 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
35341da177e4SLinus Torvalds 
35351da177e4SLinus Torvalds 	switch (tun->flags & TUN_TYPE_MASK) {
353640630b82SMichael S. Tsirkin 	case IFF_TUN:
353733a5ba14SRick Jones 		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
35381da177e4SLinus Torvalds 		break;
353940630b82SMichael S. Tsirkin 	case IFF_TAP:
354033a5ba14SRick Jones 		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
35411da177e4SLinus Torvalds 		break;
35421da177e4SLinus Torvalds 	}
35431da177e4SLinus Torvalds }
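
/*
 * Editor's note (illustrative): "ethtool -i <dev>" on a tun/tap interface
 * reports driver "tun", version "1.6", and a bus-info string of "tun" or
 * "tap" depending on the device type, as filled in above.
 */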
35441da177e4SLinus Torvalds 
35451da177e4SLinus Torvalds static u32 tun_get_msglevel(struct net_device *dev)
35461da177e4SLinus Torvalds {
35471da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
35483424170fSMichal Kubecek 
35493424170fSMichal Kubecek 	return tun->msg_enable;
35501da177e4SLinus Torvalds }
35511da177e4SLinus Torvalds 
35521da177e4SLinus Torvalds static void tun_set_msglevel(struct net_device *dev, u32 value)
35531da177e4SLinus Torvalds {
35541da177e4SLinus Torvalds 	struct tun_struct *tun = netdev_priv(dev);
35553424170fSMichal Kubecek 
35563424170fSMichal Kubecek 	tun->msg_enable = value;
35571da177e4SLinus Torvalds }
35581da177e4SLinus Torvalds 
35595503fcecSJason Wang static int tun_get_coalesce(struct net_device *dev,
3560f3ccfda1SYufeng Mo 			    struct ethtool_coalesce *ec,
3561f3ccfda1SYufeng Mo 			    struct kernel_ethtool_coalesce *kernel_coal,
3562f3ccfda1SYufeng Mo 			    struct netlink_ext_ack *extack)
35635503fcecSJason Wang {
35645503fcecSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
35655503fcecSJason Wang 
35665503fcecSJason Wang 	ec->rx_max_coalesced_frames = tun->rx_batched;
35675503fcecSJason Wang 
35685503fcecSJason Wang 	return 0;
35695503fcecSJason Wang }
35705503fcecSJason Wang 
35715503fcecSJason Wang static int tun_set_coalesce(struct net_device *dev,
3572f3ccfda1SYufeng Mo 			    struct ethtool_coalesce *ec,
3573f3ccfda1SYufeng Mo 			    struct kernel_ethtool_coalesce *kernel_coal,
3574f3ccfda1SYufeng Mo 			    struct netlink_ext_ack *extack)
35755503fcecSJason Wang {
35765503fcecSJason Wang 	struct tun_struct *tun = netdev_priv(dev);
35775503fcecSJason Wang 
35785503fcecSJason Wang 	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
35795503fcecSJason Wang 		tun->rx_batched = NAPI_POLL_WEIGHT;
35805503fcecSJason Wang 	else
35815503fcecSJason Wang 		tun->rx_batched = ec->rx_max_coalesced_frames;
35825503fcecSJason Wang 
35835503fcecSJason Wang 	return 0;
35845503fcecSJason Wang }
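
/*
 * Editor-added illustrative sketch (not part of the driver, compiled out):
 * the rx-frames coalesce knob ends up in tun_set_coalesce() above and is
 * stored in tun->rx_batched (capped at NAPI_POLL_WEIGHT).  This is the
 * ioctl-level equivalent of "ethtool -C <dev> rx-frames 32"; helper and
 * device names are hypothetical.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/sockios.h>

static int example_set_rx_batched(const char *dev, __u32 frames)
{
	struct ethtool_coalesce ec = { .cmd = ETHTOOL_SCOALESCE };
	struct ifreq ifr;
	int sk, ret;

	sk = socket(AF_INET, SOCK_DGRAM, 0);
	if (sk < 0)
		return -1;
	ec.rx_max_coalesced_frames = frames;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ec;
	ret = ioctl(sk, SIOCETHTOOL, &ifr);
	close(sk);
	return ret;
}
#endif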
35855503fcecSJason Wang 
35867282d491SJeff Garzik static const struct ethtool_ops tun_ethtool_ops = {
3587e5ad00b3SJakub Kicinski 	.supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
35881da177e4SLinus Torvalds 	.get_drvinfo	= tun_get_drvinfo,
35891da177e4SLinus Torvalds 	.get_msglevel	= tun_get_msglevel,
35901da177e4SLinus Torvalds 	.set_msglevel	= tun_set_msglevel,
3591bee31369SNolan Leake 	.get_link	= ethtool_op_get_link,
3592eda29772SRichard Cochran 	.get_ts_info	= ethtool_op_get_ts_info,
35935503fcecSJason Wang 	.get_coalesce   = tun_get_coalesce,
35945503fcecSJason Wang 	.set_coalesce   = tun_set_coalesce,
359529ccc49dSPhilippe Reynes 	.get_link_ksettings = tun_get_link_ksettings,
35964e24f2ddSChas Williams 	.set_link_ksettings = tun_set_link_ksettings,
35971da177e4SLinus Torvalds };
35981da177e4SLinus Torvalds 
35991576d986SJason Wang static int tun_queue_resize(struct tun_struct *tun)
36001576d986SJason Wang {
36011576d986SJason Wang 	struct net_device *dev = tun->dev;
36021576d986SJason Wang 	struct tun_file *tfile;
36035990a305SJason Wang 	struct ptr_ring **rings;
36041576d986SJason Wang 	int n = tun->numqueues + tun->numdisabled;
36051576d986SJason Wang 	int ret, i;
36061576d986SJason Wang 
36075990a305SJason Wang 	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
36085990a305SJason Wang 	if (!rings)
36091576d986SJason Wang 		return -ENOMEM;
36101576d986SJason Wang 
36111576d986SJason Wang 	for (i = 0; i < tun->numqueues; i++) {
36121576d986SJason Wang 		tfile = rtnl_dereference(tun->tfiles[i]);
36135990a305SJason Wang 		rings[i] = &tfile->tx_ring;
36141576d986SJason Wang 	}
36151576d986SJason Wang 	list_for_each_entry(tfile, &tun->disabled, next)
36165990a305SJason Wang 		rings[i++] = &tfile->tx_ring;
36171576d986SJason Wang 
36185990a305SJason Wang 	ret = ptr_ring_resize_multiple(rings, n,
36195990a305SJason Wang 				       dev->tx_queue_len, GFP_KERNEL,
3620fc72d1d5SJason Wang 				       tun_ptr_free);
36211576d986SJason Wang 
36225990a305SJason Wang 	kfree(rings);
36231576d986SJason Wang 	return ret;
36241576d986SJason Wang }
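
/*
 * Editor's note (illustrative): this runs when the notifier below sees
 * NETDEV_CHANGE_TX_QUEUE_LEN, e.g. after "ip link set dev tun0 txqueuelen
 * 1000" ("tun0" hypothetical).  All attached and disabled queues are resized
 * in a single ptr_ring_resize_multiple() call, and packets that no longer
 * fit are released through tun_ptr_free().
 */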
36251576d986SJason Wang 
36261576d986SJason Wang static int tun_device_event(struct notifier_block *unused,
36271576d986SJason Wang 			    unsigned long event, void *ptr)
36281576d986SJason Wang {
36291576d986SJason Wang 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
36301576d986SJason Wang 	struct tun_struct *tun = netdev_priv(dev);
363172b319dcSFei Li 	int i;
36321576d986SJason Wang 
363386dfb4acSCraig Gallek 	if (dev->rtnl_link_ops != &tun_link_ops)
363486dfb4acSCraig Gallek 		return NOTIFY_DONE;
363586dfb4acSCraig Gallek 
36361576d986SJason Wang 	switch (event) {
36371576d986SJason Wang 	case NETDEV_CHANGE_TX_QUEUE_LEN:
36381576d986SJason Wang 		if (tun_queue_resize(tun))
36391576d986SJason Wang 			return NOTIFY_BAD;
36401576d986SJason Wang 		break;
364172b319dcSFei Li 	case NETDEV_UP:
364272b319dcSFei Li 		for (i = 0; i < tun->numqueues; i++) {
364372b319dcSFei Li 			struct tun_file *tfile;
364472b319dcSFei Li 
364572b319dcSFei Li 			tfile = rtnl_dereference(tun->tfiles[i]);
364672b319dcSFei Li 			tfile->socket.sk->sk_write_space(tfile->socket.sk);
364772b319dcSFei Li 		}
364872b319dcSFei Li 		break;
36491576d986SJason Wang 	default:
36501576d986SJason Wang 		break;
36511576d986SJason Wang 	}
36521576d986SJason Wang 
36531576d986SJason Wang 	return NOTIFY_DONE;
36541576d986SJason Wang }
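
/*
 * Editor's note (illustrative): the NETDEV_UP case re-runs sk_write_space()
 * on every queue so that senders waiting for socket write space (e.g. poll()
 * waiting for EPOLLOUT) are re-woken once the interface comes back up, e.g.
 * after "ip link set tun0 up" ("tun0" hypothetical).
 */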
36551576d986SJason Wang 
36561576d986SJason Wang static struct notifier_block tun_notifier_block __read_mostly = {
36571576d986SJason Wang 	.notifier_call	= tun_device_event,
36581576d986SJason Wang };
365979d17604SPavel Emelyanov 
36601da177e4SLinus Torvalds static int __init tun_init(void)
36611da177e4SLinus Torvalds {
36621da177e4SLinus Torvalds 	int ret = 0;
36631da177e4SLinus Torvalds 
36646b8a66eeSJoe Perches 	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
36651da177e4SLinus Torvalds 
3666f019a7a5SEric W. Biederman 	ret = rtnl_link_register(&tun_link_ops);
366779d17604SPavel Emelyanov 	if (ret) {
36686b8a66eeSJoe Perches 		pr_err("Can't register link_ops\n");
3669f019a7a5SEric W. Biederman 		goto err_linkops;
367079d17604SPavel Emelyanov 	}
367179d17604SPavel Emelyanov 
36721da177e4SLinus Torvalds 	ret = misc_register(&tun_miscdev);
367379d17604SPavel Emelyanov 	if (ret) {
36746b8a66eeSJoe Perches 		pr_err("Can't register misc device %d\n", TUN_MINOR);
367579d17604SPavel Emelyanov 		goto err_misc;
367679d17604SPavel Emelyanov 	}
36771576d986SJason Wang 
36785edfbd3cSTonghao Zhang 	ret = register_netdevice_notifier(&tun_notifier_block);
36795edfbd3cSTonghao Zhang 	if (ret) {
36805edfbd3cSTonghao Zhang 		pr_err("Can't register netdevice notifier\n");
36815edfbd3cSTonghao Zhang 		goto err_notifier;
36825edfbd3cSTonghao Zhang 	}
36835edfbd3cSTonghao Zhang 
368479d17604SPavel Emelyanov 	return  0;
36855edfbd3cSTonghao Zhang 
36865edfbd3cSTonghao Zhang err_notifier:
36875edfbd3cSTonghao Zhang 	misc_deregister(&tun_miscdev);
368879d17604SPavel Emelyanov err_misc:
3689f019a7a5SEric W. Biederman 	rtnl_link_unregister(&tun_link_ops);
3690f019a7a5SEric W. Biederman err_linkops:
36911da177e4SLinus Torvalds 	return ret;
36921da177e4SLinus Torvalds }
36931da177e4SLinus Torvalds 
36941da177e4SLinus Torvalds static void tun_cleanup(void)
36951da177e4SLinus Torvalds {
36961da177e4SLinus Torvalds 	misc_deregister(&tun_miscdev);
3697f019a7a5SEric W. Biederman 	rtnl_link_unregister(&tun_link_ops);
36981576d986SJason Wang 	unregister_netdevice_notifier(&tun_notifier_block);
36991da177e4SLinus Torvalds }
37001da177e4SLinus Torvalds 
370105c2828cSMichael S. Tsirkin /* Get an underlying socket object from tun file.  Returns an error unless the
370205c2828cSMichael S. Tsirkin  * file is a tun file; traffic only flows once it has been attached to a device.
370305c2828cSMichael S. Tsirkin  * The returned object works like a packet socket, it can be used for
370405c2828cSMichael S. Tsirkin  * sock_sendmsg/sock_recvmsg.  The caller is responsible for holding a
370405c2828cSMichael S. Tsirkin  * reference to the file for as long as the socket is in use. */
370505c2828cSMichael S. Tsirkin struct socket *tun_get_socket(struct file *file)
370605c2828cSMichael S. Tsirkin {
37076e914fc7SJason Wang 	struct tun_file *tfile;
370805c2828cSMichael S. Tsirkin 	if (file->f_op != &tun_fops)
370905c2828cSMichael S. Tsirkin 		return ERR_PTR(-EINVAL);
37106e914fc7SJason Wang 	tfile = file->private_data;
37116e914fc7SJason Wang 	if (!tfile)
371205c2828cSMichael S. Tsirkin 		return ERR_PTR(-EBADFD);
371354f968d6SJason Wang 	return &tfile->socket;
371405c2828cSMichael S. Tsirkin }
371505c2828cSMichael S. Tsirkin EXPORT_SYMBOL_GPL(tun_get_socket);
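
/*
 * Editor-added illustrative sketch (not part of the driver, compiled out):
 * how a kernel-side consumer (vhost_net is the in-tree user) might resolve a
 * tun fd handed over from userspace.  Hypothetical helper name; on success
 * the caller keeps the file reference until it stops using the socket.
 */
#if 0
static struct socket *example_sock_from_tun_fd(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}
#endif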
371605c2828cSMichael S. Tsirkin 
37175990a305SJason Wang struct ptr_ring *tun_get_tx_ring(struct file *file)
371883339c6bSJason Wang {
371983339c6bSJason Wang 	struct tun_file *tfile;
372083339c6bSJason Wang 
372183339c6bSJason Wang 	if (file->f_op != &tun_fops)
372283339c6bSJason Wang 		return ERR_PTR(-EINVAL);
372383339c6bSJason Wang 	tfile = file->private_data;
372483339c6bSJason Wang 	if (!tfile)
372583339c6bSJason Wang 		return ERR_PTR(-EBADFD);
37265990a305SJason Wang 	return &tfile->tx_ring;
372783339c6bSJason Wang }
37285990a305SJason Wang EXPORT_SYMBOL_GPL(tun_get_tx_ring);
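
/*
 * Editor-added illustrative sketch (not part of the driver, compiled out):
 * a consumer that obtained the ring via tun_get_tx_ring() can pull packets
 * queued by tun_net_xmit() directly.  Entries are tagged pointers (sk_buff
 * or XDP frame), so anything dropped instead of consumed must be released
 * with tun_ptr_free().  Hypothetical helper name.
 */
#if 0
static void example_flush_tx_ring(struct ptr_ring *ring)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(ring)) != NULL)
		tun_ptr_free(ptr);
}
#endif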
372983339c6bSJason Wang 
37301da177e4SLinus Torvalds module_init(tun_init);
37311da177e4SLinus Torvalds module_exit(tun_cleanup);
37321da177e4SLinus Torvalds MODULE_DESCRIPTION(DRV_DESCRIPTION);
37331da177e4SLinus Torvalds MODULE_AUTHOR(DRV_COPYRIGHT);
37341da177e4SLinus Torvalds MODULE_LICENSE("GPL");
37351da177e4SLinus Torvalds MODULE_ALIAS_MISCDEV(TUN_MINOR);
3736578454ffSKay Sievers MODULE_ALIAS("devname:net/tun");
3737