xref: /openbmc/linux/drivers/net/tun.c (revision cd5d5810)
1 /*
2  *  TUN - Universal TUN/TAP device driver.
3  *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
4  *
5  *  This program is free software; you can redistribute it and/or modify
6  *  it under the terms of the GNU General Public License as published by
7  *  the Free Software Foundation; either version 2 of the License, or
8  *  (at your option) any later version.
9  *
10  *  This program is distributed in the hope that it will be useful,
11  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  *  GNU General Public License for more details.
14  *
15  *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
16  */
17 
18 /*
19  *  Changes:
20  *
21  *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
22  *    Add TUNSETLINK ioctl to set the link encapsulation
23  *
24  *  Mark Smith <markzzzsmith@yahoo.com.au>
25  *    Use eth_random_addr() for tap MAC address.
26  *
27  *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
28  *    Fixes in packet dropping, queue length setting and queue wakeup.
29  *    Increased default tx queue length.
30  *    Added ethtool API.
31  *    Minor cleanups
32  *
33  *  Daniel Podlejski <underley@underley.eu.org>
34  *    Modifications for 2.3.99-pre5 kernel.
35  */
36 
37 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38 
39 #define DRV_NAME	"tun"
40 #define DRV_VERSION	"1.6"
41 #define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
42 #define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
43 
44 #include <linux/module.h>
45 #include <linux/errno.h>
46 #include <linux/kernel.h>
47 #include <linux/major.h>
48 #include <linux/slab.h>
49 #include <linux/poll.h>
50 #include <linux/fcntl.h>
51 #include <linux/init.h>
52 #include <linux/skbuff.h>
53 #include <linux/netdevice.h>
54 #include <linux/etherdevice.h>
55 #include <linux/miscdevice.h>
56 #include <linux/ethtool.h>
57 #include <linux/rtnetlink.h>
58 #include <linux/compat.h>
59 #include <linux/if.h>
60 #include <linux/if_arp.h>
61 #include <linux/if_ether.h>
62 #include <linux/if_tun.h>
63 #include <linux/if_vlan.h>
64 #include <linux/crc32.h>
65 #include <linux/nsproxy.h>
66 #include <linux/virtio_net.h>
67 #include <linux/rcupdate.h>
68 #include <net/net_namespace.h>
69 #include <net/netns/generic.h>
70 #include <net/rtnetlink.h>
71 #include <net/sock.h>
72 
73 #include <asm/uaccess.h>
74 
75 /* Uncomment to enable debugging */
76 /* #define TUN_DEBUG 1 */
77 
78 #ifdef TUN_DEBUG
79 static int debug;
80 
81 #define tun_debug(level, tun, fmt, args...)			\
82 do {								\
83 	if (tun->debug)						\
84 		netdev_printk(level, tun->dev, fmt, ##args);	\
85 } while (0)
86 #define DBG1(level, fmt, args...)				\
87 do {								\
88 	if (debug == 2)						\
89 		printk(level fmt, ##args);			\
90 } while (0)
91 #else
92 #define tun_debug(level, tun, fmt, args...)			\
93 do {								\
94 	if (0)							\
95 		netdev_printk(level, tun->dev, fmt, ##args);	\
96 } while (0)
97 #define DBG1(level, fmt, args...)				\
98 do {								\
99 	if (0)							\
100 		printk(level fmt, ##args);			\
101 } while (0)
102 #endif
103 
104 #define GOODCOPY_LEN 128
105 
106 #define FLT_EXACT_COUNT 8
107 struct tap_filter {
108 	unsigned int    count;    /* Number of addrs. Zero means disabled */
109 	u32             mask[2];  /* Mask of the hashed addrs */
110 	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
111 };
112 
113 /* DEFAULT_MAX_NUM_RSS_QUEUES was chosen so that the rx/tx queues allocated
114  * for the netdevice fit in one page, which guarantees that the memory
115  * allocation succeeds. TODO: increase the limit. */
116 #define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
117 #define MAX_TAP_FLOWS  4096
118 
119 #define TUN_FLOW_EXPIRE (3 * HZ)
120 
121 /* A tun_file connects an open character device to a tuntap netdevice. It
122  * also contains all socket-related structures (except sock_fprog and
123  * tap_filter) and serves as one transmit queue for the tuntap device. The
124  * sock_fprog and tap_filter are kept in tun_struct since they are used for
125  * filtering on the netdevice as a whole, not on a specific queue (at least
126  * no such requirement has appeared so far).
127  *
128  * RCU usage:
129  * The tun_file and tun_struct are loosely coupled, the pointer from one to the
130  * other can only be read while rcu_read_lock or rtnl_lock is held.
131  */
132 struct tun_file {
133 	struct sock sk;
134 	struct socket socket;
135 	struct socket_wq wq;
136 	struct tun_struct __rcu *tun;
137 	struct net *net;
138 	struct fasync_struct *fasync;
139 	/* only used for fasync */
140 	unsigned int flags;
141 	union {
142 		u16 queue_index;
143 		unsigned int ifindex;
144 	};
145 	struct list_head next;
146 	struct tun_struct *detached;
147 };
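
/* A minimal sketch of the RCU pattern described above (illustration only):
 * readers dereference tfile->tun under rcu_read_lock() and pin the result
 * before dropping the lock, while writers publish the pointer with
 * rcu_assign_pointer() under rtnl_lock:
 *
 *	rcu_read_lock();
 *	tun = rcu_dereference(tfile->tun);
 *	if (tun)
 *		dev_hold(tun->dev);
 *	rcu_read_unlock();
 *
 * This is exactly what __tun_get() below does.
 */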
148 
149 struct tun_flow_entry {
150 	struct hlist_node hash_link;
151 	struct rcu_head rcu;
152 	struct tun_struct *tun;
153 
154 	u32 rxhash;
155 	int queue_index;
156 	unsigned long updated;
157 };
158 
159 #define TUN_NUM_FLOW_ENTRIES 1024
160 
161 /* Since the socket was moved into tun_file, to preserve the behavior of a
162  * persistent device, the socket filter, sndbuf and vnet header size are
163  * restored when a file is attached to a persistent device.
164  */
165 struct tun_struct {
166 	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
167 	unsigned int            numqueues;
168 	unsigned int 		flags;
169 	kuid_t			owner;
170 	kgid_t			group;
171 
172 	struct net_device	*dev;
173 	netdev_features_t	set_features;
174 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
175 			  NETIF_F_TSO6|NETIF_F_UFO)
176 
177 	int			vnet_hdr_sz;
178 	int			sndbuf;
179 	struct tap_filter	txflt;
180 	struct sock_fprog	fprog;
181 	/* protected by rtnl lock */
182 	bool			filter_attached;
183 #ifdef TUN_DEBUG
184 	int debug;
185 #endif
186 	spinlock_t lock;
187 	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
188 	struct timer_list flow_gc_timer;
189 	unsigned long ageing_time;
190 	unsigned int numdisabled;
191 	struct list_head disabled;
192 	void *security;
193 	u32 flow_count;
194 };
195 
196 static inline u32 tun_hashfn(u32 rxhash)
197 {
198 	return rxhash & 0x3ff;
199 }
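
/* TUN_NUM_FLOW_ENTRIES is a power of two (1024), so the mask above is
 * equivalent to rxhash % TUN_NUM_FLOW_ENTRIES; e.g. tun_hashfn(0x12345678)
 * yields 0x278.
 */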
200 
201 static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
202 {
203 	struct tun_flow_entry *e;
204 
205 	hlist_for_each_entry_rcu(e, head, hash_link) {
206 		if (e->rxhash == rxhash)
207 			return e;
208 	}
209 	return NULL;
210 }
211 
212 static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
213 					      struct hlist_head *head,
214 					      u32 rxhash, u16 queue_index)
215 {
216 	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
217 
218 	if (e) {
219 		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
220 			  rxhash, queue_index);
221 		e->updated = jiffies;
222 		e->rxhash = rxhash;
223 		e->queue_index = queue_index;
224 		e->tun = tun;
225 		hlist_add_head_rcu(&e->hash_link, head);
226 		++tun->flow_count;
227 	}
228 	return e;
229 }
230 
231 static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
232 {
233 	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
234 		  e->rxhash, e->queue_index);
235 	hlist_del_rcu(&e->hash_link);
236 	kfree_rcu(e, rcu);
237 	--tun->flow_count;
238 }
239 
240 static void tun_flow_flush(struct tun_struct *tun)
241 {
242 	int i;
243 
244 	spin_lock_bh(&tun->lock);
245 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
246 		struct tun_flow_entry *e;
247 		struct hlist_node *n;
248 
249 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
250 			tun_flow_delete(tun, e);
251 	}
252 	spin_unlock_bh(&tun->lock);
253 }
254 
255 static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
256 {
257 	int i;
258 
259 	spin_lock_bh(&tun->lock);
260 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
261 		struct tun_flow_entry *e;
262 		struct hlist_node *n;
263 
264 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
265 			if (e->queue_index == queue_index)
266 				tun_flow_delete(tun, e);
267 		}
268 	}
269 	spin_unlock_bh(&tun->lock);
270 }
271 
272 static void tun_flow_cleanup(unsigned long data)
273 {
274 	struct tun_struct *tun = (struct tun_struct *)data;
275 	unsigned long delay = tun->ageing_time;
276 	unsigned long next_timer = jiffies + delay;
277 	unsigned long count = 0;
278 	int i;
279 
280 	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
281 
282 	spin_lock_bh(&tun->lock);
283 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
284 		struct tun_flow_entry *e;
285 		struct hlist_node *n;
286 
287 		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
288 			unsigned long this_timer;
289 			count++;
290 			this_timer = e->updated + delay;
291 			if (time_before_eq(this_timer, jiffies))
292 				tun_flow_delete(tun, e);
293 			else if (time_before(this_timer, next_timer))
294 				next_timer = this_timer;
295 		}
296 	}
297 
298 	if (count)
299 		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
300 	spin_unlock_bh(&tun->lock);
301 }
302 
303 static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
304 			    struct tun_file *tfile)
305 {
306 	struct hlist_head *head;
307 	struct tun_flow_entry *e;
308 	unsigned long delay = tun->ageing_time;
309 	u16 queue_index = tfile->queue_index;
310 
311 	if (!rxhash)
312 		return;
313 	else
314 		head = &tun->flows[tun_hashfn(rxhash)];
315 
316 	rcu_read_lock();
317 
318 	/* There is a very small possibility of out-of-order delivery while
319 	 * switching queues; it is not worth optimizing for. */
320 	if (tun->numqueues == 1 || tfile->detached)
321 		goto unlock;
322 
323 	e = tun_flow_find(head, rxhash);
324 	if (likely(e)) {
325 		/* TODO: keep queueing to old queue until it's empty? */
326 		e->queue_index = queue_index;
327 		e->updated = jiffies;
328 	} else {
329 		spin_lock_bh(&tun->lock);
330 		if (!tun_flow_find(head, rxhash) &&
331 		    tun->flow_count < MAX_TAP_FLOWS)
332 			tun_flow_create(tun, head, rxhash, queue_index);
333 
334 		if (!timer_pending(&tun->flow_gc_timer))
335 			mod_timer(&tun->flow_gc_timer,
336 				  round_jiffies_up(jiffies + delay));
337 		spin_unlock_bh(&tun->lock);
338 	}
339 
340 unlock:
341 	rcu_read_unlock();
342 }
343 
344 /* We try to identify a flow through its rxhash first. The reason we do
345  * not check the rxq no. is because some NICs (e.g. the 82599) choose the
346  * rxq based on the txq on which the last packet of the flow was sent. As
347  * the userspace application moves between processors, we may see a
348  * different rxq no. here. If we cannot get an rxhash, we fall back to the
349  * recorded rxq no. and hope it helps.
350  */
351 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
352 {
353 	struct tun_struct *tun = netdev_priv(dev);
354 	struct tun_flow_entry *e;
355 	u32 txq = 0;
356 	u32 numqueues = 0;
357 
358 	rcu_read_lock();
359 	numqueues = ACCESS_ONCE(tun->numqueues);
360 
361 	txq = skb_get_rxhash(skb);
362 	if (txq) {
363 		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
364 		if (e)
365 			txq = e->queue_index;
366 		else
367 			/* use multiply and shift instead of expensive divide */
368 			txq = ((u64)txq * numqueues) >> 32;
369 	} else if (likely(skb_rx_queue_recorded(skb))) {
370 		txq = skb_get_rx_queue(skb);
371 		while (unlikely(txq >= numqueues))
372 			txq -= numqueues;
373 	}
374 
375 	rcu_read_unlock();
376 	return txq;
377 }
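
/* The multiply-and-shift above maps a 32-bit hash uniformly onto
 * [0, numqueues): ((u64)txq * numqueues) >> 32 equals
 * floor(txq * numqueues / 2^32), which is always < numqueues. For example,
 * with numqueues = 4, a hash of 0x80000000 selects queue 2.
 */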
378 
379 static inline bool tun_not_capable(struct tun_struct *tun)
380 {
381 	const struct cred *cred = current_cred();
382 	struct net *net = dev_net(tun->dev);
383 
384 	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
385 		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
386 		!ns_capable(net->user_ns, CAP_NET_ADMIN);
387 }
388 
389 static void tun_set_real_num_queues(struct tun_struct *tun)
390 {
391 	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
392 	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
393 }
394 
395 static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
396 {
397 	tfile->detached = tun;
398 	list_add_tail(&tfile->next, &tun->disabled);
399 	++tun->numdisabled;
400 }
401 
402 static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
403 {
404 	struct tun_struct *tun = tfile->detached;
405 
406 	tfile->detached = NULL;
407 	list_del_init(&tfile->next);
408 	--tun->numdisabled;
409 	return tun;
410 }
411 
412 static void tun_queue_purge(struct tun_file *tfile)
413 {
414 	skb_queue_purge(&tfile->sk.sk_receive_queue);
415 	skb_queue_purge(&tfile->sk.sk_error_queue);
416 }
417 
418 static void __tun_detach(struct tun_file *tfile, bool clean)
419 {
420 	struct tun_file *ntfile;
421 	struct tun_struct *tun;
422 
423 	tun = rtnl_dereference(tfile->tun);
424 
425 	if (tun && !tfile->detached) {
426 		u16 index = tfile->queue_index;
427 		BUG_ON(index >= tun->numqueues);
428 
429 		rcu_assign_pointer(tun->tfiles[index],
430 				   tun->tfiles[tun->numqueues - 1]);
431 		ntfile = rtnl_dereference(tun->tfiles[index]);
432 		ntfile->queue_index = index;
433 
434 		--tun->numqueues;
435 		if (clean) {
436 			rcu_assign_pointer(tfile->tun, NULL);
437 			sock_put(&tfile->sk);
438 		} else
439 			tun_disable_queue(tun, tfile);
440 
441 		synchronize_net();
442 		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
443 		/* Drop read queue */
444 		tun_queue_purge(tfile);
445 		tun_set_real_num_queues(tun);
446 	} else if (tfile->detached && clean) {
447 		tun = tun_enable_queue(tfile);
448 		sock_put(&tfile->sk);
449 	}
450 
451 	if (clean) {
452 		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
453 			netif_carrier_off(tun->dev);
454 
455 			if (!(tun->flags & TUN_PERSIST) &&
456 			    tun->dev->reg_state == NETREG_REGISTERED)
457 				unregister_netdevice(tun->dev);
458 		}
459 
460 		BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
461 				 &tfile->socket.flags));
462 		sk_release_kernel(&tfile->sk);
463 	}
464 }
465 
466 static void tun_detach(struct tun_file *tfile, bool clean)
467 {
468 	rtnl_lock();
469 	__tun_detach(tfile, clean);
470 	rtnl_unlock();
471 }
472 
473 static void tun_detach_all(struct net_device *dev)
474 {
475 	struct tun_struct *tun = netdev_priv(dev);
476 	struct tun_file *tfile, *tmp;
477 	int i, n = tun->numqueues;
478 
479 	for (i = 0; i < n; i++) {
480 		tfile = rtnl_dereference(tun->tfiles[i]);
481 		BUG_ON(!tfile);
482 		wake_up_all(&tfile->wq.wait);
483 		rcu_assign_pointer(tfile->tun, NULL);
484 		--tun->numqueues;
485 	}
486 	list_for_each_entry(tfile, &tun->disabled, next) {
487 		wake_up_all(&tfile->wq.wait);
488 		rcu_assign_pointer(tfile->tun, NULL);
489 	}
490 	BUG_ON(tun->numqueues != 0);
491 
492 	synchronize_net();
493 	for (i = 0; i < n; i++) {
494 		tfile = rtnl_dereference(tun->tfiles[i]);
495 		/* Drop read queue */
496 		tun_queue_purge(tfile);
497 		sock_put(&tfile->sk);
498 	}
499 	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
500 		tun_enable_queue(tfile);
501 		tun_queue_purge(tfile);
502 		sock_put(&tfile->sk);
503 	}
504 	BUG_ON(tun->numdisabled != 0);
505 
506 	if (tun->flags & TUN_PERSIST)
507 		module_put(THIS_MODULE);
508 }
509 
510 static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
511 {
512 	struct tun_file *tfile = file->private_data;
513 	int err;
514 
515 	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
516 	if (err < 0)
517 		goto out;
518 
519 	err = -EINVAL;
520 	if (rtnl_dereference(tfile->tun) && !tfile->detached)
521 		goto out;
522 
523 	err = -EBUSY;
524 	if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
525 		goto out;
526 
527 	err = -E2BIG;
528 	if (!tfile->detached &&
529 	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
530 		goto out;
531 
532 	err = 0;
533 
534 	/* Re-attach the filter to a persistent device */
535 	if (!skip_filter && (tun->filter_attached == true)) {
536 		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
537 		if (err)
538 			goto out;
539 	}
540 	tfile->queue_index = tun->numqueues;
541 	rcu_assign_pointer(tfile->tun, tun);
542 	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
543 	tun->numqueues++;
544 
545 	if (tfile->detached)
546 		tun_enable_queue(tfile);
547 	else
548 		sock_hold(&tfile->sk);
549 
550 	tun_set_real_num_queues(tun);
551 
552 	/* device is allowed to go away first, so no need to hold extra
553 	 * refcnt.
554 	 */
555 
556 out:
557 	return err;
558 }
559 
560 static struct tun_struct *__tun_get(struct tun_file *tfile)
561 {
562 	struct tun_struct *tun;
563 
564 	rcu_read_lock();
565 	tun = rcu_dereference(tfile->tun);
566 	if (tun)
567 		dev_hold(tun->dev);
568 	rcu_read_unlock();
569 
570 	return tun;
571 }
572 
573 static struct tun_struct *tun_get(struct file *file)
574 {
575 	return __tun_get(file->private_data);
576 }
577 
578 static void tun_put(struct tun_struct *tun)
579 {
580 	dev_put(tun->dev);
581 }
582 
583 /* TAP filtering */
584 static void addr_hash_set(u32 *mask, const u8 *addr)
585 {
586 	int n = ether_crc(ETH_ALEN, addr) >> 26;
587 	mask[n >> 5] |= (1 << (n & 31));
588 }
589 
590 static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
591 {
592 	int n = ether_crc(ETH_ALEN, addr) >> 26;
593 	return mask[n >> 5] & (1 << (n & 31));
594 }
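
/* Both helpers use the top six bits of the Ethernet CRC as a bit index into
 * a 64-bit multicast hash mask: n is 0..63, n >> 5 selects one of the two
 * u32 words and n & 31 the bit within it.
 */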
595 
596 static int update_filter(struct tap_filter *filter, void __user *arg)
597 {
598 	struct { u8 u[ETH_ALEN]; } *addr;
599 	struct tun_filter uf;
600 	int err, alen, n, nexact;
601 
602 	if (copy_from_user(&uf, arg, sizeof(uf)))
603 		return -EFAULT;
604 
605 	if (!uf.count) {
606 		/* Disabled */
607 		filter->count = 0;
608 		return 0;
609 	}
610 
611 	alen = ETH_ALEN * uf.count;
612 	addr = kmalloc(alen, GFP_KERNEL);
613 	if (!addr)
614 		return -ENOMEM;
615 
616 	if (copy_from_user(addr, arg + sizeof(uf), alen)) {
617 		err = -EFAULT;
618 		goto done;
619 	}
620 
621 	/* The filter is updated without holding any locks, which is
622 	 * perfectly safe: we disable it first, and in the worst
623 	 * case we'll accept a few undesired packets. */
624 	filter->count = 0;
625 	wmb();
626 
627 	/* Use first set of addresses as an exact filter */
628 	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
629 		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
630 
631 	nexact = n;
632 
633 	/* Remaining multicast addresses are hashed; any
634 	 * unicast address leaves the filter disabled. */
635 	memset(filter->mask, 0, sizeof(filter->mask));
636 	for (; n < uf.count; n++) {
637 		if (!is_multicast_ether_addr(addr[n].u)) {
638 			err = 0; /* no filter */
639 			goto done;
640 		}
641 		addr_hash_set(filter->mask, addr[n].u);
642 	}
643 
644 	/* For ALLMULTI just set the mask to all ones.
645 	 * This overrides the mask populated above. */
646 	if ((uf.flags & TUN_FLT_ALLMULTI))
647 		memset(filter->mask, ~0, sizeof(filter->mask));
648 
649 	/* Now enable the filter */
650 	wmb();
651 	filter->count = nexact;
652 
653 	/* Return the number of exact filters */
654 	err = nexact;
655 
656 done:
657 	kfree(addr);
658 	return err;
659 }
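
/* For illustration, a userspace sketch of driving TUNSETTXFILTER (assumes
 * "fd" is an attached tap fd; error handling omitted):
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_tun.h>
 *
 *	struct {
 *		struct tun_filter uf;
 *		unsigned char addr[1][ETH_ALEN];
 *	} req = { .uf = { .flags = 0, .count = 1 } };
 *
 *	memcpy(req.addr[0], "\x02\x00\x00\x00\x00\x01", ETH_ALEN);
 *	ioctl(fd, TUNSETTXFILTER, &req);  // returns the number of exact filters
 *
 * A count of zero disables the filter again.
 */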
660 
661 /* Returns: 0 - drop, !=0 - accept */
662 static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
663 {
664 	/* Cannot use eth_hdr(skb) here because skb_mac_header() is incorrect
665 	 * at this point. */
666 	struct ethhdr *eh = (struct ethhdr *) skb->data;
667 	int i;
668 
669 	/* Exact match */
670 	for (i = 0; i < filter->count; i++)
671 		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
672 			return 1;
673 
674 	/* Inexact match (multicast only) */
675 	if (is_multicast_ether_addr(eh->h_dest))
676 		return addr_hash_test(filter->mask, eh->h_dest);
677 
678 	return 0;
679 }
680 
681 /*
682  * Checks whether the packet is accepted or not.
683  * Returns: 0 - drop, !=0 - accept
684  */
685 static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
686 {
687 	if (!filter->count)
688 		return 1;
689 
690 	return run_filter(filter, skb);
691 }
692 
693 /* Network device part of the driver */
694 
695 static const struct ethtool_ops tun_ethtool_ops;
696 
697 /* Net device detach from fd. */
698 static void tun_net_uninit(struct net_device *dev)
699 {
700 	tun_detach_all(dev);
701 }
702 
703 /* Net device open. */
704 static int tun_net_open(struct net_device *dev)
705 {
706 	netif_tx_start_all_queues(dev);
707 	return 0;
708 }
709 
710 /* Net device close. */
711 static int tun_net_close(struct net_device *dev)
712 {
713 	netif_tx_stop_all_queues(dev);
714 	return 0;
715 }
716 
717 /* Net device start xmit */
718 static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
719 {
720 	struct tun_struct *tun = netdev_priv(dev);
721 	int txq = skb->queue_mapping;
722 	struct tun_file *tfile;
723 
724 	rcu_read_lock();
725 	tfile = rcu_dereference(tun->tfiles[txq]);
726 
727 	/* Drop packet if interface is not attached */
728 	if (txq >= tun->numqueues)
729 		goto drop;
730 
731 	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
732 
733 	BUG_ON(!tfile);
734 
735 	/* Drop if the filter does not like it.
736 	 * This is a noop if the filter is disabled.
737 	 * Filter can be enabled only for the TAP devices. */
738 	if (!check_filter(&tun->txflt, skb))
739 		goto drop;
740 
741 	if (tfile->socket.sk->sk_filter &&
742 	    sk_filter(tfile->socket.sk, skb))
743 		goto drop;
744 
745 	/* Limit the number of packets queued by dividing the txq length by
746 	 * the number of queues.
747 	 */
748 	if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
749 			  >= dev->tx_queue_len / tun->numqueues)
750 		goto drop;
751 
752 	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
753 		goto drop;
754 
755 	if (skb->sk) {
756 		sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
757 		sw_tx_timestamp(skb);
758 	}
759 
760 	/* Orphan the skb - required as we might hang on to it
761 	 * for an indefinite time.
762 	 */
763 	skb_orphan(skb);
764 
765 	nf_reset(skb);
766 
767 	/* Enqueue packet */
768 	skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
769 
770 	/* Notify and wake up reader process */
771 	if (tfile->flags & TUN_FASYNC)
772 		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
773 	wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
774 				   POLLRDNORM | POLLRDBAND);
775 
776 	rcu_read_unlock();
777 	return NETDEV_TX_OK;
778 
779 drop:
780 	dev->stats.tx_dropped++;
781 	skb_tx_error(skb);
782 	kfree_skb(skb);
783 	rcu_read_unlock();
784 	return NETDEV_TX_OK;
785 }
786 
787 static void tun_net_mclist(struct net_device *dev)
788 {
789 	/*
790 	 * This callback is supposed to deal with mc filter in
791 	 * _rx_ path and has nothing to do with the _tx_ path.
792 	 * In rx path we always accept everything userspace gives us.
793 	 */
794 }
795 
796 #define MIN_MTU 68
797 #define MAX_MTU 65535
798 
799 static int
800 tun_net_change_mtu(struct net_device *dev, int new_mtu)
801 {
802 	if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
803 		return -EINVAL;
804 	dev->mtu = new_mtu;
805 	return 0;
806 }
807 
808 static netdev_features_t tun_net_fix_features(struct net_device *dev,
809 	netdev_features_t features)
810 {
811 	struct tun_struct *tun = netdev_priv(dev);
812 
813 	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
814 }
815 #ifdef CONFIG_NET_POLL_CONTROLLER
816 static void tun_poll_controller(struct net_device *dev)
817 {
818 	/*
819 	 * Tun only receives frames when:
820 	 * 1) the char device endpoint gets data from user space
821 	 * 2) the tun socket gets a sendmsg call from user space
822 	 * Since both of those are synchronous operations, we are guaranteed
823 	 * never to have pending data when we poll for it,
824 	 * so there is nothing to do here but return.
825 	 * We need this though so netpoll recognizes us as an interface that
826 	 * supports polling, which enables bridge devices in virt setups to
827 	 * still use netconsole
828 	 */
829 	return;
830 }
831 #endif
832 static const struct net_device_ops tun_netdev_ops = {
833 	.ndo_uninit		= tun_net_uninit,
834 	.ndo_open		= tun_net_open,
835 	.ndo_stop		= tun_net_close,
836 	.ndo_start_xmit		= tun_net_xmit,
837 	.ndo_change_mtu		= tun_net_change_mtu,
838 	.ndo_fix_features	= tun_net_fix_features,
839 	.ndo_select_queue	= tun_select_queue,
840 #ifdef CONFIG_NET_POLL_CONTROLLER
841 	.ndo_poll_controller	= tun_poll_controller,
842 #endif
843 };
844 
845 static const struct net_device_ops tap_netdev_ops = {
846 	.ndo_uninit		= tun_net_uninit,
847 	.ndo_open		= tun_net_open,
848 	.ndo_stop		= tun_net_close,
849 	.ndo_start_xmit		= tun_net_xmit,
850 	.ndo_change_mtu		= tun_net_change_mtu,
851 	.ndo_fix_features	= tun_net_fix_features,
852 	.ndo_set_rx_mode	= tun_net_mclist,
853 	.ndo_set_mac_address	= eth_mac_addr,
854 	.ndo_validate_addr	= eth_validate_addr,
855 	.ndo_select_queue	= tun_select_queue,
856 #ifdef CONFIG_NET_POLL_CONTROLLER
857 	.ndo_poll_controller	= tun_poll_controller,
858 #endif
859 };
860 
861 static void tun_flow_init(struct tun_struct *tun)
862 {
863 	int i;
864 
865 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
866 		INIT_HLIST_HEAD(&tun->flows[i]);
867 
868 	tun->ageing_time = TUN_FLOW_EXPIRE;
869 	setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
870 	mod_timer(&tun->flow_gc_timer,
871 		  round_jiffies_up(jiffies + tun->ageing_time));
872 }
873 
874 static void tun_flow_uninit(struct tun_struct *tun)
875 {
876 	del_timer_sync(&tun->flow_gc_timer);
877 	tun_flow_flush(tun);
878 }
879 
880 /* Initialize net device. */
881 static void tun_net_init(struct net_device *dev)
882 {
883 	struct tun_struct *tun = netdev_priv(dev);
884 
885 	switch (tun->flags & TUN_TYPE_MASK) {
886 	case TUN_TUN_DEV:
887 		dev->netdev_ops = &tun_netdev_ops;
888 
889 		/* Point-to-Point TUN Device */
890 		dev->hard_header_len = 0;
891 		dev->addr_len = 0;
892 		dev->mtu = 1500;
893 
894 		/* Zero header length */
895 		dev->type = ARPHRD_NONE;
896 		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
897 		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
898 		break;
899 
900 	case TUN_TAP_DEV:
901 		dev->netdev_ops = &tap_netdev_ops;
902 		/* Ethernet TAP Device */
903 		ether_setup(dev);
904 		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
905 		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
906 
907 		eth_hw_addr_random(dev);
908 
909 		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
910 		break;
911 	}
912 }
913 
914 /* Character device part */
915 
916 /* Poll */
917 static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
918 {
919 	struct tun_file *tfile = file->private_data;
920 	struct tun_struct *tun = __tun_get(tfile);
921 	struct sock *sk;
922 	unsigned int mask = 0;
923 
924 	if (!tun)
925 		return POLLERR;
926 
927 	sk = tfile->socket.sk;
928 
929 	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
930 
931 	poll_wait(file, &tfile->wq.wait, wait);
932 
933 	if (!skb_queue_empty(&sk->sk_receive_queue))
934 		mask |= POLLIN | POLLRDNORM;
935 
936 	if (sock_writeable(sk) ||
937 	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
938 	     sock_writeable(sk)))
939 		mask |= POLLOUT | POLLWRNORM;
940 
941 	if (tun->dev->reg_state != NETREG_REGISTERED)
942 		mask = POLLERR;
943 
944 	tun_put(tun);
945 	return mask;
946 }
947 
948 /* prepad is the amount to reserve at front.  len is length after that.
949  * linear is a hint as to how much to copy (usually headers). */
950 static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
951 				     size_t prepad, size_t len,
952 				     size_t linear, int noblock)
953 {
954 	struct sock *sk = tfile->socket.sk;
955 	struct sk_buff *skb;
956 	int err;
957 
958 	/* Under a page?  Don't bother with paged skb. */
959 	if (prepad + len < PAGE_SIZE || !linear)
960 		linear = len;
961 
962 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
963 				   &err, 0);
964 	if (!skb)
965 		return ERR_PTR(err);
966 
967 	skb_reserve(skb, prepad);
968 	skb_put(skb, linear);
969 	skb->data_len = len - linear;
970 	skb->len += len - linear;
971 
972 	return skb;
973 }
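
/* For example, a 64KiB GSO write with a 1500-byte gso.hdr_len hint gets
 * 1500 bytes in the linear area and the rest as paged data, while a small
 * packet (prepad + len < PAGE_SIZE) is made entirely linear.
 */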
974 
975 /* Get packet from user space buffer */
976 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
977 			    void *msg_control, const struct iovec *iv,
978 			    size_t total_len, size_t count, int noblock)
979 {
980 	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
981 	struct sk_buff *skb;
982 	size_t len = total_len, align = NET_SKB_PAD, linear;
983 	struct virtio_net_hdr gso = { 0 };
984 	int offset = 0;
985 	int copylen;
986 	bool zerocopy = false;
987 	int err;
988 	u32 rxhash;
989 
990 	if (!(tun->flags & TUN_NO_PI)) {
991 		if (len < sizeof(pi))
992 			return -EINVAL;
993 		len -= sizeof(pi);
994 
995 		if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
996 			return -EFAULT;
997 		offset += sizeof(pi);
998 	}
999 
1000 	if (tun->flags & TUN_VNET_HDR) {
1001 		if (len < tun->vnet_hdr_sz)
1002 			return -EINVAL;
1003 		len -= tun->vnet_hdr_sz;
1004 
1005 		if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
1006 			return -EFAULT;
1007 
1008 		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1009 		    gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
1010 			gso.hdr_len = gso.csum_start + gso.csum_offset + 2;
1011 
1012 		if (gso.hdr_len > len)
1013 			return -EINVAL;
1014 		offset += tun->vnet_hdr_sz;
1015 	}
1016 
1017 	if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
1018 		align += NET_IP_ALIGN;
1019 		if (unlikely(len < ETH_HLEN ||
1020 			     (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
1021 			return -EINVAL;
1022 	}
1023 
1024 	if (msg_control) {
1025 		/* There are 256 bytes to be copied into the skb, so there is
1026 		 * enough room to expand the skb head in case it is needed.
1027 		 * The rest of the buffer is mapped from userspace.
1028 		 */
1029 		copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
1030 		linear = copylen;
1031 		if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
1032 			zerocopy = true;
1033 	}
1034 
1035 	if (!zerocopy) {
1036 		copylen = len;
1037 		linear = gso.hdr_len;
1038 	}
1039 
1040 	skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
1041 	if (IS_ERR(skb)) {
1042 		if (PTR_ERR(skb) != -EAGAIN)
1043 			tun->dev->stats.rx_dropped++;
1044 		return PTR_ERR(skb);
1045 	}
1046 
1047 	if (zerocopy)
1048 		err = zerocopy_sg_from_iovec(skb, iv, offset, count);
1049 	else {
1050 		err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
1051 		if (!err && msg_control) {
1052 			struct ubuf_info *uarg = msg_control;
1053 			uarg->callback(uarg, false);
1054 		}
1055 	}
1056 
1057 	if (err) {
1058 		tun->dev->stats.rx_dropped++;
1059 		kfree_skb(skb);
1060 		return -EFAULT;
1061 	}
1062 
1063 	if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1064 		if (!skb_partial_csum_set(skb, gso.csum_start,
1065 					  gso.csum_offset)) {
1066 			tun->dev->stats.rx_frame_errors++;
1067 			kfree_skb(skb);
1068 			return -EINVAL;
1069 		}
1070 	}
1071 
1072 	switch (tun->flags & TUN_TYPE_MASK) {
1073 	case TUN_TUN_DEV:
1074 		if (tun->flags & TUN_NO_PI) {
1075 			switch (skb->data[0] & 0xf0) {
1076 			case 0x40:
1077 				pi.proto = htons(ETH_P_IP);
1078 				break;
1079 			case 0x60:
1080 				pi.proto = htons(ETH_P_IPV6);
1081 				break;
1082 			default:
1083 				tun->dev->stats.rx_dropped++;
1084 				kfree_skb(skb);
1085 				return -EINVAL;
1086 			}
1087 		}
1088 
1089 		skb_reset_mac_header(skb);
1090 		skb->protocol = pi.proto;
1091 		skb->dev = tun->dev;
1092 		break;
1093 	case TUN_TAP_DEV:
1094 		skb->protocol = eth_type_trans(skb, tun->dev);
1095 		break;
1096 	}
1097 
1098 	if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1099 		pr_debug("GSO!\n");
1100 		switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1101 		case VIRTIO_NET_HDR_GSO_TCPV4:
1102 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1103 			break;
1104 		case VIRTIO_NET_HDR_GSO_TCPV6:
1105 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1106 			break;
1107 		case VIRTIO_NET_HDR_GSO_UDP:
1108 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1109 			break;
1110 		default:
1111 			tun->dev->stats.rx_frame_errors++;
1112 			kfree_skb(skb);
1113 			return -EINVAL;
1114 		}
1115 
1116 		if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
1117 			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1118 
1119 		skb_shinfo(skb)->gso_size = gso.gso_size;
1120 		if (skb_shinfo(skb)->gso_size == 0) {
1121 			tun->dev->stats.rx_frame_errors++;
1122 			kfree_skb(skb);
1123 			return -EINVAL;
1124 		}
1125 
1126 		/* Header must be checked, and gso_segs computed. */
1127 		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1128 		skb_shinfo(skb)->gso_segs = 0;
1129 	}
1130 
1131 	/* copy skb_ubuf_info for callback when skb has no error */
1132 	if (zerocopy) {
1133 		skb_shinfo(skb)->destructor_arg = msg_control;
1134 		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1135 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1136 	}
1137 
1138 	skb_reset_network_header(skb);
1139 	skb_probe_transport_header(skb, 0);
1140 
1141 	rxhash = skb_get_rxhash(skb);
1142 	netif_rx_ni(skb);
1143 
1144 	tun->dev->stats.rx_packets++;
1145 	tun->dev->stats.rx_bytes += len;
1146 
1147 	tun_flow_update(tun, rxhash, tfile);
1148 	return total_len;
1149 }
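
/* The layout this function expects from userspace, sketched for a tap
 * opened with IFF_VNET_HDR (with IFF_NO_PI the leading struct tun_pi is
 * omitted; tun->vnet_hdr_sz may be larger than the virtio header):
 *
 *	struct {
 *		struct tun_pi pi;		// absent with IFF_NO_PI
 *		struct virtio_net_hdr gso;	// present with IFF_VNET_HDR
 *		unsigned char frame[1514];	// Ethernet frame for taps
 *	} pkt;
 *
 *	write(fd, &pkt, sizeof(pkt.pi) + sizeof(pkt.gso) + frame_len);
 */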
1150 
1151 static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
1152 			      unsigned long count, loff_t pos)
1153 {
1154 	struct file *file = iocb->ki_filp;
1155 	struct tun_struct *tun = tun_get(file);
1156 	struct tun_file *tfile = file->private_data;
1157 	ssize_t result;
1158 
1159 	if (!tun)
1160 		return -EBADFD;
1161 
1162 	tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
1163 
1164 	result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count),
1165 			      count, file->f_flags & O_NONBLOCK);
1166 
1167 	tun_put(tun);
1168 	return result;
1169 }
1170 
1171 /* Put packet to the user space buffer */
1172 static ssize_t tun_put_user(struct tun_struct *tun,
1173 			    struct tun_file *tfile,
1174 			    struct sk_buff *skb,
1175 			    const struct iovec *iv, int len)
1176 {
1177 	struct tun_pi pi = { 0, skb->protocol };
1178 	ssize_t total = 0;
1179 	int vlan_offset = 0;
1180 
1181 	if (!(tun->flags & TUN_NO_PI)) {
1182 		if ((len -= sizeof(pi)) < 0)
1183 			return -EINVAL;
1184 
1185 		if (len < skb->len) {
1186 			/* Packet will be stripped */
1187 			pi.flags |= TUN_PKT_STRIP;
1188 		}
1189 
1190 		if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi)))
1191 			return -EFAULT;
1192 		total += sizeof(pi);
1193 	}
1194 
1195 	if (tun->flags & TUN_VNET_HDR) {
1196 		struct virtio_net_hdr gso = { 0 }; /* no info leak */
1197 		if ((len -= tun->vnet_hdr_sz) < 0)
1198 			return -EINVAL;
1199 
1200 		if (skb_is_gso(skb)) {
1201 			struct skb_shared_info *sinfo = skb_shinfo(skb);
1202 
1203 			/* This is a hint as to how much should be linear. */
1204 			gso.hdr_len = skb_headlen(skb);
1205 			gso.gso_size = sinfo->gso_size;
1206 			if (sinfo->gso_type & SKB_GSO_TCPV4)
1207 				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1208 			else if (sinfo->gso_type & SKB_GSO_TCPV6)
1209 				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1210 			else if (sinfo->gso_type & SKB_GSO_UDP)
1211 				gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1212 			else {
1213 				pr_err("unexpected GSO type: "
1214 				       "0x%x, gso_size %d, hdr_len %d\n",
1215 				       sinfo->gso_type, gso.gso_size,
1216 				       gso.hdr_len);
1217 				print_hex_dump(KERN_ERR, "tun: ",
1218 					       DUMP_PREFIX_NONE,
1219 					       16, 1, skb->head,
1220 					       min((int)gso.hdr_len, 64), true);
1221 				WARN_ON_ONCE(1);
1222 				return -EINVAL;
1223 			}
1224 			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
1225 				gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1226 		} else
1227 			gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;
1228 
1229 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1230 			gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1231 			gso.csum_start = skb_checksum_start_offset(skb);
1232 			gso.csum_offset = skb->csum_offset;
1233 		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1234 			gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
1235 		} /* else everything is zero */
1236 
1237 		if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
1238 					       sizeof(gso))))
1239 			return -EFAULT;
1240 		total += tun->vnet_hdr_sz;
1241 	}
1242 
1243 	if (!vlan_tx_tag_present(skb)) {
1244 		len = min_t(int, skb->len, len);
1245 	} else {
1246 		int copy, ret;
1247 		struct {
1248 			__be16 h_vlan_proto;
1249 			__be16 h_vlan_TCI;
1250 		} veth;
1251 
1252 		veth.h_vlan_proto = skb->vlan_proto;
1253 		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
1254 
1255 		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
1256 		len = min_t(int, skb->len + VLAN_HLEN, len);
1257 
1258 		copy = min_t(int, vlan_offset, len);
1259 		ret = skb_copy_datagram_const_iovec(skb, 0, iv, total, copy);
1260 		len -= copy;
1261 		total += copy;
1262 		if (ret || !len)
1263 			goto done;
1264 
1265 		copy = min_t(int, sizeof(veth), len);
1266 		ret = memcpy_toiovecend(iv, (void *)&veth, total, copy);
1267 		len -= copy;
1268 		total += copy;
1269 		if (ret || !len)
1270 			goto done;
1271 	}
1272 
1273 	skb_copy_datagram_const_iovec(skb, vlan_offset, iv, total, len);
1274 	total += len;
1275 
1276 done:
1277 	tun->dev->stats.tx_packets++;
1278 	tun->dev->stats.tx_bytes += len;
1279 
1280 	return total;
1281 }
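
/* The read side mirrors tun_get_user(): a read() returns [struct tun_pi]
 * [vnet hdr][frame], and TUN_PKT_STRIP is set in pi.flags when the supplied
 * buffer was too small and the frame had to be truncated.
 */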
1282 
1283 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1284 			   struct kiocb *iocb, const struct iovec *iv,
1285 			   ssize_t len, int noblock)
1286 {
1287 	DECLARE_WAITQUEUE(wait, current);
1288 	struct sk_buff *skb;
1289 	ssize_t ret = 0;
1290 
1291 	tun_debug(KERN_INFO, tun, "tun_do_read\n");
1292 
1293 	if (unlikely(!noblock))
1294 		add_wait_queue(&tfile->wq.wait, &wait);
1295 	while (len) {
1296 		if (unlikely(!noblock))
1297 			current->state = TASK_INTERRUPTIBLE;
1298 
1299 		/* Read frames from the queue */
1300 		if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
1301 			if (noblock) {
1302 				ret = -EAGAIN;
1303 				break;
1304 			}
1305 			if (signal_pending(current)) {
1306 				ret = -ERESTARTSYS;
1307 				break;
1308 			}
1309 			if (tun->dev->reg_state != NETREG_REGISTERED) {
1310 				ret = -EIO;
1311 				break;
1312 			}
1313 
1314 			/* Nothing to read, let's sleep */
1315 			schedule();
1316 			continue;
1317 		}
1318 
1319 		ret = tun_put_user(tun, tfile, skb, iv, len);
1320 		kfree_skb(skb);
1321 		break;
1322 	}
1323 
1324 	if (unlikely(!noblock)) {
1325 		current->state = TASK_RUNNING;
1326 		remove_wait_queue(&tfile->wq.wait, &wait);
1327 	}
1328 
1329 	return ret;
1330 }
1331 
1332 static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
1333 			    unsigned long count, loff_t pos)
1334 {
1335 	struct file *file = iocb->ki_filp;
1336 	struct tun_file *tfile = file->private_data;
1337 	struct tun_struct *tun = __tun_get(tfile);
1338 	ssize_t len, ret;
1339 
1340 	if (!tun)
1341 		return -EBADFD;
1342 	len = iov_length(iv, count);
1343 	if (len < 0) {
1344 		ret = -EINVAL;
1345 		goto out;
1346 	}
1347 
1348 	ret = tun_do_read(tun, tfile, iocb, iv, len,
1349 			  file->f_flags & O_NONBLOCK);
1350 	ret = min_t(ssize_t, ret, len);
1351 out:
1352 	tun_put(tun);
1353 	return ret;
1354 }
1355 
1356 static void tun_free_netdev(struct net_device *dev)
1357 {
1358 	struct tun_struct *tun = netdev_priv(dev);
1359 
1360 	BUG_ON(!(list_empty(&tun->disabled)));
1361 	tun_flow_uninit(tun);
1362 	security_tun_dev_free_security(tun->security);
1363 	free_netdev(dev);
1364 }
1365 
1366 static void tun_setup(struct net_device *dev)
1367 {
1368 	struct tun_struct *tun = netdev_priv(dev);
1369 
1370 	tun->owner = INVALID_UID;
1371 	tun->group = INVALID_GID;
1372 
1373 	dev->ethtool_ops = &tun_ethtool_ops;
1374 	dev->destructor = tun_free_netdev;
1375 }
1376 
1377 /* Trivial set of netlink ops to allow deleting tun or tap
1378  * device with netlink.
1379  */
1380 static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
1381 {
1382 	return -EINVAL;
1383 }
1384 
1385 static struct rtnl_link_ops tun_link_ops __read_mostly = {
1386 	.kind		= DRV_NAME,
1387 	.priv_size	= sizeof(struct tun_struct),
1388 	.setup		= tun_setup,
1389 	.validate	= tun_validate,
1390 };
1391 
1392 static void tun_sock_write_space(struct sock *sk)
1393 {
1394 	struct tun_file *tfile;
1395 	wait_queue_head_t *wqueue;
1396 
1397 	if (!sock_writeable(sk))
1398 		return;
1399 
1400 	if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
1401 		return;
1402 
1403 	wqueue = sk_sleep(sk);
1404 	if (wqueue && waitqueue_active(wqueue))
1405 		wake_up_interruptible_sync_poll(wqueue, POLLOUT |
1406 						POLLWRNORM | POLLWRBAND);
1407 
1408 	tfile = container_of(sk, struct tun_file, sk);
1409 	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
1410 }
1411 
1412 static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
1413 		       struct msghdr *m, size_t total_len)
1414 {
1415 	int ret;
1416 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1417 	struct tun_struct *tun = __tun_get(tfile);
1418 
1419 	if (!tun)
1420 		return -EBADFD;
1421 	ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len,
1422 			   m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
1423 	tun_put(tun);
1424 	return ret;
1425 }
1426 
1427 static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
1428 		       struct msghdr *m, size_t total_len,
1429 		       int flags)
1430 {
1431 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1432 	struct tun_struct *tun = __tun_get(tfile);
1433 	int ret;
1434 
1435 	if (!tun)
1436 		return -EBADFD;
1437 
1438 	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
1439 		ret = -EINVAL;
1440 		goto out;
1441 	}
1442 	if (flags & MSG_ERRQUEUE) {
1443 		ret = sock_recv_errqueue(sock->sk, m, total_len,
1444 					 SOL_PACKET, TUN_TX_TIMESTAMP);
1445 		goto out;
1446 	}
1447 	ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
1448 			  flags & MSG_DONTWAIT);
1449 	if (ret > total_len) {
1450 		m->msg_flags |= MSG_TRUNC;
1451 		ret = flags & MSG_TRUNC ? ret : total_len;
1452 	}
1453 out:
1454 	tun_put(tun);
1455 	return ret;
1456 }
1457 
1458 static int tun_release(struct socket *sock)
1459 {
1460 	if (sock->sk)
1461 		sock_put(sock->sk);
1462 	return 0;
1463 }
1464 
1465 /* Ops structure to mimic raw sockets with tun */
1466 static const struct proto_ops tun_socket_ops = {
1467 	.sendmsg = tun_sendmsg,
1468 	.recvmsg = tun_recvmsg,
1469 	.release = tun_release,
1470 };
1471 
1472 static struct proto tun_proto = {
1473 	.name		= "tun",
1474 	.owner		= THIS_MODULE,
1475 	.obj_size	= sizeof(struct tun_file),
1476 };
1477 
1478 static int tun_flags(struct tun_struct *tun)
1479 {
1480 	int flags = 0;
1481 
1482 	if (tun->flags & TUN_TUN_DEV)
1483 		flags |= IFF_TUN;
1484 	else
1485 		flags |= IFF_TAP;
1486 
1487 	if (tun->flags & TUN_NO_PI)
1488 		flags |= IFF_NO_PI;
1489 
1490 	/* This flag has no real effect.  We track the value for backwards
1491 	 * compatibility.
1492 	 */
1493 	if (tun->flags & TUN_ONE_QUEUE)
1494 		flags |= IFF_ONE_QUEUE;
1495 
1496 	if (tun->flags & TUN_VNET_HDR)
1497 		flags |= IFF_VNET_HDR;
1498 
1499 	if (tun->flags & TUN_TAP_MQ)
1500 		flags |= IFF_MULTI_QUEUE;
1501 
1502 	if (tun->flags & TUN_PERSIST)
1503 		flags |= IFF_PERSIST;
1504 
1505 	return flags;
1506 }
1507 
1508 static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
1509 			      char *buf)
1510 {
1511 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1512 	return sprintf(buf, "0x%x\n", tun_flags(tun));
1513 }
1514 
1515 static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
1516 			      char *buf)
1517 {
1518 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1519 	return uid_valid(tun->owner)?
1520 		sprintf(buf, "%u\n",
1521 			from_kuid_munged(current_user_ns(), tun->owner)):
1522 		sprintf(buf, "-1\n");
1523 }
1524 
1525 static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
1526 			      char *buf)
1527 {
1528 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1529 	return gid_valid(tun->group) ?
1530 		sprintf(buf, "%u\n",
1531 			from_kgid_munged(current_user_ns(), tun->group)):
1532 		sprintf(buf, "-1\n");
1533 }
1534 
1535 static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
1536 static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
1537 static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
1538 
1539 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1540 {
1541 	struct tun_struct *tun;
1542 	struct tun_file *tfile = file->private_data;
1543 	struct net_device *dev;
1544 	int err;
1545 
1546 	if (tfile->detached)
1547 		return -EINVAL;
1548 
1549 	dev = __dev_get_by_name(net, ifr->ifr_name);
1550 	if (dev) {
1551 		if (ifr->ifr_flags & IFF_TUN_EXCL)
1552 			return -EBUSY;
1553 		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
1554 			tun = netdev_priv(dev);
1555 		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
1556 			tun = netdev_priv(dev);
1557 		else
1558 			return -EINVAL;
1559 
1560 		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
1561 		    !!(tun->flags & TUN_TAP_MQ))
1562 			return -EINVAL;
1563 
1564 		if (tun_not_capable(tun))
1565 			return -EPERM;
1566 		err = security_tun_dev_open(tun->security);
1567 		if (err < 0)
1568 			return err;
1569 
1570 		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
1571 		if (err < 0)
1572 			return err;
1573 
1574 		if (tun->flags & TUN_TAP_MQ &&
1575 		    (tun->numqueues + tun->numdisabled > 1)) {
1576 			/* One or more queues have already been attached, no need
1577 			 * to initialize the device again.
1578 			 */
1579 			return 0;
1580 		}
1581 	}
1582 	else {
1583 		char *name;
1584 		unsigned long flags = 0;
1585 		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
1586 			     MAX_TAP_QUEUES : 1;
1587 
1588 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1589 			return -EPERM;
1590 		err = security_tun_dev_create();
1591 		if (err < 0)
1592 			return err;
1593 
1594 		/* Set dev type */
1595 		if (ifr->ifr_flags & IFF_TUN) {
1596 			/* TUN device */
1597 			flags |= TUN_TUN_DEV;
1598 			name = "tun%d";
1599 		} else if (ifr->ifr_flags & IFF_TAP) {
1600 			/* TAP device */
1601 			flags |= TUN_TAP_DEV;
1602 			name = "tap%d";
1603 		} else
1604 			return -EINVAL;
1605 
1606 		if (*ifr->ifr_name)
1607 			name = ifr->ifr_name;
1608 
1609 		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
1610 				       tun_setup, queues, queues);
1611 
1612 		if (!dev)
1613 			return -ENOMEM;
1614 
1615 		dev_net_set(dev, net);
1616 		dev->rtnl_link_ops = &tun_link_ops;
1617 		dev->ifindex = tfile->ifindex;
1618 
1619 		tun = netdev_priv(dev);
1620 		tun->dev = dev;
1621 		tun->flags = flags;
1622 		tun->txflt.count = 0;
1623 		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
1624 
1625 		tun->filter_attached = false;
1626 		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
1627 
1628 		spin_lock_init(&tun->lock);
1629 
1630 		err = security_tun_dev_alloc_security(&tun->security);
1631 		if (err < 0)
1632 			goto err_free_dev;
1633 
1634 		tun_net_init(dev);
1635 		tun_flow_init(tun);
1636 
1637 		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
1638 				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
1639 				   NETIF_F_HW_VLAN_STAG_TX;
1640 		dev->features = dev->hw_features;
1641 		dev->vlan_features = dev->features;
1642 
1643 		INIT_LIST_HEAD(&tun->disabled);
1644 		err = tun_attach(tun, file, false);
1645 		if (err < 0)
1646 			goto err_free_flow;
1647 
1648 		err = register_netdevice(tun->dev);
1649 		if (err < 0)
1650 			goto err_detach;
1651 
1652 		if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
1653 		    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
1654 		    device_create_file(&tun->dev->dev, &dev_attr_group))
1655 			pr_err("Failed to create tun sysfs files\n");
1656 	}
1657 
1658 	netif_carrier_on(tun->dev);
1659 
1660 	tun_debug(KERN_INFO, tun, "tun_set_iff\n");
1661 
1662 	if (ifr->ifr_flags & IFF_NO_PI)
1663 		tun->flags |= TUN_NO_PI;
1664 	else
1665 		tun->flags &= ~TUN_NO_PI;
1666 
1667 	/* This flag has no real effect.  We track the value for backwards
1668 	 * compatibility.
1669 	 */
1670 	if (ifr->ifr_flags & IFF_ONE_QUEUE)
1671 		tun->flags |= TUN_ONE_QUEUE;
1672 	else
1673 		tun->flags &= ~TUN_ONE_QUEUE;
1674 
1675 	if (ifr->ifr_flags & IFF_VNET_HDR)
1676 		tun->flags |= TUN_VNET_HDR;
1677 	else
1678 		tun->flags &= ~TUN_VNET_HDR;
1679 
1680 	if (ifr->ifr_flags & IFF_MULTI_QUEUE)
1681 		tun->flags |= TUN_TAP_MQ;
1682 	else
1683 		tun->flags &= ~TUN_TAP_MQ;
1684 
1685 	/* Make sure persistent devices do not get stuck in
1686 	 * xoff state.
1687 	 */
1688 	if (netif_running(tun->dev))
1689 		netif_tx_wake_all_queues(tun->dev);
1690 
1691 	strcpy(ifr->ifr_name, tun->dev->name);
1692 	return 0;
1693 
1694 err_detach:
1695 	tun_detach_all(dev);
1696 err_free_flow:
1697 	tun_flow_uninit(tun);
1698 	security_tun_dev_free_security(tun->security);
1699 err_free_dev:
1700 	free_netdev(dev);
1701 	return err;
1702 }
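
/* A minimal userspace sketch of creating a device through this path (for
 * illustration; "tap0" is an arbitrary name, error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	int fd = open("/dev/net/tun", O_RDWR);
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
 *	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ);
 *	ioctl(fd, TUNSETIFF, &ifr);	// attaches fd as the first queue
 *
 * Opening /dev/net/tun again and repeating TUNSETIFF with the same name and
 * flags attaches further queues to the same device.
 */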
1703 
1704 static void tun_get_iff(struct net *net, struct tun_struct *tun,
1705 		       struct ifreq *ifr)
1706 {
1707 	tun_debug(KERN_INFO, tun, "tun_get_iff\n");
1708 
1709 	strcpy(ifr->ifr_name, tun->dev->name);
1710 
1711 	ifr->ifr_flags = tun_flags(tun);
1712 
1713 }
1714 
1715 /* This is like a cut-down ethtool ops, except done via tun fd so no
1716  * privs required. */
1717 static int set_offload(struct tun_struct *tun, unsigned long arg)
1718 {
1719 	netdev_features_t features = 0;
1720 
1721 	if (arg & TUN_F_CSUM) {
1722 		features |= NETIF_F_HW_CSUM;
1723 		arg &= ~TUN_F_CSUM;
1724 
1725 		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
1726 			if (arg & TUN_F_TSO_ECN) {
1727 				features |= NETIF_F_TSO_ECN;
1728 				arg &= ~TUN_F_TSO_ECN;
1729 			}
1730 			if (arg & TUN_F_TSO4)
1731 				features |= NETIF_F_TSO;
1732 			if (arg & TUN_F_TSO6)
1733 				features |= NETIF_F_TSO6;
1734 			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1735 		}
1736 
1737 		if (arg & TUN_F_UFO) {
1738 			features |= NETIF_F_UFO;
1739 			arg &= ~TUN_F_UFO;
1740 		}
1741 	}
1742 
1743 	/* This gives the user a way to test for new features in the future
1744 	 * by trying to set them. */
1745 	if (arg)
1746 		return -EINVAL;
1747 
1748 	tun->set_features = features;
1749 	netdev_update_features(tun->dev);
1750 
1751 	return 0;
1752 }
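
/* A virtio/vhost backend would negotiate offloads like this (a sketch,
 * error handling omitted):
 *
 *	ioctl(fd, TUNSETOFFLOAD,
 *	      TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_TSO_ECN);
 *
 * An unknown bit makes the call fail with -EINVAL, which is the feature
 * probe described in the comment above.
 */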
1753 
1754 static void tun_detach_filter(struct tun_struct *tun, int n)
1755 {
1756 	int i;
1757 	struct tun_file *tfile;
1758 
1759 	for (i = 0; i < n; i++) {
1760 		tfile = rtnl_dereference(tun->tfiles[i]);
1761 		sk_detach_filter(tfile->socket.sk);
1762 	}
1763 
1764 	tun->filter_attached = false;
1765 }
1766 
1767 static int tun_attach_filter(struct tun_struct *tun)
1768 {
1769 	int i, ret = 0;
1770 	struct tun_file *tfile;
1771 
1772 	for (i = 0; i < tun->numqueues; i++) {
1773 		tfile = rtnl_dereference(tun->tfiles[i]);
1774 		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
1775 		if (ret) {
1776 			tun_detach_filter(tun, i);
1777 			return ret;
1778 		}
1779 	}
1780 
1781 	tun->filter_attached = true;
1782 	return ret;
1783 }
1784 
1785 static void tun_set_sndbuf(struct tun_struct *tun)
1786 {
1787 	struct tun_file *tfile;
1788 	int i;
1789 
1790 	for (i = 0; i < tun->numqueues; i++) {
1791 		tfile = rtnl_dereference(tun->tfiles[i]);
1792 		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
1793 	}
1794 }
1795 
1796 static int tun_set_queue(struct file *file, struct ifreq *ifr)
1797 {
1798 	struct tun_file *tfile = file->private_data;
1799 	struct tun_struct *tun;
1800 	int ret = 0;
1801 
1802 	rtnl_lock();
1803 
1804 	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
1805 		tun = tfile->detached;
1806 		if (!tun) {
1807 			ret = -EINVAL;
1808 			goto unlock;
1809 		}
1810 		ret = security_tun_dev_attach_queue(tun->security);
1811 		if (ret < 0)
1812 			goto unlock;
1813 		ret = tun_attach(tun, file, false);
1814 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
1815 		tun = rtnl_dereference(tfile->tun);
1816 		if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
1817 			ret = -EINVAL;
1818 		else
1819 			__tun_detach(tfile, false);
1820 	} else
1821 		ret = -EINVAL;
1822 
1823 unlock:
1824 	rtnl_unlock();
1825 	return ret;
1826 }
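
/* Queue attach/detach is driven from userspace with TUNSETQUEUE on the
 * queue's own fd (a sketch; the device must have been created with
 * IFF_MULTI_QUEUE):
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;
 *	ioctl(fd, TUNSETQUEUE, &ifr);	// park this queue
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	ioctl(fd, TUNSETQUEUE, &ifr);	// re-enable it later
 */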
1827 
1828 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1829 			    unsigned long arg, int ifreq_len)
1830 {
1831 	struct tun_file *tfile = file->private_data;
1832 	struct tun_struct *tun;
1833 	void __user* argp = (void __user*)arg;
1834 	struct ifreq ifr;
1835 	kuid_t owner;
1836 	kgid_t group;
1837 	int sndbuf;
1838 	int vnet_hdr_sz;
1839 	unsigned int ifindex;
1840 	int ret;
1841 
1842 	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
1843 		if (copy_from_user(&ifr, argp, ifreq_len))
1844 			return -EFAULT;
1845 	} else {
1846 		memset(&ifr, 0, sizeof(ifr));
1847 	}
1848 	if (cmd == TUNGETFEATURES) {
1849 		/* Currently this just means: "what IFF flags are valid?".
1850 		 * This is needed because we never checked for invalid flags on
1851 		 * TUNSETIFF. */
1852 		return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
1853 				IFF_VNET_HDR | IFF_MULTI_QUEUE,
1854 				(unsigned int __user*)argp);
1855 	} else if (cmd == TUNSETQUEUE)
1856 		return tun_set_queue(file, &ifr);
1857 
1858 	ret = 0;
1859 	rtnl_lock();
1860 
1861 	tun = __tun_get(tfile);
1862 	if (cmd == TUNSETIFF && !tun) {
1863 		ifr.ifr_name[IFNAMSIZ-1] = '\0';
1864 
1865 		ret = tun_set_iff(tfile->net, file, &ifr);
1866 
1867 		if (ret)
1868 			goto unlock;
1869 
1870 		if (copy_to_user(argp, &ifr, ifreq_len))
1871 			ret = -EFAULT;
1872 		goto unlock;
1873 	}
1874 	if (cmd == TUNSETIFINDEX) {
1875 		ret = -EPERM;
1876 		if (tun)
1877 			goto unlock;
1878 
1879 		ret = -EFAULT;
1880 		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
1881 			goto unlock;
1882 
1883 		ret = 0;
1884 		tfile->ifindex = ifindex;
1885 		goto unlock;
1886 	}
1887 
1888 	ret = -EBADFD;
1889 	if (!tun)
1890 		goto unlock;
1891 
1892 	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
1893 
1894 	ret = 0;
1895 	switch (cmd) {
1896 	case TUNGETIFF:
1897 		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
1898 
1899 		if (tfile->detached)
1900 			ifr.ifr_flags |= IFF_DETACH_QUEUE;
1901 		if (!tfile->socket.sk->sk_filter)
1902 			ifr.ifr_flags |= IFF_NOFILTER;
1903 
1904 		if (copy_to_user(argp, &ifr, ifreq_len))
1905 			ret = -EFAULT;
1906 		break;
1907 
1908 	case TUNSETNOCSUM:
1909 		/* Disable/Enable checksum */
1910 
1911 		/* [unimplemented] */
1912 		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
1913 			  arg ? "disabled" : "enabled");
1914 		break;
1915 
1916 	case TUNSETPERSIST:
1917 		/* Disable/Enable persist mode. Keep an extra reference to the
1918 		 * module to prevent it from being unloaded.
1919 		 */
1920 		if (arg && !(tun->flags & TUN_PERSIST)) {
1921 			tun->flags |= TUN_PERSIST;
1922 			__module_get(THIS_MODULE);
1923 		}
1924 		if (!arg && (tun->flags & TUN_PERSIST)) {
1925 			tun->flags &= ~TUN_PERSIST;
1926 			module_put(THIS_MODULE);
1927 		}
1928 
1929 		tun_debug(KERN_INFO, tun, "persist %s\n",
1930 			  arg ? "enabled" : "disabled");
1931 		break;
1932 
1933 	case TUNSETOWNER:
1934 		/* Set owner of the device */
1935 		owner = make_kuid(current_user_ns(), arg);
1936 		if (!uid_valid(owner)) {
1937 			ret = -EINVAL;
1938 			break;
1939 		}
1940 		tun->owner = owner;
1941 		tun_debug(KERN_INFO, tun, "owner set to %u\n",
1942 			  from_kuid(&init_user_ns, tun->owner));
1943 		break;
1944 
1945 	case TUNSETGROUP:
1946 		/* Set group of the device */
1947 		group = make_kgid(current_user_ns(), arg);
1948 		if (!gid_valid(group)) {
1949 			ret = -EINVAL;
1950 			break;
1951 		}
1952 		tun->group = group;
1953 		tun_debug(KERN_INFO, tun, "group set to %u\n",
1954 			  from_kgid(&init_user_ns, tun->group));
1955 		break;
1956 
1957 	case TUNSETLINK:
1958 		/* Only allow setting the type when the interface is down */
1959 		if (tun->dev->flags & IFF_UP) {
1960 			tun_debug(KERN_INFO, tun,
1961 				  "Linktype set failed because interface is up\n");
1962 			ret = -EBUSY;
1963 		} else {
1964 			tun->dev->type = (int) arg;
1965 			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
1966 				  tun->dev->type);
1967 			ret = 0;
1968 		}
1969 		break;
1970 
1971 #ifdef TUN_DEBUG
1972 	case TUNSETDEBUG:
1973 		tun->debug = arg;
1974 		break;
1975 #endif
1976 	case TUNSETOFFLOAD:
1977 		ret = set_offload(tun, arg);
1978 		break;
1979 
1980 	case TUNSETTXFILTER:
1981 		/* Can be set only for TAPs */
1982 		ret = -EINVAL;
1983 		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
1984 			break;
1985 		ret = update_filter(&tun->txflt, (void __user *)arg);
1986 		break;
1987 
1988 	case SIOCGIFHWADDR:
1989 		/* Get hw address */
1990 		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
1991 		ifr.ifr_hwaddr.sa_family = tun->dev->type;
1992 		if (copy_to_user(argp, &ifr, ifreq_len))
1993 			ret = -EFAULT;
1994 		break;
1995 
1996 	case SIOCSIFHWADDR:
1997 		/* Set hw address */
1998 		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
1999 			  ifr.ifr_hwaddr.sa_data);
2000 
2001 		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
2002 		break;
2003 
2004 	case TUNGETSNDBUF:
2005 		sndbuf = tfile->socket.sk->sk_sndbuf;
2006 		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
2007 			ret = -EFAULT;
2008 		break;
2009 
2010 	case TUNSETSNDBUF:
2011 		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
2012 			ret = -EFAULT;
2013 			break;
2014 		}
2015 
2016 		tun->sndbuf = sndbuf;
2017 		tun_set_sndbuf(tun);
2018 		break;
2019 
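	/* Userspace sketch (illustrative, not part of this driver): both
	 * sndbuf ioctls pass a plain int by pointer, matching the
	 * sizeof(sndbuf) copies above.
	 *
	 *	int sndbuf;
	 *
	 *	if (ioctl(fd, TUNGETSNDBUF, &sndbuf) < 0)
	 *		perror("TUNGETSNDBUF");
	 *	sndbuf = 1 << 20;
	 *	if (ioctl(fd, TUNSETSNDBUF, &sndbuf) < 0)
	 *		perror("TUNSETSNDBUF");
	 */
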
2020 	case TUNGETVNETHDRSZ:
2021 		vnet_hdr_sz = tun->vnet_hdr_sz;
2022 		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
2023 			ret = -EFAULT;
2024 		break;
2025 
2026 	case TUNSETVNETHDRSZ:
2027 		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
2028 			ret = -EFAULT;
2029 			break;
2030 		}
2031 		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
2032 			ret = -EINVAL;
2033 			break;
2034 		}
2035 
2036 		tun->vnet_hdr_sz = vnet_hdr_sz;
2037 		break;
2038 
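	/* Userspace sketch (illustrative): the vnet header size is likewise
	 * a plain int.  A virtio-style consumer might grow it to carry the
	 * mergeable-rxbuf header:
	 *
	 *	int hdrsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	 *
	 *	if (ioctl(fd, TUNSETVNETHDRSZ, &hdrsz) < 0)
	 *		perror("TUNSETVNETHDRSZ");
	 */
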
2039 	case TUNATTACHFILTER:
2040 		/* Can be set only for TAPs */
2041 		ret = -EINVAL;
2042 		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
2043 			break;
2044 		ret = -EFAULT;
2045 		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
2046 			break;
2047 
2048 		ret = tun_attach_filter(tun);
2049 		break;
2050 
2051 	case TUNDETACHFILTER:
2052 		/* Can be set only for TAPs */
2053 		ret = -EINVAL;
2054 		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
2055 			break;
2056 		ret = 0;
2057 		tun_detach_filter(tun, tun->numqueues);
2058 		break;
2059 
2060 	case TUNGETFILTER:
2061 		ret = -EINVAL;
2062 		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
2063 			break;
2064 		ret = -EFAULT;
2065 		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
2066 			break;
2067 		ret = 0;
2068 		break;
2069 
2070 	default:
2071 		ret = -EINVAL;
2072 		break;
2073 	}
2074 
2075 unlock:
2076 	rtnl_unlock();
2077 	if (tun)
2078 		tun_put(tun);
2079 	return ret;
2080 }
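
/* Userspace sketch of the TUNSETIFF path handled above, following the
 * canonical example in Documentation/networking/tuntap.txt; error
 * handling is abbreviated:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	int tun_alloc(char *name)
 *	{
 *		struct ifreq ifr;
 *		int fd, err;
 *
 *		fd = open("/dev/net/tun", O_RDWR);
 *		if (fd < 0)
 *			return fd;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
 *		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
 *
 *		err = ioctl(fd, TUNSETIFF, &ifr);
 *		if (err < 0) {
 *			close(fd);
 *			return err;
 *		}
 *		strcpy(name, ifr.ifr_name);
 *		return fd;
 *	}
 */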
2081 
2082 static long tun_chr_ioctl(struct file *file,
2083 			  unsigned int cmd, unsigned long arg)
2084 {
2085 	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
2086 }
2087 
2088 #ifdef CONFIG_COMPAT
2089 static long tun_chr_compat_ioctl(struct file *file,
2090 			 unsigned int cmd, unsigned long arg)
2091 {
2092 	switch (cmd) {
2093 	case TUNSETIFF:
2094 	case TUNGETIFF:
2095 	case TUNSETTXFILTER:
2096 	case TUNGETSNDBUF:
2097 	case TUNSETSNDBUF:
2098 	case SIOCGIFHWADDR:
2099 	case SIOCSIFHWADDR:
2100 		arg = (unsigned long)compat_ptr(arg);
2101 		break;
2102 	default:
2103 		arg = (compat_ulong_t)arg;
2104 		break;
2105 	}
2106 
2107 	/*
2108 	 * compat_ifreq is shorter than ifreq, so we must not access beyond
2109 	 * the end of that structure. All fields that are used in this
2110 	 * driver are compatible, though, so we don't need to convert
2111 	 * the contents.
2112 	 */
2113 	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
2114 }
2115 #endif /* CONFIG_COMPAT */
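
/* Note: on a 64-bit kernel servicing a 32-bit task, struct compat_ifreq is
 * smaller than struct ifreq (32 vs. 40 bytes on common ABIs) because the
 * ifr_ifru union embeds pointer-sized members.  Passing the compat size into
 * __tun_chr_ioctl() is what keeps the copy_to_user() calls above from
 * writing past the end of the caller's buffer.
 */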
2116 
2117 static int tun_chr_fasync(int fd, struct file *file, int on)
2118 {
2119 	struct tun_file *tfile = file->private_data;
2120 	int ret;
2121 
2122 	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
2123 		goto out;
2124 
2125 	if (on) {
2126 		ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
2127 		if (ret)
2128 			goto out;
2129 		tfile->flags |= TUN_FASYNC;
2130 	} else
2131 		tfile->flags &= ~TUN_FASYNC;
2132 	ret = 0;
2133 out:
2134 	return ret;
2135 }
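
/* Userspace sketch (illustrative) of arming the fasync path above: claim
 * ownership of the descriptor and set O_ASYNC so SIGIO is raised when the
 * queue becomes readable or writable.
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 */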
2136 
2137 static int tun_chr_open(struct inode *inode, struct file * file)
2138 static int tun_chr_open(struct inode *inode, struct file *file)
2139 	struct tun_file *tfile;
2140 
2141 	DBG1(KERN_INFO, "tunX: tun_chr_open\n");
2142 
2143 	tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
2144 					    &tun_proto);
2145 	if (!tfile)
2146 		return -ENOMEM;
2147 	rcu_assign_pointer(tfile->tun, NULL);
2148 	tfile->net = get_net(current->nsproxy->net_ns);
2149 	tfile->flags = 0;
2150 	tfile->ifindex = 0;
2151 
2152 	rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
2153 	init_waitqueue_head(&tfile->wq.wait);
2154 
2155 	tfile->socket.file = file;
2156 	tfile->socket.ops = &tun_socket_ops;
2157 
2158 	sock_init_data(&tfile->socket, &tfile->sk);
2159 	sk_change_net(&tfile->sk, tfile->net);
2160 
2161 	tfile->sk.sk_write_space = tun_sock_write_space;
2162 	tfile->sk.sk_sndbuf = INT_MAX;
2163 
2164 	file->private_data = tfile;
2165 	set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
2166 	INIT_LIST_HEAD(&tfile->next);
2167 
2168 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
2169 
2170 	return 0;
2171 }
2172 
2173 static int tun_chr_close(struct inode *inode, struct file *file)
2174 {
2175 	struct tun_file *tfile = file->private_data;
2176 	struct net *net = tfile->net;
2177 
2178 	tun_detach(tfile, true);
2179 	put_net(net);
2180 
2181 	return 0;
2182 }
2183 
2184 static const struct file_operations tun_fops = {
2185 	.owner	= THIS_MODULE,
2186 	.llseek = no_llseek,
2187 	.read  = do_sync_read,
2188 	.aio_read  = tun_chr_aio_read,
2189 	.write = do_sync_write,
2190 	.aio_write = tun_chr_aio_write,
2191 	.poll	= tun_chr_poll,
2192 	.unlocked_ioctl	= tun_chr_ioctl,
2193 #ifdef CONFIG_COMPAT
2194 	.compat_ioctl = tun_chr_compat_ioctl,
2195 #endif
2196 	.open	= tun_chr_open,
2197 	.release = tun_chr_close,
2198 	.fasync = tun_chr_fasync,
2199 };
2200 
2201 static struct miscdevice tun_miscdev = {
2202 	.minor = TUN_MINOR,
2203 	.name = "tun",
2204 	.nodename = "net/tun",
2205 	.fops = &tun_fops,
2206 };
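
/* Minor TUN_MINOR (200) on the misc major; the "net/tun" nodename here and
 * the "devname:net/tun" alias at the bottom of this file allow the
 * /dev/net/tun node to exist before the module loads, so that the first
 * open pulls the module in on demand.
 */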
2207 
2208 /* ethtool interface */
2209 
2210 static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2211 {
2212 	cmd->supported		= 0;
2213 	cmd->advertising	= 0;
2214 	ethtool_cmd_speed_set(cmd, SPEED_10);
2215 	cmd->duplex		= DUPLEX_FULL;
2216 	cmd->port		= PORT_TP;
2217 	cmd->phy_address	= 0;
2218 	cmd->transceiver	= XCVR_INTERNAL;
2219 	cmd->autoneg		= AUTONEG_DISABLE;
2220 	cmd->maxtxpkt		= 0;
2221 	cmd->maxrxpkt		= 0;
2222 	return 0;
2223 }
2224 
2225 static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2226 {
2227 	struct tun_struct *tun = netdev_priv(dev);
2228 
2229 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2230 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2231 
2232 	switch (tun->flags & TUN_TYPE_MASK) {
2233 	case TUN_TUN_DEV:
2234 		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
2235 		break;
2236 	case TUN_TAP_DEV:
2237 		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
2238 		break;
2239 	}
2240 }
2241 
2242 static u32 tun_get_msglevel(struct net_device *dev)
2243 {
2244 #ifdef TUN_DEBUG
2245 	struct tun_struct *tun = netdev_priv(dev);
2246 	return tun->debug;
2247 #else
2248 	return -EOPNOTSUPP;
2249 #endif
2250 }
2251 
2252 static void tun_set_msglevel(struct net_device *dev, u32 value)
2253 {
2254 #ifdef TUN_DEBUG
2255 	struct tun_struct *tun = netdev_priv(dev);
2256 	tun->debug = value;
2257 #endif
2258 }
2259 
2260 static const struct ethtool_ops tun_ethtool_ops = {
2261 	.get_settings	= tun_get_settings,
2262 	.get_drvinfo	= tun_get_drvinfo,
2263 	.get_msglevel	= tun_get_msglevel,
2264 	.set_msglevel	= tun_set_msglevel,
2265 	.get_link	= ethtool_op_get_link,
2266 	.get_ts_info	= ethtool_op_get_ts_info,
2267 };
2268 
2269 
2270 static int __init tun_init(void)
2271 {
2272 	int ret = 0;
2273 
2274 	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2275 	pr_info("%s\n", DRV_COPYRIGHT);
2276 
2277 	ret = rtnl_link_register(&tun_link_ops);
2278 	if (ret) {
2279 		pr_err("Can't register link_ops\n");
2280 		goto err_linkops;
2281 	}
2282 
2283 	ret = misc_register(&tun_miscdev);
2284 	if (ret) {
2285 		pr_err("Can't register misc device %d\n", TUN_MINOR);
2286 		goto err_misc;
2287 	}
2288 	return 0;
2289 err_misc:
2290 	rtnl_link_unregister(&tun_link_ops);
2291 err_linkops:
2292 	return ret;
2293 }
2294 
2295 static void tun_cleanup(void)
2296 {
2297 	misc_deregister(&tun_miscdev);
2298 	rtnl_link_unregister(&tun_link_ops);
2299 }
2300 
2301 /* Get the underlying socket object from a tun file.  Returns an ERR_PTR
2302  * unless the file is attached to a device.  The returned object works like
2303  * a packet socket; it can be used for sock_sendmsg/sock_recvmsg.  The caller
2304  * must hold a reference to the file for as long as the socket is in use. */
2305 struct socket *tun_get_socket(struct file *file)
2306 {
2307 	struct tun_file *tfile;
2308 	if (file->f_op != &tun_fops)
2309 		return ERR_PTR(-EINVAL);
2310 	tfile = file->private_data;
2311 	if (!tfile)
2312 		return ERR_PTR(-EBADFD);
2313 	return &tfile->socket;
2314 }
2315 EXPORT_SYMBOL_GPL(tun_get_socket);
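
/* Minimal in-kernel usage sketch (the calling context is hypothetical;
 * vhost-net is the real consumer of this export):
 *
 *	struct file *file = fget(fd);
 *	struct socket *sock;
 *
 *	if (!file)
 *		return -EBADF;
 *	sock = tun_get_socket(file);
 *	if (IS_ERR(sock)) {
 *		fput(file);
 *		return PTR_ERR(sock);
 *	}
 *	... use sock_sendmsg()/sock_recvmsg() on sock, keep the file
 *	reference held for the socket's lifetime, then fput(file).
 */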
2316 
2317 module_init(tun_init);
2318 module_exit(tun_cleanup);
2319 MODULE_DESCRIPTION(DRV_DESCRIPTION);
2320 MODULE_AUTHOR(DRV_COPYRIGHT);
2321 MODULE_LICENSE("GPL");
2322 MODULE_ALIAS_MISCDEV(TUN_MINOR);
2323 MODULE_ALIAS("devname:net/tun");
2324