#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
#include <linux/skb_array.h>

/*
 * A tap queue is the central object of this driver; it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * tap_proto is used to allocate queues through the sock allocation
 * mechanism.
 *
 */
struct tap_queue {
	struct sock sk;
	struct socket sock;
	struct socket_wq wq;
	int vnet_hdr_sz;
	struct macvlan_dev __rcu *vlan;
	struct file *file;
	unsigned int flags;
	u16 queue_index;
	bool enabled;
	struct list_head next;
	struct skb_array skb_array;
};
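
/*
 * Illustrative sketch (not part of the driver): a userspace consumer
 * typically opens the per-interface tap character device and reads one
 * frame per read(), each prefixed by a struct virtio_net_hdr since
 * IFF_VNET_HDR is set by default in tap_open(). The device node name
 * below is an assumption made for the example.
 *
 *	int fd = open("/dev/tap5", O_RDWR);	// hypothetical node name
 *	char buf[sizeof(struct virtio_net_hdr) + 65536];
 *	ssize_t n = read(fd, buf, sizeof(buf));	// one frame per read
 */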

#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)

#define TAP_VNET_LE 0x80000000
#define TAP_VNET_BE 0x40000000

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s = !!(q->flags & TAP_VNET_BE);

	if (put_user(s, sp))
		return -EFAULT;

	return 0;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s;

	if (get_user(s, sp))
		return -EFAULT;

	if (s)
		q->flags |= TAP_VNET_BE;
	else
		q->flags &= ~TAP_VNET_BE;

	return 0;
}
#else
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tap_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_LE ||
		tap_legacy_is_little_endian(q);
}

static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
{
	return __virtio16_to_cpu(tap_is_little_endian(q), val);
}

static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
{
	return __cpu_to_virtio16(tap_is_little_endian(q), val);
}
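
/*
 * Illustrative sketch (not part of the driver): with
 * CONFIG_TUN_VNET_CROSS_LE enabled, userspace serving a legacy
 * big-endian guest from a little-endian host can request big-endian
 * vnet headers; without that option the ioctl returns -EINVAL.
 *
 *	int be = 1;
 *	if (ioctl(fd, TUNSETVNETBE, &be) < 0)	// fd: an open tap queue
 *		perror("TUNSETVNETBE");
 */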

static struct proto tap_proto = {
	.name = "tap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tap_queue),
};

#define TAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
DEFINE_IDR(minor_idr);

#define GOODCOPY_LEN 128

static const struct proto_ops tap_socket_ops;

#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)

static struct macvlan_dev *tap_get_vlan_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The tap_queue and the macvlan_dev are loosely coupled; the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the tap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * tap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */
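
/*
 * Reader-side pattern, as used throughout this file (sketch):
 *
 *	rcu_read_lock();
 *	vlan = rcu_dereference(q->vlan);
 *	if (vlan)
 *		...			// vlan->dev is safe to use here
 *	rcu_read_unlock();
 */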

static int tap_enable_queue(struct net_device *dev, struct file *file,
			    struct tap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
	q->enabled = true;

	vlan->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int tap_set_queue(struct net_device *dev, struct file *file,
			 struct tap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->numqueues == MAX_TAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = vlan->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &vlan->queue_list);

	vlan->numvtaps++;
	vlan->numqueues++;

	return 0;
}

static int tap_disable_queue(struct tap_queue *q)
{
	struct macvlan_dev *vlan;
	struct tap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		int index = q->queue_index;
		BUG_ON(index >= vlan->numvtaps);
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
		q->enabled = false;

		vlan->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed; give up both
 * the reference that the file holds and the one from
 * the macvlan_dev, if that still exists.
 *
 * Holding the RTNL lock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void tap_put_queue(struct tap_queue *q)
{
	struct macvlan_dev *vlan;

	rtnl_lock();
	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		if (q->enabled)
			BUG_ON(tap_disable_queue(q));

		vlan->numqueues--;
		RCU_INIT_POINTER(q->vlan, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all else fails, find the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct tap_queue *tap_get_queue(struct net_device *dev,
				       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct tap_queue *tap = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	if (numvtaps == 1)
		goto single;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
		goto out;
	}

single:
	tap = rcu_dereference(vlan->taps[0]);
out:
	return tap;
}
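
/*
 * Worked example: with numvtaps == 4 and skb_get_hash() returning
 * 0x00c0ffee (12648430), the frame is steered to
 * taps[12648430 % 4] == taps[2].
 */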

/*
 * The net_device is going away; give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
void tap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct tap_queue *q, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->vlan, NULL);
		if (q->enabled)
			vlan->numvtaps--;
		vlan->numqueues--;
		sock_put(&q->sk);
	}
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
	/* guarantee that any future tap_set_queue will fail */
	vlan->numvtaps = MAX_TAP_QUEUES;
}

rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macvlan_dev *vlan;
	struct tap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	vlan = tap_get_vlan_rcu(dev);
	if (!vlan)
		return RX_HANDLER_PASS;

	q = tap_get_queue(dev, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (__skb_array_full(&q->skb_array))
		goto drop;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes.  This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= vlan->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			if (skb_array_produce(&q->skb_array, skb))
				goto drop;
			goto wake_up;
		}

		consume_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			if (skb_array_produce(&q->skb_array, segs)) {
				kfree_skb(segs);
				kfree_skb_list(nskb);
				break;
			}
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 *	  check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_CSUM_MASK) &&
		    skb_checksum_help(skb))
			goto drop;
		if (skb_array_produce(&q->skb_array, skb))
			goto drop;
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	macvlan_count_rx(vlan, 0, 0, 0);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}
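
/*
 * Sketch of how a macvlan-based consumer is expected to hook this
 * handler up (shown for context only; the registration itself lives
 * with the macvtap/ipvtap link code, not in this file). The handler
 * data must be the macvlan_dev, as tap_get_vlan_rcu() assumes:
 *
 *	err = netdev_rx_handler_register(dev, tap_handle_frame, vlan);
 */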

int tap_get_minor(struct macvlan_dev *vlan)
{
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&minor_idr, vlan, 1, TAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
		netdev_err(vlan->dev, "Too many tap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval < 0 ? retval : 0;
}

void tap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&minor_lock);
	if (vlan->minor) {
		idr_remove(&minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&minor_lock);
}

static struct net_device *dev_get_by_tap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&minor_lock);
	return dev;
}

static void tap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void tap_sock_destruct(struct sock *sk)
{
	struct tap_queue *q = container_of(sk, struct tap_queue, sk);

	skb_array_cleanup(&q->skb_array);
}

static int tap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev;
	struct tap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	dev = dev_get_by_tap_minor(iminor(inode));
	if (!dev)
		goto err;

	err = -ENOMEM;
	q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &tap_proto, 0);
	if (!q)
		goto err;

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &tap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = tap_sock_write_space;
	q->sk.sk_destruct = tap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM virtio_net uses tap; enable zero copy between
	 * guest kernel and host kernel when the lower device supports zerocopy.
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy, so we don't have to look at the lower device directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = -ENOMEM;
	if (skb_array_init(&q->skb_array, dev->tx_queue_len, GFP_KERNEL))
		goto err_array;

	err = tap_set_queue(dev, file, q);
	if (err)
		goto err_queue;

	dev_put(dev);

	rtnl_unlock();
	return err;

err_queue:
	skb_array_cleanup(&q->skb_array);
err_array:
	sock_put(&q->sk);
err:
	if (dev)
		dev_put(dev);

	rtnl_unlock();
	return err;
}

static int tap_release(struct inode *inode, struct file *file)
{
	struct tap_queue *q = file->private_data;
	tap_put_queue(q);
	return 0;
}

static unsigned int tap_poll(struct file *file, poll_table *wait)
{
	struct tap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_array_empty(&q->skb_array))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}

static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
					    size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define TAP_RESERVE HH_DATA_OFF(ETH_HLEN)

/* Get packet from user space buffer */
static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m,
			    struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(TAP_RESERVE);
	struct sk_buff *skb;
	struct macvlan_dev *vlan;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	int depth;
	bool zerocopy = false;
	size_t linear;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		     tap16_to_cpu(q, vnet_hdr.csum_start) +
		     tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			     tap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_tap16(q,
				 tap16_to_cpu(q, vnet_hdr.csum_start) +
				 tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ?
			tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		else if (copylen < ETH_HLEN)
			copylen = ETH_HLEN;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
		if (linear > good_linear)
			linear = good_linear;
		else if (linear < ETH_HLEN)
			linear = ETH_HLEN;
	}

	skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
			    linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else
		err = skb_copy_datagram_from_iter(skb, 0, from, len);

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb, ETH_HLEN);

	/* Move network header to the right position for VLAN tagged packets */
	if ((skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD)) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	} else if (m && m->msg_control) {
		struct ubuf_info *uarg = m->msg_control;
		uarg->callback(uarg, false);
	}

	if (vlan) {
		skb->dev = vlan->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	if (vlan)
		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
	rcu_read_unlock();

	return err;
}

static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;

	return tap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}

/* Put packet to the user space buffer */
static ssize_t tap_put_user(struct tap_queue *q,
			    const struct sk_buff *skb,
			    struct iov_iter *iter)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int total;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
		if (iov_iter_count(iter) < vnet_hdr_len)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q), true))
			BUG();

		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
		    sizeof(vnet_hdr))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
	}
	total = vnet_hdr_len;
	total += skb->len;

	if (skb_vlan_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		total += VLAN_HLEN;

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
}

static ssize_t tap_do_read(struct tap_queue *q,
			   struct iov_iter *to,
			   int noblock)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb;
	ssize_t ret = 0;

	if (!iov_iter_count(to))
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = skb_array_consume(&q->skb_array);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);

	if (skb) {
		ret = tap_put_user(q, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return ret;
}

static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;

	ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}

static struct macvlan_dev *tap_get_vlan(struct tap_queue *q)
{
	struct macvlan_dev *vlan;

	ASSERT_RTNL();
	vlan = rtnl_dereference(q->vlan);
	if (vlan)
		dev_hold(vlan->dev);

	return vlan;
}

static void tap_put_vlan(struct macvlan_dev *vlan)
{
	dev_put(vlan->dev);
}

static int tap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct tap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	int ret;

	vlan = tap_get_vlan(q);
	if (!vlan)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = tap_enable_queue(vlan->dev, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = tap_disable_queue(q);
	else
		ret = -EINVAL;

	tap_put_vlan(vlan);
	return ret;
}

static int set_offload(struct tap_queue *q, unsigned long arg)
{
	struct macvlan_dev *vlan;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	vlan = rtnl_dereference(q->vlan);
	if (!vlan)
		return -ENOLINK;

	features = vlan->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}

		if (arg & TUN_F_UFO)
			feature_mask |= NETIF_F_UFO;
	}

	/* The tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that userspace wants to
	 * accept TSO frames and turning it off means that userspace
	 * does not support TSO.
	 * For tap, we have to invert it to mean the same thing.
	 * When userspace turns off TSO, we turn off GSO/LRO so that
	 * userspace will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	vlan->tap_features = feature_mask;
	vlan->set_features = features;
	netdev_update_features(vlan->dev);

	return 0;
}
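
/*
 * Illustrative sketch (not part of the driver): userspace such as a
 * virtio-net backend advertises which offloads it can accept, e.g.
 * checksum plus TCPv4/v6 segmentation. Note that TUNSETOFFLOAD passes
 * the flag word as the ioctl argument itself, not as a pointer:
 *
 *	unsigned long offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
 *	if (ioctl(fd, TUNSETOFFLOAD, offloads) < 0)
 *		perror("TUNSETOFFLOAD");
 */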

/*
 * provide compatibility with generic tun/tap interface
 */
static long tap_ioctl(struct file *file, unsigned int cmd,
		      unsigned long arg)
{
	struct tap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned short u;
	int __user *sp = argp;
	struct sockaddr sa;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~TAP_IFFEATURES) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = (q->flags & ~TAP_IFFEATURES) | u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		vlan = tap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		u = q->flags;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    put_user(u, &ifr->ifr_flags))
			ret = -EFAULT;
		tap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = tap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | TAP_IFFEATURES, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(s, sp))
			return -EFAULT;

		q->sk.sk_sndbuf = s;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNGETVNETLE:
		s = !!(q->flags & TAP_VNET_LE);
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETLE:
		if (get_user(s, sp))
			return -EFAULT;
		if (s)
			q->flags |= TAP_VNET_LE;
		else
			q->flags &= ~TAP_VNET_LE;
		return 0;

	case TUNGETVNETBE:
		return tap_get_vnet_be(q, sp);

	case TUNSETVNETBE:
		return tap_set_vnet_be(q, sp);

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	case SIOCGIFHWADDR:
		rtnl_lock();
		vlan = tap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = 0;
		u = vlan->dev->type;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) ||
		    put_user(u, &ifr->ifr_hwaddr.sa_family))
			ret = -EFAULT;
		tap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
			return -EFAULT;
		rtnl_lock();
		vlan = tap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = dev_set_mac_address(vlan->dev, &sa);
		tap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}
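
/*
 * Illustrative sketch (not part of the driver): a consumer that wants
 * the extended header with num_buffers (used for mergeable receive
 * buffers) grows the per-queue vnet header size accordingly; anything
 * smaller than sizeof(struct virtio_net_hdr) is rejected above:
 *
 *	int hdr_sz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 *	if (ioctl(fd, TUNSETVNETHDRSZ, &hdr_sz) < 0)
 *		perror("TUNSETVNETHDRSZ");
 */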

#ifdef CONFIG_COMPAT
static long tap_compat_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	return tap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

const struct file_operations tap_fops = {
	.owner		= THIS_MODULE,
	.open		= tap_open,
	.release	= tap_release,
	.read_iter	= tap_read_iter,
	.write_iter	= tap_write_iter,
	.poll		= tap_poll,
	.llseek		= no_llseek,
	.unlocked_ioctl	= tap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= tap_compat_ioctl,
#endif
};

static int tap_sendmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	return tap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
}

static int tap_recvmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len, int flags)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	int ret;
	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

static int tap_peek_len(struct socket *sock)
{
	struct tap_queue *q = container_of(sock, struct tap_queue,
					       sock);
	return skb_array_peek_len(&q->skb_array);
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tap_socket_ops = {
	.sendmsg = tap_sendmsg,
	.recvmsg = tap_recvmsg,
	.peek_len = tap_peek_len,
};

/* Get an underlying socket object from a tap file.  Returns error unless file
 * is attached to a device.  The returned object works like a packet socket; it
 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tap_get_socket(struct file *file)
{
	struct tap_queue *q;
	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(tap_get_socket);
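
/*
 * Sketch of the expected in-kernel consumer pattern (e.g. vhost-net;
 * simplified, with error handling and msghdr setup elided):
 *
 *	struct socket *sock = tap_get_socket(file);
 *	if (!IS_ERR(sock))
 *		sock_sendmsg(sock, &msg);	// or sock_recvmsg()
 */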

int tap_queue_resize(struct macvlan_dev *vlan)
{
	struct net_device *dev = vlan->dev;
	struct tap_queue *q;
	struct skb_array **arrays;
	int n = vlan->numqueues;
	int ret, i = 0;

	arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL);
	if (!arrays)
		return -ENOMEM;

	list_for_each_entry(q, &vlan->queue_list, next)
		arrays[i++] = &q->skb_array;

	ret = skb_array_resize_multiple(arrays, n,
					dev->tx_queue_len, GFP_KERNEL);

	kfree(arrays);
	return ret;
}
1187