// SPDX-License-Identifier: GPL-2.0-only
#include <linux/etherdevice.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
#include <linux/skb_array.h>

#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)

#define TAP_VNET_LE 0x80000000
#define TAP_VNET_BE 0x40000000

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s = !!(q->flags & TAP_VNET_BE);

	if (put_user(s, sp))
		return -EFAULT;

	return 0;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s;

	if (get_user(s, sp))
		return -EFAULT;

	if (s)
		q->flags |= TAP_VNET_BE;
	else
		q->flags &= ~TAP_VNET_BE;

	return 0;
}
#else
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tap_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_LE ||
		tap_legacy_is_little_endian(q);
}

static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
{
	return __virtio16_to_cpu(tap_is_little_endian(q), val);
}

static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
{
	return __cpu_to_virtio16(tap_is_little_endian(q), val);
}

static struct proto tap_proto = {
	.name = "tap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tap_queue),
};

#define TAP_NUM_DEVS (1U << MINORBITS)

static LIST_HEAD(major_list);

struct major_info {
	struct rcu_head rcu;
	dev_t major;
	struct idr minor_idr;
	spinlock_t minor_lock;
	const char *device_name;
	struct list_head next;
};

#define GOODCOPY_LEN 128

static const struct proto_ops tap_socket_ops;

#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)

static struct tap_dev *tap_dev_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The tap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the tap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * tap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */
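
/*
 * Illustrative sketch (not part of this driver) of the read-side
 * pattern described above, as used by the transmit and ioctl paths
 * further down; use_dev() is a hypothetical placeholder:
 *
 *	rcu_read_lock();
 *	tap = rcu_dereference(q->tap);
 *	if (tap)
 *		use_dev(tap->dev);	// valid until rcu_read_unlock()
 *	rcu_read_unlock();
 */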

static int tap_enable_queue(struct tap_dev *tap, struct file *file,
			    struct tap_queue *q)
{
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	q->queue_index = tap->numvtaps;
	q->enabled = true;

	tap->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int tap_set_queue(struct tap_dev *tap, struct file *file,
			 struct tap_queue *q)
{
	if (tap->numqueues == MAX_TAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->tap, tap);
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = tap->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &tap->queue_list);

	tap->numvtaps++;
	tap->numqueues++;

	return 0;
}

static int tap_disable_queue(struct tap_queue *q)
{
	struct tap_dev *tap;
	struct tap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	tap = rtnl_dereference(q->tap);

	if (tap) {
		int index = q->queue_index;
		BUG_ON(index >= tap->numvtaps);
		nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(tap->taps[index], nq);
		RCU_INIT_POINTER(tap->taps[tap->numvtaps - 1], NULL);
		q->enabled = false;

		tap->numvtaps--;
	}

	return 0;
}
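
/*
 * A sketch of the swap-with-last step above, assuming three enabled
 * queues and queue index 0 being disabled:
 *
 *	taps: [q0][q1][q2]  ->  [q2][q1][NULL]
 *
 * The last enabled queue takes over the vacated index, keeping the
 * enabled part of the array dense so tap_get_queue() can index it
 * with a simple modulo.
 */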

/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Using the spinlock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void tap_put_queue(struct tap_queue *q)
{
	struct tap_dev *tap;

	rtnl_lock();
	tap = rtnl_dereference(q->tap);

	if (tap) {
		if (q->enabled)
			BUG_ON(tap_disable_queue(q));

		tap->numqueues--;
		RCU_INIT_POINTER(q->tap, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all fails, find the first available queue.
 * Cache tap->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct tap_queue *tap_get_queue(struct tap_dev *tap,
				       struct sk_buff *skb)
{
	struct tap_queue *queue = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = READ_ONCE(tap->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	if (numvtaps == 1)
		goto single;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		queue = rcu_dereference(tap->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		queue = rcu_dereference(tap->taps[rxq]);
		goto out;
	}

single:
	queue = rcu_dereference(tap->taps[0]);
out:
	return queue;
}

/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
void tap_del_queues(struct tap_dev *tap)
{
	struct tap_queue *q, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &tap->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->tap, NULL);
		if (q->enabled)
			tap->numvtaps--;
		tap->numqueues--;
		sock_put(&q->sk);
	}
	BUG_ON(tap->numvtaps);
	BUG_ON(tap->numqueues);
	/* guarantee that any future tap_set_queue will fail */
	tap->numvtaps = MAX_TAP_QUEUES;
}
EXPORT_SYMBOL_GPL(tap_del_queues);

rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct tap_dev *tap;
	struct tap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	tap = tap_dev_get_rcu(dev);
	if (!tap)
		return RX_HANDLER_PASS;

	q = tap_get_queue(tap, skb);
	if (!q)
		return RX_HANDLER_PASS;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= tap->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);
		struct sk_buff *next;

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			if (ptr_ring_produce(&q->ring, skb))
				goto drop;
			goto wake_up;
		}

		consume_skb(skb);
		skb_list_walk_safe(segs, skb, next) {
			skb_mark_not_on_list(skb);
			if (ptr_ring_produce(&q->ring, skb)) {
				kfree_skb(skb);
				kfree_skb_list(next);
				break;
			}
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_CSUM_MASK) &&
		    skb_checksum_help(skb))
			goto drop;
		if (ptr_ring_produce(&q->ring, skb))
			goto drop;
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), EPOLLIN | EPOLLRDNORM | EPOLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	if (tap->count_rx_dropped)
		tap->count_rx_dropped(tap);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}
EXPORT_SYMBOL_GPL(tap_handle_frame);

static struct major_info *tap_get_major(int major)
{
	struct major_info *tap_major;

	list_for_each_entry_rcu(tap_major, &major_list, next) {
		if (tap_major->major == major)
			return tap_major;
	}

	return NULL;
}

int tap_get_minor(dev_t major, struct tap_dev *tap)
{
	int retval = -ENOMEM;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major) {
		retval = -EINVAL;
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_ATOMIC);
	if (retval >= 0) {
		tap->minor = retval;
	} else if (retval == -ENOSPC) {
		netdev_err(tap->dev, "Too many tap devices\n");
		retval = -EINVAL;
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return retval < 0 ? retval : 0;
}
EXPORT_SYMBOL_GPL(tap_get_minor);

void tap_free_minor(dev_t major, struct tap_dev *tap)
{
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major)
		goto unlock;

	spin_lock(&tap_major->minor_lock);
	if (tap->minor) {
		idr_remove(&tap_major->minor_idr, tap->minor);
		tap->minor = 0;
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tap_free_minor);

static struct tap_dev *dev_get_by_tap_file(int major, int minor)
{
	struct net_device *dev = NULL;
	struct tap_dev *tap;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(major);
	if (!tap_major) {
		tap = NULL;
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	tap = idr_find(&tap_major->minor_idr, minor);
	if (tap) {
		dev = tap->dev;
		dev_hold(dev);
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return tap;
}

static void tap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
}

static void tap_sock_destruct(struct sock *sk)
{
	struct tap_queue *q = container_of(sk, struct tap_queue, sk);

	ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
}

static int tap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tap_dev *tap;
	struct tap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	tap = dev_get_by_tap_file(imajor(inode), iminor(inode));
	if (!tap)
		goto err;

	err = -ENOMEM;
	q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					 &tap_proto, 0);
	if (!q)
		goto err;
	if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
		sk_free(&q->sk);
		goto err;
	}

	init_waitqueue_head(&q->sock.wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &tap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = tap_sock_write_space;
	q->sk.sk_destruct = tap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM virtio_net uses tap; enable zero copy between
	 * guest kernel and host kernel when the lower device supports
	 * zerocopy.
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy, so we don't have to look at the lower device directly.
	 */
	if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = tap_set_queue(tap, file, q);
	if (err) {
		/* tap_sock_destruct() will take care of freeing ptr_ring */
		goto err_put;
	}

	dev_put(tap->dev);

	rtnl_unlock();
	return err;

err_put:
	sock_put(&q->sk);
err:
	if (tap)
		dev_put(tap->dev);

	rtnl_unlock();
	return err;
}

static int tap_release(struct inode *inode, struct file *file)
{
	struct tap_queue *q = file->private_data;
	tap_put_queue(q);
	return 0;
}

static __poll_t tap_poll(struct file *file, poll_table *wait)
{
	struct tap_queue *q = file->private_data;
	__poll_t mask = EPOLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->sock.wq.wait, wait);

	if (!ptr_ring_empty(&q->ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= EPOLLOUT | EPOLLWRNORM;

out:
	return mask;
}
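
/*
 * A sketch of the buffer layout tap_alloc_skb() below produces,
 * assuming linear < len (otherwise everything lands in the linear
 * area):
 *
 *	| prepad (headroom) | linear (copied head) | len - linear (paged) |
 *
 * prepad is reserved, the first 'linear' bytes sit in the skb head,
 * and the remainder is attached as paged data.
 */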

static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
					    size_t len, size_t linear,
					    int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define TAP_RESERVE HH_DATA_OFF(ETH_HLEN)
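
/*
 * Illustrative layout (not normative; see the virtio spec for the
 * header definition) of what a writer provides per packet when
 * IFF_VNET_HDR is set, as parsed by tap_get_user() below:
 *
 *	| struct virtio_net_hdr | padding up to vnet_hdr_sz | Ethernet frame |
 *
 * vnet_hdr_sz defaults to sizeof(struct virtio_net_hdr) and can be
 * raised with TUNSETVNETHDRSZ; tap_put_user() emits the mirror-image
 * layout on reads.
 */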

/* Get packet from user space buffer */
static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
			    struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(TAP_RESERVE);
	struct sk_buff *skb;
	struct tap_dev *tap;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	int depth;
	bool zerocopy = false;
	size_t linear;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tap16_to_cpu(q, vnet_hdr.csum_start) +
		    tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			     tap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_tap16(q,
				 tap16_to_cpu(q, vnet_hdr.csum_start) +
				 tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ?
			tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		else if (copylen < ETH_HLEN)
			copylen = ETH_HLEN;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
		if (linear > good_linear)
			linear = good_linear;
		else if (linear < ETH_HLEN)
			linear = ETH_HLEN;
	}

	skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
			    linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else
		err = skb_copy_datagram_from_iter(skb, 0, from, len);

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb);

	/* Move network header to the right position for VLAN tagged packets */
	if (eth_type_vlan(skb->protocol) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_zcopy_init(skb, msg_control);
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;
		uarg->callback(NULL, uarg, false);
	}

	if (tap) {
		skb->dev = tap->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap && tap->count_tx_dropped)
		tap->count_tx_dropped(tap);
	rcu_read_unlock();

	return err;
}

static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;

	return tap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}

/* Put packet to the user space buffer */
static ssize_t tap_put_user(struct tap_queue *q,
			    const struct sk_buff *skb,
			    struct iov_iter *iter)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int total;

	if (q->flags & IFF_VNET_HDR) {
		int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
		struct virtio_net_hdr vnet_hdr;

		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
		if (iov_iter_count(iter) < vnet_hdr_len)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q), true,
					    vlan_hlen))
			BUG();

		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
		    sizeof(vnet_hdr))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
	}
	total = vnet_hdr_len;
	total += skb->len;

	if (skb_vlan_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		total += VLAN_HLEN;

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
}
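
/*
 * A sketch of the stream tap_put_user() above emits when the skb
 * carries an offloaded VLAN tag:
 *
 *	| vnet hdr (optional) | dst+src MAC (bytes 0..11) | proto/TCI | frame from byte 12 |
 *
 * i.e. the tag stripped by hardware is spliced back in after the MAC
 * addresses, so the reader sees a normally tagged frame.
 */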

static ssize_t tap_do_read(struct tap_queue *q,
			   struct iov_iter *to,
			   int noblock, struct sk_buff *skb)
{
	DEFINE_WAIT(wait);
	ssize_t ret = 0;

	if (!iov_iter_count(to)) {
		kfree_skb(skb);
		return 0;
	}

	if (skb)
		goto put;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = ptr_ring_consume(&q->ring);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);

put:
	if (skb) {
		ret = tap_put_user(q, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return ret;
}

static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;

	ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK, NULL);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}

static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
{
	struct tap_dev *tap;

	ASSERT_RTNL();
	tap = rtnl_dereference(q->tap);
	if (tap)
		dev_hold(tap->dev);

	return tap;
}

static void tap_put_tap_dev(struct tap_dev *tap)
{
	dev_put(tap->dev);
}

static int tap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	int ret;

	tap = tap_get_tap_dev(q);
	if (!tap)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = tap_enable_queue(tap, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = tap_disable_queue(q);
	else
		ret = -EINVAL;

	tap_put_tap_dev(tap);
	return ret;
}

static int set_offload(struct tap_queue *q, unsigned long arg)
{
	struct tap_dev *tap;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	tap = rtnl_dereference(q->tap);
	if (!tap)
		return -ENOLINK;

	features = tap->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}
	}

	/* tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that the userspace wants to
	 * accept TSO frames and turning it off means that user space
	 * does not support TSO.
	 * For tap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user-space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	tap->tap_features = feature_mask;
	if (tap->update_features)
		tap->update_features(tap, features);

	return 0;
}

/*
 * provide compatibility with generic tun/tap interface
 */
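
/*
 * A minimal userspace sketch of driving this ioctl interface.
 * Assumptions: the device node is /dev/tap<N> for an existing
 * macvtap/ipvtap interface ("tap5" is a placeholder), and error
 * handling is elided:
 *
 *	int fd = open("/dev/tap5", O_RDWR);
 *	struct ifreq ifr = {
 *		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR,
 *	};
 *	ioctl(fd, TUNSETIFF, &ifr);
 *	int hdrsz = sizeof(struct virtio_net_hdr);
 *	ioctl(fd, TUNSETVNETHDRSZ, &hdrsz);
 *	// read()/write() now carry a vnet header plus Ethernet frame
 */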

static long tap_ioctl(struct file *file, unsigned int cmd,
		      unsigned long arg)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned short u;
	int __user *sp = argp;
	struct sockaddr sa;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~TAP_IFFEATURES) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = (q->flags & ~TAP_IFFEATURES) | u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		u = q->flags;
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    put_user(u, &ifr->ifr_flags))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = tap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | TAP_IFFEATURES, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(s, sp))
			return -EFAULT;
		if (s <= 0)
			return -EINVAL;

		q->sk.sk_sndbuf = s;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNGETVNETLE:
		s = !!(q->flags & TAP_VNET_LE);
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETLE:
		if (get_user(s, sp))
			return -EFAULT;
		if (s)
			q->flags |= TAP_VNET_LE;
		else
			q->flags &= ~TAP_VNET_LE;
		return 0;

	case TUNGETVNETBE:
		return tap_get_vnet_be(q, sp);

	case TUNSETVNETBE:
		return tap_set_vnet_be(q, sp);

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	case SIOCGIFHWADDR:
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = 0;
		u = tap->dev->type;
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    copy_to_user(&ifr->ifr_hwaddr.sa_data, tap->dev->dev_addr, ETH_ALEN) ||
		    put_user(u, &ifr->ifr_hwaddr.sa_family))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
			return -EFAULT;
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = dev_set_mac_address(tap->dev, &sa, NULL);
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}

static const struct file_operations tap_fops = {
	.owner		= THIS_MODULE,
	.open		= tap_open,
	.release	= tap_release,
	.read_iter	= tap_read_iter,
	.write_iter	= tap_write_iter,
	.poll		= tap_poll,
	.llseek		= no_llseek,
	.unlocked_ioctl	= tap_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
{
	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
	struct virtio_net_hdr *gso = &hdr->gso;
	int buflen = hdr->buflen;
	int vnet_hdr_len = 0;
	struct tap_dev *tap;
	struct sk_buff *skb;
	int err, depth;

	if (q->flags & IFF_VNET_HDR)
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

	skb = build_skb(xdp->data_hard_start, buflen);
	if (!skb) {
		err = -ENOMEM;
		goto err;
	}

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	/* Move network header to the right position for VLAN tagged packets */
	if (eth_type_vlan(skb->protocol) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap) {
		skb->dev = tap->dev;
		skb_probe_transport_header(skb);
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return 0;

err_kfree:
	kfree_skb(skb);
err:
	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap && tap->count_tx_dropped)
		tap->count_tx_dropped(tap);
	rcu_read_unlock();
	return err;
}

static int tap_sendmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	struct tun_msg_ctl *ctl = m->msg_control;
	struct xdp_buff *xdp;
	int i;

	if (ctl && (ctl->type == TUN_MSG_PTR)) {
		for (i = 0; i < ctl->num; i++) {
			xdp = &((struct xdp_buff *)ctl->ptr)[i];
			tap_get_user_xdp(q, xdp);
		}
		return 0;
	}

	return tap_get_user(q, ctl ? ctl->ptr : NULL, &m->msg_iter,
			    m->msg_flags & MSG_DONTWAIT);
}

static int tap_recvmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len, int flags)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	struct sk_buff *skb = m->msg_control;
	int ret;

	if (flags & ~(MSG_DONTWAIT | MSG_TRUNC)) {
		kfree_skb(skb);
		return -EINVAL;
	}
	ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

static int tap_peek_len(struct socket *sock)
{
	struct tap_queue *q = container_of(sock, struct tap_queue,
					   sock);
	return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag);
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tap_socket_ops = {
	.sendmsg = tap_sendmsg,
	.recvmsg = tap_recvmsg,
	.peek_len = tap_peek_len,
};

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use.
 */
struct socket *tap_get_socket(struct file *file)
{
	struct tap_queue *q;

	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(tap_get_socket);
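
/*
 * A hedged sketch (not code that exists in this file; vhost-net is
 * the typical in-tree user) of how a caller consumes the socket
 * returned above, given a tap fd from userspace and a populated
 * struct msghdr msg:
 *
 *	struct file *file = fget(fd);
 *	struct socket *sock = tap_get_socket(file);
 *	if (!IS_ERR(sock))
 *		sock_sendmsg(sock, &msg);	// or sock_recvmsg()
 *	// the file reference must outlive all socket use, then fput(file)
 */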

struct ptr_ring *tap_get_ptr_ring(struct file *file)
{
	struct tap_queue *q;

	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->ring;
}
EXPORT_SYMBOL_GPL(tap_get_ptr_ring);

int tap_queue_resize(struct tap_dev *tap)
{
	struct net_device *dev = tap->dev;
	struct tap_queue *q;
	struct ptr_ring **rings;
	int n = tap->numqueues;
	int ret, i = 0;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	list_for_each_entry(q, &tap->queue_list, next)
		rings[i++] = &q->ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       __skb_array_destroy_skb);

	kfree(rings);
	return ret;
}
EXPORT_SYMBOL_GPL(tap_queue_resize);

static int tap_list_add(dev_t major, const char *device_name)
{
	struct major_info *tap_major;

	tap_major = kzalloc(sizeof(*tap_major), GFP_ATOMIC);
	if (!tap_major)
		return -ENOMEM;

	tap_major->major = MAJOR(major);

	idr_init(&tap_major->minor_idr);
	spin_lock_init(&tap_major->minor_lock);

	tap_major->device_name = device_name;

	list_add_tail_rcu(&tap_major->next, &major_list);
	return 0;
}

int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
		    const char *device_name, struct module *module)
{
	int err;

	err = alloc_chrdev_region(tap_major, 0, TAP_NUM_DEVS, device_name);
	if (err)
		goto out1;

	cdev_init(tap_cdev, &tap_fops);
	tap_cdev->owner = module;
	err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
	if (err)
		goto out2;

	err = tap_list_add(*tap_major, device_name);
	if (err)
		goto out3;

	return 0;

out3:
	cdev_del(tap_cdev);
out2:
	unregister_chrdev_region(*tap_major, TAP_NUM_DEVS);
out1:
	return err;
}
EXPORT_SYMBOL_GPL(tap_create_cdev);
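
/*
 * A sketch of how a backend driver registers with this layer,
 * mirroring what macvtap/ipvtap do in-tree ("foo" names are
 * placeholders):
 *
 *	static struct cdev foo_cdev;
 *	static dev_t foo_major;
 *
 *	static int __init foo_init(void)
 *	{
 *		return tap_create_cdev(&foo_cdev, &foo_major, "foo",
 *				       THIS_MODULE);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		tap_destroy_cdev(foo_major, &foo_cdev);
 *	}
 *
 * Each netdev the backend creates then calls tap_get_minor() /
 * tap_free_minor() to manage the minor number behind its /dev node.
 */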

void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev)
{
	struct major_info *tap_major, *tmp;

	cdev_del(tap_cdev);
	unregister_chrdev_region(major, TAP_NUM_DEVS);
	list_for_each_entry_safe(tap_major, tmp, &major_list, next) {
		if (tap_major->major == MAJOR(major)) {
			idr_destroy(&tap_major->minor_idr);
			list_del_rcu(&tap_major->next);
			kfree_rcu(tap_major, rcu);
		}
	}
}
EXPORT_SYMBOL_GPL(tap_destroy_cdev);

MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
MODULE_LICENSE("GPL");