1 /* 2 * TUN - Universal TUN/TAP device driver. 3 * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com> 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $ 16 */ 17 18 /* 19 * Changes: 20 * 21 * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14 22 * Add TUNSETLINK ioctl to set the link encapsulation 23 * 24 * Mark Smith <markzzzsmith@yahoo.com.au> 25 * Use eth_random_addr() for tap MAC address. 26 * 27 * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20 28 * Fixes in packet dropping, queue length setting and queue wakeup. 29 * Increased default tx queue length. 30 * Added ethtool API. 31 * Minor cleanups 32 * 33 * Daniel Podlejski <underley@underley.eu.org> 34 * Modifications for 2.3.99-pre5 kernel. 35 */ 36 37 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 38 39 #define DRV_NAME "tun" 40 #define DRV_VERSION "1.6" 41 #define DRV_DESCRIPTION "Universal TUN/TAP device driver" 42 #define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>" 43 44 #include <linux/module.h> 45 #include <linux/errno.h> 46 #include <linux/kernel.h> 47 #include <linux/sched/signal.h> 48 #include <linux/major.h> 49 #include <linux/slab.h> 50 #include <linux/poll.h> 51 #include <linux/fcntl.h> 52 #include <linux/init.h> 53 #include <linux/skbuff.h> 54 #include <linux/netdevice.h> 55 #include <linux/etherdevice.h> 56 #include <linux/miscdevice.h> 57 #include <linux/ethtool.h> 58 #include <linux/rtnetlink.h> 59 #include <linux/compat.h> 60 #include <linux/if.h> 61 #include <linux/if_arp.h> 62 #include <linux/if_ether.h> 63 #include <linux/if_tun.h> 64 #include <linux/if_vlan.h> 65 #include <linux/crc32.h> 66 #include <linux/nsproxy.h> 67 #include <linux/virtio_net.h> 68 #include <linux/rcupdate.h> 69 #include <net/net_namespace.h> 70 #include <net/netns/generic.h> 71 #include <net/rtnetlink.h> 72 #include <net/sock.h> 73 #include <linux/seq_file.h> 74 #include <linux/uio.h> 75 #include <linux/skb_array.h> 76 #include <linux/bpf.h> 77 #include <linux/bpf_trace.h> 78 #include <linux/mutex.h> 79 80 #include <linux/uaccess.h> 81 82 /* Uncomment to enable debugging */ 83 /* #define TUN_DEBUG 1 */ 84 85 #ifdef TUN_DEBUG 86 static int debug; 87 88 #define tun_debug(level, tun, fmt, args...) \ 89 do { \ 90 if (tun->debug) \ 91 netdev_printk(level, tun->dev, fmt, ##args); \ 92 } while (0) 93 #define DBG1(level, fmt, args...) \ 94 do { \ 95 if (debug == 2) \ 96 printk(level fmt, ##args); \ 97 } while (0) 98 #else 99 #define tun_debug(level, tun, fmt, args...) \ 100 do { \ 101 if (0) \ 102 netdev_printk(level, tun->dev, fmt, ##args); \ 103 } while (0) 104 #define DBG1(level, fmt, args...) \ 105 do { \ 106 if (0) \ 107 printk(level fmt, ##args); \ 108 } while (0) 109 #endif 110 111 #define TUN_HEADROOM 256 112 #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) 113 114 /* TUN device flags */ 115 116 /* IFF_ATTACH_QUEUE is never stored in device flags, 117 * overload it to mean fasync when stored there. 118 */ 119 #define TUN_FASYNC IFF_ATTACH_QUEUE 120 /* High bits in flags field are unused. */ 121 #define TUN_VNET_LE 0x80000000 122 #define TUN_VNET_BE 0x40000000 123 124 #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \ 125 IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS) 126 127 #define GOODCOPY_LEN 128 128 129 #define FLT_EXACT_COUNT 8 130 struct tap_filter { 131 unsigned int count; /* Number of addrs. Zero means disabled */ 132 u32 mask[2]; /* Mask of the hashed addrs */ 133 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; 134 }; 135 136 /* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal 137 * to max number of VCPUs in guest. */ 138 #define MAX_TAP_QUEUES 256 139 #define MAX_TAP_FLOWS 4096 140 141 #define TUN_FLOW_EXPIRE (3 * HZ) 142 143 struct tun_pcpu_stats { 144 u64 rx_packets; 145 u64 rx_bytes; 146 u64 tx_packets; 147 u64 tx_bytes; 148 struct u64_stats_sync syncp; 149 u32 rx_dropped; 150 u32 tx_dropped; 151 u32 rx_frame_errors; 152 }; 153 154 /* A tun_file connects an open character device to a tuntap netdevice. It 155 * also contains all socket related structures (except sock_fprog and tap_filter) 156 * to serve as one transmit queue for tuntap device. The sock_fprog and 157 * tap_filter were kept in tun_struct since they were used for filtering for the 158 * netdevice not for a specific queue (at least I didn't see the requirement for 159 * this). 160 * 161 * RCU usage: 162 * The tun_file and tun_struct are loosely coupled, the pointer from one to the 163 * other can only be read while rcu_read_lock or rtnl_lock is held. 164 */ 165 struct tun_file { 166 struct sock sk; 167 struct socket socket; 168 struct socket_wq wq; 169 struct tun_struct __rcu *tun; 170 struct fasync_struct *fasync; 171 /* only used for fasnyc */ 172 unsigned int flags; 173 union { 174 u16 queue_index; 175 unsigned int ifindex; 176 }; 177 struct napi_struct napi; 178 bool napi_enabled; 179 struct mutex napi_mutex; /* Protects access to the above napi */ 180 struct list_head next; 181 struct tun_struct *detached; 182 struct ptr_ring tx_ring; 183 struct xdp_rxq_info xdp_rxq; 184 }; 185 186 struct tun_flow_entry { 187 struct hlist_node hash_link; 188 struct rcu_head rcu; 189 struct tun_struct *tun; 190 191 u32 rxhash; 192 u32 rps_rxhash; 193 int queue_index; 194 unsigned long updated; 195 }; 196 197 #define TUN_NUM_FLOW_ENTRIES 1024 198 199 struct tun_prog { 200 struct rcu_head rcu; 201 struct bpf_prog *prog; 202 }; 203 204 /* Since the socket were moved to tun_file, to preserve the behavior of persist 205 * device, socket filter, sndbuf and vnet header size were restore when the 206 * file were attached to a persist device. 207 */ 208 struct tun_struct { 209 struct tun_file __rcu *tfiles[MAX_TAP_QUEUES]; 210 unsigned int numqueues; 211 unsigned int flags; 212 kuid_t owner; 213 kgid_t group; 214 215 struct net_device *dev; 216 netdev_features_t set_features; 217 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ 218 NETIF_F_TSO6) 219 220 int align; 221 int vnet_hdr_sz; 222 int sndbuf; 223 struct tap_filter txflt; 224 struct sock_fprog fprog; 225 /* protected by rtnl lock */ 226 bool filter_attached; 227 #ifdef TUN_DEBUG 228 int debug; 229 #endif 230 spinlock_t lock; 231 struct hlist_head flows[TUN_NUM_FLOW_ENTRIES]; 232 struct timer_list flow_gc_timer; 233 unsigned long ageing_time; 234 unsigned int numdisabled; 235 struct list_head disabled; 236 void *security; 237 u32 flow_count; 238 u32 rx_batched; 239 struct tun_pcpu_stats __percpu *pcpu_stats; 240 struct bpf_prog __rcu *xdp_prog; 241 struct tun_prog __rcu *steering_prog; 242 struct tun_prog __rcu *filter_prog; 243 }; 244 245 struct veth { 246 __be16 h_vlan_proto; 247 __be16 h_vlan_TCI; 248 }; 249 250 bool tun_is_xdp_buff(void *ptr) 251 { 252 return (unsigned long)ptr & TUN_XDP_FLAG; 253 } 254 EXPORT_SYMBOL(tun_is_xdp_buff); 255 256 void *tun_xdp_to_ptr(void *ptr) 257 { 258 return (void *)((unsigned long)ptr | TUN_XDP_FLAG); 259 } 260 EXPORT_SYMBOL(tun_xdp_to_ptr); 261 262 void *tun_ptr_to_xdp(void *ptr) 263 { 264 return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG); 265 } 266 EXPORT_SYMBOL(tun_ptr_to_xdp); 267 268 static int tun_napi_receive(struct napi_struct *napi, int budget) 269 { 270 struct tun_file *tfile = container_of(napi, struct tun_file, napi); 271 struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 272 struct sk_buff_head process_queue; 273 struct sk_buff *skb; 274 int received = 0; 275 276 __skb_queue_head_init(&process_queue); 277 278 spin_lock(&queue->lock); 279 skb_queue_splice_tail_init(queue, &process_queue); 280 spin_unlock(&queue->lock); 281 282 while (received < budget && (skb = __skb_dequeue(&process_queue))) { 283 napi_gro_receive(napi, skb); 284 ++received; 285 } 286 287 if (!skb_queue_empty(&process_queue)) { 288 spin_lock(&queue->lock); 289 skb_queue_splice(&process_queue, queue); 290 spin_unlock(&queue->lock); 291 } 292 293 return received; 294 } 295 296 static int tun_napi_poll(struct napi_struct *napi, int budget) 297 { 298 unsigned int received; 299 300 received = tun_napi_receive(napi, budget); 301 302 if (received < budget) 303 napi_complete_done(napi, received); 304 305 return received; 306 } 307 308 static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, 309 bool napi_en) 310 { 311 tfile->napi_enabled = napi_en; 312 if (napi_en) { 313 netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll, 314 NAPI_POLL_WEIGHT); 315 napi_enable(&tfile->napi); 316 mutex_init(&tfile->napi_mutex); 317 } 318 } 319 320 static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile) 321 { 322 if (tfile->napi_enabled) 323 napi_disable(&tfile->napi); 324 } 325 326 static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile) 327 { 328 if (tfile->napi_enabled) 329 netif_napi_del(&tfile->napi); 330 } 331 332 static bool tun_napi_frags_enabled(const struct tun_struct *tun) 333 { 334 return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS; 335 } 336 337 #ifdef CONFIG_TUN_VNET_CROSS_LE 338 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun) 339 { 340 return tun->flags & TUN_VNET_BE ? false : 341 virtio_legacy_is_little_endian(); 342 } 343 344 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp) 345 { 346 int be = !!(tun->flags & TUN_VNET_BE); 347 348 if (put_user(be, argp)) 349 return -EFAULT; 350 351 return 0; 352 } 353 354 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp) 355 { 356 int be; 357 358 if (get_user(be, argp)) 359 return -EFAULT; 360 361 if (be) 362 tun->flags |= TUN_VNET_BE; 363 else 364 tun->flags &= ~TUN_VNET_BE; 365 366 return 0; 367 } 368 #else 369 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun) 370 { 371 return virtio_legacy_is_little_endian(); 372 } 373 374 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp) 375 { 376 return -EINVAL; 377 } 378 379 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp) 380 { 381 return -EINVAL; 382 } 383 #endif /* CONFIG_TUN_VNET_CROSS_LE */ 384 385 static inline bool tun_is_little_endian(struct tun_struct *tun) 386 { 387 return tun->flags & TUN_VNET_LE || 388 tun_legacy_is_little_endian(tun); 389 } 390 391 static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val) 392 { 393 return __virtio16_to_cpu(tun_is_little_endian(tun), val); 394 } 395 396 static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val) 397 { 398 return __cpu_to_virtio16(tun_is_little_endian(tun), val); 399 } 400 401 static inline u32 tun_hashfn(u32 rxhash) 402 { 403 return rxhash & 0x3ff; 404 } 405 406 static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash) 407 { 408 struct tun_flow_entry *e; 409 410 hlist_for_each_entry_rcu(e, head, hash_link) { 411 if (e->rxhash == rxhash) 412 return e; 413 } 414 return NULL; 415 } 416 417 static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun, 418 struct hlist_head *head, 419 u32 rxhash, u16 queue_index) 420 { 421 struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC); 422 423 if (e) { 424 tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n", 425 rxhash, queue_index); 426 e->updated = jiffies; 427 e->rxhash = rxhash; 428 e->rps_rxhash = 0; 429 e->queue_index = queue_index; 430 e->tun = tun; 431 hlist_add_head_rcu(&e->hash_link, head); 432 ++tun->flow_count; 433 } 434 return e; 435 } 436 437 static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e) 438 { 439 tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n", 440 e->rxhash, e->queue_index); 441 hlist_del_rcu(&e->hash_link); 442 kfree_rcu(e, rcu); 443 --tun->flow_count; 444 } 445 446 static void tun_flow_flush(struct tun_struct *tun) 447 { 448 int i; 449 450 spin_lock_bh(&tun->lock); 451 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { 452 struct tun_flow_entry *e; 453 struct hlist_node *n; 454 455 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) 456 tun_flow_delete(tun, e); 457 } 458 spin_unlock_bh(&tun->lock); 459 } 460 461 static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index) 462 { 463 int i; 464 465 spin_lock_bh(&tun->lock); 466 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { 467 struct tun_flow_entry *e; 468 struct hlist_node *n; 469 470 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) { 471 if (e->queue_index == queue_index) 472 tun_flow_delete(tun, e); 473 } 474 } 475 spin_unlock_bh(&tun->lock); 476 } 477 478 static void tun_flow_cleanup(struct timer_list *t) 479 { 480 struct tun_struct *tun = from_timer(tun, t, flow_gc_timer); 481 unsigned long delay = tun->ageing_time; 482 unsigned long next_timer = jiffies + delay; 483 unsigned long count = 0; 484 int i; 485 486 tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n"); 487 488 spin_lock(&tun->lock); 489 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { 490 struct tun_flow_entry *e; 491 struct hlist_node *n; 492 493 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) { 494 unsigned long this_timer; 495 496 this_timer = e->updated + delay; 497 if (time_before_eq(this_timer, jiffies)) { 498 tun_flow_delete(tun, e); 499 continue; 500 } 501 count++; 502 if (time_before(this_timer, next_timer)) 503 next_timer = this_timer; 504 } 505 } 506 507 if (count) 508 mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer)); 509 spin_unlock(&tun->lock); 510 } 511 512 static void tun_flow_update(struct tun_struct *tun, u32 rxhash, 513 struct tun_file *tfile) 514 { 515 struct hlist_head *head; 516 struct tun_flow_entry *e; 517 unsigned long delay = tun->ageing_time; 518 u16 queue_index = tfile->queue_index; 519 520 if (!rxhash) 521 return; 522 else 523 head = &tun->flows[tun_hashfn(rxhash)]; 524 525 rcu_read_lock(); 526 527 /* We may get a very small possibility of OOO during switching, not 528 * worth to optimize.*/ 529 if (tun->numqueues == 1 || tfile->detached) 530 goto unlock; 531 532 e = tun_flow_find(head, rxhash); 533 if (likely(e)) { 534 /* TODO: keep queueing to old queue until it's empty? */ 535 e->queue_index = queue_index; 536 e->updated = jiffies; 537 sock_rps_record_flow_hash(e->rps_rxhash); 538 } else { 539 spin_lock_bh(&tun->lock); 540 if (!tun_flow_find(head, rxhash) && 541 tun->flow_count < MAX_TAP_FLOWS) 542 tun_flow_create(tun, head, rxhash, queue_index); 543 544 if (!timer_pending(&tun->flow_gc_timer)) 545 mod_timer(&tun->flow_gc_timer, 546 round_jiffies_up(jiffies + delay)); 547 spin_unlock_bh(&tun->lock); 548 } 549 550 unlock: 551 rcu_read_unlock(); 552 } 553 554 /** 555 * Save the hash received in the stack receive path and update the 556 * flow_hash table accordingly. 557 */ 558 static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash) 559 { 560 if (unlikely(e->rps_rxhash != hash)) 561 e->rps_rxhash = hash; 562 } 563 564 /* We try to identify a flow through its rxhash first. The reason that 565 * we do not check rxq no. is because some cards(e.g 82599), chooses 566 * the rxq based on the txq where the last packet of the flow comes. As 567 * the userspace application move between processors, we may get a 568 * different rxq no. here. If we could not get rxhash, then we would 569 * hope the rxq no. may help here. 570 */ 571 static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb) 572 { 573 struct tun_flow_entry *e; 574 u32 txq = 0; 575 u32 numqueues = 0; 576 577 numqueues = READ_ONCE(tun->numqueues); 578 579 txq = __skb_get_hash_symmetric(skb); 580 if (txq) { 581 e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); 582 if (e) { 583 tun_flow_save_rps_rxhash(e, txq); 584 txq = e->queue_index; 585 } else 586 /* use multiply and shift instead of expensive divide */ 587 txq = ((u64)txq * numqueues) >> 32; 588 } else if (likely(skb_rx_queue_recorded(skb))) { 589 txq = skb_get_rx_queue(skb); 590 while (unlikely(txq >= numqueues)) 591 txq -= numqueues; 592 } 593 594 return txq; 595 } 596 597 static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb) 598 { 599 struct tun_prog *prog; 600 u16 ret = 0; 601 602 prog = rcu_dereference(tun->steering_prog); 603 if (prog) 604 ret = bpf_prog_run_clear_cb(prog->prog, skb); 605 606 return ret % tun->numqueues; 607 } 608 609 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, 610 void *accel_priv, select_queue_fallback_t fallback) 611 { 612 struct tun_struct *tun = netdev_priv(dev); 613 u16 ret; 614 615 rcu_read_lock(); 616 if (rcu_dereference(tun->steering_prog)) 617 ret = tun_ebpf_select_queue(tun, skb); 618 else 619 ret = tun_automq_select_queue(tun, skb); 620 rcu_read_unlock(); 621 622 return ret; 623 } 624 625 static inline bool tun_not_capable(struct tun_struct *tun) 626 { 627 const struct cred *cred = current_cred(); 628 struct net *net = dev_net(tun->dev); 629 630 return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) || 631 (gid_valid(tun->group) && !in_egroup_p(tun->group))) && 632 !ns_capable(net->user_ns, CAP_NET_ADMIN); 633 } 634 635 static void tun_set_real_num_queues(struct tun_struct *tun) 636 { 637 netif_set_real_num_tx_queues(tun->dev, tun->numqueues); 638 netif_set_real_num_rx_queues(tun->dev, tun->numqueues); 639 } 640 641 static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile) 642 { 643 tfile->detached = tun; 644 list_add_tail(&tfile->next, &tun->disabled); 645 ++tun->numdisabled; 646 } 647 648 static struct tun_struct *tun_enable_queue(struct tun_file *tfile) 649 { 650 struct tun_struct *tun = tfile->detached; 651 652 tfile->detached = NULL; 653 list_del_init(&tfile->next); 654 --tun->numdisabled; 655 return tun; 656 } 657 658 static void tun_ptr_free(void *ptr) 659 { 660 if (!ptr) 661 return; 662 if (tun_is_xdp_buff(ptr)) { 663 struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); 664 665 put_page(virt_to_head_page(xdp->data)); 666 } else { 667 __skb_array_destroy_skb(ptr); 668 } 669 } 670 671 static void tun_queue_purge(struct tun_file *tfile) 672 { 673 void *ptr; 674 675 while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL) 676 tun_ptr_free(ptr); 677 678 skb_queue_purge(&tfile->sk.sk_write_queue); 679 skb_queue_purge(&tfile->sk.sk_error_queue); 680 } 681 682 static void tun_cleanup_tx_ring(struct tun_file *tfile) 683 { 684 if (tfile->tx_ring.queue) { 685 ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free); 686 xdp_rxq_info_unreg(&tfile->xdp_rxq); 687 memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); 688 } 689 } 690 691 static void __tun_detach(struct tun_file *tfile, bool clean) 692 { 693 struct tun_file *ntfile; 694 struct tun_struct *tun; 695 696 tun = rtnl_dereference(tfile->tun); 697 698 if (tun && clean) { 699 tun_napi_disable(tun, tfile); 700 tun_napi_del(tun, tfile); 701 } 702 703 if (tun && !tfile->detached) { 704 u16 index = tfile->queue_index; 705 BUG_ON(index >= tun->numqueues); 706 707 rcu_assign_pointer(tun->tfiles[index], 708 tun->tfiles[tun->numqueues - 1]); 709 ntfile = rtnl_dereference(tun->tfiles[index]); 710 ntfile->queue_index = index; 711 712 --tun->numqueues; 713 if (clean) { 714 RCU_INIT_POINTER(tfile->tun, NULL); 715 sock_put(&tfile->sk); 716 } else 717 tun_disable_queue(tun, tfile); 718 719 synchronize_net(); 720 tun_flow_delete_by_queue(tun, tun->numqueues + 1); 721 /* Drop read queue */ 722 tun_queue_purge(tfile); 723 tun_set_real_num_queues(tun); 724 } else if (tfile->detached && clean) { 725 tun = tun_enable_queue(tfile); 726 sock_put(&tfile->sk); 727 } 728 729 if (clean) { 730 if (tun && tun->numqueues == 0 && tun->numdisabled == 0) { 731 netif_carrier_off(tun->dev); 732 733 if (!(tun->flags & IFF_PERSIST) && 734 tun->dev->reg_state == NETREG_REGISTERED) 735 unregister_netdevice(tun->dev); 736 } 737 tun_cleanup_tx_ring(tfile); 738 sock_put(&tfile->sk); 739 } 740 } 741 742 static void tun_detach(struct tun_file *tfile, bool clean) 743 { 744 rtnl_lock(); 745 __tun_detach(tfile, clean); 746 rtnl_unlock(); 747 } 748 749 static void tun_detach_all(struct net_device *dev) 750 { 751 struct tun_struct *tun = netdev_priv(dev); 752 struct tun_file *tfile, *tmp; 753 int i, n = tun->numqueues; 754 755 for (i = 0; i < n; i++) { 756 tfile = rtnl_dereference(tun->tfiles[i]); 757 BUG_ON(!tfile); 758 tun_napi_disable(tun, tfile); 759 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; 760 tfile->socket.sk->sk_data_ready(tfile->socket.sk); 761 RCU_INIT_POINTER(tfile->tun, NULL); 762 --tun->numqueues; 763 } 764 list_for_each_entry(tfile, &tun->disabled, next) { 765 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; 766 tfile->socket.sk->sk_data_ready(tfile->socket.sk); 767 RCU_INIT_POINTER(tfile->tun, NULL); 768 } 769 BUG_ON(tun->numqueues != 0); 770 771 synchronize_net(); 772 for (i = 0; i < n; i++) { 773 tfile = rtnl_dereference(tun->tfiles[i]); 774 tun_napi_del(tun, tfile); 775 /* Drop read queue */ 776 tun_queue_purge(tfile); 777 sock_put(&tfile->sk); 778 tun_cleanup_tx_ring(tfile); 779 } 780 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { 781 tun_enable_queue(tfile); 782 tun_queue_purge(tfile); 783 sock_put(&tfile->sk); 784 tun_cleanup_tx_ring(tfile); 785 } 786 BUG_ON(tun->numdisabled != 0); 787 788 if (tun->flags & IFF_PERSIST) 789 module_put(THIS_MODULE); 790 } 791 792 static int tun_attach(struct tun_struct *tun, struct file *file, 793 bool skip_filter, bool napi) 794 { 795 struct tun_file *tfile = file->private_data; 796 struct net_device *dev = tun->dev; 797 int err; 798 799 err = security_tun_dev_attach(tfile->socket.sk, tun->security); 800 if (err < 0) 801 goto out; 802 803 err = -EINVAL; 804 if (rtnl_dereference(tfile->tun) && !tfile->detached) 805 goto out; 806 807 err = -EBUSY; 808 if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1) 809 goto out; 810 811 err = -E2BIG; 812 if (!tfile->detached && 813 tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES) 814 goto out; 815 816 err = 0; 817 818 /* Re-attach the filter to persist device */ 819 if (!skip_filter && (tun->filter_attached == true)) { 820 lock_sock(tfile->socket.sk); 821 err = sk_attach_filter(&tun->fprog, tfile->socket.sk); 822 release_sock(tfile->socket.sk); 823 if (!err) 824 goto out; 825 } 826 827 if (!tfile->detached && 828 ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) { 829 err = -ENOMEM; 830 goto out; 831 } 832 833 tfile->queue_index = tun->numqueues; 834 tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN; 835 836 if (tfile->detached) { 837 /* Re-attach detached tfile, updating XDP queue_index */ 838 WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq)); 839 840 if (tfile->xdp_rxq.queue_index != tfile->queue_index) 841 tfile->xdp_rxq.queue_index = tfile->queue_index; 842 } else { 843 /* Setup XDP RX-queue info, for new tfile getting attached */ 844 err = xdp_rxq_info_reg(&tfile->xdp_rxq, 845 tun->dev, tfile->queue_index); 846 if (err < 0) 847 goto out; 848 err = 0; 849 } 850 851 rcu_assign_pointer(tfile->tun, tun); 852 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); 853 tun->numqueues++; 854 855 if (tfile->detached) { 856 tun_enable_queue(tfile); 857 } else { 858 sock_hold(&tfile->sk); 859 tun_napi_init(tun, tfile, napi); 860 } 861 862 tun_set_real_num_queues(tun); 863 864 /* device is allowed to go away first, so no need to hold extra 865 * refcnt. 866 */ 867 868 out: 869 return err; 870 } 871 872 static struct tun_struct *tun_get(struct tun_file *tfile) 873 { 874 struct tun_struct *tun; 875 876 rcu_read_lock(); 877 tun = rcu_dereference(tfile->tun); 878 if (tun) 879 dev_hold(tun->dev); 880 rcu_read_unlock(); 881 882 return tun; 883 } 884 885 static void tun_put(struct tun_struct *tun) 886 { 887 dev_put(tun->dev); 888 } 889 890 /* TAP filtering */ 891 static void addr_hash_set(u32 *mask, const u8 *addr) 892 { 893 int n = ether_crc(ETH_ALEN, addr) >> 26; 894 mask[n >> 5] |= (1 << (n & 31)); 895 } 896 897 static unsigned int addr_hash_test(const u32 *mask, const u8 *addr) 898 { 899 int n = ether_crc(ETH_ALEN, addr) >> 26; 900 return mask[n >> 5] & (1 << (n & 31)); 901 } 902 903 static int update_filter(struct tap_filter *filter, void __user *arg) 904 { 905 struct { u8 u[ETH_ALEN]; } *addr; 906 struct tun_filter uf; 907 int err, alen, n, nexact; 908 909 if (copy_from_user(&uf, arg, sizeof(uf))) 910 return -EFAULT; 911 912 if (!uf.count) { 913 /* Disabled */ 914 filter->count = 0; 915 return 0; 916 } 917 918 alen = ETH_ALEN * uf.count; 919 addr = memdup_user(arg + sizeof(uf), alen); 920 if (IS_ERR(addr)) 921 return PTR_ERR(addr); 922 923 /* The filter is updated without holding any locks. Which is 924 * perfectly safe. We disable it first and in the worst 925 * case we'll accept a few undesired packets. */ 926 filter->count = 0; 927 wmb(); 928 929 /* Use first set of addresses as an exact filter */ 930 for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++) 931 memcpy(filter->addr[n], addr[n].u, ETH_ALEN); 932 933 nexact = n; 934 935 /* Remaining multicast addresses are hashed, 936 * unicast will leave the filter disabled. */ 937 memset(filter->mask, 0, sizeof(filter->mask)); 938 for (; n < uf.count; n++) { 939 if (!is_multicast_ether_addr(addr[n].u)) { 940 err = 0; /* no filter */ 941 goto free_addr; 942 } 943 addr_hash_set(filter->mask, addr[n].u); 944 } 945 946 /* For ALLMULTI just set the mask to all ones. 947 * This overrides the mask populated above. */ 948 if ((uf.flags & TUN_FLT_ALLMULTI)) 949 memset(filter->mask, ~0, sizeof(filter->mask)); 950 951 /* Now enable the filter */ 952 wmb(); 953 filter->count = nexact; 954 955 /* Return the number of exact filters */ 956 err = nexact; 957 free_addr: 958 kfree(addr); 959 return err; 960 } 961 962 /* Returns: 0 - drop, !=0 - accept */ 963 static int run_filter(struct tap_filter *filter, const struct sk_buff *skb) 964 { 965 /* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect 966 * at this point. */ 967 struct ethhdr *eh = (struct ethhdr *) skb->data; 968 int i; 969 970 /* Exact match */ 971 for (i = 0; i < filter->count; i++) 972 if (ether_addr_equal(eh->h_dest, filter->addr[i])) 973 return 1; 974 975 /* Inexact match (multicast only) */ 976 if (is_multicast_ether_addr(eh->h_dest)) 977 return addr_hash_test(filter->mask, eh->h_dest); 978 979 return 0; 980 } 981 982 /* 983 * Checks whether the packet is accepted or not. 984 * Returns: 0 - drop, !=0 - accept 985 */ 986 static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) 987 { 988 if (!filter->count) 989 return 1; 990 991 return run_filter(filter, skb); 992 } 993 994 /* Network device part of the driver */ 995 996 static const struct ethtool_ops tun_ethtool_ops; 997 998 /* Net device detach from fd. */ 999 static void tun_net_uninit(struct net_device *dev) 1000 { 1001 tun_detach_all(dev); 1002 } 1003 1004 /* Net device open. */ 1005 static int tun_net_open(struct net_device *dev) 1006 { 1007 struct tun_struct *tun = netdev_priv(dev); 1008 int i; 1009 1010 netif_tx_start_all_queues(dev); 1011 1012 for (i = 0; i < tun->numqueues; i++) { 1013 struct tun_file *tfile; 1014 1015 tfile = rtnl_dereference(tun->tfiles[i]); 1016 tfile->socket.sk->sk_write_space(tfile->socket.sk); 1017 } 1018 1019 return 0; 1020 } 1021 1022 /* Net device close. */ 1023 static int tun_net_close(struct net_device *dev) 1024 { 1025 netif_tx_stop_all_queues(dev); 1026 return 0; 1027 } 1028 1029 /* Net device start xmit */ 1030 static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb) 1031 { 1032 #ifdef CONFIG_RPS 1033 if (tun->numqueues == 1 && static_key_false(&rps_needed)) { 1034 /* Select queue was not called for the skbuff, so we extract the 1035 * RPS hash and save it into the flow_table here. 1036 */ 1037 __u32 rxhash; 1038 1039 rxhash = __skb_get_hash_symmetric(skb); 1040 if (rxhash) { 1041 struct tun_flow_entry *e; 1042 e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], 1043 rxhash); 1044 if (e) 1045 tun_flow_save_rps_rxhash(e, rxhash); 1046 } 1047 } 1048 #endif 1049 } 1050 1051 static unsigned int run_ebpf_filter(struct tun_struct *tun, 1052 struct sk_buff *skb, 1053 int len) 1054 { 1055 struct tun_prog *prog = rcu_dereference(tun->filter_prog); 1056 1057 if (prog) 1058 len = bpf_prog_run_clear_cb(prog->prog, skb); 1059 1060 return len; 1061 } 1062 1063 /* Net device start xmit */ 1064 static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) 1065 { 1066 struct tun_struct *tun = netdev_priv(dev); 1067 int txq = skb->queue_mapping; 1068 struct tun_file *tfile; 1069 int len = skb->len; 1070 1071 rcu_read_lock(); 1072 tfile = rcu_dereference(tun->tfiles[txq]); 1073 1074 /* Drop packet if interface is not attached */ 1075 if (txq >= tun->numqueues) 1076 goto drop; 1077 1078 if (!rcu_dereference(tun->steering_prog)) 1079 tun_automq_xmit(tun, skb); 1080 1081 tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len); 1082 1083 BUG_ON(!tfile); 1084 1085 /* Drop if the filter does not like it. 1086 * This is a noop if the filter is disabled. 1087 * Filter can be enabled only for the TAP devices. */ 1088 if (!check_filter(&tun->txflt, skb)) 1089 goto drop; 1090 1091 if (tfile->socket.sk->sk_filter && 1092 sk_filter(tfile->socket.sk, skb)) 1093 goto drop; 1094 1095 len = run_ebpf_filter(tun, skb, len); 1096 1097 /* Trim extra bytes since we may insert vlan proto & TCI 1098 * in tun_put_user(). 1099 */ 1100 len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0; 1101 if (len <= 0 || pskb_trim(skb, len)) 1102 goto drop; 1103 1104 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 1105 goto drop; 1106 1107 skb_tx_timestamp(skb); 1108 1109 /* Orphan the skb - required as we might hang on to it 1110 * for indefinite time. 1111 */ 1112 skb_orphan(skb); 1113 1114 nf_reset(skb); 1115 1116 if (ptr_ring_produce(&tfile->tx_ring, skb)) 1117 goto drop; 1118 1119 /* Notify and wake up reader process */ 1120 if (tfile->flags & TUN_FASYNC) 1121 kill_fasync(&tfile->fasync, SIGIO, POLL_IN); 1122 tfile->socket.sk->sk_data_ready(tfile->socket.sk); 1123 1124 rcu_read_unlock(); 1125 return NETDEV_TX_OK; 1126 1127 drop: 1128 this_cpu_inc(tun->pcpu_stats->tx_dropped); 1129 skb_tx_error(skb); 1130 kfree_skb(skb); 1131 rcu_read_unlock(); 1132 return NET_XMIT_DROP; 1133 } 1134 1135 static void tun_net_mclist(struct net_device *dev) 1136 { 1137 /* 1138 * This callback is supposed to deal with mc filter in 1139 * _rx_ path and has nothing to do with the _tx_ path. 1140 * In rx path we always accept everything userspace gives us. 1141 */ 1142 } 1143 1144 static netdev_features_t tun_net_fix_features(struct net_device *dev, 1145 netdev_features_t features) 1146 { 1147 struct tun_struct *tun = netdev_priv(dev); 1148 1149 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); 1150 } 1151 #ifdef CONFIG_NET_POLL_CONTROLLER 1152 static void tun_poll_controller(struct net_device *dev) 1153 { 1154 /* 1155 * Tun only receives frames when: 1156 * 1) the char device endpoint gets data from user space 1157 * 2) the tun socket gets a sendmsg call from user space 1158 * If NAPI is not enabled, since both of those are synchronous 1159 * operations, we are guaranteed never to have pending data when we poll 1160 * for it so there is nothing to do here but return. 1161 * We need this though so netpoll recognizes us as an interface that 1162 * supports polling, which enables bridge devices in virt setups to 1163 * still use netconsole 1164 * If NAPI is enabled, however, we need to schedule polling for all 1165 * queues unless we are using napi_gro_frags(), which we call in 1166 * process context and not in NAPI context. 1167 */ 1168 struct tun_struct *tun = netdev_priv(dev); 1169 1170 if (tun->flags & IFF_NAPI) { 1171 struct tun_file *tfile; 1172 int i; 1173 1174 if (tun_napi_frags_enabled(tun)) 1175 return; 1176 1177 rcu_read_lock(); 1178 for (i = 0; i < tun->numqueues; i++) { 1179 tfile = rcu_dereference(tun->tfiles[i]); 1180 if (tfile->napi_enabled) 1181 napi_schedule(&tfile->napi); 1182 } 1183 rcu_read_unlock(); 1184 } 1185 return; 1186 } 1187 #endif 1188 1189 static void tun_set_headroom(struct net_device *dev, int new_hr) 1190 { 1191 struct tun_struct *tun = netdev_priv(dev); 1192 1193 if (new_hr < NET_SKB_PAD) 1194 new_hr = NET_SKB_PAD; 1195 1196 tun->align = new_hr; 1197 } 1198 1199 static void 1200 tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 1201 { 1202 u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0; 1203 struct tun_struct *tun = netdev_priv(dev); 1204 struct tun_pcpu_stats *p; 1205 int i; 1206 1207 for_each_possible_cpu(i) { 1208 u64 rxpackets, rxbytes, txpackets, txbytes; 1209 unsigned int start; 1210 1211 p = per_cpu_ptr(tun->pcpu_stats, i); 1212 do { 1213 start = u64_stats_fetch_begin(&p->syncp); 1214 rxpackets = p->rx_packets; 1215 rxbytes = p->rx_bytes; 1216 txpackets = p->tx_packets; 1217 txbytes = p->tx_bytes; 1218 } while (u64_stats_fetch_retry(&p->syncp, start)); 1219 1220 stats->rx_packets += rxpackets; 1221 stats->rx_bytes += rxbytes; 1222 stats->tx_packets += txpackets; 1223 stats->tx_bytes += txbytes; 1224 1225 /* u32 counters */ 1226 rx_dropped += p->rx_dropped; 1227 rx_frame_errors += p->rx_frame_errors; 1228 tx_dropped += p->tx_dropped; 1229 } 1230 stats->rx_dropped = rx_dropped; 1231 stats->rx_frame_errors = rx_frame_errors; 1232 stats->tx_dropped = tx_dropped; 1233 } 1234 1235 static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, 1236 struct netlink_ext_ack *extack) 1237 { 1238 struct tun_struct *tun = netdev_priv(dev); 1239 struct bpf_prog *old_prog; 1240 1241 old_prog = rtnl_dereference(tun->xdp_prog); 1242 rcu_assign_pointer(tun->xdp_prog, prog); 1243 if (old_prog) 1244 bpf_prog_put(old_prog); 1245 1246 return 0; 1247 } 1248 1249 static u32 tun_xdp_query(struct net_device *dev) 1250 { 1251 struct tun_struct *tun = netdev_priv(dev); 1252 const struct bpf_prog *xdp_prog; 1253 1254 xdp_prog = rtnl_dereference(tun->xdp_prog); 1255 if (xdp_prog) 1256 return xdp_prog->aux->id; 1257 1258 return 0; 1259 } 1260 1261 static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) 1262 { 1263 switch (xdp->command) { 1264 case XDP_SETUP_PROG: 1265 return tun_xdp_set(dev, xdp->prog, xdp->extack); 1266 case XDP_QUERY_PROG: 1267 xdp->prog_id = tun_xdp_query(dev); 1268 xdp->prog_attached = !!xdp->prog_id; 1269 return 0; 1270 default: 1271 return -EINVAL; 1272 } 1273 } 1274 1275 static const struct net_device_ops tun_netdev_ops = { 1276 .ndo_uninit = tun_net_uninit, 1277 .ndo_open = tun_net_open, 1278 .ndo_stop = tun_net_close, 1279 .ndo_start_xmit = tun_net_xmit, 1280 .ndo_fix_features = tun_net_fix_features, 1281 .ndo_select_queue = tun_select_queue, 1282 #ifdef CONFIG_NET_POLL_CONTROLLER 1283 .ndo_poll_controller = tun_poll_controller, 1284 #endif 1285 .ndo_set_rx_headroom = tun_set_headroom, 1286 .ndo_get_stats64 = tun_net_get_stats64, 1287 }; 1288 1289 static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) 1290 { 1291 struct tun_struct *tun = netdev_priv(dev); 1292 struct xdp_buff *buff = xdp->data_hard_start; 1293 int headroom = xdp->data - xdp->data_hard_start; 1294 struct tun_file *tfile; 1295 u32 numqueues; 1296 int ret = 0; 1297 1298 /* Assure headroom is available and buff is properly aligned */ 1299 if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp))) 1300 return -ENOSPC; 1301 1302 *buff = *xdp; 1303 1304 rcu_read_lock(); 1305 1306 numqueues = READ_ONCE(tun->numqueues); 1307 if (!numqueues) { 1308 ret = -ENOSPC; 1309 goto out; 1310 } 1311 1312 tfile = rcu_dereference(tun->tfiles[smp_processor_id() % 1313 numqueues]); 1314 /* Encode the XDP flag into lowest bit for consumer to differ 1315 * XDP buffer from sk_buff. 1316 */ 1317 if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) { 1318 this_cpu_inc(tun->pcpu_stats->tx_dropped); 1319 ret = -ENOSPC; 1320 } 1321 1322 out: 1323 rcu_read_unlock(); 1324 return ret; 1325 } 1326 1327 static void tun_xdp_flush(struct net_device *dev) 1328 { 1329 struct tun_struct *tun = netdev_priv(dev); 1330 struct tun_file *tfile; 1331 u32 numqueues; 1332 1333 rcu_read_lock(); 1334 1335 numqueues = READ_ONCE(tun->numqueues); 1336 if (!numqueues) 1337 goto out; 1338 1339 tfile = rcu_dereference(tun->tfiles[smp_processor_id() % 1340 numqueues]); 1341 /* Notify and wake up reader process */ 1342 if (tfile->flags & TUN_FASYNC) 1343 kill_fasync(&tfile->fasync, SIGIO, POLL_IN); 1344 tfile->socket.sk->sk_data_ready(tfile->socket.sk); 1345 1346 out: 1347 rcu_read_unlock(); 1348 } 1349 1350 static const struct net_device_ops tap_netdev_ops = { 1351 .ndo_uninit = tun_net_uninit, 1352 .ndo_open = tun_net_open, 1353 .ndo_stop = tun_net_close, 1354 .ndo_start_xmit = tun_net_xmit, 1355 .ndo_fix_features = tun_net_fix_features, 1356 .ndo_set_rx_mode = tun_net_mclist, 1357 .ndo_set_mac_address = eth_mac_addr, 1358 .ndo_validate_addr = eth_validate_addr, 1359 .ndo_select_queue = tun_select_queue, 1360 #ifdef CONFIG_NET_POLL_CONTROLLER 1361 .ndo_poll_controller = tun_poll_controller, 1362 #endif 1363 .ndo_features_check = passthru_features_check, 1364 .ndo_set_rx_headroom = tun_set_headroom, 1365 .ndo_get_stats64 = tun_net_get_stats64, 1366 .ndo_bpf = tun_xdp, 1367 .ndo_xdp_xmit = tun_xdp_xmit, 1368 .ndo_xdp_flush = tun_xdp_flush, 1369 }; 1370 1371 static void tun_flow_init(struct tun_struct *tun) 1372 { 1373 int i; 1374 1375 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) 1376 INIT_HLIST_HEAD(&tun->flows[i]); 1377 1378 tun->ageing_time = TUN_FLOW_EXPIRE; 1379 timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0); 1380 mod_timer(&tun->flow_gc_timer, 1381 round_jiffies_up(jiffies + tun->ageing_time)); 1382 } 1383 1384 static void tun_flow_uninit(struct tun_struct *tun) 1385 { 1386 del_timer_sync(&tun->flow_gc_timer); 1387 tun_flow_flush(tun); 1388 } 1389 1390 #define MIN_MTU 68 1391 #define MAX_MTU 65535 1392 1393 /* Initialize net device. */ 1394 static void tun_net_init(struct net_device *dev) 1395 { 1396 struct tun_struct *tun = netdev_priv(dev); 1397 1398 switch (tun->flags & TUN_TYPE_MASK) { 1399 case IFF_TUN: 1400 dev->netdev_ops = &tun_netdev_ops; 1401 1402 /* Point-to-Point TUN Device */ 1403 dev->hard_header_len = 0; 1404 dev->addr_len = 0; 1405 dev->mtu = 1500; 1406 1407 /* Zero header length */ 1408 dev->type = ARPHRD_NONE; 1409 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 1410 break; 1411 1412 case IFF_TAP: 1413 dev->netdev_ops = &tap_netdev_ops; 1414 /* Ethernet TAP Device */ 1415 ether_setup(dev); 1416 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1417 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1418 1419 eth_hw_addr_random(dev); 1420 1421 break; 1422 } 1423 1424 dev->min_mtu = MIN_MTU; 1425 dev->max_mtu = MAX_MTU - dev->hard_header_len; 1426 } 1427 1428 /* Character device part */ 1429 1430 /* Poll */ 1431 static __poll_t tun_chr_poll(struct file *file, poll_table *wait) 1432 { 1433 struct tun_file *tfile = file->private_data; 1434 struct tun_struct *tun = tun_get(tfile); 1435 struct sock *sk; 1436 __poll_t mask = 0; 1437 1438 if (!tun) 1439 return EPOLLERR; 1440 1441 sk = tfile->socket.sk; 1442 1443 tun_debug(KERN_INFO, tun, "tun_chr_poll\n"); 1444 1445 poll_wait(file, sk_sleep(sk), wait); 1446 1447 if (!ptr_ring_empty(&tfile->tx_ring)) 1448 mask |= EPOLLIN | EPOLLRDNORM; 1449 1450 if (tun->dev->flags & IFF_UP && 1451 (sock_writeable(sk) || 1452 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 1453 sock_writeable(sk)))) 1454 mask |= EPOLLOUT | EPOLLWRNORM; 1455 1456 if (tun->dev->reg_state != NETREG_REGISTERED) 1457 mask = EPOLLERR; 1458 1459 tun_put(tun); 1460 return mask; 1461 } 1462 1463 static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, 1464 size_t len, 1465 const struct iov_iter *it) 1466 { 1467 struct sk_buff *skb; 1468 size_t linear; 1469 int err; 1470 int i; 1471 1472 if (it->nr_segs > MAX_SKB_FRAGS + 1) 1473 return ERR_PTR(-ENOMEM); 1474 1475 local_bh_disable(); 1476 skb = napi_get_frags(&tfile->napi); 1477 local_bh_enable(); 1478 if (!skb) 1479 return ERR_PTR(-ENOMEM); 1480 1481 linear = iov_iter_single_seg_count(it); 1482 err = __skb_grow(skb, linear); 1483 if (err) 1484 goto free; 1485 1486 skb->len = len; 1487 skb->data_len = len - linear; 1488 skb->truesize += skb->data_len; 1489 1490 for (i = 1; i < it->nr_segs; i++) { 1491 struct page_frag *pfrag = ¤t->task_frag; 1492 size_t fragsz = it->iov[i].iov_len; 1493 1494 if (fragsz == 0 || fragsz > PAGE_SIZE) { 1495 err = -EINVAL; 1496 goto free; 1497 } 1498 1499 if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) { 1500 err = -ENOMEM; 1501 goto free; 1502 } 1503 1504 skb_fill_page_desc(skb, i - 1, pfrag->page, 1505 pfrag->offset, fragsz); 1506 page_ref_inc(pfrag->page); 1507 pfrag->offset += fragsz; 1508 } 1509 1510 return skb; 1511 free: 1512 /* frees skb and all frags allocated with napi_alloc_frag() */ 1513 napi_free_frags(&tfile->napi); 1514 return ERR_PTR(err); 1515 } 1516 1517 /* prepad is the amount to reserve at front. len is length after that. 1518 * linear is a hint as to how much to copy (usually headers). */ 1519 static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, 1520 size_t prepad, size_t len, 1521 size_t linear, int noblock) 1522 { 1523 struct sock *sk = tfile->socket.sk; 1524 struct sk_buff *skb; 1525 int err; 1526 1527 /* Under a page? Don't bother with paged skb. */ 1528 if (prepad + len < PAGE_SIZE || !linear) 1529 linear = len; 1530 1531 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 1532 &err, 0); 1533 if (!skb) 1534 return ERR_PTR(err); 1535 1536 skb_reserve(skb, prepad); 1537 skb_put(skb, linear); 1538 skb->data_len = len - linear; 1539 skb->len += len - linear; 1540 1541 return skb; 1542 } 1543 1544 static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, 1545 struct sk_buff *skb, int more) 1546 { 1547 struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 1548 struct sk_buff_head process_queue; 1549 u32 rx_batched = tun->rx_batched; 1550 bool rcv = false; 1551 1552 if (!rx_batched || (!more && skb_queue_empty(queue))) { 1553 local_bh_disable(); 1554 netif_receive_skb(skb); 1555 local_bh_enable(); 1556 return; 1557 } 1558 1559 spin_lock(&queue->lock); 1560 if (!more || skb_queue_len(queue) == rx_batched) { 1561 __skb_queue_head_init(&process_queue); 1562 skb_queue_splice_tail_init(queue, &process_queue); 1563 rcv = true; 1564 } else { 1565 __skb_queue_tail(queue, skb); 1566 } 1567 spin_unlock(&queue->lock); 1568 1569 if (rcv) { 1570 struct sk_buff *nskb; 1571 1572 local_bh_disable(); 1573 while ((nskb = __skb_dequeue(&process_queue))) 1574 netif_receive_skb(nskb); 1575 netif_receive_skb(skb); 1576 local_bh_enable(); 1577 } 1578 } 1579 1580 static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, 1581 int len, int noblock, bool zerocopy) 1582 { 1583 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 1584 return false; 1585 1586 if (tfile->socket.sk->sk_sndbuf != INT_MAX) 1587 return false; 1588 1589 if (!noblock) 1590 return false; 1591 1592 if (zerocopy) 1593 return false; 1594 1595 if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + 1596 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) 1597 return false; 1598 1599 return true; 1600 } 1601 1602 static struct sk_buff *tun_build_skb(struct tun_struct *tun, 1603 struct tun_file *tfile, 1604 struct iov_iter *from, 1605 struct virtio_net_hdr *hdr, 1606 int len, int *skb_xdp) 1607 { 1608 struct page_frag *alloc_frag = ¤t->task_frag; 1609 struct sk_buff *skb; 1610 struct bpf_prog *xdp_prog; 1611 int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1612 unsigned int delta = 0; 1613 char *buf; 1614 size_t copied; 1615 bool xdp_xmit = false; 1616 int err, pad = TUN_RX_PAD; 1617 1618 rcu_read_lock(); 1619 xdp_prog = rcu_dereference(tun->xdp_prog); 1620 if (xdp_prog) 1621 pad += TUN_HEADROOM; 1622 buflen += SKB_DATA_ALIGN(len + pad); 1623 rcu_read_unlock(); 1624 1625 alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES); 1626 if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) 1627 return ERR_PTR(-ENOMEM); 1628 1629 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 1630 copied = copy_page_from_iter(alloc_frag->page, 1631 alloc_frag->offset + pad, 1632 len, from); 1633 if (copied != len) 1634 return ERR_PTR(-EFAULT); 1635 1636 /* There's a small window that XDP may be set after the check 1637 * of xdp_prog above, this should be rare and for simplicity 1638 * we do XDP on skb in case the headroom is not enough. 1639 */ 1640 if (hdr->gso_type || !xdp_prog) 1641 *skb_xdp = 1; 1642 else 1643 *skb_xdp = 0; 1644 1645 preempt_disable(); 1646 rcu_read_lock(); 1647 xdp_prog = rcu_dereference(tun->xdp_prog); 1648 if (xdp_prog && !*skb_xdp) { 1649 struct xdp_buff xdp; 1650 void *orig_data; 1651 u32 act; 1652 1653 xdp.data_hard_start = buf; 1654 xdp.data = buf + pad; 1655 xdp_set_data_meta_invalid(&xdp); 1656 xdp.data_end = xdp.data + len; 1657 xdp.rxq = &tfile->xdp_rxq; 1658 orig_data = xdp.data; 1659 act = bpf_prog_run_xdp(xdp_prog, &xdp); 1660 1661 switch (act) { 1662 case XDP_REDIRECT: 1663 get_page(alloc_frag->page); 1664 alloc_frag->offset += buflen; 1665 err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); 1666 xdp_do_flush_map(); 1667 if (err) 1668 goto err_redirect; 1669 rcu_read_unlock(); 1670 preempt_enable(); 1671 return NULL; 1672 case XDP_TX: 1673 xdp_xmit = true; 1674 /* fall through */ 1675 case XDP_PASS: 1676 delta = orig_data - xdp.data; 1677 break; 1678 default: 1679 bpf_warn_invalid_xdp_action(act); 1680 /* fall through */ 1681 case XDP_ABORTED: 1682 trace_xdp_exception(tun->dev, xdp_prog, act); 1683 /* fall through */ 1684 case XDP_DROP: 1685 goto err_xdp; 1686 } 1687 } 1688 1689 skb = build_skb(buf, buflen); 1690 if (!skb) { 1691 rcu_read_unlock(); 1692 preempt_enable(); 1693 return ERR_PTR(-ENOMEM); 1694 } 1695 1696 skb_reserve(skb, pad - delta); 1697 skb_put(skb, len + delta); 1698 get_page(alloc_frag->page); 1699 alloc_frag->offset += buflen; 1700 1701 if (xdp_xmit) { 1702 skb->dev = tun->dev; 1703 generic_xdp_tx(skb, xdp_prog); 1704 rcu_read_unlock(); 1705 preempt_enable(); 1706 return NULL; 1707 } 1708 1709 rcu_read_unlock(); 1710 preempt_enable(); 1711 1712 return skb; 1713 1714 err_redirect: 1715 put_page(alloc_frag->page); 1716 err_xdp: 1717 rcu_read_unlock(); 1718 preempt_enable(); 1719 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1720 return NULL; 1721 } 1722 1723 /* Get packet from user space buffer */ 1724 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, 1725 void *msg_control, struct iov_iter *from, 1726 int noblock, bool more) 1727 { 1728 struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; 1729 struct sk_buff *skb; 1730 size_t total_len = iov_iter_count(from); 1731 size_t len = total_len, align = tun->align, linear; 1732 struct virtio_net_hdr gso = { 0 }; 1733 struct tun_pcpu_stats *stats; 1734 int good_linear; 1735 int copylen; 1736 bool zerocopy = false; 1737 int err; 1738 u32 rxhash = 0; 1739 int skb_xdp = 1; 1740 bool frags = tun_napi_frags_enabled(tun); 1741 1742 if (!(tun->dev->flags & IFF_UP)) 1743 return -EIO; 1744 1745 if (!(tun->flags & IFF_NO_PI)) { 1746 if (len < sizeof(pi)) 1747 return -EINVAL; 1748 len -= sizeof(pi); 1749 1750 if (!copy_from_iter_full(&pi, sizeof(pi), from)) 1751 return -EFAULT; 1752 } 1753 1754 if (tun->flags & IFF_VNET_HDR) { 1755 int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 1756 1757 if (len < vnet_hdr_sz) 1758 return -EINVAL; 1759 len -= vnet_hdr_sz; 1760 1761 if (!copy_from_iter_full(&gso, sizeof(gso), from)) 1762 return -EFAULT; 1763 1764 if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 1765 tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len)) 1766 gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2); 1767 1768 if (tun16_to_cpu(tun, gso.hdr_len) > len) 1769 return -EINVAL; 1770 iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); 1771 } 1772 1773 if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { 1774 align += NET_IP_ALIGN; 1775 if (unlikely(len < ETH_HLEN || 1776 (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN))) 1777 return -EINVAL; 1778 } 1779 1780 good_linear = SKB_MAX_HEAD(align); 1781 1782 if (msg_control) { 1783 struct iov_iter i = *from; 1784 1785 /* There are 256 bytes to be copied in skb, so there is 1786 * enough room for skb expand head in case it is used. 1787 * The rest of the buffer is mapped from userspace. 1788 */ 1789 copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN; 1790 if (copylen > good_linear) 1791 copylen = good_linear; 1792 linear = copylen; 1793 iov_iter_advance(&i, copylen); 1794 if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) 1795 zerocopy = true; 1796 } 1797 1798 if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { 1799 /* For the packet that is not easy to be processed 1800 * (e.g gso or jumbo packet), we will do it at after 1801 * skb was created with generic XDP routine. 1802 */ 1803 skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); 1804 if (IS_ERR(skb)) { 1805 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1806 return PTR_ERR(skb); 1807 } 1808 if (!skb) 1809 return total_len; 1810 } else { 1811 if (!zerocopy) { 1812 copylen = len; 1813 if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) 1814 linear = good_linear; 1815 else 1816 linear = tun16_to_cpu(tun, gso.hdr_len); 1817 } 1818 1819 if (frags) { 1820 mutex_lock(&tfile->napi_mutex); 1821 skb = tun_napi_alloc_frags(tfile, copylen, from); 1822 /* tun_napi_alloc_frags() enforces a layout for the skb. 1823 * If zerocopy is enabled, then this layout will be 1824 * overwritten by zerocopy_sg_from_iter(). 1825 */ 1826 zerocopy = false; 1827 } else { 1828 skb = tun_alloc_skb(tfile, align, copylen, linear, 1829 noblock); 1830 } 1831 1832 if (IS_ERR(skb)) { 1833 if (PTR_ERR(skb) != -EAGAIN) 1834 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1835 if (frags) 1836 mutex_unlock(&tfile->napi_mutex); 1837 return PTR_ERR(skb); 1838 } 1839 1840 if (zerocopy) 1841 err = zerocopy_sg_from_iter(skb, from); 1842 else 1843 err = skb_copy_datagram_from_iter(skb, 0, from, len); 1844 1845 if (err) { 1846 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1847 kfree_skb(skb); 1848 if (frags) { 1849 tfile->napi.skb = NULL; 1850 mutex_unlock(&tfile->napi_mutex); 1851 } 1852 1853 return -EFAULT; 1854 } 1855 } 1856 1857 if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { 1858 this_cpu_inc(tun->pcpu_stats->rx_frame_errors); 1859 kfree_skb(skb); 1860 if (frags) { 1861 tfile->napi.skb = NULL; 1862 mutex_unlock(&tfile->napi_mutex); 1863 } 1864 1865 return -EINVAL; 1866 } 1867 1868 switch (tun->flags & TUN_TYPE_MASK) { 1869 case IFF_TUN: 1870 if (tun->flags & IFF_NO_PI) { 1871 u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; 1872 1873 switch (ip_version) { 1874 case 4: 1875 pi.proto = htons(ETH_P_IP); 1876 break; 1877 case 6: 1878 pi.proto = htons(ETH_P_IPV6); 1879 break; 1880 default: 1881 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1882 kfree_skb(skb); 1883 return -EINVAL; 1884 } 1885 } 1886 1887 skb_reset_mac_header(skb); 1888 skb->protocol = pi.proto; 1889 skb->dev = tun->dev; 1890 break; 1891 case IFF_TAP: 1892 if (!frags) 1893 skb->protocol = eth_type_trans(skb, tun->dev); 1894 break; 1895 } 1896 1897 /* copy skb_ubuf_info for callback when skb has no error */ 1898 if (zerocopy) { 1899 skb_shinfo(skb)->destructor_arg = msg_control; 1900 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1901 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1902 } else if (msg_control) { 1903 struct ubuf_info *uarg = msg_control; 1904 uarg->callback(uarg, false); 1905 } 1906 1907 skb_reset_network_header(skb); 1908 skb_probe_transport_header(skb, 0); 1909 1910 if (skb_xdp) { 1911 struct bpf_prog *xdp_prog; 1912 int ret; 1913 1914 rcu_read_lock(); 1915 xdp_prog = rcu_dereference(tun->xdp_prog); 1916 if (xdp_prog) { 1917 ret = do_xdp_generic(xdp_prog, skb); 1918 if (ret != XDP_PASS) { 1919 rcu_read_unlock(); 1920 return total_len; 1921 } 1922 } 1923 rcu_read_unlock(); 1924 } 1925 1926 rcu_read_lock(); 1927 if (!rcu_dereference(tun->steering_prog)) 1928 rxhash = __skb_get_hash_symmetric(skb); 1929 rcu_read_unlock(); 1930 1931 if (frags) { 1932 /* Exercise flow dissector code path. */ 1933 u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb)); 1934 1935 if (unlikely(headlen > skb_headlen(skb))) { 1936 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1937 napi_free_frags(&tfile->napi); 1938 mutex_unlock(&tfile->napi_mutex); 1939 WARN_ON(1); 1940 return -ENOMEM; 1941 } 1942 1943 local_bh_disable(); 1944 napi_gro_frags(&tfile->napi); 1945 local_bh_enable(); 1946 mutex_unlock(&tfile->napi_mutex); 1947 } else if (tfile->napi_enabled) { 1948 struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 1949 int queue_len; 1950 1951 spin_lock_bh(&queue->lock); 1952 __skb_queue_tail(queue, skb); 1953 queue_len = skb_queue_len(queue); 1954 spin_unlock(&queue->lock); 1955 1956 if (!more || queue_len > NAPI_POLL_WEIGHT) 1957 napi_schedule(&tfile->napi); 1958 1959 local_bh_enable(); 1960 } else if (!IS_ENABLED(CONFIG_4KSTACKS)) { 1961 tun_rx_batched(tun, tfile, skb, more); 1962 } else { 1963 netif_rx_ni(skb); 1964 } 1965 1966 stats = get_cpu_ptr(tun->pcpu_stats); 1967 u64_stats_update_begin(&stats->syncp); 1968 stats->rx_packets++; 1969 stats->rx_bytes += len; 1970 u64_stats_update_end(&stats->syncp); 1971 put_cpu_ptr(stats); 1972 1973 if (rxhash) 1974 tun_flow_update(tun, rxhash, tfile); 1975 1976 return total_len; 1977 } 1978 1979 static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) 1980 { 1981 struct file *file = iocb->ki_filp; 1982 struct tun_file *tfile = file->private_data; 1983 struct tun_struct *tun = tun_get(tfile); 1984 ssize_t result; 1985 1986 if (!tun) 1987 return -EBADFD; 1988 1989 result = tun_get_user(tun, tfile, NULL, from, 1990 file->f_flags & O_NONBLOCK, false); 1991 1992 tun_put(tun); 1993 return result; 1994 } 1995 1996 static ssize_t tun_put_user_xdp(struct tun_struct *tun, 1997 struct tun_file *tfile, 1998 struct xdp_buff *xdp, 1999 struct iov_iter *iter) 2000 { 2001 int vnet_hdr_sz = 0; 2002 size_t size = xdp->data_end - xdp->data; 2003 struct tun_pcpu_stats *stats; 2004 size_t ret; 2005 2006 if (tun->flags & IFF_VNET_HDR) { 2007 struct virtio_net_hdr gso = { 0 }; 2008 2009 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 2010 if (unlikely(iov_iter_count(iter) < vnet_hdr_sz)) 2011 return -EINVAL; 2012 if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) != 2013 sizeof(gso))) 2014 return -EFAULT; 2015 iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 2016 } 2017 2018 ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz; 2019 2020 stats = get_cpu_ptr(tun->pcpu_stats); 2021 u64_stats_update_begin(&stats->syncp); 2022 stats->tx_packets++; 2023 stats->tx_bytes += ret; 2024 u64_stats_update_end(&stats->syncp); 2025 put_cpu_ptr(tun->pcpu_stats); 2026 2027 return ret; 2028 } 2029 2030 /* Put packet to the user space buffer */ 2031 static ssize_t tun_put_user(struct tun_struct *tun, 2032 struct tun_file *tfile, 2033 struct sk_buff *skb, 2034 struct iov_iter *iter) 2035 { 2036 struct tun_pi pi = { 0, skb->protocol }; 2037 struct tun_pcpu_stats *stats; 2038 ssize_t total; 2039 int vlan_offset = 0; 2040 int vlan_hlen = 0; 2041 int vnet_hdr_sz = 0; 2042 2043 if (skb_vlan_tag_present(skb)) 2044 vlan_hlen = VLAN_HLEN; 2045 2046 if (tun->flags & IFF_VNET_HDR) 2047 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 2048 2049 total = skb->len + vlan_hlen + vnet_hdr_sz; 2050 2051 if (!(tun->flags & IFF_NO_PI)) { 2052 if (iov_iter_count(iter) < sizeof(pi)) 2053 return -EINVAL; 2054 2055 total += sizeof(pi); 2056 if (iov_iter_count(iter) < total) { 2057 /* Packet will be striped */ 2058 pi.flags |= TUN_PKT_STRIP; 2059 } 2060 2061 if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi)) 2062 return -EFAULT; 2063 } 2064 2065 if (vnet_hdr_sz) { 2066 struct virtio_net_hdr gso; 2067 2068 if (iov_iter_count(iter) < vnet_hdr_sz) 2069 return -EINVAL; 2070 2071 if (virtio_net_hdr_from_skb(skb, &gso, 2072 tun_is_little_endian(tun), true)) { 2073 struct skb_shared_info *sinfo = skb_shinfo(skb); 2074 pr_err("unexpected GSO type: " 2075 "0x%x, gso_size %d, hdr_len %d\n", 2076 sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), 2077 tun16_to_cpu(tun, gso.hdr_len)); 2078 print_hex_dump(KERN_ERR, "tun: ", 2079 DUMP_PREFIX_NONE, 2080 16, 1, skb->head, 2081 min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); 2082 WARN_ON_ONCE(1); 2083 return -EINVAL; 2084 } 2085 2086 if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) 2087 return -EFAULT; 2088 2089 iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 2090 } 2091 2092 if (vlan_hlen) { 2093 int ret; 2094 struct veth veth; 2095 2096 veth.h_vlan_proto = skb->vlan_proto; 2097 veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); 2098 2099 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 2100 2101 ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); 2102 if (ret || !iov_iter_count(iter)) 2103 goto done; 2104 2105 ret = copy_to_iter(&veth, sizeof(veth), iter); 2106 if (ret != sizeof(veth) || !iov_iter_count(iter)) 2107 goto done; 2108 } 2109 2110 skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); 2111 2112 done: 2113 /* caller is in process context, */ 2114 stats = get_cpu_ptr(tun->pcpu_stats); 2115 u64_stats_update_begin(&stats->syncp); 2116 stats->tx_packets++; 2117 stats->tx_bytes += skb->len + vlan_hlen; 2118 u64_stats_update_end(&stats->syncp); 2119 put_cpu_ptr(tun->pcpu_stats); 2120 2121 return total; 2122 } 2123 2124 static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) 2125 { 2126 DECLARE_WAITQUEUE(wait, current); 2127 void *ptr = NULL; 2128 int error = 0; 2129 2130 ptr = ptr_ring_consume(&tfile->tx_ring); 2131 if (ptr) 2132 goto out; 2133 if (noblock) { 2134 error = -EAGAIN; 2135 goto out; 2136 } 2137 2138 add_wait_queue(&tfile->wq.wait, &wait); 2139 current->state = TASK_INTERRUPTIBLE; 2140 2141 while (1) { 2142 ptr = ptr_ring_consume(&tfile->tx_ring); 2143 if (ptr) 2144 break; 2145 if (signal_pending(current)) { 2146 error = -ERESTARTSYS; 2147 break; 2148 } 2149 if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { 2150 error = -EFAULT; 2151 break; 2152 } 2153 2154 schedule(); 2155 } 2156 2157 current->state = TASK_RUNNING; 2158 remove_wait_queue(&tfile->wq.wait, &wait); 2159 2160 out: 2161 *err = error; 2162 return ptr; 2163 } 2164 2165 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, 2166 struct iov_iter *to, 2167 int noblock, void *ptr) 2168 { 2169 ssize_t ret; 2170 int err; 2171 2172 tun_debug(KERN_INFO, tun, "tun_do_read\n"); 2173 2174 if (!iov_iter_count(to)) { 2175 tun_ptr_free(ptr); 2176 return 0; 2177 } 2178 2179 if (!ptr) { 2180 /* Read frames from ring */ 2181 ptr = tun_ring_recv(tfile, noblock, &err); 2182 if (!ptr) 2183 return err; 2184 } 2185 2186 if (tun_is_xdp_buff(ptr)) { 2187 struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); 2188 2189 ret = tun_put_user_xdp(tun, tfile, xdp, to); 2190 put_page(virt_to_head_page(xdp->data)); 2191 } else { 2192 struct sk_buff *skb = ptr; 2193 2194 ret = tun_put_user(tun, tfile, skb, to); 2195 if (unlikely(ret < 0)) 2196 kfree_skb(skb); 2197 else 2198 consume_skb(skb); 2199 } 2200 2201 return ret; 2202 } 2203 2204 static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) 2205 { 2206 struct file *file = iocb->ki_filp; 2207 struct tun_file *tfile = file->private_data; 2208 struct tun_struct *tun = tun_get(tfile); 2209 ssize_t len = iov_iter_count(to), ret; 2210 2211 if (!tun) 2212 return -EBADFD; 2213 ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL); 2214 ret = min_t(ssize_t, ret, len); 2215 if (ret > 0) 2216 iocb->ki_pos = ret; 2217 tun_put(tun); 2218 return ret; 2219 } 2220 2221 static void tun_prog_free(struct rcu_head *rcu) 2222 { 2223 struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu); 2224 2225 bpf_prog_destroy(prog->prog); 2226 kfree(prog); 2227 } 2228 2229 static int __tun_set_ebpf(struct tun_struct *tun, 2230 struct tun_prog __rcu **prog_p, 2231 struct bpf_prog *prog) 2232 { 2233 struct tun_prog *old, *new = NULL; 2234 2235 if (prog) { 2236 new = kmalloc(sizeof(*new), GFP_KERNEL); 2237 if (!new) 2238 return -ENOMEM; 2239 new->prog = prog; 2240 } 2241 2242 spin_lock_bh(&tun->lock); 2243 old = rcu_dereference_protected(*prog_p, 2244 lockdep_is_held(&tun->lock)); 2245 rcu_assign_pointer(*prog_p, new); 2246 spin_unlock_bh(&tun->lock); 2247 2248 if (old) 2249 call_rcu(&old->rcu, tun_prog_free); 2250 2251 return 0; 2252 } 2253 2254 static void tun_free_netdev(struct net_device *dev) 2255 { 2256 struct tun_struct *tun = netdev_priv(dev); 2257 2258 BUG_ON(!(list_empty(&tun->disabled))); 2259 free_percpu(tun->pcpu_stats); 2260 tun_flow_uninit(tun); 2261 security_tun_dev_free_security(tun->security); 2262 __tun_set_ebpf(tun, &tun->steering_prog, NULL); 2263 __tun_set_ebpf(tun, &tun->filter_prog, NULL); 2264 } 2265 2266 static void tun_setup(struct net_device *dev) 2267 { 2268 struct tun_struct *tun = netdev_priv(dev); 2269 2270 tun->owner = INVALID_UID; 2271 tun->group = INVALID_GID; 2272 2273 dev->ethtool_ops = &tun_ethtool_ops; 2274 dev->needs_free_netdev = true; 2275 dev->priv_destructor = tun_free_netdev; 2276 /* We prefer our own queue length */ 2277 dev->tx_queue_len = TUN_READQ_SIZE; 2278 } 2279 2280 /* Trivial set of netlink ops to allow deleting tun or tap 2281 * device with netlink. 2282 */ 2283 static int tun_validate(struct nlattr *tb[], struct nlattr *data[], 2284 struct netlink_ext_ack *extack) 2285 { 2286 return -EINVAL; 2287 } 2288 2289 static struct rtnl_link_ops tun_link_ops __read_mostly = { 2290 .kind = DRV_NAME, 2291 .priv_size = sizeof(struct tun_struct), 2292 .setup = tun_setup, 2293 .validate = tun_validate, 2294 }; 2295 2296 static void tun_sock_write_space(struct sock *sk) 2297 { 2298 struct tun_file *tfile; 2299 wait_queue_head_t *wqueue; 2300 2301 if (!sock_writeable(sk)) 2302 return; 2303 2304 if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) 2305 return; 2306 2307 wqueue = sk_sleep(sk); 2308 if (wqueue && waitqueue_active(wqueue)) 2309 wake_up_interruptible_sync_poll(wqueue, EPOLLOUT | 2310 EPOLLWRNORM | EPOLLWRBAND); 2311 2312 tfile = container_of(sk, struct tun_file, sk); 2313 kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); 2314 } 2315 2316 static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) 2317 { 2318 int ret; 2319 struct tun_file *tfile = container_of(sock, struct tun_file, socket); 2320 struct tun_struct *tun = tun_get(tfile); 2321 2322 if (!tun) 2323 return -EBADFD; 2324 2325 ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, 2326 m->msg_flags & MSG_DONTWAIT, 2327 m->msg_flags & MSG_MORE); 2328 tun_put(tun); 2329 return ret; 2330 } 2331 2332 static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, 2333 int flags) 2334 { 2335 struct tun_file *tfile = container_of(sock, struct tun_file, socket); 2336 struct tun_struct *tun = tun_get(tfile); 2337 void *ptr = m->msg_control; 2338 int ret; 2339 2340 if (!tun) { 2341 ret = -EBADFD; 2342 goto out_free; 2343 } 2344 2345 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { 2346 ret = -EINVAL; 2347 goto out_put_tun; 2348 } 2349 if (flags & MSG_ERRQUEUE) { 2350 ret = sock_recv_errqueue(sock->sk, m, total_len, 2351 SOL_PACKET, TUN_TX_TIMESTAMP); 2352 goto out; 2353 } 2354 ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr); 2355 if (ret > (ssize_t)total_len) { 2356 m->msg_flags |= MSG_TRUNC; 2357 ret = flags & MSG_TRUNC ? ret : total_len; 2358 } 2359 out: 2360 tun_put(tun); 2361 return ret; 2362 2363 out_put_tun: 2364 tun_put(tun); 2365 out_free: 2366 tun_ptr_free(ptr); 2367 return ret; 2368 } 2369 2370 static int tun_ptr_peek_len(void *ptr) 2371 { 2372 if (likely(ptr)) { 2373 if (tun_is_xdp_buff(ptr)) { 2374 struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); 2375 2376 return xdp->data_end - xdp->data; 2377 } 2378 return __skb_array_len_with_tag(ptr); 2379 } else { 2380 return 0; 2381 } 2382 } 2383 2384 static int tun_peek_len(struct socket *sock) 2385 { 2386 struct tun_file *tfile = container_of(sock, struct tun_file, socket); 2387 struct tun_struct *tun; 2388 int ret = 0; 2389 2390 tun = tun_get(tfile); 2391 if (!tun) 2392 return 0; 2393 2394 ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len); 2395 tun_put(tun); 2396 2397 return ret; 2398 } 2399 2400 /* Ops structure to mimic raw sockets with tun */ 2401 static const struct proto_ops tun_socket_ops = { 2402 .peek_len = tun_peek_len, 2403 .sendmsg = tun_sendmsg, 2404 .recvmsg = tun_recvmsg, 2405 }; 2406 2407 static struct proto tun_proto = { 2408 .name = "tun", 2409 .owner = THIS_MODULE, 2410 .obj_size = sizeof(struct tun_file), 2411 }; 2412 2413 static int tun_flags(struct tun_struct *tun) 2414 { 2415 return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); 2416 } 2417 2418 static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr, 2419 char *buf) 2420 { 2421 struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2422 return sprintf(buf, "0x%x\n", tun_flags(tun)); 2423 } 2424 2425 static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, 2426 char *buf) 2427 { 2428 struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2429 return uid_valid(tun->owner)? 2430 sprintf(buf, "%u\n", 2431 from_kuid_munged(current_user_ns(), tun->owner)): 2432 sprintf(buf, "-1\n"); 2433 } 2434 2435 static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, 2436 char *buf) 2437 { 2438 struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2439 return gid_valid(tun->group) ? 2440 sprintf(buf, "%u\n", 2441 from_kgid_munged(current_user_ns(), tun->group)): 2442 sprintf(buf, "-1\n"); 2443 } 2444 2445 static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); 2446 static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL); 2447 static DEVICE_ATTR(group, 0444, tun_show_group, NULL); 2448 2449 static struct attribute *tun_dev_attrs[] = { 2450 &dev_attr_tun_flags.attr, 2451 &dev_attr_owner.attr, 2452 &dev_attr_group.attr, 2453 NULL 2454 }; 2455 2456 static const struct attribute_group tun_attr_group = { 2457 .attrs = tun_dev_attrs 2458 }; 2459 2460 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 2461 { 2462 struct tun_struct *tun; 2463 struct tun_file *tfile = file->private_data; 2464 struct net_device *dev; 2465 int err; 2466 2467 if (tfile->detached) 2468 return -EINVAL; 2469 2470 if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { 2471 if (!capable(CAP_NET_ADMIN)) 2472 return -EPERM; 2473 2474 if (!(ifr->ifr_flags & IFF_NAPI) || 2475 (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) 2476 return -EINVAL; 2477 } 2478 2479 dev = __dev_get_by_name(net, ifr->ifr_name); 2480 if (dev) { 2481 if (ifr->ifr_flags & IFF_TUN_EXCL) 2482 return -EBUSY; 2483 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) 2484 tun = netdev_priv(dev); 2485 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) 2486 tun = netdev_priv(dev); 2487 else 2488 return -EINVAL; 2489 2490 if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != 2491 !!(tun->flags & IFF_MULTI_QUEUE)) 2492 return -EINVAL; 2493 2494 if (tun_not_capable(tun)) 2495 return -EPERM; 2496 err = security_tun_dev_open(tun->security); 2497 if (err < 0) 2498 return err; 2499 2500 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, 2501 ifr->ifr_flags & IFF_NAPI); 2502 if (err < 0) 2503 return err; 2504 2505 if (tun->flags & IFF_MULTI_QUEUE && 2506 (tun->numqueues + tun->numdisabled > 1)) { 2507 /* One or more queue has already been attached, no need 2508 * to initialize the device again. 2509 */ 2510 return 0; 2511 } 2512 } 2513 else { 2514 char *name; 2515 unsigned long flags = 0; 2516 int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? 2517 MAX_TAP_QUEUES : 1; 2518 2519 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2520 return -EPERM; 2521 err = security_tun_dev_create(); 2522 if (err < 0) 2523 return err; 2524 2525 /* Set dev type */ 2526 if (ifr->ifr_flags & IFF_TUN) { 2527 /* TUN device */ 2528 flags |= IFF_TUN; 2529 name = "tun%d"; 2530 } else if (ifr->ifr_flags & IFF_TAP) { 2531 /* TAP device */ 2532 flags |= IFF_TAP; 2533 name = "tap%d"; 2534 } else 2535 return -EINVAL; 2536 2537 if (*ifr->ifr_name) 2538 name = ifr->ifr_name; 2539 2540 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, 2541 NET_NAME_UNKNOWN, tun_setup, queues, 2542 queues); 2543 2544 if (!dev) 2545 return -ENOMEM; 2546 err = dev_get_valid_name(net, dev, name); 2547 if (err < 0) 2548 goto err_free_dev; 2549 2550 dev_net_set(dev, net); 2551 dev->rtnl_link_ops = &tun_link_ops; 2552 dev->ifindex = tfile->ifindex; 2553 dev->sysfs_groups[0] = &tun_attr_group; 2554 2555 tun = netdev_priv(dev); 2556 tun->dev = dev; 2557 tun->flags = flags; 2558 tun->txflt.count = 0; 2559 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); 2560 2561 tun->align = NET_SKB_PAD; 2562 tun->filter_attached = false; 2563 tun->sndbuf = tfile->socket.sk->sk_sndbuf; 2564 tun->rx_batched = 0; 2565 RCU_INIT_POINTER(tun->steering_prog, NULL); 2566 2567 tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); 2568 if (!tun->pcpu_stats) { 2569 err = -ENOMEM; 2570 goto err_free_dev; 2571 } 2572 2573 spin_lock_init(&tun->lock); 2574 2575 err = security_tun_dev_alloc_security(&tun->security); 2576 if (err < 0) 2577 goto err_free_stat; 2578 2579 tun_net_init(dev); 2580 tun_flow_init(tun); 2581 2582 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | 2583 TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | 2584 NETIF_F_HW_VLAN_STAG_TX; 2585 dev->features = dev->hw_features | NETIF_F_LLTX; 2586 dev->vlan_features = dev->features & 2587 ~(NETIF_F_HW_VLAN_CTAG_TX | 2588 NETIF_F_HW_VLAN_STAG_TX); 2589 2590 INIT_LIST_HEAD(&tun->disabled); 2591 err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI); 2592 if (err < 0) 2593 goto err_free_flow; 2594 2595 err = register_netdevice(tun->dev); 2596 if (err < 0) 2597 goto err_detach; 2598 } 2599 2600 netif_carrier_on(tun->dev); 2601 2602 tun_debug(KERN_INFO, tun, "tun_set_iff\n"); 2603 2604 tun->flags = (tun->flags & ~TUN_FEATURES) | 2605 (ifr->ifr_flags & TUN_FEATURES); 2606 2607 /* Make sure persistent devices do not get stuck in 2608 * xoff state. 2609 */ 2610 if (netif_running(tun->dev)) 2611 netif_tx_wake_all_queues(tun->dev); 2612 2613 strcpy(ifr->ifr_name, tun->dev->name); 2614 return 0; 2615 2616 err_detach: 2617 tun_detach_all(dev); 2618 /* register_netdevice() already called tun_free_netdev() */ 2619 goto err_free_dev; 2620 2621 err_free_flow: 2622 tun_flow_uninit(tun); 2623 security_tun_dev_free_security(tun->security); 2624 err_free_stat: 2625 free_percpu(tun->pcpu_stats); 2626 err_free_dev: 2627 free_netdev(dev); 2628 return err; 2629 } 2630 2631 static void tun_get_iff(struct net *net, struct tun_struct *tun, 2632 struct ifreq *ifr) 2633 { 2634 tun_debug(KERN_INFO, tun, "tun_get_iff\n"); 2635 2636 strcpy(ifr->ifr_name, tun->dev->name); 2637 2638 ifr->ifr_flags = tun_flags(tun); 2639 2640 } 2641 2642 /* This is like a cut-down ethtool ops, except done via tun fd so no 2643 * privs required. */ 2644 static int set_offload(struct tun_struct *tun, unsigned long arg) 2645 { 2646 netdev_features_t features = 0; 2647 2648 if (arg & TUN_F_CSUM) { 2649 features |= NETIF_F_HW_CSUM; 2650 arg &= ~TUN_F_CSUM; 2651 2652 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { 2653 if (arg & TUN_F_TSO_ECN) { 2654 features |= NETIF_F_TSO_ECN; 2655 arg &= ~TUN_F_TSO_ECN; 2656 } 2657 if (arg & TUN_F_TSO4) 2658 features |= NETIF_F_TSO; 2659 if (arg & TUN_F_TSO6) 2660 features |= NETIF_F_TSO6; 2661 arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 2662 } 2663 2664 arg &= ~TUN_F_UFO; 2665 } 2666 2667 /* This gives the user a way to test for new features in future by 2668 * trying to set them. */ 2669 if (arg) 2670 return -EINVAL; 2671 2672 tun->set_features = features; 2673 tun->dev->wanted_features &= ~TUN_USER_FEATURES; 2674 tun->dev->wanted_features |= features; 2675 netdev_update_features(tun->dev); 2676 2677 return 0; 2678 } 2679 2680 static void tun_detach_filter(struct tun_struct *tun, int n) 2681 { 2682 int i; 2683 struct tun_file *tfile; 2684 2685 for (i = 0; i < n; i++) { 2686 tfile = rtnl_dereference(tun->tfiles[i]); 2687 lock_sock(tfile->socket.sk); 2688 sk_detach_filter(tfile->socket.sk); 2689 release_sock(tfile->socket.sk); 2690 } 2691 2692 tun->filter_attached = false; 2693 } 2694 2695 static int tun_attach_filter(struct tun_struct *tun) 2696 { 2697 int i, ret = 0; 2698 struct tun_file *tfile; 2699 2700 for (i = 0; i < tun->numqueues; i++) { 2701 tfile = rtnl_dereference(tun->tfiles[i]); 2702 lock_sock(tfile->socket.sk); 2703 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); 2704 release_sock(tfile->socket.sk); 2705 if (ret) { 2706 tun_detach_filter(tun, i); 2707 return ret; 2708 } 2709 } 2710 2711 tun->filter_attached = true; 2712 return ret; 2713 } 2714 2715 static void tun_set_sndbuf(struct tun_struct *tun) 2716 { 2717 struct tun_file *tfile; 2718 int i; 2719 2720 for (i = 0; i < tun->numqueues; i++) { 2721 tfile = rtnl_dereference(tun->tfiles[i]); 2722 tfile->socket.sk->sk_sndbuf = tun->sndbuf; 2723 } 2724 } 2725 2726 static int tun_set_queue(struct file *file, struct ifreq *ifr) 2727 { 2728 struct tun_file *tfile = file->private_data; 2729 struct tun_struct *tun; 2730 int ret = 0; 2731 2732 rtnl_lock(); 2733 2734 if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { 2735 tun = tfile->detached; 2736 if (!tun) { 2737 ret = -EINVAL; 2738 goto unlock; 2739 } 2740 ret = security_tun_dev_attach_queue(tun->security); 2741 if (ret < 0) 2742 goto unlock; 2743 ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI); 2744 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 2745 tun = rtnl_dereference(tfile->tun); 2746 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) 2747 ret = -EINVAL; 2748 else 2749 __tun_detach(tfile, false); 2750 } else 2751 ret = -EINVAL; 2752 2753 unlock: 2754 rtnl_unlock(); 2755 return ret; 2756 } 2757 2758 static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p, 2759 void __user *data) 2760 { 2761 struct bpf_prog *prog; 2762 int fd; 2763 2764 if (copy_from_user(&fd, data, sizeof(fd))) 2765 return -EFAULT; 2766 2767 if (fd == -1) { 2768 prog = NULL; 2769 } else { 2770 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); 2771 if (IS_ERR(prog)) 2772 return PTR_ERR(prog); 2773 } 2774 2775 return __tun_set_ebpf(tun, prog_p, prog); 2776 } 2777 2778 static long __tun_chr_ioctl(struct file *file, unsigned int cmd, 2779 unsigned long arg, int ifreq_len) 2780 { 2781 struct tun_file *tfile = file->private_data; 2782 struct tun_struct *tun; 2783 void __user* argp = (void __user*)arg; 2784 struct ifreq ifr; 2785 kuid_t owner; 2786 kgid_t group; 2787 int sndbuf; 2788 int vnet_hdr_sz; 2789 unsigned int ifindex; 2790 int le; 2791 int ret; 2792 2793 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == SOCK_IOC_TYPE) { 2794 if (copy_from_user(&ifr, argp, ifreq_len)) 2795 return -EFAULT; 2796 } else { 2797 memset(&ifr, 0, sizeof(ifr)); 2798 } 2799 if (cmd == TUNGETFEATURES) { 2800 /* Currently this just means: "what IFF flags are valid?". 2801 * This is needed because we never checked for invalid flags on 2802 * TUNSETIFF. 2803 */ 2804 return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES, 2805 (unsigned int __user*)argp); 2806 } else if (cmd == TUNSETQUEUE) 2807 return tun_set_queue(file, &ifr); 2808 2809 ret = 0; 2810 rtnl_lock(); 2811 2812 tun = tun_get(tfile); 2813 if (cmd == TUNSETIFF) { 2814 ret = -EEXIST; 2815 if (tun) 2816 goto unlock; 2817 2818 ifr.ifr_name[IFNAMSIZ-1] = '\0'; 2819 2820 ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr); 2821 2822 if (ret) 2823 goto unlock; 2824 2825 if (copy_to_user(argp, &ifr, ifreq_len)) 2826 ret = -EFAULT; 2827 goto unlock; 2828 } 2829 if (cmd == TUNSETIFINDEX) { 2830 ret = -EPERM; 2831 if (tun) 2832 goto unlock; 2833 2834 ret = -EFAULT; 2835 if (copy_from_user(&ifindex, argp, sizeof(ifindex))) 2836 goto unlock; 2837 2838 ret = 0; 2839 tfile->ifindex = ifindex; 2840 goto unlock; 2841 } 2842 2843 ret = -EBADFD; 2844 if (!tun) 2845 goto unlock; 2846 2847 tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd); 2848 2849 ret = 0; 2850 switch (cmd) { 2851 case TUNGETIFF: 2852 tun_get_iff(current->nsproxy->net_ns, tun, &ifr); 2853 2854 if (tfile->detached) 2855 ifr.ifr_flags |= IFF_DETACH_QUEUE; 2856 if (!tfile->socket.sk->sk_filter) 2857 ifr.ifr_flags |= IFF_NOFILTER; 2858 2859 if (copy_to_user(argp, &ifr, ifreq_len)) 2860 ret = -EFAULT; 2861 break; 2862 2863 case TUNSETNOCSUM: 2864 /* Disable/Enable checksum */ 2865 2866 /* [unimplemented] */ 2867 tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n", 2868 arg ? "disabled" : "enabled"); 2869 break; 2870 2871 case TUNSETPERSIST: 2872 /* Disable/Enable persist mode. Keep an extra reference to the 2873 * module to prevent the module being unprobed. 2874 */ 2875 if (arg && !(tun->flags & IFF_PERSIST)) { 2876 tun->flags |= IFF_PERSIST; 2877 __module_get(THIS_MODULE); 2878 } 2879 if (!arg && (tun->flags & IFF_PERSIST)) { 2880 tun->flags &= ~IFF_PERSIST; 2881 module_put(THIS_MODULE); 2882 } 2883 2884 tun_debug(KERN_INFO, tun, "persist %s\n", 2885 arg ? "enabled" : "disabled"); 2886 break; 2887 2888 case TUNSETOWNER: 2889 /* Set owner of the device */ 2890 owner = make_kuid(current_user_ns(), arg); 2891 if (!uid_valid(owner)) { 2892 ret = -EINVAL; 2893 break; 2894 } 2895 tun->owner = owner; 2896 tun_debug(KERN_INFO, tun, "owner set to %u\n", 2897 from_kuid(&init_user_ns, tun->owner)); 2898 break; 2899 2900 case TUNSETGROUP: 2901 /* Set group of the device */ 2902 group = make_kgid(current_user_ns(), arg); 2903 if (!gid_valid(group)) { 2904 ret = -EINVAL; 2905 break; 2906 } 2907 tun->group = group; 2908 tun_debug(KERN_INFO, tun, "group set to %u\n", 2909 from_kgid(&init_user_ns, tun->group)); 2910 break; 2911 2912 case TUNSETLINK: 2913 /* Only allow setting the type when the interface is down */ 2914 if (tun->dev->flags & IFF_UP) { 2915 tun_debug(KERN_INFO, tun, 2916 "Linktype set failed because interface is up\n"); 2917 ret = -EBUSY; 2918 } else { 2919 tun->dev->type = (int) arg; 2920 tun_debug(KERN_INFO, tun, "linktype set to %d\n", 2921 tun->dev->type); 2922 ret = 0; 2923 } 2924 break; 2925 2926 #ifdef TUN_DEBUG 2927 case TUNSETDEBUG: 2928 tun->debug = arg; 2929 break; 2930 #endif 2931 case TUNSETOFFLOAD: 2932 ret = set_offload(tun, arg); 2933 break; 2934 2935 case TUNSETTXFILTER: 2936 /* Can be set only for TAPs */ 2937 ret = -EINVAL; 2938 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 2939 break; 2940 ret = update_filter(&tun->txflt, (void __user *)arg); 2941 break; 2942 2943 case SIOCGIFHWADDR: 2944 /* Get hw address */ 2945 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); 2946 ifr.ifr_hwaddr.sa_family = tun->dev->type; 2947 if (copy_to_user(argp, &ifr, ifreq_len)) 2948 ret = -EFAULT; 2949 break; 2950 2951 case SIOCSIFHWADDR: 2952 /* Set hw address */ 2953 tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n", 2954 ifr.ifr_hwaddr.sa_data); 2955 2956 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 2957 break; 2958 2959 case TUNGETSNDBUF: 2960 sndbuf = tfile->socket.sk->sk_sndbuf; 2961 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) 2962 ret = -EFAULT; 2963 break; 2964 2965 case TUNSETSNDBUF: 2966 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { 2967 ret = -EFAULT; 2968 break; 2969 } 2970 if (sndbuf <= 0) { 2971 ret = -EINVAL; 2972 break; 2973 } 2974 2975 tun->sndbuf = sndbuf; 2976 tun_set_sndbuf(tun); 2977 break; 2978 2979 case TUNGETVNETHDRSZ: 2980 vnet_hdr_sz = tun->vnet_hdr_sz; 2981 if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) 2982 ret = -EFAULT; 2983 break; 2984 2985 case TUNSETVNETHDRSZ: 2986 if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { 2987 ret = -EFAULT; 2988 break; 2989 } 2990 if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { 2991 ret = -EINVAL; 2992 break; 2993 } 2994 2995 tun->vnet_hdr_sz = vnet_hdr_sz; 2996 break; 2997 2998 case TUNGETVNETLE: 2999 le = !!(tun->flags & TUN_VNET_LE); 3000 if (put_user(le, (int __user *)argp)) 3001 ret = -EFAULT; 3002 break; 3003 3004 case TUNSETVNETLE: 3005 if (get_user(le, (int __user *)argp)) { 3006 ret = -EFAULT; 3007 break; 3008 } 3009 if (le) 3010 tun->flags |= TUN_VNET_LE; 3011 else 3012 tun->flags &= ~TUN_VNET_LE; 3013 break; 3014 3015 case TUNGETVNETBE: 3016 ret = tun_get_vnet_be(tun, argp); 3017 break; 3018 3019 case TUNSETVNETBE: 3020 ret = tun_set_vnet_be(tun, argp); 3021 break; 3022 3023 case TUNATTACHFILTER: 3024 /* Can be set only for TAPs */ 3025 ret = -EINVAL; 3026 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 3027 break; 3028 ret = -EFAULT; 3029 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) 3030 break; 3031 3032 ret = tun_attach_filter(tun); 3033 break; 3034 3035 case TUNDETACHFILTER: 3036 /* Can be set only for TAPs */ 3037 ret = -EINVAL; 3038 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 3039 break; 3040 ret = 0; 3041 tun_detach_filter(tun, tun->numqueues); 3042 break; 3043 3044 case TUNGETFILTER: 3045 ret = -EINVAL; 3046 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 3047 break; 3048 ret = -EFAULT; 3049 if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) 3050 break; 3051 ret = 0; 3052 break; 3053 3054 case TUNSETSTEERINGEBPF: 3055 ret = tun_set_ebpf(tun, &tun->steering_prog, argp); 3056 break; 3057 3058 case TUNSETFILTEREBPF: 3059 ret = tun_set_ebpf(tun, &tun->filter_prog, argp); 3060 break; 3061 3062 default: 3063 ret = -EINVAL; 3064 break; 3065 } 3066 3067 unlock: 3068 rtnl_unlock(); 3069 if (tun) 3070 tun_put(tun); 3071 return ret; 3072 } 3073 3074 static long tun_chr_ioctl(struct file *file, 3075 unsigned int cmd, unsigned long arg) 3076 { 3077 return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); 3078 } 3079 3080 #ifdef CONFIG_COMPAT 3081 static long tun_chr_compat_ioctl(struct file *file, 3082 unsigned int cmd, unsigned long arg) 3083 { 3084 switch (cmd) { 3085 case TUNSETIFF: 3086 case TUNGETIFF: 3087 case TUNSETTXFILTER: 3088 case TUNGETSNDBUF: 3089 case TUNSETSNDBUF: 3090 case SIOCGIFHWADDR: 3091 case SIOCSIFHWADDR: 3092 arg = (unsigned long)compat_ptr(arg); 3093 break; 3094 default: 3095 arg = (compat_ulong_t)arg; 3096 break; 3097 } 3098 3099 /* 3100 * compat_ifreq is shorter than ifreq, so we must not access beyond 3101 * the end of that structure. All fields that are used in this 3102 * driver are compatible though, we don't need to convert the 3103 * contents. 3104 */ 3105 return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); 3106 } 3107 #endif /* CONFIG_COMPAT */ 3108 3109 static int tun_chr_fasync(int fd, struct file *file, int on) 3110 { 3111 struct tun_file *tfile = file->private_data; 3112 int ret; 3113 3114 if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) 3115 goto out; 3116 3117 if (on) { 3118 __f_setown(file, task_pid(current), PIDTYPE_PID, 0); 3119 tfile->flags |= TUN_FASYNC; 3120 } else 3121 tfile->flags &= ~TUN_FASYNC; 3122 ret = 0; 3123 out: 3124 return ret; 3125 } 3126 3127 static int tun_chr_open(struct inode *inode, struct file * file) 3128 { 3129 struct net *net = current->nsproxy->net_ns; 3130 struct tun_file *tfile; 3131 3132 DBG1(KERN_INFO, "tunX: tun_chr_open\n"); 3133 3134 tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, 3135 &tun_proto, 0); 3136 if (!tfile) 3137 return -ENOMEM; 3138 RCU_INIT_POINTER(tfile->tun, NULL); 3139 tfile->flags = 0; 3140 tfile->ifindex = 0; 3141 3142 init_waitqueue_head(&tfile->wq.wait); 3143 RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq); 3144 3145 tfile->socket.file = file; 3146 tfile->socket.ops = &tun_socket_ops; 3147 3148 sock_init_data(&tfile->socket, &tfile->sk); 3149 3150 tfile->sk.sk_write_space = tun_sock_write_space; 3151 tfile->sk.sk_sndbuf = INT_MAX; 3152 3153 file->private_data = tfile; 3154 INIT_LIST_HEAD(&tfile->next); 3155 3156 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); 3157 3158 memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); 3159 3160 return 0; 3161 } 3162 3163 static int tun_chr_close(struct inode *inode, struct file *file) 3164 { 3165 struct tun_file *tfile = file->private_data; 3166 3167 tun_detach(tfile, true); 3168 3169 return 0; 3170 } 3171 3172 #ifdef CONFIG_PROC_FS 3173 static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) 3174 { 3175 struct tun_file *tfile = file->private_data; 3176 struct tun_struct *tun; 3177 struct ifreq ifr; 3178 3179 memset(&ifr, 0, sizeof(ifr)); 3180 3181 rtnl_lock(); 3182 tun = tun_get(tfile); 3183 if (tun) 3184 tun_get_iff(current->nsproxy->net_ns, tun, &ifr); 3185 rtnl_unlock(); 3186 3187 if (tun) 3188 tun_put(tun); 3189 3190 seq_printf(m, "iff:\t%s\n", ifr.ifr_name); 3191 } 3192 #endif 3193 3194 static const struct file_operations tun_fops = { 3195 .owner = THIS_MODULE, 3196 .llseek = no_llseek, 3197 .read_iter = tun_chr_read_iter, 3198 .write_iter = tun_chr_write_iter, 3199 .poll = tun_chr_poll, 3200 .unlocked_ioctl = tun_chr_ioctl, 3201 #ifdef CONFIG_COMPAT 3202 .compat_ioctl = tun_chr_compat_ioctl, 3203 #endif 3204 .open = tun_chr_open, 3205 .release = tun_chr_close, 3206 .fasync = tun_chr_fasync, 3207 #ifdef CONFIG_PROC_FS 3208 .show_fdinfo = tun_chr_show_fdinfo, 3209 #endif 3210 }; 3211 3212 static struct miscdevice tun_miscdev = { 3213 .minor = TUN_MINOR, 3214 .name = "tun", 3215 .nodename = "net/tun", 3216 .fops = &tun_fops, 3217 }; 3218 3219 /* ethtool interface */ 3220 3221 static int tun_get_link_ksettings(struct net_device *dev, 3222 struct ethtool_link_ksettings *cmd) 3223 { 3224 ethtool_link_ksettings_zero_link_mode(cmd, supported); 3225 ethtool_link_ksettings_zero_link_mode(cmd, advertising); 3226 cmd->base.speed = SPEED_10; 3227 cmd->base.duplex = DUPLEX_FULL; 3228 cmd->base.port = PORT_TP; 3229 cmd->base.phy_address = 0; 3230 cmd->base.autoneg = AUTONEG_DISABLE; 3231 return 0; 3232 } 3233 3234 static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 3235 { 3236 struct tun_struct *tun = netdev_priv(dev); 3237 3238 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 3239 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 3240 3241 switch (tun->flags & TUN_TYPE_MASK) { 3242 case IFF_TUN: 3243 strlcpy(info->bus_info, "tun", sizeof(info->bus_info)); 3244 break; 3245 case IFF_TAP: 3246 strlcpy(info->bus_info, "tap", sizeof(info->bus_info)); 3247 break; 3248 } 3249 } 3250 3251 static u32 tun_get_msglevel(struct net_device *dev) 3252 { 3253 #ifdef TUN_DEBUG 3254 struct tun_struct *tun = netdev_priv(dev); 3255 return tun->debug; 3256 #else 3257 return -EOPNOTSUPP; 3258 #endif 3259 } 3260 3261 static void tun_set_msglevel(struct net_device *dev, u32 value) 3262 { 3263 #ifdef TUN_DEBUG 3264 struct tun_struct *tun = netdev_priv(dev); 3265 tun->debug = value; 3266 #endif 3267 } 3268 3269 static int tun_get_coalesce(struct net_device *dev, 3270 struct ethtool_coalesce *ec) 3271 { 3272 struct tun_struct *tun = netdev_priv(dev); 3273 3274 ec->rx_max_coalesced_frames = tun->rx_batched; 3275 3276 return 0; 3277 } 3278 3279 static int tun_set_coalesce(struct net_device *dev, 3280 struct ethtool_coalesce *ec) 3281 { 3282 struct tun_struct *tun = netdev_priv(dev); 3283 3284 if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) 3285 tun->rx_batched = NAPI_POLL_WEIGHT; 3286 else 3287 tun->rx_batched = ec->rx_max_coalesced_frames; 3288 3289 return 0; 3290 } 3291 3292 static const struct ethtool_ops tun_ethtool_ops = { 3293 .get_drvinfo = tun_get_drvinfo, 3294 .get_msglevel = tun_get_msglevel, 3295 .set_msglevel = tun_set_msglevel, 3296 .get_link = ethtool_op_get_link, 3297 .get_ts_info = ethtool_op_get_ts_info, 3298 .get_coalesce = tun_get_coalesce, 3299 .set_coalesce = tun_set_coalesce, 3300 .get_link_ksettings = tun_get_link_ksettings, 3301 }; 3302 3303 static int tun_queue_resize(struct tun_struct *tun) 3304 { 3305 struct net_device *dev = tun->dev; 3306 struct tun_file *tfile; 3307 struct ptr_ring **rings; 3308 int n = tun->numqueues + tun->numdisabled; 3309 int ret, i; 3310 3311 rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); 3312 if (!rings) 3313 return -ENOMEM; 3314 3315 for (i = 0; i < tun->numqueues; i++) { 3316 tfile = rtnl_dereference(tun->tfiles[i]); 3317 rings[i] = &tfile->tx_ring; 3318 } 3319 list_for_each_entry(tfile, &tun->disabled, next) 3320 rings[i++] = &tfile->tx_ring; 3321 3322 ret = ptr_ring_resize_multiple(rings, n, 3323 dev->tx_queue_len, GFP_KERNEL, 3324 tun_ptr_free); 3325 3326 kfree(rings); 3327 return ret; 3328 } 3329 3330 static int tun_device_event(struct notifier_block *unused, 3331 unsigned long event, void *ptr) 3332 { 3333 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3334 struct tun_struct *tun = netdev_priv(dev); 3335 3336 if (dev->rtnl_link_ops != &tun_link_ops) 3337 return NOTIFY_DONE; 3338 3339 switch (event) { 3340 case NETDEV_CHANGE_TX_QUEUE_LEN: 3341 if (tun_queue_resize(tun)) 3342 return NOTIFY_BAD; 3343 break; 3344 default: 3345 break; 3346 } 3347 3348 return NOTIFY_DONE; 3349 } 3350 3351 static struct notifier_block tun_notifier_block __read_mostly = { 3352 .notifier_call = tun_device_event, 3353 }; 3354 3355 static int __init tun_init(void) 3356 { 3357 int ret = 0; 3358 3359 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 3360 3361 ret = rtnl_link_register(&tun_link_ops); 3362 if (ret) { 3363 pr_err("Can't register link_ops\n"); 3364 goto err_linkops; 3365 } 3366 3367 ret = misc_register(&tun_miscdev); 3368 if (ret) { 3369 pr_err("Can't register misc device %d\n", TUN_MINOR); 3370 goto err_misc; 3371 } 3372 3373 ret = register_netdevice_notifier(&tun_notifier_block); 3374 if (ret) { 3375 pr_err("Can't register netdevice notifier\n"); 3376 goto err_notifier; 3377 } 3378 3379 return 0; 3380 3381 err_notifier: 3382 misc_deregister(&tun_miscdev); 3383 err_misc: 3384 rtnl_link_unregister(&tun_link_ops); 3385 err_linkops: 3386 return ret; 3387 } 3388 3389 static void tun_cleanup(void) 3390 { 3391 misc_deregister(&tun_miscdev); 3392 rtnl_link_unregister(&tun_link_ops); 3393 unregister_netdevice_notifier(&tun_notifier_block); 3394 } 3395 3396 /* Get an underlying socket object from tun file. Returns error unless file is 3397 * attached to a device. The returned object works like a packet socket, it 3398 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for 3399 * holding a reference to the file for as long as the socket is in use. */ 3400 struct socket *tun_get_socket(struct file *file) 3401 { 3402 struct tun_file *tfile; 3403 if (file->f_op != &tun_fops) 3404 return ERR_PTR(-EINVAL); 3405 tfile = file->private_data; 3406 if (!tfile) 3407 return ERR_PTR(-EBADFD); 3408 return &tfile->socket; 3409 } 3410 EXPORT_SYMBOL_GPL(tun_get_socket); 3411 3412 struct ptr_ring *tun_get_tx_ring(struct file *file) 3413 { 3414 struct tun_file *tfile; 3415 3416 if (file->f_op != &tun_fops) 3417 return ERR_PTR(-EINVAL); 3418 tfile = file->private_data; 3419 if (!tfile) 3420 return ERR_PTR(-EBADFD); 3421 return &tfile->tx_ring; 3422 } 3423 EXPORT_SYMBOL_GPL(tun_get_tx_ring); 3424 3425 module_init(tun_init); 3426 module_exit(tun_cleanup); 3427 MODULE_DESCRIPTION(DRV_DESCRIPTION); 3428 MODULE_AUTHOR(DRV_COPYRIGHT); 3429 MODULE_LICENSE("GPL"); 3430 MODULE_ALIAS_MISCDEV(TUN_MINOR); 3431 MODULE_ALIAS("devname:net/tun"); 3432