/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (tun->debug)						\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (debug == 2)						\
		printk(level fmt, ##args);			\
} while (0)
#else
#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (0)							\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (0)							\
		printk(level fmt, ##args);			\
} while (0)
#endif
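/* Note: the disabled variants above deliberately keep an "if (0)" around
 * the printk so that format strings and arguments are still type-checked
 * by the compiler even when TUN_DEBUG is off.
 */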
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE     0x80000000
#define TUN_VNET_BE     0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to the max number of VCPUs in a guest.
 */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and
 * tap_filter) to serve as one transmit queue for the tuntap device. The
 * sock_fprog and tap_filter are kept in tun_struct since they are used for
 * filtering on the netdevice, not for a specific queue (at least I didn't
 * see the requirement for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to
 * the other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
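/* TUN_NUM_FLOW_ENTRIES must stay a power of two: tun_hashfn() below
 * reduces a 32-bit flow hash to a table slot by masking with
 * TUN_MASK_FLOW_ENTRIES, e.g. slot = rxhash & (1024 - 1).
 */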
struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when a file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int            numqueues;
	unsigned int 		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

bool tun_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}
EXPORT_SYMBOL(tun_is_xdp_frame);

void *tun_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_xdp_to_ptr);

void *tun_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_ptr_to_xdp);
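/* The tx_ring carries two kinds of pointers: sk_buff and xdp_frame. The
 * helpers above tag xdp_frame pointers by setting TUN_XDP_FLAG in the low
 * bit, which is otherwise always clear because both structures are at
 * least word-aligned. A consumer therefore does, e.g.:
 *
 *	if (tun_is_xdp_frame(ptr))
 *		frame = tun_ptr_to_xdp(ptr);
 */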
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}

static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}
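/* Flow table garbage collection: evict entries that have been idle for
 * longer than tun->ageing_time and, if any entries remain, re-arm the
 * timer for the earliest upcoming expiry.
 */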
static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	if (!rxhash)
		return;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

/* Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}
/* We try to identify a flow through its rxhash. The reason that
 * we do not check rxq no. is because some cards (e.g. 82599) choose
 * the rxq based on the txq where the last packet of the flow came in.
 * As the userspace application moves between processors, we may get a
 * different rxq no. here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
	if (e) {
		tun_flow_save_rps_rxhash(e, txq);
		txq = e->queue_index;
	} else {
		/* use multiply and shift instead of expensive divide */
		txq = ((u64)txq * numqueues) >> 32;
	}

	return txq;
}

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u16 ret = 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % tun->numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}

static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}
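/* Detach a queue from its device. With clean == false the queue is only
 * "disabled": it is moved to tun->disabled and can later be re-attached
 * to the same persistent device. With clean == true the file is fully
 * torn down and its socket reference dropped.
 */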
static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;

		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}
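/* Attach an open tun file to a device as one more queue. Called with the
 * rtnl lock held (note the rtnl_dereference() calls below); on success
 * the file becomes tun->tfiles[tun->numqueues] and, for a re-enabled
 * queue, is removed from the disabled list.
 */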
static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to persist device */
	if (!skip_filter && tun->filter_attached) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (err)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	if (rtnl_dereference(tun->xdp_prog))
		sock_set_flag(&tfile->sk, SOCK_XDP);

	tun_set_real_num_queues(tun);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;

	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;

	return mask[n >> 5] & (1 << (n & 31));
}
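/* The multicast hash above is the classic Ethernet scheme: take the top
 * six bits of the CRC-32 of the MAC address (values 0..63) and use them
 * as an index into a 64-bit bitmap, stored here as two u32 words
 * (word mask[n >> 5], bit n & 31).
 */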
static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks, which is
	 * perfectly safe. We disable it first and in the worst case
	 * we'll accept a few undesired packets.
	 */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled.
	 */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above.
	 */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point.
	 */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	netif_tx_start_all_queues(dev);

	for (i = 0; i < tun->numqueues; i++) {
		struct tun_file *tfile;

		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_write_space(tfile->socket.sk);
	}

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		struct tun_flow_entry *e;
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
		if (e)
			tun_flow_save_rps_rxhash(e, rxhash);
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}
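/* run_ebpf_filter() returns the number of bytes of the packet to keep:
 * the unchanged len when no program is attached, a smaller value to trim
 * the packet, or 0 to drop it (see the len == 0 check in tun_net_xmit()
 * below).
 */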
/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (txq >= tun->numqueues)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices.
	 */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0 || pskb_trim(skb, len))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for an indefinite time.
	 */
	skb_orphan(skb);

	nf_reset(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}

static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_pcpu_stats *p;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(tun->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets	= p->rx_packets;
			rxbytes		= p->rx_bytes;
			txpackets	= p->tx_packets;
			txbytes		= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;

		/* u32 counters */
		rx_dropped	+= p->rx_dropped;
		rx_frame_errors	+= p->rx_frame_errors;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped  = rx_dropped;
	stats->rx_frame_errors = rx_frame_errors;
	stats->tx_dropped = tx_dropped;
}
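/* The u64_stats_fetch_begin()/retry() loop above is a seqcount read
 * section: on 32-bit kernels the 64-bit counters cannot be read
 * atomically, so the snapshot is retried if a writer updated the stats
 * concurrently. The u32 drop counters are read outside it since a 32-bit
 * load is atomic anyway.
 */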
static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	struct bpf_prog *old_prog;
	int i;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}

	return 0;
}

static u32 tun_xdp_query(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;

	xdp_prog = rtnl_dereference(tun->xdp_prog);
	if (xdp_prog)
		return xdp_prog->aux->id;

	return 0;
}

static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return tun_xdp_set(dev, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = tun_xdp_query(dev);
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops tun_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
};

static void __tun_xdp_flush_tfile(struct tun_file *tfile)
{
	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
}
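/* ndo_xdp_xmit for redirect targets: pick the per-CPU queue
 * (smp_processor_id() % numqueues), publish the whole batch under a
 * single producer-lock acquisition, and only wake the reader when the
 * caller passes XDP_XMIT_FLUSH. Frames that do not fit in the ring are
 * returned to the allocator and counted as tx_dropped.
 */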
static int tun_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	u32 numqueues;
	int drops = 0;
	int cnt = n;
	int i;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues) {
		rcu_read_unlock();
		return -ENXIO; /* Caller will free/return all frames */
	}

	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
					    numqueues]);

	spin_lock(&tfile->tx_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdp = frames[i];
		/* Encode the XDP flag into the lowest bit so the consumer
		 * can tell an XDP frame from an sk_buff.
		 */
		void *frame = tun_xdp_to_ptr(xdp);

		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
			this_cpu_inc(tun->pcpu_stats->tx_dropped);
			xdp_return_frame_rx_napi(xdp);
			drops++;
		}
	}
	spin_unlock(&tfile->tx_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__tun_xdp_flush_tfile(tfile);

	rcu_read_unlock();
	return cnt - drops;
}

static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
	struct xdp_frame *frame = convert_to_xdp_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
}

static const struct net_device_ops tap_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_bpf		= tun_xdp,
	.ndo_xdp_xmit		= tun_xdp_xmit,
};

static void tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));
}

static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}

#define MIN_MTU 68
#define MAX_MTU 65535
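/* MIN_MTU is the minimum IPv4 datagram size every host must accept
 * (RFC 791); MAX_MTU is the 16-bit IP total-length ceiling. For TAP
 * devices the usable maximum is further reduced by the Ethernet header,
 * see tun_net_init() below.
 */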
/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		dev->netdev_ops = &tun_netdev_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		break;

	case IFF_TAP:
		dev->netdev_ops = &tap_netdev_ops;
		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

		eth_hw_addr_random(dev);

		break;
	}

	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU - dev->hard_header_len;
}

static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
{
	struct sock *sk = tfile->socket.sk;

	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
}

/* Character device part */

/* Poll */
static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	struct sock *sk;
	__poll_t mask = 0;

	if (!tun)
		return EPOLLERR;

	sk = tfile->socket.sk;

	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");

	poll_wait(file, sk_sleep(sk), wait);

	if (!ptr_ring_empty(&tfile->tx_ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
	 * guarantee EPOLLOUT to be raised by either here or
	 * tun_sock_write_space(). Then the process could get a
	 * notification after it writes to a down device and meets -EIO.
	 */
	if (tun_sock_writeable(tun, tfile) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	     tun_sock_writeable(tun, tfile)))
		mask |= EPOLLOUT | EPOLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = EPOLLERR;

	tun_put(tun);
	return mask;
}

static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
					    size_t len,
					    const struct iov_iter *it)
{
	struct sk_buff *skb;
	size_t linear;
	int err;
	int i;

	if (it->nr_segs > MAX_SKB_FRAGS + 1)
		return ERR_PTR(-ENOMEM);

	local_bh_disable();
	skb = napi_get_frags(&tfile->napi);
	local_bh_enable();
	if (!skb)
		return ERR_PTR(-ENOMEM);

	linear = iov_iter_single_seg_count(it);
	err = __skb_grow(skb, linear);
	if (err)
		goto free;

	skb->len = len;
	skb->data_len = len - linear;
	skb->truesize += skb->data_len;

	for (i = 1; i < it->nr_segs; i++) {
		struct page_frag *pfrag = &current->task_frag;
		size_t fragsz = it->iov[i].iov_len;

		if (fragsz == 0 || fragsz > PAGE_SIZE) {
			err = -EINVAL;
			goto free;
		}

		if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
			err = -ENOMEM;
			goto free;
		}

		skb_fill_page_desc(skb, i - 1, pfrag->page,
				   pfrag->offset, fragsz);
		page_ref_inc(pfrag->page);
		pfrag->offset += fragsz;
	}

	return skb;
free:
	/* frees skb and all frags allocated with napi_alloc_frag() */
	napi_free_frags(&tfile->napi);
	return ERR_PTR(err);
}
/* prepad is the amount to reserve at front.  len is length after that.
 * linear is a hint as to how much to copy (usually headers).
 */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err, 0);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
			   struct sk_buff *skb, int more)
{
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	u32 rx_batched = tun->rx_batched;
	bool rcv = false;

	if (!rx_batched || (!more && skb_queue_empty(queue))) {
		local_bh_disable();
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
		return;
	}

	spin_lock(&queue->lock);
	if (!more || skb_queue_len(queue) == rx_batched) {
		__skb_queue_head_init(&process_queue);
		skb_queue_splice_tail_init(queue, &process_queue);
		rcv = true;
	} else {
		__skb_queue_tail(queue, skb);
	}
	spin_unlock(&queue->lock);

	if (rcv) {
		struct sk_buff *nskb;

		local_bh_disable();
		while ((nskb = __skb_dequeue(&process_queue))) {
			skb_record_rx_queue(nskb, tfile->queue_index);
			netif_receive_skb(nskb);
		}
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
	}
}

static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
			      int len, int noblock, bool zerocopy)
{
	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
		return false;

	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
		return false;

	if (!noblock)
		return false;

	if (zerocopy)
		return false;

	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return false;

	return true;
}

static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf,
				       int buflen, int len, int pad)
{
	struct sk_buff *skb = build_skb(buf, buflen);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb_reserve(skb, pad);
	skb_put(skb, len);

	get_page(alloc_frag->page);
	alloc_frag->offset += buflen;

	return skb;
}

static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
		       struct xdp_buff *xdp, u32 act)
{
	int err;

	switch (act) {
	case XDP_REDIRECT:
		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
		if (err)
			return err;
		break;
	case XDP_TX:
		err = tun_xdp_tx(tun->dev, xdp);
		if (err < 0)
			return err;
		break;
	case XDP_PASS:
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(tun->dev, xdp_prog, act);
		/* fall through */
	case XDP_DROP:
		this_cpu_inc(tun->pcpu_stats->rx_dropped);
		break;
	}

	return act;
}
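/* tun_xdp_act() returns a negative errno if a redirect or XDP_TX
 * transmit failed, otherwise it echoes the verdict back so the caller
 * can distinguish XDP_PASS (keep processing the buffer) from
 * XDP_REDIRECT/XDP_TX/XDP_DROP, where the buffer is already consumed.
 */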
static struct sk_buff *tun_build_skb(struct tun_struct *tun,
				     struct tun_file *tfile,
				     struct iov_iter *from,
				     struct virtio_net_hdr *hdr,
				     int len, int *skb_xdp)
{
	struct page_frag *alloc_frag = &current->task_frag;
	struct bpf_prog *xdp_prog;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	char *buf;
	size_t copied;
	int pad = TUN_RX_PAD;
	int err = 0;

	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog)
		pad += XDP_PACKET_HEADROOM;
	buflen += SKB_DATA_ALIGN(len + pad);
	rcu_read_unlock();

	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset + pad,
				     len, from);
	if (copied != len)
		return ERR_PTR(-EFAULT);

	/* There's a small window that XDP may be set after the check
	 * of xdp_prog above, this should be rare and for simplicity
	 * we do XDP on skb in case the headroom is not enough.
	 */
	if (hdr->gso_type || !xdp_prog) {
		*skb_xdp = 1;
		return __tun_build_skb(alloc_frag, buf, buflen, len, pad);
	}

	*skb_xdp = 0;

	local_bh_disable();
	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog) {
		struct xdp_buff xdp;
		u32 act;

		xdp.data_hard_start = buf;
		xdp.data = buf + pad;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + len;
		xdp.rxq = &tfile->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		if (act == XDP_REDIRECT || act == XDP_TX) {
			get_page(alloc_frag->page);
			alloc_frag->offset += buflen;
		}
		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
		if (err < 0)
			goto err_xdp;
		if (err == XDP_REDIRECT)
			xdp_do_flush_map();
		if (err != XDP_PASS)
			goto out;

		pad = xdp.data - xdp.data_hard_start;
		len = xdp.data_end - xdp.data;
	}
	rcu_read_unlock();
	local_bh_enable();

	return __tun_build_skb(alloc_frag, buf, buflen, len, pad);

err_xdp:
	put_page(alloc_frag->page);
out:
	rcu_read_unlock();
	local_bh_enable();
	return NULL;
}
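/* tun_get_user() is the receive path for writes from user space. The
 * layout of one write is, depending on device flags:
 *
 *	[struct tun_pi]          unless IFF_NO_PI
 *	[struct virtio_net_hdr]  if IFF_VNET_HDR (padded to vnet_hdr_sz)
 *	[packet bytes]
 *
 * It then picks a build strategy: the page-frag fast path above
 * (tun_can_build_skb), napi frags, zerocopy from the iov, or a plain
 * copy into a freshly allocated skb.
 */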
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, struct iov_iter *from,
			    int noblock, bool more)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t total_len = iov_iter_count(from);
	size_t len = total_len, align = tun->align, linear;
	struct virtio_net_hdr gso = { 0 };
	struct tun_pcpu_stats *stats;
	int good_linear;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash = 0;
	int skb_xdp = 1;
	bool frags = tun_napi_frags_enabled(tfile);

	if (!(tun->dev->flags & IFF_UP))
		return -EIO;

	if (!(tun->flags & IFF_NO_PI)) {
		if (len < sizeof(pi))
			return -EINVAL;
		len -= sizeof(pi);

		if (!copy_from_iter_full(&pi, sizeof(pi), from))
			return -EFAULT;
	}

	if (tun->flags & IFF_VNET_HDR) {
		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

		if (len < vnet_hdr_sz)
			return -EINVAL;
		len -= vnet_hdr_sz;

		if (!copy_from_iter_full(&gso, sizeof(gso), from))
			return -EFAULT;

		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);

		if (tun16_to_cpu(tun, gso.hdr_len) > len)
			return -EINVAL;
		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
	}

	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
			return -EINVAL;
	}

	good_linear = SKB_MAX_HEAD(align);

	if (msg_control) {
		struct iov_iter i = *from;

		/* There are 256 bytes to be copied in skb, so there is
		 * enough room for skb expand head in case it is used.
		 * The rest of the buffer is mapped from userspace.
		 */
		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
		/* For packets that are not easy to process here
		 * (e.g. gso or jumbo packets), we will do it after
		 * the skb is created, with the generic XDP routine.
		 */
		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
		if (IS_ERR(skb)) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			return PTR_ERR(skb);
		}
		if (!skb)
			return total_len;
	} else {
		if (!zerocopy) {
			copylen = len;
			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
				linear = good_linear;
			else
				linear = tun16_to_cpu(tun, gso.hdr_len);
		}

		if (frags) {
			mutex_lock(&tfile->napi_mutex);
			skb = tun_napi_alloc_frags(tfile, copylen, from);
			/* tun_napi_alloc_frags() enforces a layout for the skb.
			 * If zerocopy is enabled, then this layout will be
			 * overwritten by zerocopy_sg_from_iter().
			 */
			zerocopy = false;
		} else {
			skb = tun_alloc_skb(tfile, align, copylen, linear,
					    noblock);
		}

		if (IS_ERR(skb)) {
			if (PTR_ERR(skb) != -EAGAIN)
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
			if (frags)
				mutex_unlock(&tfile->napi_mutex);
			return PTR_ERR(skb);
		}

		if (zerocopy)
			err = zerocopy_sg_from_iter(skb, from);
		else
			err = skb_copy_datagram_from_iter(skb, 0, from, len);

		if (err) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			kfree_skb(skb);
			if (frags) {
				tfile->napi.skb = NULL;
				mutex_unlock(&tfile->napi_mutex);
			}

			return -EFAULT;
		}
	}

	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
		kfree_skb(skb);
		if (frags) {
			tfile->napi.skb = NULL;
			mutex_unlock(&tfile->napi_mutex);
		}

		return -EINVAL;
	}
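	/* With IFF_NO_PI there is no tun_pi header, so for a TUN device we
	 * infer the protocol from the IP version nibble in the first
	 * payload byte (4 -> ETH_P_IP, 6 -> ETH_P_IPV6) and drop anything
	 * else.
	 */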
	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		if (tun->flags & IFF_NO_PI) {
			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;

			switch (ip_version) {
			case 4:
				pi.proto = htons(ETH_P_IP);
				break;
			case 6:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
				kfree_skb(skb);
				return -EINVAL;
			}
		}

		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case IFF_TAP:
		if (!frags)
			skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;

		uarg->callback(uarg, false);
	}

	skb_reset_network_header(skb);
	skb_probe_transport_header(skb, 0);

	if (skb_xdp) {
		struct bpf_prog *xdp_prog;
		int ret;

		local_bh_disable();
		rcu_read_lock();
		xdp_prog = rcu_dereference(tun->xdp_prog);
		if (xdp_prog) {
			ret = do_xdp_generic(xdp_prog, skb);
			if (ret != XDP_PASS) {
				rcu_read_unlock();
				local_bh_enable();
				return total_len;
			}
		}
		rcu_read_unlock();
		local_bh_enable();
	}

	/* Compute the costly rx hash only if needed for flow updates.
	 * There is a very small possibility of out-of-order delivery
	 * while a flow is switching queues; not worth optimizing.
	 */
	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
	    !tfile->detached)
		rxhash = __skb_get_hash_symmetric(skb);

	if (frags) {
		/* Exercise flow dissector code path. */
		u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));

		if (unlikely(headlen > skb_headlen(skb))) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			napi_free_frags(&tfile->napi);
			mutex_unlock(&tfile->napi_mutex);
			WARN_ON(1);
			return -ENOMEM;
		}

		local_bh_disable();
		napi_gro_frags(&tfile->napi);
		local_bh_enable();
		mutex_unlock(&tfile->napi_mutex);
	} else if (tfile->napi_enabled) {
		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
		int queue_len;

		spin_lock_bh(&queue->lock);
		__skb_queue_tail(queue, skb);
		queue_len = skb_queue_len(queue);
		spin_unlock(&queue->lock);

		if (!more || queue_len > NAPI_POLL_WEIGHT)
			napi_schedule(&tfile->napi);

		local_bh_enable();
	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
		tun_rx_batched(tun, tfile, skb, more);
	} else {
		netif_rx_ni(skb);
	}

	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(stats);

	if (rxhash)
		tun_flow_update(tun, rxhash, tfile);

	return total_len;
}

static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	ssize_t result;

	if (!tun)
		return -EBADFD;

	result = tun_get_user(tun, tfile, NULL, from,
			      file->f_flags & O_NONBLOCK, false);

	tun_put(tun);
	return result;
}
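/* Reads mirror writes: each read() returns exactly one packet, prefixed
 * by struct tun_pi and/or the vnet header according to the device flags.
 * If the user buffer is too small the packet is truncated and, when
 * protocol info is enabled, TUN_PKT_STRIP is set in pi.flags to tell
 * user space that data was lost.
 */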
static ssize_t tun_put_user_xdp(struct tun_struct *tun,
				struct tun_file *tfile,
				struct xdp_frame *xdp_frame,
				struct iov_iter *iter)
{
	int vnet_hdr_sz = 0;
	size_t size = xdp_frame->len;
	struct tun_pcpu_stats *stats;
	size_t ret;

	if (tun->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr gso = { 0 };

		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
			return -EINVAL;
		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
			     sizeof(gso)))
			return -EFAULT;
		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;

	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += ret;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(tun->pcpu_stats);

	return ret;
}

/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
			    struct tun_file *tfile,
			    struct sk_buff *skb,
			    struct iov_iter *iter)
{
	struct tun_pi pi = { 0, skb->protocol };
	struct tun_pcpu_stats *stats;
	ssize_t total;
	int vlan_offset = 0;
	int vlan_hlen = 0;
	int vnet_hdr_sz = 0;

	if (skb_vlan_tag_present(skb))
		vlan_hlen = VLAN_HLEN;

	if (tun->flags & IFF_VNET_HDR)
		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

	total = skb->len + vlan_hlen + vnet_hdr_sz;

	if (!(tun->flags & IFF_NO_PI)) {
		if (iov_iter_count(iter) < sizeof(pi))
			return -EINVAL;

		total += sizeof(pi);
		if (iov_iter_count(iter) < total) {
			/* Packet will be stripped */
			pi.flags |= TUN_PKT_STRIP;
		}

		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
			return -EFAULT;
	}

	if (vnet_hdr_sz) {
		struct virtio_net_hdr gso;

		if (iov_iter_count(iter) < vnet_hdr_sz)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &gso,
					    tun_is_little_endian(tun), true,
					    vlan_hlen)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			pr_err("unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
			       tun16_to_cpu(tun, gso.hdr_len));
			print_hex_dump(KERN_ERR, "tun: ",
				       DUMP_PREFIX_NONE,
				       16, 1, skb->head,
				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	if (vlan_hlen) {
		int ret;
		struct veth veth;

		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);

done:
	/* caller is in process context */
	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += skb->len + vlan_hlen;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(tun->pcpu_stats);

	return total;
}
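/* Blocking read helper: consume one pointer (sk_buff or tagged
 * xdp_frame) from the queue's ptr_ring. If the ring is empty and the
 * caller may block, sleep on the socket wait queue until tun_net_xmit()
 * or tun_xdp_xmit() wakes us via sk_data_ready(), a signal arrives, or
 * the socket is shut down.
 */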
static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
{
	DECLARE_WAITQUEUE(wait, current);
	void *ptr = NULL;
	int error = 0;

	ptr = ptr_ring_consume(&tfile->tx_ring);
	if (ptr)
		goto out;
	if (noblock) {
		error = -EAGAIN;
		goto out;
	}

	add_wait_queue(&tfile->wq.wait, &wait);

	while (1) {
		/* Set the task state (with its implied barrier) before
		 * re-checking the ring, so a producer's wakeup cannot be
		 * missed between the check and schedule().
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		ptr = ptr_ring_consume(&tfile->tx_ring);
		if (ptr)
			break;
		if (signal_pending(current)) {
			error = -ERESTARTSYS;
			break;
		}
		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
			error = -EFAULT;
			break;
		}

		schedule();
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tfile->wq.wait, &wait);

out:
	*err = error;
	return ptr;
}

static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
			   struct iov_iter *to,
			   int noblock, void *ptr)
{
	ssize_t ret;
	int err;

	tun_debug(KERN_INFO, tun, "tun_do_read\n");

	if (!iov_iter_count(to)) {
		tun_ptr_free(ptr);
		return 0;
	}

	if (!ptr) {
		/* Read frames from ring */
		ptr = tun_ring_recv(tfile, noblock, &err);
		if (!ptr)
			return err;
	}

	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
		xdp_return_frame(xdpf);
	} else {
		struct sk_buff *skb = ptr;

		ret = tun_put_user(tun, tfile, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}

	return ret;
}

static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	ssize_t len = iov_iter_count(to), ret;

	if (!tun)
		return -EBADFD;
	ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	tun_put(tun);
	return ret;
}

static void tun_prog_free(struct rcu_head *rcu)
{
	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);

	bpf_prog_destroy(prog->prog);
	kfree(prog);
}

static int __tun_set_ebpf(struct tun_struct *tun,
			  struct tun_prog __rcu **prog_p,
			  struct bpf_prog *prog)
{
	struct tun_prog *old, *new = NULL;

	if (prog) {
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->prog = prog;
	}

	spin_lock_bh(&tun->lock);
	old = rcu_dereference_protected(*prog_p,
					lockdep_is_held(&tun->lock));
	rcu_assign_pointer(*prog_p, new);
	spin_unlock_bh(&tun->lock);

	if (old)
		call_rcu(&old->rcu, tun_prog_free);

	return 0;
}

static void tun_free_netdev(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	BUG_ON(!list_empty(&tun->disabled));
	free_percpu(tun->pcpu_stats);
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
}
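/* Editor's note: __tun_set_ebpf() above is the usual RCU publish pattern:
 * the pointer is swapped under tun->lock, then freeing of the old program
 * is deferred with call_rcu() so datapath readers that dereferenced it
 * under rcu_read_lock() can finish safely.  Passing prog == NULL
 * uninstalls the program, which is also how tun_free_netdev() drops both
 * programs at destruction time.
 */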
static void tun_setup(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->owner = INVALID_UID;
	tun->group = INVALID_GID;
	tun_default_link_ksettings(dev, &tun->link_ksettings);

	dev->ethtool_ops = &tun_ethtool_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = tun_free_netdev;
	/* We prefer our own queue length */
	dev->tx_queue_len = TUN_READQ_SIZE;
}

/* Trivial set of netlink ops to allow deleting tun or tap
 * device with netlink.
 */
static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (!data)
		return 0;
	return -EINVAL;
}

static size_t tun_get_size(const struct net_device *dev)
{
	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));

	return nla_total_size(sizeof(uid_t)) +	/* OWNER */
	       nla_total_size(sizeof(gid_t)) +	/* GROUP */
	       nla_total_size(sizeof(u8)) +	/* TYPE */
	       nla_total_size(sizeof(u8)) +	/* PI */
	       nla_total_size(sizeof(u8)) +	/* VNET_HDR */
	       nla_total_size(sizeof(u8)) +	/* PERSIST */
	       nla_total_size(sizeof(u8)) +	/* MULTI_QUEUE */
	       nla_total_size(sizeof(u32)) +	/* NUM_QUEUES */
	       nla_total_size(sizeof(u32)) +	/* NUM_DISABLED_QUEUES */
	       0;
}

static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
		goto nla_put_failure;
	if (uid_valid(tun->owner) &&
	    nla_put_u32(skb, IFLA_TUN_OWNER,
			from_kuid_munged(current_user_ns(), tun->owner)))
		goto nla_put_failure;
	if (gid_valid(tun->group) &&
	    nla_put_u32(skb, IFLA_TUN_GROUP,
			from_kgid_munged(current_user_ns(), tun->group)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
		       !!(tun->flags & IFF_MULTI_QUEUE)))
		goto nla_put_failure;
	if (tun->flags & IFF_MULTI_QUEUE) {
		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
				tun->numdisabled))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops tun_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct tun_struct),
	.setup		= tun_setup,
	.validate	= tun_validate,
	.get_size	= tun_get_size,
	.fill_info	= tun_fill_info,
};

static void tun_sock_write_space(struct sock *sk)
{
	struct tun_file *tfile;
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk))
		return;

	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);

	tfile = container_of(sk, struct tun_file, sk);
	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
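/* Editor's note: since tun_validate() above rejects any IFLA_INFO_DATA and
 * no newlink op is provided, these link ops cannot create devices over
 * netlink; they exist so that "ip link del <dev>" works and so that
 * tun_fill_info() can expose type/owner/queue state in link dumps.
 */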
static int tun_xdp_one(struct tun_struct *tun,
		       struct tun_file *tfile,
		       struct xdp_buff *xdp, int *flush)
{
	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
	struct virtio_net_hdr *gso = &hdr->gso;
	struct tun_pcpu_stats *stats;
	struct bpf_prog *xdp_prog;
	struct sk_buff *skb = NULL;
	u32 rxhash = 0, act;
	int buflen = hdr->buflen;
	int err = 0;
	bool skb_xdp = false;

	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog) {
		if (gso->gso_type) {
			skb_xdp = true;
			goto build;
		}
		xdp_set_data_meta_invalid(xdp);
		xdp->rxq = &tfile->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, xdp);
		err = tun_xdp_act(tun, xdp_prog, xdp, act);
		if (err < 0) {
			put_page(virt_to_head_page(xdp->data));
			return err;
		}

		switch (err) {
		case XDP_REDIRECT:
			*flush = true;
			/* fall through */
		case XDP_TX:
			return 0;
		case XDP_PASS:
			break;
		default:
			put_page(virt_to_head_page(xdp->data));
			return 0;
		}
	}

build:
	skb = build_skb(xdp->data_hard_start, buflen);
	if (!skb) {
		err = -ENOMEM;
		goto out;
	}

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);

	if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
		kfree_skb(skb);
		err = -EINVAL;
		goto out;
	}

	skb->protocol = eth_type_trans(skb, tun->dev);
	skb_reset_network_header(skb);
	skb_probe_transport_header(skb, 0);

	if (skb_xdp) {
		err = do_xdp_generic(xdp_prog, skb);
		if (err != XDP_PASS)
			goto out;
	}

	if (!rcu_dereference(tun->steering_prog))
		rxhash = __skb_get_hash_symmetric(skb);

	skb_record_rx_queue(skb, tfile->queue_index);
	netif_receive_skb(skb);

	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(stats);

	if (rxhash)
		tun_flow_update(tun, rxhash, tfile);

out:
	return err;
}
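/* Editor's note: tun_sendmsg() below has two paths.  A control block of
 * type TUN_MSG_PTR carries an array of pre-built xdp_buffs (in-tree,
 * vhost-net is the expected producer of such batches), which are fed one
 * by one to tun_xdp_one() above with a single xdp_do_flush_map() at the
 * end of the batch.  Anything else falls back to tun_get_user(), i.e. the
 * same copy path as write().
 */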
static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret, i;
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);
	struct tun_msg_ctl *ctl = m->msg_control;
	struct xdp_buff *xdp;

	if (!tun)
		return -EBADFD;

	if (ctl && (ctl->type == TUN_MSG_PTR)) {
		int n = ctl->num;
		int flush = 0;

		local_bh_disable();
		rcu_read_lock();

		for (i = 0; i < n; i++) {
			xdp = &((struct xdp_buff *)ctl->ptr)[i];
			tun_xdp_one(tun, tfile, xdp, &flush);
		}

		if (flush)
			xdp_do_flush_map();

		rcu_read_unlock();
		local_bh_enable();

		ret = total_len;
		goto out;
	}

	ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
			   m->msg_flags & MSG_DONTWAIT,
			   m->msg_flags & MSG_MORE);
out:
	tun_put(tun);
	return ret;
}

static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);
	void *ptr = m->msg_control;
	int ret;

	if (!tun) {
		ret = -EBADFD;
		goto out_free;
	}

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
		ret = -EINVAL;
		goto out_put_tun;
	}
	if (flags & MSG_ERRQUEUE) {
		ret = sock_recv_errqueue(sock->sk, m, total_len,
					 SOL_PACKET, TUN_TX_TIMESTAMP);
		goto out;
	}
	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
	if (ret > (ssize_t)total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
out:
	tun_put(tun);
	return ret;

out_put_tun:
	tun_put(tun);
out_free:
	tun_ptr_free(ptr);
	return ret;
}

static int tun_ptr_peek_len(void *ptr)
{
	if (likely(ptr)) {
		if (tun_is_xdp_frame(ptr)) {
			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

			return xdpf->len;
		}
		return __skb_array_len_with_tag(ptr);
	} else {
		return 0;
	}
}

static int tun_peek_len(struct socket *sock)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun;
	int ret = 0;

	tun = tun_get(tfile);
	if (!tun)
		return 0;

	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
	tun_put(tun);

	return ret;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
	.peek_len = tun_peek_len,
	.sendmsg = tun_sendmsg,
	.recvmsg = tun_recvmsg,
};

static struct proto tun_proto = {
	.name		= "tun",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tun_file),
};

static int tun_flags(struct tun_struct *tun)
{
	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
}

static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%x\n", tun_flags(tun));
}

static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));

	return uid_valid(tun->owner) ?
		sprintf(buf, "%u\n",
			from_kuid_munged(current_user_ns(), tun->owner)) :
		sprintf(buf, "-1\n");
}
static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));

	return gid_valid(tun->group) ?
		sprintf(buf, "%u\n",
			from_kgid_munged(current_user_ns(), tun->group)) :
		sprintf(buf, "-1\n");
}

static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
static DEVICE_ATTR(group, 0444, tun_show_group, NULL);

static struct attribute *tun_dev_attrs[] = {
	&dev_attr_tun_flags.attr,
	&dev_attr_owner.attr,
	&dev_attr_group.attr,
	NULL
};

static const struct attribute_group tun_attr_group = {
	.attrs = tun_dev_attrs
};

static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun;
	struct tun_file *tfile = file->private_data;
	struct net_device *dev;
	int err;

	if (tfile->detached)
		return -EINVAL;

	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!(ifr->ifr_flags & IFF_NAPI) ||
		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
			return -EINVAL;
	}

	dev = __dev_get_by_name(net, ifr->ifr_name);
	if (dev) {
		if (ifr->ifr_flags & IFF_TUN_EXCL)
			return -EBUSY;
		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
			tun = netdev_priv(dev);
		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
			tun = netdev_priv(dev);
		else
			return -EINVAL;

		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
		    !!(tun->flags & IFF_MULTI_QUEUE))
			return -EINVAL;

		if (tun_not_capable(tun))
			return -EPERM;
		err = security_tun_dev_open(tun->security);
		if (err < 0)
			return err;

		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
				 ifr->ifr_flags & IFF_NAPI,
				 ifr->ifr_flags & IFF_NAPI_FRAGS);
		if (err < 0)
			return err;

		if (tun->flags & IFF_MULTI_QUEUE &&
		    (tun->numqueues + tun->numdisabled > 1)) {
			/* One or more queues are already attached, no need
			 * to initialize the device again.
			 */
			netdev_state_change(dev);
			return 0;
		}

		tun->flags = (tun->flags & ~TUN_FEATURES) |
			     (ifr->ifr_flags & TUN_FEATURES);

		netdev_state_change(dev);
	} else {
		char *name;
		unsigned long flags = 0;
		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
			     MAX_TAP_QUEUES : 1;

		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_create();
		if (err < 0)
			return err;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= IFF_TUN;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= IFF_TAP;
			name = "tap%d";
		} else
			return -EINVAL;

		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
				       NET_NAME_UNKNOWN, tun_setup, queues,
				       queues);

		if (!dev)
			return -ENOMEM;
		err = dev_get_valid_name(net, dev, name);
		if (err < 0)
			goto err_free_dev;

		dev_net_set(dev, net);
		dev->rtnl_link_ops = &tun_link_ops;
		dev->ifindex = tfile->ifindex;
		dev->sysfs_groups[0] = &tun_attr_group;

		tun = netdev_priv(dev);
		tun->dev = dev;
		tun->flags = flags;
		tun->txflt.count = 0;
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

		tun->align = NET_SKB_PAD;
		tun->filter_attached = false;
		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
		tun->rx_batched = 0;
		RCU_INIT_POINTER(tun->steering_prog, NULL);

		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
		if (!tun->pcpu_stats) {
			err = -ENOMEM;
			goto err_free_dev;
		}

		spin_lock_init(&tun->lock);

		err = security_tun_dev_alloc_security(&tun->security);
		if (err < 0)
			goto err_free_stat;

		tun_net_init(dev);
		tun_flow_init(tun);

		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_STAG_TX;
		dev->features = dev->hw_features | NETIF_F_LLTX;
		dev->vlan_features = dev->features &
				     ~(NETIF_F_HW_VLAN_CTAG_TX |
				       NETIF_F_HW_VLAN_STAG_TX);

		tun->flags = (tun->flags & ~TUN_FEATURES) |
			     (ifr->ifr_flags & TUN_FEATURES);

		INIT_LIST_HEAD(&tun->disabled);
		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
				 ifr->ifr_flags & IFF_NAPI_FRAGS);
		if (err < 0)
			goto err_free_flow;

		err = register_netdevice(tun->dev);
		if (err < 0)
			goto err_detach;
	}

	netif_carrier_on(tun->dev);

	tun_debug(KERN_INFO, tun, "tun_set_iff\n");

	/* Make sure persistent devices do not get stuck in
	 * xoff state.
	 */
	if (netif_running(tun->dev))
		netif_tx_wake_all_queues(tun->dev);

	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;

err_detach:
	tun_detach_all(dev);
	/* register_netdevice() already called tun_free_netdev() */
	goto err_free_dev;

err_free_flow:
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
err_free_stat:
	free_percpu(tun->pcpu_stats);
err_free_dev:
	free_netdev(dev);
	return err;
}
static void tun_get_iff(struct net *net, struct tun_struct *tun,
			struct ifreq *ifr)
{
	tun_debug(KERN_INFO, tun, "tun_get_iff\n");

	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = tun_flags(tun);
}
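/* Editor's sketch (not part of the driver): the canonical userspace
 * sequence that ends up in tun_set_iff() above, following
 * Documentation/networking/tuntap.txt; "dev" may name an existing device
 * or hold a "tun%d"-style template:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	int tun_alloc(char *dev)
 *	{
 *		struct ifreq ifr;
 *		int fd, err;
 *
 *		fd = open("/dev/net/tun", O_RDWR);
 *		if (fd < 0)
 *			return -1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
 *		if (*dev)
 *			strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);
 *		err = ioctl(fd, TUNSETIFF, (void *)&ifr);
 *		if (err < 0) {
 *			close(fd);
 *			return err;
 *		}
 *		strcpy(dev, ifr.ifr_name);	// kernel wrote back the name
 *		return fd;
 *	}
 *
 * The write-back of the resolved name matches the copy_to_user() in
 * __tun_chr_ioctl() below.
 */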
/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required.
 */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	if (arg & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}

		arg &= ~TUN_F_UFO;
	}

	/* This gives the user a way to test for new features in the future
	 * by trying to set them.
	 */
	if (arg)
		return -EINVAL;

	tun->set_features = features;
	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
	tun->dev->wanted_features |= features;
	netdev_update_features(tun->dev);

	return 0;
}

static void tun_detach_filter(struct tun_struct *tun, int n)
{
	int i;
	struct tun_file *tfile;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		sk_detach_filter(tfile->socket.sk);
		release_sock(tfile->socket.sk);
	}

	tun->filter_attached = false;
}

static int tun_attach_filter(struct tun_struct *tun)
{
	int i, ret = 0;
	struct tun_file *tfile;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (ret) {
			tun_detach_filter(tun, i);
			return ret;
		}
	}

	tun->filter_attached = true;
	return ret;
}

static void tun_set_sndbuf(struct tun_struct *tun)
{
	struct tun_file *tfile;
	int i;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
	}
}

static int tun_set_queue(struct file *file, struct ifreq *ifr)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	int ret = 0;

	rtnl_lock();

	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
		tun = tfile->detached;
		if (!tun) {
			ret = -EINVAL;
			goto unlock;
		}
		ret = security_tun_dev_attach_queue(tun->security);
		if (ret < 0)
			goto unlock;
		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
				 tun->flags & IFF_NAPI_FRAGS);
	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
		tun = rtnl_dereference(tfile->tun);
		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
			ret = -EINVAL;
		else
			__tun_detach(tfile, false);
	} else
		ret = -EINVAL;

	if (ret >= 0)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	return ret;
}

static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
			void __user *data)
{
	struct bpf_prog *prog;
	int fd;

	if (copy_from_user(&fd, data, sizeof(fd)))
		return -EFAULT;

	if (fd == -1) {
		prog = NULL;
	} else {
		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
		if (IS_ERR(prog))
			return PTR_ERR(prog);
	}

	return __tun_set_ebpf(tun, prog_p, prog);
}
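/* Editor's sketch (hypothetical userspace fragment, not part of the
 * driver): the TUNSETSTEERINGEBPF/TUNSETFILTEREBPF handlers below route
 * through tun_set_ebpf() above, which takes a pointer to the fd of a
 * BPF_PROG_TYPE_SOCKET_FILTER program; fd -1 detaches.  Here prog_fd is
 * assumed to come from a successful bpf(BPF_PROG_LOAD, ...):
 *
 *	int prog_fd = ...;	// from bpf(BPF_PROG_LOAD, ...)
 *
 *	if (ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd) < 0)
 *		perror("TUNSETSTEERINGEBPF");
 *	...
 *	prog_fd = -1;		// detach the steering program
 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);
 */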
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg, int ifreq_len)
{
	struct tun_file *tfile = file->private_data;
	struct net *net = sock_net(&tfile->sk);
	struct tun_struct *tun;
	void __user *argp = (void __user *)arg;
	struct ifreq ifr;
	kuid_t owner;
	kgid_t group;
	int sndbuf;
	int vnet_hdr_sz;
	unsigned int ifindex;
	int le;
	int ret;
	bool do_notify = false;

	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
		if (copy_from_user(&ifr, argp, ifreq_len))
			return -EFAULT;
	} else {
		memset(&ifr, 0, sizeof(ifr));
	}
	if (cmd == TUNGETFEATURES) {
		/* Currently this just means: "what IFF flags are valid?".
		 * This is needed because we never checked for invalid flags on
		 * TUNSETIFF.
		 */
		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
				(unsigned int __user *)argp);
	} else if (cmd == TUNSETQUEUE) {
		return tun_set_queue(file, &ifr);
	} else if (cmd == SIOCGSKNS) {
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		return open_related_ns(&net->ns, get_net_ns);
	}

	ret = 0;
	rtnl_lock();

	tun = tun_get(tfile);
	if (cmd == TUNSETIFF) {
		ret = -EEXIST;
		if (tun)
			goto unlock;

		ifr.ifr_name[IFNAMSIZ-1] = '\0';

		ret = tun_set_iff(net, file, &ifr);

		if (ret)
			goto unlock;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		goto unlock;
	}
	if (cmd == TUNSETIFINDEX) {
		ret = -EPERM;
		if (tun)
			goto unlock;

		ret = -EFAULT;
		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
			goto unlock;

		ret = 0;
		tfile->ifindex = ifindex;
		goto unlock;
	}

	ret = -EBADFD;
	if (!tun)
		goto unlock;

	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);

	ret = 0;
	switch (cmd) {
	case TUNGETIFF:
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);

		if (tfile->detached)
			ifr.ifr_flags |= IFF_DETACH_QUEUE;
		if (!tfile->socket.sk->sk_filter)
			ifr.ifr_flags |= IFF_NOFILTER;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case TUNSETNOCSUM:
		/* Disable/Enable checksum */

		/* [unimplemented] */
		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
			  arg ? "disabled" : "enabled");
		break;

	case TUNSETPERSIST:
		/* Disable/Enable persist mode. Keep an extra reference to the
		 * module to prevent it from being unloaded while a persistent
		 * device exists.
		 */
		if (arg && !(tun->flags & IFF_PERSIST)) {
			tun->flags |= IFF_PERSIST;
			__module_get(THIS_MODULE);
			do_notify = true;
		}
		if (!arg && (tun->flags & IFF_PERSIST)) {
			tun->flags &= ~IFF_PERSIST;
			module_put(THIS_MODULE);
			do_notify = true;
		}

		tun_debug(KERN_INFO, tun, "persist %s\n",
			  arg ? "enabled" : "disabled");
		break;
"enabled" : "disabled"); 3091 break; 3092 3093 case TUNSETOWNER: 3094 /* Set owner of the device */ 3095 owner = make_kuid(current_user_ns(), arg); 3096 if (!uid_valid(owner)) { 3097 ret = -EINVAL; 3098 break; 3099 } 3100 tun->owner = owner; 3101 do_notify = true; 3102 tun_debug(KERN_INFO, tun, "owner set to %u\n", 3103 from_kuid(&init_user_ns, tun->owner)); 3104 break; 3105 3106 case TUNSETGROUP: 3107 /* Set group of the device */ 3108 group = make_kgid(current_user_ns(), arg); 3109 if (!gid_valid(group)) { 3110 ret = -EINVAL; 3111 break; 3112 } 3113 tun->group = group; 3114 do_notify = true; 3115 tun_debug(KERN_INFO, tun, "group set to %u\n", 3116 from_kgid(&init_user_ns, tun->group)); 3117 break; 3118 3119 case TUNSETLINK: 3120 /* Only allow setting the type when the interface is down */ 3121 if (tun->dev->flags & IFF_UP) { 3122 tun_debug(KERN_INFO, tun, 3123 "Linktype set failed because interface is up\n"); 3124 ret = -EBUSY; 3125 } else { 3126 tun->dev->type = (int) arg; 3127 tun_debug(KERN_INFO, tun, "linktype set to %d\n", 3128 tun->dev->type); 3129 ret = 0; 3130 } 3131 break; 3132 3133 #ifdef TUN_DEBUG 3134 case TUNSETDEBUG: 3135 tun->debug = arg; 3136 break; 3137 #endif 3138 case TUNSETOFFLOAD: 3139 ret = set_offload(tun, arg); 3140 break; 3141 3142 case TUNSETTXFILTER: 3143 /* Can be set only for TAPs */ 3144 ret = -EINVAL; 3145 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 3146 break; 3147 ret = update_filter(&tun->txflt, (void __user *)arg); 3148 break; 3149 3150 case SIOCGIFHWADDR: 3151 /* Get hw address */ 3152 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); 3153 ifr.ifr_hwaddr.sa_family = tun->dev->type; 3154 if (copy_to_user(argp, &ifr, ifreq_len)) 3155 ret = -EFAULT; 3156 break; 3157 3158 case SIOCSIFHWADDR: 3159 /* Set hw address */ 3160 tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n", 3161 ifr.ifr_hwaddr.sa_data); 3162 3163 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 3164 break; 3165 3166 case TUNGETSNDBUF: 3167 sndbuf = tfile->socket.sk->sk_sndbuf; 3168 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) 3169 ret = -EFAULT; 3170 break; 3171 3172 case TUNSETSNDBUF: 3173 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { 3174 ret = -EFAULT; 3175 break; 3176 } 3177 if (sndbuf <= 0) { 3178 ret = -EINVAL; 3179 break; 3180 } 3181 3182 tun->sndbuf = sndbuf; 3183 tun_set_sndbuf(tun); 3184 break; 3185 3186 case TUNGETVNETHDRSZ: 3187 vnet_hdr_sz = tun->vnet_hdr_sz; 3188 if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) 3189 ret = -EFAULT; 3190 break; 3191 3192 case TUNSETVNETHDRSZ: 3193 if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { 3194 ret = -EFAULT; 3195 break; 3196 } 3197 if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { 3198 ret = -EINVAL; 3199 break; 3200 } 3201 3202 tun->vnet_hdr_sz = vnet_hdr_sz; 3203 break; 3204 3205 case TUNGETVNETLE: 3206 le = !!(tun->flags & TUN_VNET_LE); 3207 if (put_user(le, (int __user *)argp)) 3208 ret = -EFAULT; 3209 break; 3210 3211 case TUNSETVNETLE: 3212 if (get_user(le, (int __user *)argp)) { 3213 ret = -EFAULT; 3214 break; 3215 } 3216 if (le) 3217 tun->flags |= TUN_VNET_LE; 3218 else 3219 tun->flags &= ~TUN_VNET_LE; 3220 break; 3221 3222 case TUNGETVNETBE: 3223 ret = tun_get_vnet_be(tun, argp); 3224 break; 3225 3226 case TUNSETVNETBE: 3227 ret = tun_set_vnet_be(tun, argp); 3228 break; 3229 3230 case TUNATTACHFILTER: 3231 /* Can be set only for TAPs */ 3232 ret = -EINVAL; 3233 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 3234 break; 3235 ret = -EFAULT; 3236 if 
	case TUNATTACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
			break;

		ret = tun_attach_filter(tun);
		break;

	case TUNDETACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = 0;
		tun_detach_filter(tun, tun->numqueues);
		break;

	case TUNGETFILTER:
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
			break;
		ret = 0;
		break;

	case TUNSETSTEERINGEBPF:
		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
		break;

	case TUNSETFILTEREBPF:
		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	if (do_notify)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	if (tun)
		tun_put(tun);
	return ret;
}

static long tun_chr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
}

#ifdef CONFIG_COMPAT
static long tun_chr_compat_ioctl(struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case TUNSETIFF:
	case TUNGETIFF:
	case TUNSETTXFILTER:
	case TUNGETSNDBUF:
	case TUNSETSNDBUF:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
		arg = (unsigned long)compat_ptr(arg);
		break;
	default:
		arg = (compat_ulong_t)arg;
		break;
	}

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible though, we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */
static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_file *tfile = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &tfile->fasync);
	if (ret < 0)
		goto out;

	if (on) {
		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
		tfile->flags |= TUN_FASYNC;
	} else
		tfile->flags &= ~TUN_FASYNC;
	ret = 0;
out:
	return ret;
}

static int tun_chr_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tun_file *tfile;

	DBG1(KERN_INFO, "tunX: tun_chr_open\n");

	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto, 0);
	if (!tfile)
		return -ENOMEM;
	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
		sk_free(&tfile->sk);
		return -ENOMEM;
	}

	mutex_init(&tfile->napi_mutex);
	RCU_INIT_POINTER(tfile->tun, NULL);
	tfile->flags = 0;
	tfile->ifindex = 0;

	init_waitqueue_head(&tfile->wq.wait);
	RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data(&tfile->socket, &tfile->sk);

	tfile->sk.sk_write_space = tun_sock_write_space;
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	INIT_LIST_HEAD(&tfile->next);

	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);

	return 0;
}

static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;

	tun_detach(tfile, true);

	return 0;
}

#ifdef CONFIG_PROC_FS
static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));

	rtnl_lock();
	tun = tun_get(tfile);
	if (tun)
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
	rtnl_unlock();

	if (tun)
		tun_put(tun);

	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
}
#endif

static const struct file_operations tun_fops = {
	.owner	= THIS_MODULE,
	.llseek = no_llseek,
	.read_iter  = tun_chr_read_iter,
	.write_iter = tun_chr_write_iter,
	.poll	= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open	= tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = tun_chr_show_fdinfo,
#endif
};

static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};

/* ethtool interface */

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	cmd->base.speed		= SPEED_10;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.phy_address	= 0;
	cmd->base.autoneg	= AUTONEG_DISABLE;
}
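/* Editor's note: there is no real PHY behind a tun/tap device, so the
 * defaults above (10 Mb/s, full duplex, TP port, autoneg off) are pure
 * bookkeeping; tun_set_link_ksettings() below simply memcpy()s whatever
 * "ethtool -s" supplies into tun->link_ksettings, and the get op reports
 * it back unchanged.
 */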
static int tun_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
	return 0;
}

static int tun_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
	return 0;
}

static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
		break;
	case IFF_TAP:
		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
		break;
	}
}

static u32 tun_get_msglevel(struct net_device *dev)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);

	return tun->debug;
#else
	return -EOPNOTSUPP;
#endif
}

static void tun_set_msglevel(struct net_device *dev, u32 value)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);

	tun->debug = value;
#endif
}

static int tun_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	ec->rx_max_coalesced_frames = tun->rx_batched;

	return 0;
}

static int tun_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
		tun->rx_batched = NAPI_POLL_WEIGHT;
	else
		tun->rx_batched = ec->rx_max_coalesced_frames;

	return 0;
}

static const struct ethtool_ops tun_ethtool_ops = {
	.get_drvinfo	= tun_get_drvinfo,
	.get_msglevel	= tun_get_msglevel,
	.set_msglevel	= tun_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_coalesce	= tun_get_coalesce,
	.set_coalesce	= tun_set_coalesce,
	.get_link_ksettings = tun_get_link_ksettings,
	.set_link_ksettings = tun_set_link_ksettings,
};

static int tun_queue_resize(struct tun_struct *tun)
{
	struct net_device *dev = tun->dev;
	struct tun_file *tfile;
	struct ptr_ring **rings;
	int n = tun->numqueues + tun->numdisabled;
	int ret, i;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		rings[i] = &tfile->tx_ring;
	}
	list_for_each_entry(tfile, &tun->disabled, next)
		rings[i++] = &tfile->tx_ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       tun_ptr_free);

	kfree(rings);
	return ret;
}

static int tun_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tun_struct *tun = netdev_priv(dev);

	if (dev->rtnl_link_ops != &tun_link_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		if (tun_queue_resize(tun))
			return NOTIFY_BAD;
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block tun_notifier_block __read_mostly = {
	.notifier_call	= tun_device_event,
};

static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}

	ret = register_netdevice_notifier(&tun_notifier_block);
	if (ret) {
		pr_err("Can't register netdevice notifier\n");
		goto err_notifier;
	}

	return 0;

err_notifier:
	misc_deregister(&tun_miscdev);
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}

static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
	unregister_netdevice_notifier(&tun_notifier_block);
}

/* Get an underlying socket object from tun file.  Returns error unless file is
 * attached to a device.  The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 * holding a reference to the file for as long as the socket is in use.
 */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);

struct ptr_ring *tun_get_tx_ring(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->tx_ring;
}
EXPORT_SYMBOL_GPL(tun_get_tx_ring);

module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");