/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (tun->debug)						\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (debug == 2)						\
		printk(level fmt, ##args);			\
} while (0)
#else
#define tun_debug(level, tun, fmt, args...)			\
do {								\
	if (0)							\
		netdev_printk(level, tun->dev, fmt, ##args);	\
} while (0)
#define DBG1(level, fmt, args...)				\
do {								\
	if (0)							\
		printk(level fmt, ##args);			\
} while (0)
#endif

#define TUN_HEADROOM 256
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE     0x80000000
#define TUN_VNET_BE     0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and tap_filter)
 * to serve as one transmit queue for tuntap device. The sock_fprog and
 * tap_filter were kept in tun_struct since they were used for filtering for the
 * netdevice not for a specific queue (at least I didn't see the requirement for
 * this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
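
/* The flow table is a power-of-two sized hash table indexed by the low
 * bits of the skb hash (see tun_hashfn() below), so TUN_MASK_FLOW_ENTRIES
 * only works as a modulo while TUN_NUM_FLOW_ENTRIES stays a power of two.
 * For example, rxhash 0x12345678 & 0x3ff selects bucket 0x278.
 */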

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when a file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int            numqueues;
	unsigned int 		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

bool tun_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & TUN_XDP_FLAG;
}
EXPORT_SYMBOL(tun_is_xdp_frame);

void *tun_xdp_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_xdp_to_ptr);

void *tun_ptr_to_xdp(void *ptr)
{
	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
EXPORT_SYMBOL(tun_ptr_to_xdp);

static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}
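
/* vnet header endianness: a legacy virtio device is native-endian, a
 * modern one is little-endian.  TUN_VNET_LE forces little-endian and
 * TUN_VNET_BE (only with CONFIG_TUN_VNET_CROSS_LE) forces big-endian,
 * so a userspace of one endianness can serve a legacy guest of the
 * other.  All vnet header fields must go through tun16_to_cpu() /
 * cpu_to_tun16() below rather than plain byte-order macros.
 */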

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}

static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}
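
/* Flow entries are garbage collected by flow_gc_timer rather than on a
 * per-packet basis: tun_flow_cleanup() walks all buckets, drops entries
 * older than ageing_time and re-arms the timer for the earliest
 * remaining expiry, so an idle table keeps the timer off entirely.
 */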
static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	if (!rxhash)
		return;
	else
		head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

/**
 * Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}
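
/* Flow-based steering ties the two halves of the driver together:
 * tun_get_user() computes a symmetric hash on the receive path and
 * feeds it to tun_flow_update(), which records the queue a flow last
 * used; tun_automq_select_queue() below then looks the hash up again
 * on transmit so packets of one flow keep hitting the same queue/fd.
 */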

/* We try to identify a flow through its rxhash first. The reason that
 * we do not check rxq no. is because some cards (e.g., 82599) choose
 * the rxq based on the txq where the last packet of the flow went out.
 * As the userspace application moves between processors, we may get a
 * different rxq no. here. If we could not get the rxhash, then we would
 * hope that the rxq no. may help here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	if (txq) {
		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
		if (e) {
			tun_flow_save_rps_rxhash(e, txq);
			txq = e->queue_index;
		} else
			/* use multiply and shift instead of expensive divide */
			txq = ((u64)txq * numqueues) >> 32;
	} else if (likely(skb_rx_queue_recorded(skb))) {
		txq = skb_get_rx_queue(skb);
		while (unlikely(txq >= numqueues))
			txq -= numqueues;
	}

	return txq;
}

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u16 ret = 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % tun->numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}

static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}
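
/* A queue can be in one of three states: attached (tfile->tun set and
 * present in tun->tfiles[]), disabled (tfile->detached set and linked
 * on tun->disabled, kept alive for a persistent multi-queue device) or
 * fully detached.  __tun_detach() below moves a queue down this ladder;
 * with clean == false it only disables the queue so it can later be
 * re-enabled by tun_attach().
 */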
static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;

		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
		sock_put(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}
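
/* tun_attach() is the single entry point for binding an open fd to the
 * device, used both for brand-new queues and for re-enabling a queue
 * that was previously disabled on a persistent device.  It re-attaches
 * a stored socket filter, sizes the tx_ring to the device tx_queue_len
 * and registers the XDP rxq info before publishing the tfile with
 * rcu_assign_pointer().
 */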
static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to persist device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (err < 0)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;

	if (tfile->detached) {
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	tun_set_real_num_queues(tun);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}
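
/* The TAP filter combines up to FLT_EXACT_COUNT exact address matches
 * with a 64-bit hash mask for the remaining multicast addresses: the
 * top 6 bits of the Ethernet CRC pick one of 64 mask bits, so a set
 * bit means "some filtered address hashes here" and false positives
 * are possible, much like a one-level Bloom filter.  Example: an
 * address whose CRC >> 26 is 33 sets bit 1 of mask[1].
 */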
static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks, which is
	 * perfectly safe. We disable it first and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_header() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	netif_tx_start_all_queues(dev);

	for (i = 0; i < tun->numqueues; i++) {
		struct tun_file *tfile;

		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_write_space(tfile->socket.sk);
	}

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}
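
/* Transmit path: the stack hands us an skb via tun_net_xmit(), which
 * runs the TAP and socket filters, orphans the skb and pushes it into
 * the selected queue's ptr_ring, then wakes the userspace reader via
 * sk_data_ready()/SIGIO.  The skb is only copied out to the user in
 * tun_put_user() when the reader consumes the ring.
 */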

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		if (rxhash) {
			struct tun_flow_entry *e;

			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
					  rxhash);
			if (e)
				tun_flow_save_rps_rxhash(e, rxhash);
		}
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (txq >= tun->numqueues)
		goto drop;

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0 || pskb_trim(skb, len))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for an indefinite time.
	 */
	skb_orphan(skb);

	nf_reset(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}

static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_pcpu_stats *p;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(tun->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets	= p->rx_packets;
			rxbytes		= p->rx_bytes;
			txpackets	= p->tx_packets;
			txbytes		= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;

		/* u32 counters */
		rx_dropped	+= p->rx_dropped;
		rx_frame_errors	+= p->rx_frame_errors;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped  = rx_dropped;
	stats->rx_frame_errors = rx_frame_errors;
	stats->tx_dropped = tx_dropped;
}

static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct bpf_prog *old_prog;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}

static u32 tun_xdp_query(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;

	xdp_prog = rtnl_dereference(tun->xdp_prog);
	if (xdp_prog)
		return xdp_prog->aux->id;

	return 0;
}

static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return tun_xdp_set(dev, xdp->prog, xdp->extack);
	case XDP_QUERY_PROG:
		xdp->prog_id = tun_xdp_query(dev);
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops tun_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
};

static void __tun_xdp_flush_tfile(struct tun_file *tfile)
{
	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
}
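
/* ndo_xdp_xmit: batches of XDP frames are tag-encoded (TUN_XDP_FLAG)
 * and produced into one queue's tx_ring under the producer lock; the
 * queue is picked by the sending CPU id modulo numqueues.  Frames that
 * don't fit are returned to the allocator and counted as tx_dropped,
 * and the reader is only woken when XDP_XMIT_FLUSH is set.
 */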
static int tun_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	u32 numqueues;
	int drops = 0;
	int cnt = n;
	int i;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues) {
		rcu_read_unlock();
		return -ENXIO; /* Caller will free/return all frames */
	}

	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
					    numqueues]);

	spin_lock(&tfile->tx_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdp = frames[i];
		/* Encode the XDP flag into lowest bit so the consumer can
		 * distinguish XDP buffers from sk_buffs.
		 */
		void *frame = tun_xdp_to_ptr(xdp);

		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
			this_cpu_inc(tun->pcpu_stats->tx_dropped);
			xdp_return_frame_rx_napi(xdp);
			drops++;
		}
	}
	spin_unlock(&tfile->tx_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__tun_xdp_flush_tfile(tfile);

	rcu_read_unlock();
	return cnt - drops;
}

static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
	struct xdp_frame *frame = convert_to_xdp_frame(xdp);

	if (unlikely(!frame))
		return -EOVERFLOW;

	return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
}

static const struct net_device_ops tap_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_bpf		= tun_xdp,
	.ndo_xdp_xmit		= tun_xdp_xmit,
};

static void tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));
}

static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}

#define MIN_MTU 68
#define MAX_MTU 65535

/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		dev->netdev_ops = &tun_netdev_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		break;

	case IFF_TAP:
		dev->netdev_ops = &tap_netdev_ops;
		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

		eth_hw_addr_random(dev);

		break;
	}

	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU - dev->hard_header_len;
}

static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
{
	struct sock *sk = tfile->socket.sk;

	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
}

/* Character device part */

/* Poll */
static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	struct sock *sk;
	__poll_t mask = 0;

	if (!tun)
		return EPOLLERR;

	sk = tfile->socket.sk;

	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");

	poll_wait(file, sk_sleep(sk), wait);

	if (!ptr_ring_empty(&tfile->tx_ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
	 * guarantee EPOLLOUT to be raised by either here or
	 * tun_sock_write_space(). Then process could get notification
	 * after it writes to a down device and meets -EIO.
	 */
	if (tun_sock_writeable(tun, tfile) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	     tun_sock_writeable(tun, tfile)))
		mask |= EPOLLOUT | EPOLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = EPOLLERR;

	tun_put(tun);
	return mask;
}

static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
					    size_t len,
					    const struct iov_iter *it)
{
	struct sk_buff *skb;
	size_t linear;
	int err;
	int i;

	if (it->nr_segs > MAX_SKB_FRAGS + 1)
		return ERR_PTR(-ENOMEM);

	local_bh_disable();
	skb = napi_get_frags(&tfile->napi);
	local_bh_enable();
	if (!skb)
		return ERR_PTR(-ENOMEM);

	linear = iov_iter_single_seg_count(it);
	err = __skb_grow(skb, linear);
	if (err)
		goto free;

	skb->len = len;
	skb->data_len = len - linear;
	skb->truesize += skb->data_len;

	for (i = 1; i < it->nr_segs; i++) {
		struct page_frag *pfrag = &current->task_frag;
		size_t fragsz = it->iov[i].iov_len;

		if (fragsz == 0 || fragsz > PAGE_SIZE) {
			err = -EINVAL;
			goto free;
		}

		if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
			err = -ENOMEM;
			goto free;
		}

		skb_fill_page_desc(skb, i - 1, pfrag->page,
				   pfrag->offset, fragsz);
		page_ref_inc(pfrag->page);
		pfrag->offset += fragsz;
	}

	return skb;
free:
	/* frees skb and all frags allocated with napi_alloc_frag() */
	napi_free_frags(&tfile->napi);
	return ERR_PTR(err);
}

/* prepad is the amount to reserve at front.  len is length after that.
 * linear is a hint as to how much to copy (usually headers). */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err, 0);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
			   struct sk_buff *skb, int more)
{
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	u32 rx_batched = tun->rx_batched;
	bool rcv = false;

	if (!rx_batched || (!more && skb_queue_empty(queue))) {
		local_bh_disable();
		netif_receive_skb(skb);
		local_bh_enable();
		return;
	}

	spin_lock(&queue->lock);
	if (!more || skb_queue_len(queue) == rx_batched) {
		__skb_queue_head_init(&process_queue);
		skb_queue_splice_tail_init(queue, &process_queue);
		rcv = true;
	} else {
		__skb_queue_tail(queue, skb);
	}
	spin_unlock(&queue->lock);

	if (rcv) {
		struct sk_buff *nskb;

		local_bh_disable();
		while ((nskb = __skb_dequeue(&process_queue)))
			netif_receive_skb(nskb);
		netif_receive_skb(skb);
		local_bh_enable();
	}
}

static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
			      int len, int noblock, bool zerocopy)
{
	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
		return false;

	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
		return false;

	if (!noblock)
		return false;

	if (zerocopy)
		return false;

	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return false;

	return true;
}
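
/* tun_build_skb() is the fast path guarded by tun_can_build_skb(): the
 * whole packet is copied into a page fragment with TUN_RX_PAD (plus
 * TUN_HEADROOM when an XDP program is attached) in front, so a native
 * XDP program can run on the raw buffer before an skb is built around
 * it with build_skb(), avoiding a second copy.
 */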
static struct sk_buff *tun_build_skb(struct tun_struct *tun,
				     struct tun_file *tfile,
				     struct iov_iter *from,
				     struct virtio_net_hdr *hdr,
				     int len, int *skb_xdp)
{
	struct page_frag *alloc_frag = &current->task_frag;
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	unsigned int delta = 0;
	char *buf;
	size_t copied;
	int err, pad = TUN_RX_PAD;

	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog)
		pad += TUN_HEADROOM;
	buflen += SKB_DATA_ALIGN(len + pad);
	rcu_read_unlock();

	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset + pad,
				     len, from);
	if (copied != len)
		return ERR_PTR(-EFAULT);

	/* There's a small window that XDP may be set after the check
	 * of xdp_prog above; this should be rare, and for simplicity
	 * we do XDP on the skb in case the headroom is not enough.
	 */
	if (hdr->gso_type || !xdp_prog)
		*skb_xdp = 1;
	else
		*skb_xdp = 0;

	local_bh_disable();
	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog && !*skb_xdp) {
		struct xdp_buff xdp;
		void *orig_data;
		u32 act;

		xdp.data_hard_start = buf;
		xdp.data = buf + pad;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + len;
		xdp.rxq = &tfile->xdp_rxq;
		orig_data = xdp.data;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_REDIRECT:
			get_page(alloc_frag->page);
			alloc_frag->offset += buflen;
			err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
			xdp_do_flush_map();
			if (err)
				goto err_redirect;
			rcu_read_unlock();
			local_bh_enable();
			return NULL;
		case XDP_TX:
			get_page(alloc_frag->page);
			alloc_frag->offset += buflen;
			if (tun_xdp_tx(tun->dev, &xdp) < 0)
				goto err_redirect;
			rcu_read_unlock();
			local_bh_enable();
			return NULL;
		case XDP_PASS:
			delta = orig_data - xdp.data;
			len = xdp.data_end - xdp.data;
			break;
		default:
			bpf_warn_invalid_xdp_action(act);
			/* fall through */
		case XDP_ABORTED:
			trace_xdp_exception(tun->dev, xdp_prog, act);
			/* fall through */
		case XDP_DROP:
			goto err_xdp;
		}
	}

	skb = build_skb(buf, buflen);
	if (!skb) {
		rcu_read_unlock();
		local_bh_enable();
		return ERR_PTR(-ENOMEM);
	}

	skb_reserve(skb, pad - delta);
	skb_put(skb, len);
	get_page(alloc_frag->page);
	alloc_frag->offset += buflen;

	rcu_read_unlock();
	local_bh_enable();

	return skb;

err_redirect:
	put_page(alloc_frag->page);
err_xdp:
	rcu_read_unlock();
	local_bh_enable();
	this_cpu_inc(tun->pcpu_stats->rx_dropped);
	return NULL;
}
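
/* tun_get_user() below implements a write on the character device: it
 * parses the optional tun_pi and virtio_net headers, picks one of the
 * skb construction paths (build_skb fast path, napi frags, zerocopy or
 * plain copy), validates the vnet header, and finally injects the
 * result into the stack and updates the flow table.
 */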
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, struct iov_iter *from,
			    int noblock, bool more)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t total_len = iov_iter_count(from);
	size_t len = total_len, align = tun->align, linear;
	struct virtio_net_hdr gso = { 0 };
	struct tun_pcpu_stats *stats;
	int good_linear;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash = 0;
	int skb_xdp = 1;
	bool frags = tun_napi_frags_enabled(tfile);

	if (!(tun->dev->flags & IFF_UP))
		return -EIO;

	if (!(tun->flags & IFF_NO_PI)) {
		if (len < sizeof(pi))
			return -EINVAL;
		len -= sizeof(pi);

		if (!copy_from_iter_full(&pi, sizeof(pi), from))
			return -EFAULT;
	}

	if (tun->flags & IFF_VNET_HDR) {
		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

		if (len < vnet_hdr_sz)
			return -EINVAL;
		len -= vnet_hdr_sz;

		if (!copy_from_iter_full(&gso, sizeof(gso), from))
			return -EFAULT;

		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);

		if (tun16_to_cpu(tun, gso.hdr_len) > len)
			return -EINVAL;
		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
	}

	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
			return -EINVAL;
	}

	good_linear = SKB_MAX_HEAD(align);

	if (msg_control) {
		struct iov_iter i = *from;

		/* There are 256 bytes to be copied in skb, so there is
		 * enough room for skb expand head in case it is used.
		 * The rest of the buffer is mapped from userspace.
		 */
		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
		/* For packets that are not easy to process here (e.g.
		 * gso or jumbo packets), we do XDP with the generic
		 * routine after the skb has been created.
		 */
		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
		if (IS_ERR(skb)) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			return PTR_ERR(skb);
		}
		if (!skb)
			return total_len;
	} else {
		if (!zerocopy) {
			copylen = len;
			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
				linear = good_linear;
			else
				linear = tun16_to_cpu(tun, gso.hdr_len);
		}

		if (frags) {
			mutex_lock(&tfile->napi_mutex);
			skb = tun_napi_alloc_frags(tfile, copylen, from);
			/* tun_napi_alloc_frags() enforces a layout for the skb.
			 * If zerocopy is enabled, then this layout will be
			 * overwritten by zerocopy_sg_from_iter().
			 */
			zerocopy = false;
		} else {
			skb = tun_alloc_skb(tfile, align, copylen, linear,
					    noblock);
		}

		if (IS_ERR(skb)) {
			if (PTR_ERR(skb) != -EAGAIN)
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
			if (frags)
				mutex_unlock(&tfile->napi_mutex);
			return PTR_ERR(skb);
		}

		if (zerocopy)
			err = zerocopy_sg_from_iter(skb, from);
		else
			err = skb_copy_datagram_from_iter(skb, 0, from, len);

		if (err) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			kfree_skb(skb);
			if (frags) {
				tfile->napi.skb = NULL;
				mutex_unlock(&tfile->napi_mutex);
			}

			return -EFAULT;
		}
	}

	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
		kfree_skb(skb);
		if (frags) {
			tfile->napi.skb = NULL;
			mutex_unlock(&tfile->napi_mutex);
		}

		return -EINVAL;
	}

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		if (tun->flags & IFF_NO_PI) {
			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;

			switch (ip_version) {
			case 4:
				pi.proto = htons(ETH_P_IP);
				break;
			case 6:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
				kfree_skb(skb);
				return -EINVAL;
			}
		}

		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case IFF_TAP:
		if (!frags)
			skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;

		uarg->callback(uarg, false);
	}

	skb_reset_network_header(skb);
	skb_probe_transport_header(skb, 0);

	if (skb_xdp) {
		struct bpf_prog *xdp_prog;
		int ret;

		local_bh_disable();
		rcu_read_lock();
		xdp_prog = rcu_dereference(tun->xdp_prog);
		if (xdp_prog) {
			ret = do_xdp_generic(xdp_prog, skb);
			if (ret != XDP_PASS) {
				rcu_read_unlock();
				local_bh_enable();
				return total_len;
			}
		}
		rcu_read_unlock();
		local_bh_enable();
	}

	/* Compute the costly rx hash only if needed for flow updates.
	 * We may get a very small possibility of OOO during switching, not
	 * worth optimizing.
	 */
	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
	    !tfile->detached)
		rxhash = __skb_get_hash_symmetric(skb);

	if (frags) {
		/* Exercise flow dissector code path. */
		u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));

		if (unlikely(headlen > skb_headlen(skb))) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			napi_free_frags(&tfile->napi);
			mutex_unlock(&tfile->napi_mutex);
			WARN_ON(1);
			return -ENOMEM;
		}

		local_bh_disable();
		napi_gro_frags(&tfile->napi);
		local_bh_enable();
		mutex_unlock(&tfile->napi_mutex);
	} else if (tfile->napi_enabled) {
		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
		int queue_len;

		spin_lock_bh(&queue->lock);
		__skb_queue_tail(queue, skb);
		queue_len = skb_queue_len(queue);
		spin_unlock(&queue->lock);

		if (!more || queue_len > NAPI_POLL_WEIGHT)
			napi_schedule(&tfile->napi);

		local_bh_enable();
	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
		tun_rx_batched(tun, tfile, skb, more);
	} else {
		netif_rx_ni(skb);
	}

	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(stats);

	if (rxhash)
		tun_flow_update(tun, rxhash, tfile);

	return total_len;
}

static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	ssize_t result;

	if (!tun)
		return -EBADFD;

	result = tun_get_user(tun, tfile, NULL, from,
			      file->f_flags & O_NONBLOCK, false);

	tun_put(tun);
	return result;
}
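
/* Reads from the character device come in two flavours depending on
 * what was queued in the tx_ring: tun_put_user_xdp() below copies out
 * a tagged XDP frame, while tun_put_user() handles ordinary sk_buffs,
 * including the optional tun_pi, vnet header and re-inserted VLAN tag.
 */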
static ssize_t tun_put_user_xdp(struct tun_struct *tun,
				struct tun_file *tfile,
				struct xdp_frame *xdp_frame,
				struct iov_iter *iter)
{
	int vnet_hdr_sz = 0;
	size_t size = xdp_frame->len;
	struct tun_pcpu_stats *stats;
	size_t ret;

	if (tun->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr gso = { 0 };

		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
			return -EINVAL;
		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
			     sizeof(gso)))
			return -EFAULT;
		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;

	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += ret;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(tun->pcpu_stats);

	return ret;
}

/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
			    struct tun_file *tfile,
			    struct sk_buff *skb,
			    struct iov_iter *iter)
{
	struct tun_pi pi = { 0, skb->protocol };
	struct tun_pcpu_stats *stats;
	ssize_t total;
	int vlan_offset = 0;
	int vlan_hlen = 0;
	int vnet_hdr_sz = 0;

	if (skb_vlan_tag_present(skb))
		vlan_hlen = VLAN_HLEN;

	if (tun->flags & IFF_VNET_HDR)
		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

	total = skb->len + vlan_hlen + vnet_hdr_sz;

	if (!(tun->flags & IFF_NO_PI)) {
		if (iov_iter_count(iter) < sizeof(pi))
			return -EINVAL;

		total += sizeof(pi);
		if (iov_iter_count(iter) < total) {
			/* Packet will be stripped */
			pi.flags |= TUN_PKT_STRIP;
		}

		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
			return -EFAULT;
	}

	if (vnet_hdr_sz) {
		struct virtio_net_hdr gso;

		if (iov_iter_count(iter) < vnet_hdr_sz)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &gso,
					    tun_is_little_endian(tun), true,
					    vlan_hlen)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			pr_err("unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
			       tun16_to_cpu(tun, gso.hdr_len));
			print_hex_dump(KERN_ERR, "tun: ",
				       DUMP_PREFIX_NONE,
				       16, 1, skb->head,
				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	if (vlan_hlen) {
		int ret;
		struct veth veth;

		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);

done:
	/* caller is in process context */
	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += skb->len + vlan_hlen;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(tun->pcpu_stats);

	return total;
}
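
/* tun_ring_recv() blocks the reader until a packet shows up: it first
 * tries a lockless ptr_ring_consume(), then sleeps TASK_INTERRUPTIBLE
 * on the socket wait queue, giving up on a signal or when the socket
 * has been shut down.
 */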

static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
{
	DECLARE_WAITQUEUE(wait, current);
	void *ptr = NULL;
	int error = 0;

	ptr = ptr_ring_consume(&tfile->tx_ring);
	if (ptr)
		goto out;
	if (noblock) {
		error = -EAGAIN;
		goto out;
	}

	add_wait_queue(&tfile->wq.wait, &wait);
	current->state = TASK_INTERRUPTIBLE;

	while (1) {
		ptr = ptr_ring_consume(&tfile->tx_ring);
		if (ptr)
			break;
		if (signal_pending(current)) {
			error = -ERESTARTSYS;
			break;
		}
		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
			error = -EFAULT;
			break;
		}

		schedule();
	}

	current->state = TASK_RUNNING;
	remove_wait_queue(&tfile->wq.wait, &wait);

out:
	*err = error;
	return ptr;
}

static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
			   struct iov_iter *to,
			   int noblock, void *ptr)
{
	ssize_t ret;
	int err;

	tun_debug(KERN_INFO, tun, "tun_do_read\n");

	if (!iov_iter_count(to)) {
		tun_ptr_free(ptr);
		return 0;
	}

	if (!ptr) {
		/* Read frames from ring */
		ptr = tun_ring_recv(tfile, noblock, &err);
		if (!ptr)
			return err;
	}

	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
		xdp_return_frame(xdpf);
	} else {
		struct sk_buff *skb = ptr;

		ret = tun_put_user(tun, tfile, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}

	return ret;
}

static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	ssize_t len = iov_iter_count(to), ret;

	if (!tun)
		return -EBADFD;
	ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	tun_put(tun);
	return ret;
}
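
/*
 * Illustrative userspace sketch (an assumption): with O_NONBLOCK set on
 * the fd, tun_ring_recv() above fails with -EAGAIN instead of sleeping,
 * so the usual poll()/read() loop applies:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	unsigned char buf[2048];
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		ssize_t n = read(fd, buf, sizeof(buf));
 *
 *		if (n < 0 && errno == EAGAIN)
 *			continue;
 *		// process n bytes
 *	}
 */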

static void tun_prog_free(struct rcu_head *rcu)
{
	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);

	bpf_prog_destroy(prog->prog);
	kfree(prog);
}

static int __tun_set_ebpf(struct tun_struct *tun,
			  struct tun_prog __rcu **prog_p,
			  struct bpf_prog *prog)
{
	struct tun_prog *old, *new = NULL;

	if (prog) {
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->prog = prog;
	}

	spin_lock_bh(&tun->lock);
	old = rcu_dereference_protected(*prog_p,
					lockdep_is_held(&tun->lock));
	rcu_assign_pointer(*prog_p, new);
	spin_unlock_bh(&tun->lock);

	if (old)
		call_rcu(&old->rcu, tun_prog_free);

	return 0;
}

static void tun_free_netdev(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	BUG_ON(!(list_empty(&tun->disabled)));
	free_percpu(tun->pcpu_stats);
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
}

static void tun_setup(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->owner = INVALID_UID;
	tun->group = INVALID_GID;
	tun_default_link_ksettings(dev, &tun->link_ksettings);

	dev->ethtool_ops = &tun_ethtool_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = tun_free_netdev;
	/* We prefer our own queue length */
	dev->tx_queue_len = TUN_READQ_SIZE;
}

/* Trivial set of netlink ops to allow deleting tun or tap
 * device with netlink.
 */
static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	return -EINVAL;
}

static size_t tun_get_size(const struct net_device *dev)
{
	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));

	return nla_total_size(sizeof(uid_t)) +		/* OWNER */
	       nla_total_size(sizeof(gid_t)) +		/* GROUP */
	       nla_total_size(sizeof(u8)) +		/* TYPE */
	       nla_total_size(sizeof(u8)) +		/* PI */
	       nla_total_size(sizeof(u8)) +		/* VNET_HDR */
	       nla_total_size(sizeof(u8)) +		/* PERSIST */
	       nla_total_size(sizeof(u8)) +		/* MULTI_QUEUE */
	       nla_total_size(sizeof(u32)) +		/* NUM_QUEUES */
	       nla_total_size(sizeof(u32)) +		/* NUM_DISABLED_QUEUES */
	       0;
}

static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
		goto nla_put_failure;
	if (uid_valid(tun->owner) &&
	    nla_put_u32(skb, IFLA_TUN_OWNER,
			from_kuid_munged(current_user_ns(), tun->owner)))
		goto nla_put_failure;
	if (gid_valid(tun->group) &&
	    nla_put_u32(skb, IFLA_TUN_GROUP,
			from_kgid_munged(current_user_ns(), tun->group)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
		       !!(tun->flags & IFF_MULTI_QUEUE)))
		goto nla_put_failure;
	if (tun->flags & IFF_MULTI_QUEUE) {
		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
				tun->numdisabled))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops tun_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct tun_struct),
	.setup		= tun_setup,
	.validate	= tun_validate,
	.get_size	= tun_get_size,
	.fill_info	= tun_fill_info,
};
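
/*
 * With tun_link_ops registered, an existing device can be removed with
 * plain rtnetlink, e.g. "ip link del tap0" (illustrative); creating one
 * via netlink remains impossible because tun_validate() rejects every
 * request.
 */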

static void tun_sock_write_space(struct sock *sk)
{
	struct tun_file *tfile;
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk))
		return;

	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);

	tfile = container_of(sk, struct tun_file, sk);
	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}

static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret;
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);

	if (!tun)
		return -EBADFD;

	ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
			   m->msg_flags & MSG_DONTWAIT,
			   m->msg_flags & MSG_MORE);
	tun_put(tun);
	return ret;
}

static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);
	void *ptr = m->msg_control;
	int ret;

	if (!tun) {
		ret = -EBADFD;
		goto out_free;
	}

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
		ret = -EINVAL;
		goto out_put_tun;
	}
	if (flags & MSG_ERRQUEUE) {
		ret = sock_recv_errqueue(sock->sk, m, total_len,
					 SOL_PACKET, TUN_TX_TIMESTAMP);
		goto out;
	}
	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
	if (ret > (ssize_t)total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
out:
	tun_put(tun);
	return ret;

out_put_tun:
	tun_put(tun);
out_free:
	tun_ptr_free(ptr);
	return ret;
}

static int tun_ptr_peek_len(void *ptr)
{
	if (likely(ptr)) {
		if (tun_is_xdp_frame(ptr)) {
			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

			return xdpf->len;
		}
		return __skb_array_len_with_tag(ptr);
	} else {
		return 0;
	}
}

static int tun_peek_len(struct socket *sock)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun;
	int ret = 0;

	tun = tun_get(tfile);
	if (!tun)
		return 0;

	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
	tun_put(tun);

	return ret;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
	.peek_len = tun_peek_len,
	.sendmsg = tun_sendmsg,
	.recvmsg = tun_recvmsg,
};

static struct proto tun_proto = {
	.name		= "tun",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tun_file),
};

static int tun_flags(struct tun_struct *tun)
{
	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
}

static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%x\n", tun_flags(tun));
}

static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));

	return uid_valid(tun->owner) ?
		sprintf(buf, "%u\n",
			from_kuid_munged(current_user_ns(), tun->owner)) :
		sprintf(buf, "-1\n");
}

static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));

	return gid_valid(tun->group) ?
		sprintf(buf, "%u\n",
			from_kgid_munged(current_user_ns(), tun->group)) :
		sprintf(buf, "-1\n");
}

static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
static DEVICE_ATTR(group, 0444, tun_show_group, NULL);

static struct attribute *tun_dev_attrs[] = {
	&dev_attr_tun_flags.attr,
	&dev_attr_owner.attr,
	&dev_attr_group.attr,
	NULL
};

static const struct attribute_group tun_attr_group = {
	.attrs = tun_dev_attrs
};
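
/*
 * These attributes appear under the device's sysfs node, e.g.
 * (illustrative; 0x1002 would be IFF_TAP | IFF_NO_PI):
 *
 *	$ cat /sys/class/net/tap0/tun_flags
 *	0x1002
 *
 * where the value is the flag word returned by tun_flags() above.
 */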

static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun;
	struct tun_file *tfile = file->private_data;
	struct net_device *dev;
	int err;

	if (tfile->detached)
		return -EINVAL;

	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!(ifr->ifr_flags & IFF_NAPI) ||
		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
			return -EINVAL;
	}

	dev = __dev_get_by_name(net, ifr->ifr_name);
	if (dev) {
		if (ifr->ifr_flags & IFF_TUN_EXCL)
			return -EBUSY;
		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
			tun = netdev_priv(dev);
		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
			tun = netdev_priv(dev);
		else
			return -EINVAL;

		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
		    !!(tun->flags & IFF_MULTI_QUEUE))
			return -EINVAL;

		if (tun_not_capable(tun))
			return -EPERM;
		err = security_tun_dev_open(tun->security);
		if (err < 0)
			return err;

		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
				 ifr->ifr_flags & IFF_NAPI,
				 ifr->ifr_flags & IFF_NAPI_FRAGS);
		if (err < 0)
			return err;

		if (tun->flags & IFF_MULTI_QUEUE &&
		    (tun->numqueues + tun->numdisabled > 1)) {
			/* One or more queues have already been attached, no need
			 * to initialize the device again.
			 */
			netdev_state_change(dev);
			return 0;
		}

		tun->flags = (tun->flags & ~TUN_FEATURES) |
			     (ifr->ifr_flags & TUN_FEATURES);

		netdev_state_change(dev);
	} else {
		char *name;
		unsigned long flags = 0;
		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
			     MAX_TAP_QUEUES : 1;

		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_create();
		if (err < 0)
			return err;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= IFF_TUN;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= IFF_TAP;
			name = "tap%d";
		} else
			return -EINVAL;

		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
				       NET_NAME_UNKNOWN, tun_setup, queues,
				       queues);

		if (!dev)
			return -ENOMEM;
		err = dev_get_valid_name(net, dev, name);
		if (err < 0)
			goto err_free_dev;

		dev_net_set(dev, net);
		dev->rtnl_link_ops = &tun_link_ops;
		dev->ifindex = tfile->ifindex;
		dev->sysfs_groups[0] = &tun_attr_group;

		tun = netdev_priv(dev);
		tun->dev = dev;
		tun->flags = flags;
		tun->txflt.count = 0;
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

		tun->align = NET_SKB_PAD;
		tun->filter_attached = false;
		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
		tun->rx_batched = 0;
		RCU_INIT_POINTER(tun->steering_prog, NULL);

		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
		if (!tun->pcpu_stats) {
			err = -ENOMEM;
			goto err_free_dev;
		}

		spin_lock_init(&tun->lock);

		err = security_tun_dev_alloc_security(&tun->security);
		if (err < 0)
			goto err_free_stat;

		tun_net_init(dev);
		tun_flow_init(tun);

		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_STAG_TX;
		dev->features = dev->hw_features | NETIF_F_LLTX;
		dev->vlan_features = dev->features &
				     ~(NETIF_F_HW_VLAN_CTAG_TX |
				       NETIF_F_HW_VLAN_STAG_TX);

		tun->flags = (tun->flags & ~TUN_FEATURES) |
			     (ifr->ifr_flags & TUN_FEATURES);

		INIT_LIST_HEAD(&tun->disabled);
		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
				 ifr->ifr_flags & IFF_NAPI_FRAGS);
		if (err < 0)
			goto err_free_flow;

		err = register_netdevice(tun->dev);
		if (err < 0)
			goto err_detach;
	}

	netif_carrier_on(tun->dev);

	tun_debug(KERN_INFO, tun, "tun_set_iff\n");

	/* Make sure persistent devices do not get stuck in
	 * xoff state.
	 */
	if (netif_running(tun->dev))
		netif_tx_wake_all_queues(tun->dev);

	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;

err_detach:
	tun_detach_all(dev);
	/* register_netdevice() already called tun_free_netdev() */
	goto err_free_dev;

err_free_flow:
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
err_free_stat:
	free_percpu(tun->pcpu_stats);
err_free_dev:
	free_netdev(dev);
	return err;
}
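
/*
 * Illustrative userspace sketch (an assumption): with IFF_MULTI_QUEUE,
 * every fd that issues TUNSETIFF with the same name attaches one more
 * queue to the device that tun_set_iff() created:
 *
 *	struct ifreq ifr;
 *	int i, fds[4];
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
 *	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ);
 *	for (i = 0; i < 4; i++) {
 *		fds[i] = open("/dev/net/tun", O_RDWR);
 *		ioctl(fds[i], TUNSETIFF, &ifr);
 *	}
 */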

static void tun_get_iff(struct net *net, struct tun_struct *tun,
			struct ifreq *ifr)
{
	tun_debug(KERN_INFO, tun, "tun_get_iff\n");

	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = tun_flags(tun);
}
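
/*
 * Illustrative userspace sketch (an assumption): TUNGETIFF hands the
 * name and flag word filled in by tun_get_iff() back to the caller:
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ioctl(fd, TUNGETIFF, &ifr);
 *	printf("%s flags 0x%x\n", ifr.ifr_name, ifr.ifr_flags);
 */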

/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required.
 */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	if (arg & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}

		arg &= ~TUN_F_UFO;
	}

	/* This gives the user a way to test for new features in the future
	 * by trying to set them. */
	if (arg)
		return -EINVAL;

	tun->set_features = features;
	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
	tun->dev->wanted_features |= features;
	netdev_update_features(tun->dev);

	return 0;
}

static void tun_detach_filter(struct tun_struct *tun, int n)
{
	int i;
	struct tun_file *tfile;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		sk_detach_filter(tfile->socket.sk);
		release_sock(tfile->socket.sk);
	}

	tun->filter_attached = false;
}

static int tun_attach_filter(struct tun_struct *tun)
{
	int i, ret = 0;
	struct tun_file *tfile;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (ret) {
			tun_detach_filter(tun, i);
			return ret;
		}
	}

	tun->filter_attached = true;
	return ret;
}

static void tun_set_sndbuf(struct tun_struct *tun)
{
	struct tun_file *tfile;
	int i;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
	}
}

static int tun_set_queue(struct file *file, struct ifreq *ifr)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	int ret = 0;

	rtnl_lock();

	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
		tun = tfile->detached;
		if (!tun) {
			ret = -EINVAL;
			goto unlock;
		}
		ret = security_tun_dev_attach_queue(tun->security);
		if (ret < 0)
			goto unlock;
		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
				 tun->flags & IFF_NAPI_FRAGS);
	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
		tun = rtnl_dereference(tfile->tun);
		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
			ret = -EINVAL;
		else
			__tun_detach(tfile, false);
	} else
		ret = -EINVAL;

	if (ret >= 0)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	return ret;
}

static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
			void __user *data)
{
	struct bpf_prog *prog;
	int fd;

	if (copy_from_user(&fd, data, sizeof(fd)))
		return -EFAULT;

	if (fd == -1) {
		prog = NULL;
	} else {
		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
		if (IS_ERR(prog))
			return PTR_ERR(prog);
	}

	return __tun_set_ebpf(tun, prog_p, prog);
}
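
/*
 * Illustrative userspace sketch (an assumption): TUNSETSTEERINGEBPF and
 * TUNSETFILTEREBPF each copy a single int -- a BPF program fd -- from
 * the caller, and fd == -1 detaches the current program:
 *
 *	int prog_fd = ...;	(fd of a BPF_PROG_TYPE_SOCKET_FILTER program)
 *
 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);
 *	prog_fd = -1;
 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);	(remove it again)
 */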

static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg, int ifreq_len)
{
	struct tun_file *tfile = file->private_data;
	struct net *net = sock_net(&tfile->sk);
	struct tun_struct *tun;
	void __user *argp = (void __user *)arg;
	struct ifreq ifr;
	kuid_t owner;
	kgid_t group;
	int sndbuf;
	int vnet_hdr_sz;
	unsigned int ifindex;
	int le;
	int ret;
	bool do_notify = false;

	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
		if (copy_from_user(&ifr, argp, ifreq_len))
			return -EFAULT;
	} else {
		memset(&ifr, 0, sizeof(ifr));
	}
	if (cmd == TUNGETFEATURES) {
		/* Currently this just means: "what IFF flags are valid?".
		 * This is needed because we never checked for invalid flags on
		 * TUNSETIFF.
		 */
		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
				(unsigned int __user *)argp);
	} else if (cmd == TUNSETQUEUE) {
		return tun_set_queue(file, &ifr);
	} else if (cmd == SIOCGSKNS) {
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		return open_related_ns(&net->ns, get_net_ns);
	}

	ret = 0;
	rtnl_lock();

	tun = tun_get(tfile);
	if (cmd == TUNSETIFF) {
		ret = -EEXIST;
		if (tun)
			goto unlock;

		ifr.ifr_name[IFNAMSIZ-1] = '\0';

		ret = tun_set_iff(net, file, &ifr);

		if (ret)
			goto unlock;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		goto unlock;
	}
	if (cmd == TUNSETIFINDEX) {
		ret = -EPERM;
		if (tun)
			goto unlock;

		ret = -EFAULT;
		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
			goto unlock;

		ret = 0;
		tfile->ifindex = ifindex;
		goto unlock;
	}

	ret = -EBADFD;
	if (!tun)
		goto unlock;

	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);

	ret = 0;
	switch (cmd) {
	case TUNGETIFF:
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);

		if (tfile->detached)
			ifr.ifr_flags |= IFF_DETACH_QUEUE;
		if (!tfile->socket.sk->sk_filter)
			ifr.ifr_flags |= IFF_NOFILTER;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case TUNSETNOCSUM:
		/* Disable/Enable checksum */

		/* [unimplemented] */
		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
			  arg ? "disabled" : "enabled");
		break;

	case TUNSETPERSIST:
		/* Disable/Enable persist mode. Keep an extra reference to the
		 * module to prevent the module from being unloaded.
		 */
		if (arg && !(tun->flags & IFF_PERSIST)) {
			tun->flags |= IFF_PERSIST;
			__module_get(THIS_MODULE);
			do_notify = true;
		}
		if (!arg && (tun->flags & IFF_PERSIST)) {
			tun->flags &= ~IFF_PERSIST;
			module_put(THIS_MODULE);
			do_notify = true;
		}

		tun_debug(KERN_INFO, tun, "persist %s\n",
			  arg ? "enabled" : "disabled");
		break;
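
	/*
	 * Illustrative userspace sketch (an assumption): the classic recipe
	 * for handing a persistent device to an unprivileged user combines
	 * TUNSETPERSIST with the owner/group ioctls below:
	 *
	 *	ioctl(fd, TUNSETPERSIST, 1);
	 *	ioctl(fd, TUNSETOWNER, (unsigned long)uid);
	 *	ioctl(fd, TUNSETGROUP, (unsigned long)gid);
	 */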
"enabled" : "disabled"); 2944 break; 2945 2946 case TUNSETOWNER: 2947 /* Set owner of the device */ 2948 owner = make_kuid(current_user_ns(), arg); 2949 if (!uid_valid(owner)) { 2950 ret = -EINVAL; 2951 break; 2952 } 2953 tun->owner = owner; 2954 do_notify = true; 2955 tun_debug(KERN_INFO, tun, "owner set to %u\n", 2956 from_kuid(&init_user_ns, tun->owner)); 2957 break; 2958 2959 case TUNSETGROUP: 2960 /* Set group of the device */ 2961 group = make_kgid(current_user_ns(), arg); 2962 if (!gid_valid(group)) { 2963 ret = -EINVAL; 2964 break; 2965 } 2966 tun->group = group; 2967 do_notify = true; 2968 tun_debug(KERN_INFO, tun, "group set to %u\n", 2969 from_kgid(&init_user_ns, tun->group)); 2970 break; 2971 2972 case TUNSETLINK: 2973 /* Only allow setting the type when the interface is down */ 2974 if (tun->dev->flags & IFF_UP) { 2975 tun_debug(KERN_INFO, tun, 2976 "Linktype set failed because interface is up\n"); 2977 ret = -EBUSY; 2978 } else { 2979 tun->dev->type = (int) arg; 2980 tun_debug(KERN_INFO, tun, "linktype set to %d\n", 2981 tun->dev->type); 2982 ret = 0; 2983 } 2984 break; 2985 2986 #ifdef TUN_DEBUG 2987 case TUNSETDEBUG: 2988 tun->debug = arg; 2989 break; 2990 #endif 2991 case TUNSETOFFLOAD: 2992 ret = set_offload(tun, arg); 2993 break; 2994 2995 case TUNSETTXFILTER: 2996 /* Can be set only for TAPs */ 2997 ret = -EINVAL; 2998 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 2999 break; 3000 ret = update_filter(&tun->txflt, (void __user *)arg); 3001 break; 3002 3003 case SIOCGIFHWADDR: 3004 /* Get hw address */ 3005 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); 3006 ifr.ifr_hwaddr.sa_family = tun->dev->type; 3007 if (copy_to_user(argp, &ifr, ifreq_len)) 3008 ret = -EFAULT; 3009 break; 3010 3011 case SIOCSIFHWADDR: 3012 /* Set hw address */ 3013 tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n", 3014 ifr.ifr_hwaddr.sa_data); 3015 3016 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 3017 break; 3018 3019 case TUNGETSNDBUF: 3020 sndbuf = tfile->socket.sk->sk_sndbuf; 3021 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) 3022 ret = -EFAULT; 3023 break; 3024 3025 case TUNSETSNDBUF: 3026 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { 3027 ret = -EFAULT; 3028 break; 3029 } 3030 if (sndbuf <= 0) { 3031 ret = -EINVAL; 3032 break; 3033 } 3034 3035 tun->sndbuf = sndbuf; 3036 tun_set_sndbuf(tun); 3037 break; 3038 3039 case TUNGETVNETHDRSZ: 3040 vnet_hdr_sz = tun->vnet_hdr_sz; 3041 if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) 3042 ret = -EFAULT; 3043 break; 3044 3045 case TUNSETVNETHDRSZ: 3046 if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { 3047 ret = -EFAULT; 3048 break; 3049 } 3050 if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { 3051 ret = -EINVAL; 3052 break; 3053 } 3054 3055 tun->vnet_hdr_sz = vnet_hdr_sz; 3056 break; 3057 3058 case TUNGETVNETLE: 3059 le = !!(tun->flags & TUN_VNET_LE); 3060 if (put_user(le, (int __user *)argp)) 3061 ret = -EFAULT; 3062 break; 3063 3064 case TUNSETVNETLE: 3065 if (get_user(le, (int __user *)argp)) { 3066 ret = -EFAULT; 3067 break; 3068 } 3069 if (le) 3070 tun->flags |= TUN_VNET_LE; 3071 else 3072 tun->flags &= ~TUN_VNET_LE; 3073 break; 3074 3075 case TUNGETVNETBE: 3076 ret = tun_get_vnet_be(tun, argp); 3077 break; 3078 3079 case TUNSETVNETBE: 3080 ret = tun_set_vnet_be(tun, argp); 3081 break; 3082 3083 case TUNATTACHFILTER: 3084 /* Can be set only for TAPs */ 3085 ret = -EINVAL; 3086 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 3087 break; 3088 ret = -EFAULT; 3089 if 

	case TUNATTACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
			break;

		ret = tun_attach_filter(tun);
		break;

	case TUNDETACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = 0;
		tun_detach_filter(tun, tun->numqueues);
		break;

	case TUNGETFILTER:
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
			break;
		ret = 0;
		break;

	case TUNSETSTEERINGEBPF:
		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
		break;

	case TUNSETFILTEREBPF:
		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	if (do_notify)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	if (tun)
		tun_put(tun);
	return ret;
}

static long tun_chr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
}

#ifdef CONFIG_COMPAT
static long tun_chr_compat_ioctl(struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case TUNSETIFF:
	case TUNGETIFF:
	case TUNSETTXFILTER:
	case TUNGETSNDBUF:
	case TUNSETSNDBUF:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
		arg = (unsigned long)compat_ptr(arg);
		break;
	default:
		arg = (compat_ulong_t)arg;
		break;
	}

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible though, we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */

static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_file *tfile = file->private_data;
	int ret;

	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
		goto out;

	if (on) {
		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
		tfile->flags |= TUN_FASYNC;
	} else
		tfile->flags &= ~TUN_FASYNC;
	ret = 0;
out:
	return ret;
}

static int tun_chr_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tun_file *tfile;

	DBG1(KERN_INFO, "tunX: tun_chr_open\n");

	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto, 0);
	if (!tfile)
		return -ENOMEM;
	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
		sk_free(&tfile->sk);
		return -ENOMEM;
	}

	mutex_init(&tfile->napi_mutex);
	RCU_INIT_POINTER(tfile->tun, NULL);
	tfile->flags = 0;
	tfile->ifindex = 0;

	init_waitqueue_head(&tfile->wq.wait);
	RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data(&tfile->socket, &tfile->sk);

	tfile->sk.sk_write_space = tun_sock_write_space;
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	INIT_LIST_HEAD(&tfile->next);

	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);

	return 0;
}

static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;

	tun_detach(tfile, true);

	return 0;
}

#ifdef CONFIG_PROC_FS
static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));

	rtnl_lock();
	tun = tun_get(tfile);
	if (tun)
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
	rtnl_unlock();

	if (tun)
		tun_put(tun);

	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
}
#endif

static const struct file_operations tun_fops = {
	.owner	= THIS_MODULE,
	.llseek = no_llseek,
	.read_iter  = tun_chr_read_iter,
	.write_iter = tun_chr_write_iter,
	.poll	= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open	= tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = tun_chr_show_fdinfo,
#endif
};

static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};

/* ethtool interface */

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	cmd->base.speed		= SPEED_10;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.phy_address	= 0;
	cmd->base.autoneg	= AUTONEG_DISABLE;
}

static int tun_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
	return 0;
}

static int tun_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
	return 0;
}

static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
		break;
	case IFF_TAP:
		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
		break;
	}
}

static u32 tun_get_msglevel(struct net_device *dev)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);

	return tun->debug;
#else
	return -EOPNOTSUPP;
#endif
}

static void tun_set_msglevel(struct net_device *dev, u32 value)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);

	tun->debug = value;
#endif
}

static int tun_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	ec->rx_max_coalesced_frames = tun->rx_batched;

	return 0;
}

static int tun_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
		tun->rx_batched = NAPI_POLL_WEIGHT;
	else
		tun->rx_batched = ec->rx_max_coalesced_frames;

	return 0;
}

static const struct ethtool_ops tun_ethtool_ops = {
	.get_drvinfo	= tun_get_drvinfo,
	.get_msglevel	= tun_get_msglevel,
	.set_msglevel	= tun_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_coalesce   = tun_get_coalesce,
	.set_coalesce   = tun_set_coalesce,
	.get_link_ksettings = tun_get_link_ksettings,
	.set_link_ksettings = tun_set_link_ksettings,
};
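
/*
 * The coalesce hooks above map ethtool's "rx-frames" onto
 * tun->rx_batched, so (an illustrative invocation)
 * "ethtool -C tap0 rx-frames 32" enables batched receive, with batches
 * capped at NAPI_POLL_WEIGHT.
 */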

static int tun_queue_resize(struct tun_struct *tun)
{
	struct net_device *dev = tun->dev;
	struct tun_file *tfile;
	struct ptr_ring **rings;
	int n = tun->numqueues + tun->numdisabled;
	int ret, i;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		rings[i] = &tfile->tx_ring;
	}
	list_for_each_entry(tfile, &tun->disabled, next)
		rings[i++] = &tfile->tx_ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       tun_ptr_free);

	kfree(rings);
	return ret;
}

static int tun_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tun_struct *tun = netdev_priv(dev);

	if (dev->rtnl_link_ops != &tun_link_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		if (tun_queue_resize(tun))
			return NOTIFY_BAD;
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block tun_notifier_block __read_mostly = {
	.notifier_call	= tun_device_event,
};

static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}

	ret = register_netdevice_notifier(&tun_notifier_block);
	if (ret) {
		pr_err("Can't register netdevice notifier\n");
		goto err_notifier;
	}

	return 0;

err_notifier:
	misc_deregister(&tun_miscdev);
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}

static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
	unregister_netdevice_notifier(&tun_notifier_block);
}

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use.
 */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);

struct ptr_ring *tun_get_tx_ring(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->tx_ring;
}
EXPORT_SYMBOL_GPL(tun_get_tx_ring);

module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");
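
/*
 * Illustrative in-kernel sketch (an assumption): how a consumer such as
 * vhost-net might use the helpers exported above; the caller must keep
 * the file referenced while the socket is in use:
 *
 *	struct socket *sock = tun_get_socket(tun_file);
 *
 *	if (!IS_ERR(sock))
 *		err = sock_sendmsg(sock, &msg);
 */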