// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/ip_tunnels.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>
#include <linux/ieee802154.h>
#include <linux/if_ltalk.h>
#include <uapi/linux/if_fddi.h>
#include <uapi/linux/if_hippi.h>
#include <uapi/linux/if_fc.h>
#include <net/ax25.h>
#include <net/rose.h>
#include <net/6lowpan.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE	0x80000000
#define TUN_VNET_BE	0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};
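
/* For orientation, a minimal userspace sketch (illustration only, not part
 * of this driver; error handling trimmed) of how a tun/tap fd is typically
 * obtained. Only uapi names from <linux/if_tun.h> are used:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	int tun_alloc(char name[IFNAMSIZ])
 *	{
 *		struct ifreq ifr;
 *		int fd = open("/dev/net/tun", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	// or IFF_TAP
 *		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
 *		if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		strcpy(name, ifr.ifr_name);	// kernel-chosen name
 *		return fd;
 *	}
 *
 * Each read() on the fd then typically yields one outbound packet and each
 * write() injects one packet, in the framing selected by the flags above.
 */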

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and
 * tap_filter) to serve as one transmit queue for the tuntap device. The
 * sock_fprog and tap_filter were kept in tun_struct since they are used
 * for filtering for the netdevice, not for a specific queue (at least I
 * didn't see the requirement for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to
 * the other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_page {
	struct page *page;
	int count;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated ____cacheline_aligned_in_smp;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved into tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when a file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int		numqueues;
	unsigned int		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			   NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
	u32			msg_enable;
	spinlock_t		lock;
	struct hlist_head	flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list	flow_gc_timer;
	unsigned long		ageing_time;
	unsigned int		numdisabled;
	struct list_head	disabled;
	void			*security;
	u32			flow_count;
	u32			rx_batched;
	atomic_long_t		rx_frame_errors;
	struct bpf_prog __rcu	*xdp_prog;
	struct tun_prog __rcu	*steering_prog;
	struct tun_prog __rcu	*filter_prog;
	struct ethtool_link_ksettings link_ksettings;
	/* init args */
	struct file *file;
	struct ifreq *ifr;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

static void tun_flow_init(struct tun_struct *tun);
static void tun_flow_uninit(struct tun_struct *tun);

static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_enable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_enable(&tfile->napi);
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}
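
/* How the vnet-header endianness resolves, in table form (an editor's
 * illustration; the combinations follow from the helpers above, and the
 * TUN_VNET_BE row only applies with CONFIG_TUN_VNET_CROSS_LE):
 *
 *	TUN_VNET_LE	TUN_VNET_BE	__virtio16 fields are
 *	set		-		little-endian (virtio 1.0 style)
 *	clear		set		big-endian (legacy cross-endian)
 *	clear		clear		native legacy endianness
 *
 * So e.g. a big-endian host serving a little-endian guest sets the LE
 * flag via TUNSETVNETLE, and tun16_to_cpu(tun, gso.hdr_len) byte-swaps
 * on every header field access.
 */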

static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		netif_info(tun, tx_queued, tun->dev,
			   "create flow: hash %u index %u\n",
			   rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
		   e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}
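
/* Because TUN_NUM_FLOW_ENTRIES is a power of two, tun_hashfn() reduces to
 * masking off the low 10 bits: e.g. rxhash 0x12345678 & 0x3ff == 0x278,
 * so that flow lands in bucket 0x278 of tun->flows[]. Collisions simply
 * chain on the per-bucket hlist and are resolved by the full-hash compare
 * in tun_flow_find().
 */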

static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		if (READ_ONCE(e->queue_index) != queue_index)
			WRITE_ONCE(e->queue_index, queue_index);
		if (e->updated != jiffies)
			e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

/* Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash. The reason that we do
 * not check rxq no. is that some cards (e.g. 82599) choose the rxq based
 * on the txq where the last packet of the flow was sent. As the userspace
 * application moves between processors, we may get a different rxq no.
 * here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
	if (e) {
		tun_flow_save_rps_rxhash(e, txq);
		txq = e->queue_index;
	} else {
		/* use multiply and shift instead of expensive divide */
		txq = ((u64)txq * numqueues) >> 32;
	}

	return txq;
}
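
/* The multiply-and-shift above maps the 32-bit hash uniformly onto
 * [0, numqueues) without a division: ((u64)txq * numqueues) >> 32 is
 * floor(txq * numqueues / 2^32). E.g. with numqueues == 4 it selects the
 * top two bits of the hash: hash 0xC0000000 -> (0xC0000000ULL * 4) >> 32
 * == 3.
 */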

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u32 numqueues;
	u16 ret = 0;

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues)
		return 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}

static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}
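
/* tx_ring thus carries two kinds of pointers: plain sk_buffs and
 * xdp_frames tagged by tun_xdp_to_ptr() (see <linux/if_tun.h>, where the
 * low pointer bit serves as the tag, hence tun_is_xdp_frame() above).
 * Conceptually:
 *
 *	void *slot = tun_xdp_to_ptr(xdpf);	// xdpf | 0x1
 *	if (tun_is_xdp_frame(slot))		// slot & 0x1
 *		xdpf = tun_ptr_to_xdp(slot);	// slot & ~0x1
 *
 * Both producers (tun_net_xmit(), tun_xdp_xmit()) and every consumer of
 * the ring must go through these helpers.
 */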

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		if (!tfile->detached)
			tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;
		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
				   NULL);

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else {
			tun_disable_queue(tun, tfile);
			tun_napi_disable(tfile);
		}

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();

	if (clean)
		sock_put(&tfile->sk);
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_napi_del(tfile);
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}
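
/* __tun_detach() keeps tfiles[] dense by moving the last queue into the
 * departing slot: with numqueues == 5, detaching queue index 2 copies
 * tfiles[4] into tfiles[2], rewrites its queue_index to 2, clears
 * tfiles[4] and drops numqueues to 4. Readers observe either the old or
 * the new layout consistently because the slots are published with
 * rcu_assign_pointer() and teardown waits in synchronize_net().
 */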

static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags,
		      bool publish_tun)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to the persistent device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (err)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index, 0);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	if (tfile->detached) {
		tun_enable_queue(tfile);
		tun_napi_enable(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	if (rtnl_dereference(tun->xdp_prog))
		sock_set_flag(&tfile->sk, SOCK_XDP);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

	/* Publish tfile->tun and tun->tfiles only after we've fully
	 * initialized tfile; otherwise we risk using half-initialized
	 * object.
	 */
	if (publish_tun)
		rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;
	tun_set_real_num_queues(tun);
out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;

	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;

	return mask[n >> 5] & (1 << (n & 31));
}
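
/* The hash filter is a 64-bin bitmap: the top 6 bits of the Ethernet CRC
 * pick a bin n in [0, 63]; mask[n >> 5] selects one of the two u32 words
 * and (1 << (n & 31)) the bit within it. Userspace loads the whole filter
 * with TUNSETTXFILTER; a hypothetical sketch of the variable-length
 * layout update_filter() below expects (header, then count addresses):
 *
 *	struct {
 *		struct tun_filter uf;
 *		unsigned char addrs[2][ETH_ALEN];
 *	} req = { .uf = { .flags = 0, .count = 2 } };
 *
 *	memcpy(req.addrs[0], mac0, ETH_ALEN);
 *	memcpy(req.addrs[1], mac1, ETH_ALEN);
 *	ioctl(fd, TUNSETTXFILTER, &req);
 */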

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks, which is
	 * perfectly safe: we disable it first, and in the worst case
	 * we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

static int tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct ifreq *ifr = tun->ifr;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	spin_lock_init(&tun->lock);

	err = security_tun_dev_alloc_security(&tun->security);
	if (err < 0) {
		free_percpu(dev->tstats);
		return err;
	}

	tun_flow_init(tun);

	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
			   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_STAG_TX;
	dev->features = dev->hw_features | NETIF_F_LLTX;
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX);

	tun->flags = (tun->flags & ~TUN_FEATURES) |
		     (ifr->ifr_flags & TUN_FEATURES);

	INIT_LIST_HEAD(&tun->disabled);
	err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
			 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
	if (err < 0) {
		tun_flow_uninit(tun);
		security_tun_dev_free_security(tun->security);
		free_percpu(dev->tstats);
		return err;
	}
	return 0;
}

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		struct tun_flow_entry *e;
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
		if (e)
			tun_flow_save_rps_rxhash(e, rxhash);
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	enum skb_drop_reason drop_reason;
	int txq = skb->queue_mapping;
	struct netdev_queue *queue;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (!tfile) {
		drop_reason = SKB_DROP_REASON_DEV_READY;
		goto drop;
	}

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for TAP devices. */
	if (!check_filter(&tun->txflt, skb)) {
		drop_reason = SKB_DROP_REASON_TAP_TXFILTER;
		goto drop;
	}

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb)) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto drop;
	}

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0) {
		drop_reason = SKB_DROP_REASON_TAP_FILTER;
		goto drop;
	}

	if (pskb_trim(skb, len)) {
		drop_reason = SKB_DROP_REASON_NOMEM;
		goto drop;
	}

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
		drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
		goto drop;
	}

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time.
	 */
	skb_orphan(skb);

	nf_reset_ct(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb)) {
		drop_reason = SKB_DROP_REASON_FULL_RING;
		goto drop;
	}

	/* NETIF_F_LLTX requires to do our own update of trans_start */
	queue = netdev_get_tx_queue(dev, txq);
	txq_trans_cond_update(queue);

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	dev_core_stats_tx_dropped_inc(dev);
	skb_tx_error(skb);
	kfree_skb_reason(skb, drop_reason);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}
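
/* On the transmit path a packet therefore flows: stack -> this
 * ndo_start_xmit() (on the queue picked by tun_select_queue()) ->
 * tfile->tx_ring -> a blocked read()/recvmsg() on the matching fd.
 * skb_orphan() is what decouples the sender's socket accounting from
 * however long userspace leaves the packet sitting in that ring.
 */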

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}

static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct tun_struct *tun = netdev_priv(dev);

	dev_get_tstats64(dev, stats);

	stats->rx_frame_errors +=
		(unsigned long)atomic_long_read(&tun->rx_frame_errors);
}

static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	struct bpf_prog *old_prog;
	int i;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}

	return 0;
}

static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return tun_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier) {
		struct tun_struct *tun = netdev_priv(dev);

		if (!tun->numqueues)
			return -EPERM;

		netif_carrier_on(dev);
	} else {
		netif_carrier_off(dev);
	}
	return 0;
}

static const struct net_device_ops tun_netdev_ops = {
	.ndo_init		= tun_net_init,
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_change_carrier	= tun_net_change_carrier,
};

static void __tun_xdp_flush_tfile(struct tun_file *tfile)
{
	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
}

static int tun_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	u32 numqueues;
	int nxmit = 0;
	int i;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();

resample:
	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues) {
		rcu_read_unlock();
		return -ENXIO; /* Caller will free/return all frames */
	}

	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
					    numqueues]);
	if (unlikely(!tfile))
		goto resample;

	spin_lock(&tfile->tx_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdp = frames[i];
		/* Encode the XDP flag into lowest bit so the consumer can
		 * tell an XDP buffer from an sk_buff.
		 */
		void *frame = tun_xdp_to_ptr(xdp);

		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
			dev_core_stats_tx_dropped_inc(dev);
			break;
		}
		nxmit++;
	}
	spin_unlock(&tfile->tx_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__tun_xdp_flush_tfile(tfile);

	rcu_read_unlock();
	return nxmit;
}
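
/* Per the ndo_xdp_xmit contract, tun_xdp_xmit() returns how many of the n
 * frames were queued; the caller remains responsible for frames
 * [nxmit, n). Taking the producer lock once and using
 * __ptr_ring_produce() amortizes the locking over the whole batch instead
 * of paying for it per frame.
 */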

static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
	int nxmit;

	if (unlikely(!frame))
		return -EOVERFLOW;

	nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
	if (!nxmit)
		xdp_return_frame_rx_napi(frame);
	return nxmit;
}

static const struct net_device_ops tap_netdev_ops = {
	.ndo_init		= tun_net_init,
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_bpf		= tun_xdp,
	.ndo_xdp_xmit		= tun_xdp_xmit,
	.ndo_change_carrier	= tun_net_change_carrier,
};

static void tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));
}

static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}

#define MIN_MTU 68
#define MAX_MTU 65535

/* Initialize net device. */
static void tun_net_initialize(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		dev->netdev_ops = &tun_netdev_ops;
		dev->header_ops = &ip_tunnel_header_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		break;

	case IFF_TAP:
		dev->netdev_ops = &tap_netdev_ops;
		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

		eth_hw_addr_random(dev);

		/* Currently tun does not support XDP, only tap does. */
		dev->xdp_features = NETDEV_XDP_ACT_BASIC |
				    NETDEV_XDP_ACT_REDIRECT |
				    NETDEV_XDP_ACT_NDO_XMIT;

		break;
	}

	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU - dev->hard_header_len;
}

static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
{
	struct sock *sk = tfile->socket.sk;

	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
}

/* Character device part */

/* Poll */
static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	struct sock *sk;
	__poll_t mask = 0;

	if (!tun)
		return EPOLLERR;

	sk = tfile->socket.sk;

	poll_wait(file, sk_sleep(sk), wait);

	if (!ptr_ring_empty(&tfile->tx_ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
	 * guarantee EPOLLOUT to be raised by either here or
	 * tun_sock_write_space(). Then process could get notification
	 * after it writes to a down device and meets -EIO.
	 */
	if (tun_sock_writeable(tun, tfile) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	     tun_sock_writeable(tun, tfile)))
		mask |= EPOLLOUT | EPOLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = EPOLLERR;

	tun_put(tun);
	return mask;
}

static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
					    size_t len,
					    const struct iov_iter *it)
{
	struct sk_buff *skb;
	size_t linear;
	int err;
	int i;

	if (it->nr_segs > MAX_SKB_FRAGS + 1 ||
	    len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN))
		return ERR_PTR(-EMSGSIZE);

	local_bh_disable();
	skb = napi_get_frags(&tfile->napi);
	local_bh_enable();
	if (!skb)
		return ERR_PTR(-ENOMEM);

	linear = iov_iter_single_seg_count(it);
	err = __skb_grow(skb, linear);
	if (err)
		goto free;

	skb->len = len;
	skb->data_len = len - linear;
	skb->truesize += skb->data_len;

	for (i = 1; i < it->nr_segs; i++) {
		const struct iovec *iov = iter_iov(it) + i;
		size_t fragsz = iov->iov_len;
		struct page *page;
		void *frag;

		if (fragsz == 0 || fragsz > PAGE_SIZE) {
			err = -EINVAL;
			goto free;
		}
		frag = netdev_alloc_frag(fragsz);
		if (!frag) {
			err = -ENOMEM;
			goto free;
		}
		page = virt_to_head_page(frag);
		skb_fill_page_desc(skb, i - 1, page,
				   frag - page_address(page), fragsz);
	}

	return skb;
free:
	/* frees skb and all frags allocated with napi_alloc_frag() */
	napi_free_frags(&tfile->napi);
	return ERR_PTR(err);
}

/* prepad is the amount to reserve at front. len is length after that.
 * linear is a hint as to how much to copy (usually headers). */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err, PAGE_ALLOC_COSTLY_ORDER);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
			   struct sk_buff *skb, int more)
{
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	u32 rx_batched = tun->rx_batched;
	bool rcv = false;

	if (!rx_batched || (!more && skb_queue_empty(queue))) {
		local_bh_disable();
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
		return;
	}

	spin_lock(&queue->lock);
	if (!more || skb_queue_len(queue) == rx_batched) {
		__skb_queue_head_init(&process_queue);
		skb_queue_splice_tail_init(queue, &process_queue);
		rcv = true;
	} else {
		__skb_queue_tail(queue, skb);
	}
	spin_unlock(&queue->lock);

	if (rcv) {
		struct sk_buff *nskb;

		local_bh_disable();
		while ((nskb = __skb_dequeue(&process_queue))) {
			skb_record_rx_queue(nskb, tfile->queue_index);
			netif_receive_skb(nskb);
		}
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
	}
}

static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
			      int len, int noblock, bool zerocopy)
{
	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
		return false;

	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
		return false;

	if (!noblock)
		return false;

	if (zerocopy)
		return false;

	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return false;

	return true;
}

static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
				       struct page_frag *alloc_frag, char *buf,
				       int buflen, int len, int pad)
{
	struct sk_buff *skb = build_skb(buf, buflen);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb_reserve(skb, pad);
	skb_put(skb, len);
	skb_set_owner_w(skb, tfile->socket.sk);

	get_page(alloc_frag->page);
	alloc_frag->offset += buflen;

	return skb;
}

static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
		       struct xdp_buff *xdp, u32 act)
{
	int err;

	switch (act) {
	case XDP_REDIRECT:
		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
		if (err)
			return err;
		break;
	case XDP_TX:
		err = tun_xdp_tx(tun->dev, xdp);
		if (err < 0)
			return err;
		break;
	case XDP_PASS:
		break;
	default:
		bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(tun->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		dev_core_stats_rx_dropped_inc(tun->dev);
		break;
	}

	return act;
}
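
/* tun_xdp_act() folds the verdict handling shared by the XDP paths: it
 * returns a negative errno on REDIRECT/TX failure, or echoes the verdict
 * back. Note that XDP_REDIRECT and XDP_TX hand the buffer off to someone
 * else, which is why tun_build_skb() below takes a page reference before
 * running the program and only releases it if the hand-off failed.
 */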

static struct sk_buff *tun_build_skb(struct tun_struct *tun,
				     struct tun_file *tfile,
				     struct iov_iter *from,
				     struct virtio_net_hdr *hdr,
				     int len, int *skb_xdp)
{
	struct page_frag *alloc_frag = &current->task_frag;
	struct bpf_prog *xdp_prog;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	char *buf;
	size_t copied;
	int pad = TUN_RX_PAD;
	int err = 0;

	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog)
		pad += XDP_PACKET_HEADROOM;
	buflen += SKB_DATA_ALIGN(len + pad);
	rcu_read_unlock();

	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset + pad,
				     len, from);
	if (copied != len)
		return ERR_PTR(-EFAULT);

	/* There's a small window in which XDP may be set after the check
	 * of xdp_prog above; this should be rare and for simplicity we do
	 * XDP on the skb in case the headroom is not enough.
	 */
	if (hdr->gso_type || !xdp_prog) {
		*skb_xdp = 1;
		return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
				       pad);
	}

	*skb_xdp = 0;

	local_bh_disable();
	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog) {
		struct xdp_buff xdp;
		u32 act;

		xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
		xdp_prepare_buff(&xdp, buf, pad, len, false);

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		if (act == XDP_REDIRECT || act == XDP_TX) {
			get_page(alloc_frag->page);
			alloc_frag->offset += buflen;
		}
		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
		if (err < 0) {
			if (act == XDP_REDIRECT || act == XDP_TX)
				put_page(alloc_frag->page);
			goto out;
		}

		if (err == XDP_REDIRECT)
			xdp_do_flush();
		if (err != XDP_PASS)
			goto out;

		pad = xdp.data - xdp.data_hard_start;
		len = xdp.data_end - xdp.data;
	}
	rcu_read_unlock();
	local_bh_enable();

	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);

out:
	rcu_read_unlock();
	local_bh_enable();
	return NULL;
}

/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, struct iov_iter *from,
			    int noblock, bool more)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t total_len = iov_iter_count(from);
	size_t len = total_len, align = tun->align, linear;
	struct virtio_net_hdr gso = { 0 };
	int good_linear;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash = 0;
	int skb_xdp = 1;
	bool frags = tun_napi_frags_enabled(tfile);
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;

	if (!(tun->flags & IFF_NO_PI)) {
		if (len < sizeof(pi))
			return -EINVAL;
		len -= sizeof(pi);

		if (!copy_from_iter_full(&pi, sizeof(pi), from))
			return -EFAULT;
	}

	if (tun->flags & IFF_VNET_HDR) {
		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

		if (len < vnet_hdr_sz)
			return -EINVAL;
		len -= vnet_hdr_sz;

		if (!copy_from_iter_full(&gso, sizeof(gso), from))
			return -EFAULT;

		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);

		if (tun16_to_cpu(tun, gso.hdr_len) > len)
			return -EINVAL;
		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
	}
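
	/* With both PI and a vnet header enabled, each write()/sendmsg()
	 * on the fd is therefore parsed as:
	 *
	 *	+-------------------+--------------------------+--------...
	 *	| struct tun_pi     | struct virtio_net_hdr    | packet
	 *	| (4 bytes; absent  | (vnet_hdr_sz bytes; only | data
	 *	|  with IFF_NO_PI)  |  with IFF_VNET_HDR)      |
	 *	+-------------------+--------------------------+--------...
	 *
	 * vnet_hdr_sz (TUNSETVNETHDRSZ) may be larger than
	 * sizeof(struct virtio_net_hdr); the excess is simply skipped by
	 * the iov_iter_advance() above. The same framing, in the other
	 * direction, is produced by tun_put_user().
	 */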

	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
			return -EINVAL;
	}

	good_linear = SKB_MAX_HEAD(align);

	if (msg_control) {
		struct iov_iter i = *from;

		/* There are 256 bytes to be copied in skb, so there is
		 * enough room for skb expand head in case it is used.
		 * The rest of the buffer is mapped from userspace.
		 */
		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
		/* For packets that are not easy to process (e.g. GSO or
		 * jumbo packets), we do the XDP program after the skb has
		 * been created, with the generic XDP routine.
		 */
		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
		err = PTR_ERR_OR_ZERO(skb);
		if (err)
			goto drop;
		if (!skb)
			return total_len;
	} else {
		if (!zerocopy) {
			copylen = len;
			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
				linear = good_linear;
			else
				linear = tun16_to_cpu(tun, gso.hdr_len);
		}

		if (frags) {
			mutex_lock(&tfile->napi_mutex);
			skb = tun_napi_alloc_frags(tfile, copylen, from);
			/* tun_napi_alloc_frags() enforces a layout for the skb.
			 * If zerocopy is enabled, then this layout will be
			 * overwritten by zerocopy_sg_from_iter().
			 */
			zerocopy = false;
		} else {
			skb = tun_alloc_skb(tfile, align, copylen, linear,
					    noblock);
		}

		err = PTR_ERR_OR_ZERO(skb);
		if (err)
			goto drop;

		if (zerocopy)
			err = zerocopy_sg_from_iter(skb, from);
		else
			err = skb_copy_datagram_from_iter(skb, 0, from, len);

		if (err) {
			err = -EFAULT;
			drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
			goto drop;
		}
	}

	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
		atomic_long_inc(&tun->rx_frame_errors);
		err = -EINVAL;
		goto free_skb;
	}

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		if (tun->flags & IFF_NO_PI) {
			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;

			switch (ip_version) {
			case 4:
				pi.proto = htons(ETH_P_IP);
				break;
			case 6:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				err = -EINVAL;
				goto drop;
			}
		}

		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case IFF_TAP:
		if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
			err = -ENOMEM;
			drop_reason = SKB_DROP_REASON_HDR_TRUNC;
			goto drop;
		}
		skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_zcopy_init(skb, msg_control);
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;

		uarg->callback(NULL, uarg, false);
	}

	skb_reset_network_header(skb);
	skb_probe_transport_header(skb);
	skb_record_rx_queue(skb, tfile->queue_index);

	if (skb_xdp) {
		struct bpf_prog *xdp_prog;
		int ret;

		local_bh_disable();
		rcu_read_lock();
		xdp_prog = rcu_dereference(tun->xdp_prog);
		if (xdp_prog) {
			ret = do_xdp_generic(xdp_prog, skb);
			if (ret != XDP_PASS) {
				rcu_read_unlock();
				local_bh_enable();
				goto unlock_frags;
			}
		}
		rcu_read_unlock();
		local_bh_enable();
	}

	/* Compute the costly rx hash only if needed for flow updates.
	 * There is a very small possibility of out-of-order delivery
	 * during a queue switch; it is not worth optimizing for.
	 */
	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
	    !tfile->detached)
		rxhash = __skb_get_hash_symmetric(skb);

	rcu_read_lock();
	if (unlikely(!(tun->dev->flags & IFF_UP))) {
		err = -EIO;
		rcu_read_unlock();
		drop_reason = SKB_DROP_REASON_DEV_READY;
		goto drop;
	}

	if (frags) {
		u32 headlen;

		/* Exercise flow dissector code path. */
		skb_push(skb, ETH_HLEN);
		headlen = eth_get_headlen(tun->dev, skb->data,
					  skb_headlen(skb));

		if (unlikely(headlen > skb_headlen(skb))) {
			WARN_ON_ONCE(1);
			err = -ENOMEM;
			dev_core_stats_rx_dropped_inc(tun->dev);
napi_busy:
			napi_free_frags(&tfile->napi);
			rcu_read_unlock();
			mutex_unlock(&tfile->napi_mutex);
			return err;
		}

		if (likely(napi_schedule_prep(&tfile->napi))) {
			local_bh_disable();
			napi_gro_frags(&tfile->napi);
			napi_complete(&tfile->napi);
			local_bh_enable();
		} else {
			err = -EBUSY;
			goto napi_busy;
		}
		mutex_unlock(&tfile->napi_mutex);
	} else if (tfile->napi_enabled) {
		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
		int queue_len;

		spin_lock_bh(&queue->lock);

		if (unlikely(tfile->detached)) {
			spin_unlock_bh(&queue->lock);
			rcu_read_unlock();
			err = -EBUSY;
			goto free_skb;
		}

		__skb_queue_tail(queue, skb);
		queue_len = skb_queue_len(queue);
		spin_unlock(&queue->lock);

		if (!more || queue_len > NAPI_POLL_WEIGHT)
			napi_schedule(&tfile->napi);

		local_bh_enable();
	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
		tun_rx_batched(tun, tfile, skb, more);
	} else {
		netif_rx(skb);
	}
	rcu_read_unlock();

	preempt_disable();
	dev_sw_netstats_rx_add(tun->dev, len);
	preempt_enable();

	if (rxhash)
		tun_flow_update(tun, rxhash, tfile);

	return total_len;

drop:
	if (err != -EAGAIN)
		dev_core_stats_rx_dropped_inc(tun->dev);

free_skb:
	if (!IS_ERR_OR_NULL(skb))
		kfree_skb_reason(skb, drop_reason);

unlock_frags:
	if (frags) {
		tfile->napi.skb = NULL;
		mutex_unlock(&tfile->napi_mutex);
	}

	return err ?: total_len;
}

static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	ssize_t result;
	int noblock = 0;

	if (!tun)
		return -EBADFD;

	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
		noblock = 1;

	result = tun_get_user(tun, tfile, NULL, from, noblock, false);

	tun_put(tun);
	return result;
}

static ssize_t tun_put_user_xdp(struct tun_struct *tun,
				struct tun_file *tfile,
				struct xdp_frame *xdp_frame,
				struct iov_iter *iter)
{
	int vnet_hdr_sz = 0;
	size_t size = xdp_frame->len;
	size_t ret;

	if (tun->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr gso = { 0 };

		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
			return -EINVAL;
		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
			     sizeof(gso)))
			return -EFAULT;
		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;

	preempt_disable();
	dev_sw_netstats_tx_add(tun->dev, 1, ret);
	preempt_enable();

	return ret;
}
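
/* The xdp_frame read path mirrors tun_put_user() below, minus the vlan
 * and PI handling: an all-zero virtio_net_hdr is emitted (an xdp_frame
 * carries no GSO/csum metadata), padded out to vnet_hdr_sz, followed by
 * the raw frame.
 */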

/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
			    struct tun_file *tfile,
			    struct sk_buff *skb,
			    struct iov_iter *iter)
{
	struct tun_pi pi = { 0, skb->protocol };
	ssize_t total;
	int vlan_offset = 0;
	int vlan_hlen = 0;
	int vnet_hdr_sz = 0;

	if (skb_vlan_tag_present(skb))
		vlan_hlen = VLAN_HLEN;

	if (tun->flags & IFF_VNET_HDR)
		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

	total = skb->len + vlan_hlen + vnet_hdr_sz;

	if (!(tun->flags & IFF_NO_PI)) {
		if (iov_iter_count(iter) < sizeof(pi))
			return -EINVAL;

		total += sizeof(pi);
		if (iov_iter_count(iter) < total) {
			/* Packet will be stripped */
			pi.flags |= TUN_PKT_STRIP;
		}

		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
			return -EFAULT;
	}

	if (vnet_hdr_sz) {
		struct virtio_net_hdr gso;

		if (iov_iter_count(iter) < vnet_hdr_sz)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &gso,
					    tun_is_little_endian(tun), true,
					    vlan_hlen)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			pr_err("unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
			       tun16_to_cpu(tun, gso.hdr_len));
			print_hex_dump(KERN_ERR, "tun: ",
				       DUMP_PREFIX_NONE,
				       16, 1, skb->head,
				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	if (vlan_hlen) {
		int ret;
		struct veth veth;

		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);

done:
	/* caller is in process context */
	preempt_disable();
	dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen);
	preempt_enable();

	return total;
}

static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
{
	DECLARE_WAITQUEUE(wait, current);
	void *ptr = NULL;
	int error = 0;

	ptr = ptr_ring_consume(&tfile->tx_ring);
	if (ptr)
		goto out;
	if (noblock) {
		error = -EAGAIN;
		goto out;
	}

	add_wait_queue(&tfile->socket.wq.wait, &wait);

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		ptr = ptr_ring_consume(&tfile->tx_ring);
		if (ptr)
			break;
		if (signal_pending(current)) {
			error = -ERESTARTSYS;
			break;
		}
		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
			error = -EFAULT;
			break;
		}

		schedule();
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tfile->socket.wq.wait, &wait);

out:
	*err = error;
	return ptr;
}
static void tun_prog_free(struct rcu_head *rcu)
{
	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);

	bpf_prog_destroy(prog->prog);
	kfree(prog);
}

static int __tun_set_ebpf(struct tun_struct *tun,
			  struct tun_prog __rcu **prog_p,
			  struct bpf_prog *prog)
{
	struct tun_prog *old, *new = NULL;

	if (prog) {
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->prog = prog;
	}

	spin_lock_bh(&tun->lock);
	old = rcu_dereference_protected(*prog_p,
					lockdep_is_held(&tun->lock));
	rcu_assign_pointer(*prog_p, new);
	spin_unlock_bh(&tun->lock);

	if (old)
		call_rcu(&old->rcu, tun_prog_free);

	return 0;
}

static void tun_free_netdev(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	BUG_ON(!list_empty(&tun->disabled));

	free_percpu(dev->tstats);
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
}

static void tun_setup(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->owner = INVALID_UID;
	tun->group = INVALID_GID;
	tun_default_link_ksettings(dev, &tun->link_ksettings);

	dev->ethtool_ops = &tun_ethtool_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = tun_free_netdev;
	/* We prefer our own queue length */
	dev->tx_queue_len = TUN_READQ_SIZE;
}
/* Trivial set of netlink ops to allow deleting tun or tap
 * device with netlink.
 */
static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	NL_SET_ERR_MSG(extack,
		       "tun/tap creation via rtnetlink is not supported.");
	return -EOPNOTSUPP;
}

static size_t tun_get_size(const struct net_device *dev)
{
	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));

	return nla_total_size(sizeof(uid_t)) +		/* OWNER */
	       nla_total_size(sizeof(gid_t)) +		/* GROUP */
	       nla_total_size(sizeof(u8)) +		/* TYPE */
	       nla_total_size(sizeof(u8)) +		/* PI */
	       nla_total_size(sizeof(u8)) +		/* VNET_HDR */
	       nla_total_size(sizeof(u8)) +		/* PERSIST */
	       nla_total_size(sizeof(u8)) +		/* MULTI_QUEUE */
	       nla_total_size(sizeof(u32)) +		/* NUM_QUEUES */
	       nla_total_size(sizeof(u32)) +		/* NUM_DISABLED_QUEUES */
	       0;
}

static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
		goto nla_put_failure;
	if (uid_valid(tun->owner) &&
	    nla_put_u32(skb, IFLA_TUN_OWNER,
			from_kuid_munged(current_user_ns(), tun->owner)))
		goto nla_put_failure;
	if (gid_valid(tun->group) &&
	    nla_put_u32(skb, IFLA_TUN_GROUP,
			from_kgid_munged(current_user_ns(), tun->group)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
		       !!(tun->flags & IFF_MULTI_QUEUE)))
		goto nla_put_failure;
	if (tun->flags & IFF_MULTI_QUEUE) {
		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
				tun->numdisabled))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops tun_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct tun_struct),
	.setup		= tun_setup,
	.validate	= tun_validate,
	.get_size	= tun_get_size,
	.fill_info	= tun_fill_info,
};
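/* Because tun_link_ops is registered, a persistent device can be torn down
 * with a plain RTM_DELLINK even though creation via rtnetlink is refused.
 * A hedged userspace sketch (the device name "tun0" is an assumption and
 * error handling is trimmed; CAP_NET_ADMIN is required):
 *
 *	#include <string.h>
 *	#include <net/if.h>
 *	#include <sys/socket.h>
 *	#include <linux/netlink.h>
 *	#include <linux/rtnetlink.h>
 *
 *	int delete_tun0(void)
 *	{
 *		struct {
 *			struct nlmsghdr nh;
 *			struct ifinfomsg ifm;
 *		} req;
 *		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *		memset(&req, 0, sizeof(req));
 *		req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifm));
 *		req.nh.nlmsg_type = RTM_DELLINK;
 *		req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
 *		req.ifm.ifi_family = AF_UNSPEC;
 *		req.ifm.ifi_index = if_nametoindex("tun0");
 *		return send(fd, &req, req.nh.nlmsg_len, 0) < 0 ? -1 : 0;
 *	}
 *
 * This is the same operation "ip link delete tun0" performs.
 */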
static void tun_sock_write_space(struct sock *sk)
{
	struct tun_file *tfile;
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk))
		return;

	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);

	tfile = container_of(sk, struct tun_file, sk);
	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}

static void tun_put_page(struct tun_page *tpage)
{
	if (tpage->page)
		__page_frag_cache_drain(tpage->page, tpage->count);
}

static int tun_xdp_one(struct tun_struct *tun,
		       struct tun_file *tfile,
		       struct xdp_buff *xdp, int *flush,
		       struct tun_page *tpage)
{
	unsigned int datasize = xdp->data_end - xdp->data;
	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
	struct virtio_net_hdr *gso = &hdr->gso;
	struct bpf_prog *xdp_prog;
	struct sk_buff *skb = NULL;
	struct sk_buff_head *queue;
	u32 rxhash = 0, act;
	int buflen = hdr->buflen;
	int ret = 0;
	bool skb_xdp = false;
	struct page *page;

	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog) {
		if (gso->gso_type) {
			skb_xdp = true;
			goto build;
		}

		xdp_init_buff(xdp, buflen, &tfile->xdp_rxq);
		xdp_set_data_meta_invalid(xdp);

		act = bpf_prog_run_xdp(xdp_prog, xdp);
		ret = tun_xdp_act(tun, xdp_prog, xdp, act);
		if (ret < 0) {
			put_page(virt_to_head_page(xdp->data));
			return ret;
		}

		switch (ret) {
		case XDP_REDIRECT:
			*flush = true;
			fallthrough;
		case XDP_TX:
			return 0;
		case XDP_PASS:
			break;
		default:
			page = virt_to_head_page(xdp->data);
			if (tpage->page == page) {
				++tpage->count;
			} else {
				tun_put_page(tpage);
				tpage->page = page;
				tpage->count = 1;
			}
			return 0;
		}
	}

build:
	skb = build_skb(xdp->data_hard_start, buflen);
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);

	if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
		atomic_long_inc(&tun->rx_frame_errors);
		kfree_skb(skb);
		ret = -EINVAL;
		goto out;
	}

	skb->protocol = eth_type_trans(skb, tun->dev);
	skb_reset_network_header(skb);
	skb_probe_transport_header(skb);
	skb_record_rx_queue(skb, tfile->queue_index);

	if (skb_xdp) {
		ret = do_xdp_generic(xdp_prog, skb);
		if (ret != XDP_PASS) {
			ret = 0;
			goto out;
		}
	}

	if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
	    !tfile->detached)
		rxhash = __skb_get_hash_symmetric(skb);

	if (tfile->napi_enabled) {
		queue = &tfile->sk.sk_write_queue;
		spin_lock(&queue->lock);

		if (unlikely(tfile->detached)) {
			spin_unlock(&queue->lock);
			kfree_skb(skb);
			return -EBUSY;
		}

		__skb_queue_tail(queue, skb);
		spin_unlock(&queue->lock);
		ret = 1;
	} else {
		netif_receive_skb(skb);
		ret = 0;
	}

	/* No need to disable preemption here since this function is
	 * always called with bh disabled
	 */
	dev_sw_netstats_rx_add(tun->dev, datasize);

	if (rxhash)
		tun_flow_update(tun, rxhash, tfile);

out:
	return ret;
}

static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret, i;
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);
	struct tun_msg_ctl *ctl = m->msg_control;
	struct xdp_buff *xdp;

	if (!tun)
		return -EBADFD;

	if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
	    ctl && ctl->type == TUN_MSG_PTR) {
		struct tun_page tpage;
		int n = ctl->num;
		int flush = 0, queued = 0;

		memset(&tpage, 0, sizeof(tpage));

		local_bh_disable();
		rcu_read_lock();

		for (i = 0; i < n; i++) {
			xdp = &((struct xdp_buff *)ctl->ptr)[i];
			ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
			if (ret > 0)
				queued += ret;
		}

		if (flush)
			xdp_do_flush();

		if (tfile->napi_enabled && queued > 0)
			napi_schedule(&tfile->napi);

		rcu_read_unlock();
		local_bh_enable();

		tun_put_page(&tpage);

		ret = total_len;
		goto out;
	}

	ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
			   m->msg_flags & MSG_DONTWAIT,
			   m->msg_flags & MSG_MORE);
out:
	tun_put(tun);
	return ret;
}
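/* tun_sendmsg() has two personalities: a plain sendmsg() copies one packet
 * through tun_get_user(), while an in-kernel caller such as vhost-net can
 * hand over a whole batch of pre-built xdp_buffs in msg_control. A hedged
 * in-kernel sketch of the batched contract (names other than tun_msg_ctl,
 * TUN_MSG_PTR and sock_sendmsg() are illustrative only):
 *
 *	struct tun_msg_ctl ctl = {
 *		.type = TUN_MSG_PTR,
 *		.num  = batch_size,
 *		.ptr  = xdp_buff_array,
 *	};
 *	struct msghdr msg = {
 *		.msg_control    = &ctl,
 *		.msg_controllen = sizeof(ctl),
 *	};
 *
 *	sock_sendmsg(tun_sock, &msg);
 *
 * Each xdp_buff is expected to carry a struct tun_xdp_hdr (buflen plus the
 * virtio-net header) at data_hard_start, which is what tun_xdp_one()
 * reads back above.
 */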
static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);
	void *ptr = m->msg_control;
	int ret;

	if (!tun) {
		ret = -EBADFD;
		goto out_free;
	}

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
		ret = -EINVAL;
		goto out_put_tun;
	}
	if (flags & MSG_ERRQUEUE) {
		ret = sock_recv_errqueue(sock->sk, m, total_len,
					 SOL_PACKET, TUN_TX_TIMESTAMP);
		goto out;
	}
	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
	if (ret > (ssize_t)total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
out:
	tun_put(tun);
	return ret;

out_put_tun:
	tun_put(tun);
out_free:
	tun_ptr_free(ptr);
	return ret;
}

static int tun_ptr_peek_len(void *ptr)
{
	if (likely(ptr)) {
		if (tun_is_xdp_frame(ptr)) {
			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

			return xdpf->len;
		}
		return __skb_array_len_with_tag(ptr);
	} else {
		return 0;
	}
}

static int tun_peek_len(struct socket *sock)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun;
	int ret = 0;

	tun = tun_get(tfile);
	if (!tun)
		return 0;

	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
	tun_put(tun);

	return ret;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
	.peek_len = tun_peek_len,
	.sendmsg = tun_sendmsg,
	.recvmsg = tun_recvmsg,
};

static struct proto tun_proto = {
	.name		= "tun",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tun_file),
};

static int tun_flags(struct tun_struct *tun)
{
	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
}

static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));

	return sysfs_emit(buf, "0x%x\n", tun_flags(tun));
}

static ssize_t owner_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));

	return uid_valid(tun->owner) ?
		sysfs_emit(buf, "%u\n",
			   from_kuid_munged(current_user_ns(), tun->owner)) :
		sysfs_emit(buf, "-1\n");
}

static ssize_t group_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));

	return gid_valid(tun->group) ?
		sysfs_emit(buf, "%u\n",
			   from_kgid_munged(current_user_ns(), tun->group)) :
		sysfs_emit(buf, "-1\n");
}

static DEVICE_ATTR_RO(tun_flags);
static DEVICE_ATTR_RO(owner);
static DEVICE_ATTR_RO(group);

static struct attribute *tun_dev_attrs[] = {
	&dev_attr_tun_flags.attr,
	&dev_attr_owner.attr,
	&dev_attr_group.attr,
	NULL
};

static const struct attribute_group tun_attr_group = {
	.attrs = tun_dev_attrs
};
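/* The attribute group above surfaces the device's mode under sysfs once
 * tun_set_iff() hooks it into dev->sysfs_groups. A minimal sketch of
 * reading it back, assuming a device named "tun0" (the path is derived
 * from the DEVICE_ATTR_RO() names):
 *
 *	#include <stdio.h>
 *
 *	int read_tun_flags(unsigned int *flags)
 *	{
 *		FILE *f = fopen("/sys/class/net/tun0/tun_flags", "r");
 *		int ok;
 *
 *		if (!f)
 *			return -1;
 *		ok = fscanf(f, "%x", flags) == 1;
 *		fclose(f);
 *		return ok ? 0 : -1;
 *	}
 */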
static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun;
	struct tun_file *tfile = file->private_data;
	struct net_device *dev;
	int err;

	if (tfile->detached)
		return -EINVAL;

	if (ifr->ifr_flags & IFF_NAPI_FRAGS) {
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!(ifr->ifr_flags & IFF_NAPI) ||
		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
			return -EINVAL;
	}

	dev = __dev_get_by_name(net, ifr->ifr_name);
	if (dev) {
		if (ifr->ifr_flags & IFF_TUN_EXCL)
			return -EBUSY;
		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
			tun = netdev_priv(dev);
		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
			tun = netdev_priv(dev);
		else
			return -EINVAL;

		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
		    !!(tun->flags & IFF_MULTI_QUEUE))
			return -EINVAL;

		if (tun_not_capable(tun))
			return -EPERM;
		err = security_tun_dev_open(tun->security);
		if (err < 0)
			return err;

		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
				 ifr->ifr_flags & IFF_NAPI,
				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
		if (err < 0)
			return err;

		if (tun->flags & IFF_MULTI_QUEUE &&
		    (tun->numqueues + tun->numdisabled > 1)) {
			/* One or more queues have already been attached, no
			 * need to initialize the device again.
			 */
			netdev_state_change(dev);
			return 0;
		}

		tun->flags = (tun->flags & ~TUN_FEATURES) |
			     (ifr->ifr_flags & TUN_FEATURES);

		netdev_state_change(dev);
	} else {
		char *name;
		unsigned long flags = 0;
		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
			     MAX_TAP_QUEUES : 1;

		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_create();
		if (err < 0)
			return err;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= IFF_TUN;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= IFF_TAP;
			name = "tap%d";
		} else
			return -EINVAL;

		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
				       NET_NAME_UNKNOWN, tun_setup, queues,
				       queues);

		if (!dev)
			return -ENOMEM;

		dev_net_set(dev, net);
		dev->rtnl_link_ops = &tun_link_ops;
		dev->ifindex = tfile->ifindex;
		dev->sysfs_groups[0] = &tun_attr_group;

		tun = netdev_priv(dev);
		tun->dev = dev;
		tun->flags = flags;
		tun->txflt.count = 0;
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

		tun->align = NET_SKB_PAD;
		tun->filter_attached = false;
		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
		tun->rx_batched = 0;
		RCU_INIT_POINTER(tun->steering_prog, NULL);

		tun->ifr = ifr;
		tun->file = file;

		tun_net_initialize(dev);

		err = register_netdevice(tun->dev);
		if (err < 0) {
			free_netdev(dev);
			return err;
		}
		/* free_netdev() won't check refcnt; to avoid a race with
		 * dev_put() we need to publish tun after registration.
		 */
		rcu_assign_pointer(tfile->tun, tun);
	}

	if (ifr->ifr_flags & IFF_NO_CARRIER)
		netif_carrier_off(tun->dev);
	else
		netif_carrier_on(tun->dev);

	/* Make sure persistent devices do not get stuck in
	 * xoff state.
	 */
	if (netif_running(tun->dev))
		netif_tx_wake_all_queues(tun->dev);

	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;
}
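/* The classic userspace counterpart of tun_set_iff(), in the spirit of
 * the example in Documentation/networking/tuntap.rst: open the clone
 * device, request a mode (flags is e.g. IFF_TAP | IFF_NO_PI), and read
 * back the name the kernel actually chose (the "tun%d"/"tap%d" expansion
 * above when no explicit name is given).
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	int tun_alloc(char *dev, short flags)
 *	{
 *		struct ifreq ifr;
 *		int fd, err;
 *
 *		fd = open("/dev/net/tun", O_RDWR);
 *		if (fd < 0)
 *			return fd;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = flags;
 *		if (*dev)
 *			strncpy(ifr.ifr_name, dev, IFNAMSIZ);
 *
 *		err = ioctl(fd, TUNSETIFF, &ifr);
 *		if (err < 0) {
 *			close(fd);
 *			return err;
 *		}
 *		strcpy(dev, ifr.ifr_name);
 *		return fd;
 *	}
 */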
static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
{
	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = tun_flags(tun);
}

/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required.
 */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	if (arg & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}

		arg &= ~TUN_F_UFO;

		/* TODO: for now USO4 and USO6 should work simultaneously */
		if (arg & TUN_F_USO4 && arg & TUN_F_USO6) {
			features |= NETIF_F_GSO_UDP_L4;
			arg &= ~(TUN_F_USO4 | TUN_F_USO6);
		}
	}

	/* This gives the user a way to test for new features in future by
	 * trying to set them. Every recognized bit has been cleared above,
	 * so anything left over in arg is an unknown request.
	 */
	if (arg)
		return -EINVAL;

	tun->set_features = features;
	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
	tun->dev->wanted_features |= features;
	netdev_update_features(tun->dev);

	return 0;
}
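/* A short userspace sketch of driving set_offload(): enable checksum
 * offload plus TCPv4/v6 segmentation on an already-attached tap fd.
 * Note that TUNSETOFFLOAD takes the bitmask by value, not by pointer,
 * and that unknown bits fail with EINVAL, which is exactly how
 * userspace probes for new offloads.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/if_tun.h>
 *
 *	int enable_offloads(int tapfd)
 *	{
 *		unsigned long off = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
 *
 *		return ioctl(tapfd, TUNSETOFFLOAD, off);
 *	}
 */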
static void tun_detach_filter(struct tun_struct *tun, int n)
{
	int i;
	struct tun_file *tfile;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		sk_detach_filter(tfile->socket.sk);
		release_sock(tfile->socket.sk);
	}

	tun->filter_attached = false;
}

static int tun_attach_filter(struct tun_struct *tun)
{
	int i, ret = 0;
	struct tun_file *tfile;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (ret) {
			tun_detach_filter(tun, i);
			return ret;
		}
	}

	tun->filter_attached = true;
	return ret;
}

static void tun_set_sndbuf(struct tun_struct *tun)
{
	struct tun_file *tfile;
	int i;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
	}
}

static int tun_set_queue(struct file *file, struct ifreq *ifr)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	int ret = 0;

	rtnl_lock();

	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
		tun = tfile->detached;
		if (!tun) {
			ret = -EINVAL;
			goto unlock;
		}
		ret = security_tun_dev_attach_queue(tun->security);
		if (ret < 0)
			goto unlock;
		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
				 tun->flags & IFF_NAPI_FRAGS, true);
	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
		tun = rtnl_dereference(tfile->tun);
		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
			ret = -EINVAL;
		else
			__tun_detach(tfile, false);
	} else
		ret = -EINVAL;

	if (ret >= 0)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	return ret;
}

static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
			void __user *data)
{
	struct bpf_prog *prog;
	int fd;

	if (copy_from_user(&fd, data, sizeof(fd)))
		return -EFAULT;

	if (fd == -1) {
		prog = NULL;
	} else {
		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
		if (IS_ERR(prog))
			return PTR_ERR(prog);
	}

	return __tun_set_ebpf(tun, prog_p, prog);
}
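/* tun_set_ebpf() only transports a program fd; loading happens through
 * ordinary bpf(2). A hedged sketch that installs a trivial steering
 * program (always queue 0) via TUNSETSTEERINGEBPF; passing fd -1 through
 * the same ioctl removes it again. The hand-rolled instructions encode
 * "r0 = 0; exit".
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *	#include <linux/if_tun.h>
 *
 *	int set_steering(int tunfd)
 *	{
 *		struct bpf_insn insns[] = {
 *			{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *			  .dst_reg = BPF_REG_0, .imm = 0 },
 *			{ .code = BPF_JMP | BPF_EXIT },
 *		};
 *		union bpf_attr attr;
 *		int bpf_fd;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *		attr.insn_cnt = 2;
 *		attr.insns = (unsigned long)insns;
 *		attr.license = (unsigned long)"GPL";
 *		bpf_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *		if (bpf_fd < 0)
 *			return -1;
 *		return ioctl(tunfd, TUNSETSTEERINGEBPF, &bpf_fd);
 *	}
 */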
/* Return correct value for tun->dev->addr_len based on tun->dev->type. */
static unsigned char tun_get_addr_len(unsigned short type)
{
	switch (type) {
	case ARPHRD_IP6GRE:
	case ARPHRD_TUNNEL6:
		return sizeof(struct in6_addr);
	case ARPHRD_IPGRE:
	case ARPHRD_TUNNEL:
	case ARPHRD_SIT:
		return 4;
	case ARPHRD_ETHER:
		return ETH_ALEN;
	case ARPHRD_IEEE802154:
	case ARPHRD_IEEE802154_MONITOR:
		return IEEE802154_EXTENDED_ADDR_LEN;
	case ARPHRD_PHONET_PIPE:
	case ARPHRD_PPP:
	case ARPHRD_NONE:
		return 0;
	case ARPHRD_6LOWPAN:
		return EUI64_ADDR_LEN;
	case ARPHRD_FDDI:
		return FDDI_K_ALEN;
	case ARPHRD_HIPPI:
		return HIPPI_ALEN;
	case ARPHRD_IEEE802:
		return FC_ALEN;
	case ARPHRD_ROSE:
		return ROSE_ADDR_LEN;
	case ARPHRD_NETROM:
		return AX25_ADDR_LEN;
	case ARPHRD_LOCALTLK:
		return LTALK_ALEN;
	default:
		return 0;
	}
}

static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg, int ifreq_len)
{
	struct tun_file *tfile = file->private_data;
	struct net *net = sock_net(&tfile->sk);
	struct tun_struct *tun;
	void __user *argp = (void __user *)arg;
	unsigned int ifindex, carrier;
	struct ifreq ifr;
	kuid_t owner;
	kgid_t group;
	int sndbuf;
	int vnet_hdr_sz;
	int le;
	int ret;
	bool do_notify = false;

	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
		if (copy_from_user(&ifr, argp, ifreq_len))
			return -EFAULT;
	} else {
		memset(&ifr, 0, sizeof(ifr));
	}
	if (cmd == TUNGETFEATURES) {
		/* Currently this just means: "what IFF flags are valid?".
		 * This is needed because we never checked for invalid flags on
		 * TUNSETIFF.
		 */
		return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER |
				TUN_FEATURES, (unsigned int __user *)argp);
	} else if (cmd == TUNSETQUEUE) {
		return tun_set_queue(file, &ifr);
	} else if (cmd == SIOCGSKNS) {
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		return open_related_ns(&net->ns, get_net_ns);
	}

	rtnl_lock();

	tun = tun_get(tfile);
	if (cmd == TUNSETIFF) {
		ret = -EEXIST;
		if (tun)
			goto unlock;

		ifr.ifr_name[IFNAMSIZ-1] = '\0';

		ret = tun_set_iff(net, file, &ifr);

		if (ret)
			goto unlock;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		goto unlock;
	}
	if (cmd == TUNSETIFINDEX) {
		ret = -EPERM;
		if (tun)
			goto unlock;

		ret = -EFAULT;
		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
			goto unlock;

		ret = 0;
		tfile->ifindex = ifindex;
		goto unlock;
	}

	ret = -EBADFD;
	if (!tun)
		goto unlock;

	netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);

	net = dev_net(tun->dev);
	ret = 0;
	switch (cmd) {
	case TUNGETIFF:
		tun_get_iff(tun, &ifr);

		if (tfile->detached)
			ifr.ifr_flags |= IFF_DETACH_QUEUE;
		if (!tfile->socket.sk->sk_filter)
			ifr.ifr_flags |= IFF_NOFILTER;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case TUNSETNOCSUM:
		/* Disable/Enable checksum */

		/* [unimplemented] */
		netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
			   arg ? "disabled" : "enabled");
		break;
	case TUNSETPERSIST:
		/* Disable/Enable persist mode. Take an extra reference on
		 * the module so that it cannot be unloaded while a
		 * persistent device exists.
		 */
		if (arg && !(tun->flags & IFF_PERSIST)) {
			tun->flags |= IFF_PERSIST;
			__module_get(THIS_MODULE);
			do_notify = true;
		}
		if (!arg && (tun->flags & IFF_PERSIST)) {
			tun->flags &= ~IFF_PERSIST;
			module_put(THIS_MODULE);
			do_notify = true;
		}

		netif_info(tun, drv, tun->dev, "persist %s\n",
			   arg ? "enabled" : "disabled");
		break;

	case TUNSETOWNER:
		/* Set owner of the device */
		owner = make_kuid(current_user_ns(), arg);
		if (!uid_valid(owner)) {
			ret = -EINVAL;
			break;
		}
		tun->owner = owner;
		do_notify = true;
		netif_info(tun, drv, tun->dev, "owner set to %u\n",
			   from_kuid(&init_user_ns, tun->owner));
		break;

	case TUNSETGROUP:
		/* Set group of the device */
		group = make_kgid(current_user_ns(), arg);
		if (!gid_valid(group)) {
			ret = -EINVAL;
			break;
		}
		tun->group = group;
		do_notify = true;
		netif_info(tun, drv, tun->dev, "group set to %u\n",
			   from_kgid(&init_user_ns, tun->group));
		break;

	case TUNSETLINK:
		/* Only allow setting the type when the interface is down */
		if (tun->dev->flags & IFF_UP) {
			netif_info(tun, drv, tun->dev,
				   "Linktype set failed because interface is up\n");
			ret = -EBUSY;
		} else {
			ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
						       tun->dev);
			ret = notifier_to_errno(ret);
			if (ret) {
				netif_info(tun, drv, tun->dev,
					   "Refused to change device type\n");
				break;
			}
			tun->dev->type = (int) arg;
			tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
			netif_info(tun, drv, tun->dev, "linktype set to %d\n",
				   tun->dev->type);
			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
						 tun->dev);
		}
		break;

	case TUNSETDEBUG:
		tun->msg_enable = (u32)arg;
		break;

	case TUNSETOFFLOAD:
		ret = set_offload(tun, arg);
		break;

	case TUNSETTXFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = update_filter(&tun->txflt, (void __user *)arg);
		break;

	case SIOCGIFHWADDR:
		/* Get hw address */
		dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name);
		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case SIOCSIFHWADDR:
		/* Set hw address */
		ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
		break;

	case TUNGETSNDBUF:
		sndbuf = tfile->socket.sk->sk_sndbuf;
		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
			ret = -EFAULT;
		break;

	case TUNSETSNDBUF:
		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
			ret = -EFAULT;
			break;
		}
		if (sndbuf <= 0) {
			ret = -EINVAL;
			break;
		}

		tun->sndbuf = sndbuf;
		tun_set_sndbuf(tun);
		break;

	case TUNGETVNETHDRSZ:
		vnet_hdr_sz = tun->vnet_hdr_sz;
		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
			ret = -EFAULT;
		break;

	case TUNSETVNETHDRSZ:
		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
			ret = -EFAULT;
			break;
		}
		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
			ret = -EINVAL;
			break;
		}

		tun->vnet_hdr_sz = vnet_hdr_sz;
		break;
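	/* The two cases above size the virtio-net header that IFF_VNET_HDR
	 * prepends to every packet. A hedged userspace sketch (the fd is
	 * assumed to have been attached with IFF_VNET_HDR set):
	 *
	 *	#include <sys/ioctl.h>
	 *	#include <linux/if_tun.h>
	 *	#include <linux/virtio_net.h>
	 *
	 *	int set_vnet_hdr_sz(int tunfd)
	 *	{
	 *		int sz = sizeof(struct virtio_net_hdr);
	 *
	 *		return ioctl(tunfd, TUNSETVNETHDRSZ, &sz);
	 *	}
	 *
	 * Every subsequent read() then starts with sz bytes of header, and
	 * every write() must provide them.
	 */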
	case TUNGETVNETLE:
		le = !!(tun->flags & TUN_VNET_LE);
		if (put_user(le, (int __user *)argp))
			ret = -EFAULT;
		break;

	case TUNSETVNETLE:
		if (get_user(le, (int __user *)argp)) {
			ret = -EFAULT;
			break;
		}
		if (le)
			tun->flags |= TUN_VNET_LE;
		else
			tun->flags &= ~TUN_VNET_LE;
		break;

	case TUNGETVNETBE:
		ret = tun_get_vnet_be(tun, argp);
		break;

	case TUNSETVNETBE:
		ret = tun_set_vnet_be(tun, argp);
		break;

	case TUNATTACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
			break;

		ret = tun_attach_filter(tun);
		break;

	case TUNDETACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = 0;
		tun_detach_filter(tun, tun->numqueues);
		break;

	case TUNGETFILTER:
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
			break;
		ret = 0;
		break;

	case TUNSETSTEERINGEBPF:
		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
		break;

	case TUNSETFILTEREBPF:
		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
		break;

	case TUNSETCARRIER:
		ret = -EFAULT;
		if (copy_from_user(&carrier, argp, sizeof(carrier)))
			goto unlock;

		ret = tun_net_change_carrier(tun->dev, (bool)carrier);
		break;

	case TUNGETDEVNETNS:
		ret = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto unlock;
		ret = open_related_ns(&net->ns, get_net_ns);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	if (do_notify)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	if (tun)
		tun_put(tun);
	return ret;
}
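/* Putting the ioctl pieces together: a hedged sketch that creates a
 * persistent, user-owned device, the classic way to hand a tun device to
 * an unprivileged process. tun_alloc() is the helper sketched next to
 * tun_set_iff() above; uid 1000 is an assumption of the example. Note
 * that TUNSETOWNER and TUNSETPERSIST take plain values, not pointers.
 *
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	int make_persistent(void)
 *	{
 *		char name[IFNAMSIZ] = "tun0";
 *		int fd = tun_alloc(name, IFF_TUN | IFF_NO_PI);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (ioctl(fd, TUNSETOWNER, 1000) < 0 ||
 *		    ioctl(fd, TUNSETPERSIST, 1) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 *
 * After close() the device survives because IFF_PERSIST pinned it (and
 * the module) above; a later TUNSETPERSIST 0 tears it down.
 */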
static long tun_chr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
}

#ifdef CONFIG_COMPAT
static long tun_chr_compat_ioctl(struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case TUNSETIFF:
	case TUNGETIFF:
	case TUNSETTXFILTER:
	case TUNGETSNDBUF:
	case TUNSETSNDBUF:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
		arg = (unsigned long)compat_ptr(arg);
		break;
	default:
		arg = (compat_ulong_t)arg;
		break;
	}

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible though, we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */

static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_file *tfile = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &tfile->fasync);
	if (ret < 0)
		goto out;

	if (on) {
		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
		tfile->flags |= TUN_FASYNC;
	} else
		tfile->flags &= ~TUN_FASYNC;
	ret = 0;
out:
	return ret;
}

static int tun_chr_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tun_file *tfile;

	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto, 0);
	if (!tfile)
		return -ENOMEM;
	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
		sk_free(&tfile->sk);
		return -ENOMEM;
	}

	mutex_init(&tfile->napi_mutex);
	RCU_INIT_POINTER(tfile->tun, NULL);
	tfile->flags = 0;
	tfile->ifindex = 0;

	init_waitqueue_head(&tfile->socket.wq.wait);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid());

	tfile->sk.sk_write_space = tun_sock_write_space;
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	INIT_LIST_HEAD(&tfile->next);

	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);

	/* tun groks IOCB_NOWAIT just fine, mark it as such */
	file->f_mode |= FMODE_NOWAIT;
	return 0;
}

static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;

	tun_detach(tfile, true);

	return 0;
}

#ifdef CONFIG_PROC_FS
static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));

	rtnl_lock();
	tun = tun_get(tfile);
	if (tun)
		tun_get_iff(tun, &ifr);
	rtnl_unlock();

	if (tun)
		tun_put(tun);

	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
}
#endif

static const struct file_operations tun_fops = {
	.owner	= THIS_MODULE,
	.llseek = no_llseek,
	.read_iter  = tun_chr_read_iter,
	.write_iter = tun_chr_write_iter,
	.poll	= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open	= tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = tun_chr_show_fdinfo,
#endif
};

static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};

/* ethtool interface */

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	cmd->base.speed		= SPEED_10000;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.phy_address	= 0;
	cmd->base.autoneg	= AUTONEG_DISABLE;
}
static int tun_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
	return 0;
}

static int tun_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
	return 0;
}

static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		strscpy(info->bus_info, "tun", sizeof(info->bus_info));
		break;
	case IFF_TAP:
		strscpy(info->bus_info, "tap", sizeof(info->bus_info));
		break;
	}
}

static u32 tun_get_msglevel(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	return tun->msg_enable;
}

static void tun_set_msglevel(struct net_device *dev, u32 value)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->msg_enable = value;
}

static int tun_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);

	ec->rx_max_coalesced_frames = tun->rx_batched;

	return 0;
}

static int tun_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
		tun->rx_batched = NAPI_POLL_WEIGHT;
	else
		tun->rx_batched = ec->rx_max_coalesced_frames;

	return 0;
}

static const struct ethtool_ops tun_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
	.get_drvinfo	= tun_get_drvinfo,
	.get_msglevel	= tun_get_msglevel,
	.set_msglevel	= tun_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_coalesce   = tun_get_coalesce,
	.set_coalesce   = tun_set_coalesce,
	.get_link_ksettings = tun_get_link_ksettings,
	.set_link_ksettings = tun_set_link_ksettings,
};

static int tun_queue_resize(struct tun_struct *tun)
{
	struct net_device *dev = tun->dev;
	struct tun_file *tfile;
	struct ptr_ring **rings;
	int n = tun->numqueues + tun->numdisabled;
	int ret, i;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		rings[i] = &tfile->tx_ring;
	}
	list_for_each_entry(tfile, &tun->disabled, next)
		rings[i++] = &tfile->tx_ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       tun_ptr_free);

	kfree(rings);
	return ret;
}

static int tun_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	if (dev->rtnl_link_ops != &tun_link_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		if (tun_queue_resize(tun))
			return NOTIFY_BAD;
		break;
	case NETDEV_UP:
		for (i = 0; i < tun->numqueues; i++) {
			struct tun_file *tfile;

			tfile = rtnl_dereference(tun->tfiles[i]);
			tfile->socket.sk->sk_write_space(tfile->socket.sk);
		}
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block tun_notifier_block __read_mostly = {
	.notifier_call	= tun_device_event,
};

static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}

	ret = register_netdevice_notifier(&tun_notifier_block);
	if (ret) {
		pr_err("Can't register netdevice notifier\n");
		goto err_notifier;
	}

	return 0;

err_notifier:
	misc_deregister(&tun_miscdev);
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}

static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
	unregister_netdevice_notifier(&tun_notifier_block);
}

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use.
 */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);
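/* This export is how vhost-net splices itself into the data path: it
 * resolves a tun fd passed in from userspace to the embedded socket and
 * then drives tun_sendmsg()/tun_recvmsg() directly. A hedged in-kernel
 * sketch (the fget()/fput() bracketing is the caller's duty noted in
 * the comment above):
 *
 *	struct file *f = fget(tun_fd);
 *	struct socket *sock = tun_get_socket(f);
 *
 *	if (IS_ERR(sock)) {
 *		fput(f);
 *		return PTR_ERR(sock);
 *	}
 *	... sock_sendmsg(sock, &msg) / sock_recvmsg(sock, &msg, flags) ...
 */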
struct ptr_ring *tun_get_tx_ring(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->tx_ring;
}
EXPORT_SYMBOL_GPL(tun_get_tx_ring);

module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");