1 /* 2 * TUN - Universal TUN/TAP device driver. 3 * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com> 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $ 16 */ 17 18 /* 19 * Changes: 20 * 21 * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14 22 * Add TUNSETLINK ioctl to set the link encapsulation 23 * 24 * Mark Smith <markzzzsmith@yahoo.com.au> 25 * Use eth_random_addr() for tap MAC address. 26 * 27 * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20 28 * Fixes in packet dropping, queue length setting and queue wakeup. 29 * Increased default tx queue length. 30 * Added ethtool API. 31 * Minor cleanups 32 * 33 * Daniel Podlejski <underley@underley.eu.org> 34 * Modifications for 2.3.99-pre5 kernel. 35 */ 36 37 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 38 39 #define DRV_NAME "tun" 40 #define DRV_VERSION "1.6" 41 #define DRV_DESCRIPTION "Universal TUN/TAP device driver" 42 #define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>" 43 44 #include <linux/module.h> 45 #include <linux/errno.h> 46 #include <linux/kernel.h> 47 #include <linux/sched/signal.h> 48 #include <linux/major.h> 49 #include <linux/slab.h> 50 #include <linux/poll.h> 51 #include <linux/fcntl.h> 52 #include <linux/init.h> 53 #include <linux/skbuff.h> 54 #include <linux/netdevice.h> 55 #include <linux/etherdevice.h> 56 #include <linux/miscdevice.h> 57 #include <linux/ethtool.h> 58 #include <linux/rtnetlink.h> 59 #include <linux/compat.h> 60 #include <linux/if.h> 61 #include <linux/if_arp.h> 62 #include <linux/if_ether.h> 63 #include <linux/if_tun.h> 64 #include <linux/if_vlan.h> 65 #include <linux/crc32.h> 66 #include <linux/nsproxy.h> 67 #include <linux/virtio_net.h> 68 #include <linux/rcupdate.h> 69 #include <net/net_namespace.h> 70 #include <net/netns/generic.h> 71 #include <net/rtnetlink.h> 72 #include <net/sock.h> 73 #include <linux/seq_file.h> 74 #include <linux/uio.h> 75 #include <linux/skb_array.h> 76 #include <linux/bpf.h> 77 #include <linux/bpf_trace.h> 78 #include <linux/mutex.h> 79 80 #include <linux/uaccess.h> 81 82 /* Uncomment to enable debugging */ 83 /* #define TUN_DEBUG 1 */ 84 85 #ifdef TUN_DEBUG 86 static int debug; 87 88 #define tun_debug(level, tun, fmt, args...) \ 89 do { \ 90 if (tun->debug) \ 91 netdev_printk(level, tun->dev, fmt, ##args); \ 92 } while (0) 93 #define DBG1(level, fmt, args...) \ 94 do { \ 95 if (debug == 2) \ 96 printk(level fmt, ##args); \ 97 } while (0) 98 #else 99 #define tun_debug(level, tun, fmt, args...) \ 100 do { \ 101 if (0) \ 102 netdev_printk(level, tun->dev, fmt, ##args); \ 103 } while (0) 104 #define DBG1(level, fmt, args...) \ 105 do { \ 106 if (0) \ 107 printk(level fmt, ##args); \ 108 } while (0) 109 #endif 110 111 #define TUN_HEADROOM 256 112 #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) 113 114 /* TUN device flags */ 115 116 /* IFF_ATTACH_QUEUE is never stored in device flags, 117 * overload it to mean fasync when stored there. 118 */ 119 #define TUN_FASYNC IFF_ATTACH_QUEUE 120 /* High bits in flags field are unused. */ 121 #define TUN_VNET_LE 0x80000000 122 #define TUN_VNET_BE 0x40000000 123 124 #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \ 125 IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS) 126 127 #define GOODCOPY_LEN 128 128 129 #define FLT_EXACT_COUNT 8 130 struct tap_filter { 131 unsigned int count; /* Number of addrs. Zero means disabled */ 132 u32 mask[2]; /* Mask of the hashed addrs */ 133 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; 134 }; 135 136 /* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal 137 * to max number of VCPUs in guest. */ 138 #define MAX_TAP_QUEUES 256 139 #define MAX_TAP_FLOWS 4096 140 141 #define TUN_FLOW_EXPIRE (3 * HZ) 142 143 struct tun_pcpu_stats { 144 u64 rx_packets; 145 u64 rx_bytes; 146 u64 tx_packets; 147 u64 tx_bytes; 148 struct u64_stats_sync syncp; 149 u32 rx_dropped; 150 u32 tx_dropped; 151 u32 rx_frame_errors; 152 }; 153 154 /* A tun_file connects an open character device to a tuntap netdevice. It 155 * also contains all socket related structures (except sock_fprog and tap_filter) 156 * to serve as one transmit queue for tuntap device. The sock_fprog and 157 * tap_filter were kept in tun_struct since they were used for filtering for the 158 * netdevice not for a specific queue (at least I didn't see the requirement for 159 * this). 160 * 161 * RCU usage: 162 * The tun_file and tun_struct are loosely coupled, the pointer from one to the 163 * other can only be read while rcu_read_lock or rtnl_lock is held. 164 */ 165 struct tun_file { 166 struct sock sk; 167 struct socket socket; 168 struct socket_wq wq; 169 struct tun_struct __rcu *tun; 170 struct fasync_struct *fasync; 171 /* only used for fasnyc */ 172 unsigned int flags; 173 union { 174 u16 queue_index; 175 unsigned int ifindex; 176 }; 177 struct napi_struct napi; 178 struct mutex napi_mutex; /* Protects access to the above napi */ 179 struct list_head next; 180 struct tun_struct *detached; 181 struct skb_array tx_array; 182 }; 183 184 struct tun_flow_entry { 185 struct hlist_node hash_link; 186 struct rcu_head rcu; 187 struct tun_struct *tun; 188 189 u32 rxhash; 190 u32 rps_rxhash; 191 int queue_index; 192 unsigned long updated; 193 }; 194 195 #define TUN_NUM_FLOW_ENTRIES 1024 196 197 /* Since the socket were moved to tun_file, to preserve the behavior of persist 198 * device, socket filter, sndbuf and vnet header size were restore when the 199 * file were attached to a persist device. 200 */ 201 struct tun_struct { 202 struct tun_file __rcu *tfiles[MAX_TAP_QUEUES]; 203 unsigned int numqueues; 204 unsigned int flags; 205 kuid_t owner; 206 kgid_t group; 207 208 struct net_device *dev; 209 netdev_features_t set_features; 210 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ 211 NETIF_F_TSO6) 212 213 int align; 214 int vnet_hdr_sz; 215 int sndbuf; 216 struct tap_filter txflt; 217 struct sock_fprog fprog; 218 /* protected by rtnl lock */ 219 bool filter_attached; 220 #ifdef TUN_DEBUG 221 int debug; 222 #endif 223 spinlock_t lock; 224 struct hlist_head flows[TUN_NUM_FLOW_ENTRIES]; 225 struct timer_list flow_gc_timer; 226 unsigned long ageing_time; 227 unsigned int numdisabled; 228 struct list_head disabled; 229 void *security; 230 u32 flow_count; 231 u32 rx_batched; 232 struct tun_pcpu_stats __percpu *pcpu_stats; 233 struct bpf_prog __rcu *xdp_prog; 234 }; 235 236 static int tun_napi_receive(struct napi_struct *napi, int budget) 237 { 238 struct tun_file *tfile = container_of(napi, struct tun_file, napi); 239 struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 240 struct sk_buff_head process_queue; 241 struct sk_buff *skb; 242 int received = 0; 243 244 __skb_queue_head_init(&process_queue); 245 246 spin_lock(&queue->lock); 247 skb_queue_splice_tail_init(queue, &process_queue); 248 spin_unlock(&queue->lock); 249 250 while (received < budget && (skb = __skb_dequeue(&process_queue))) { 251 napi_gro_receive(napi, skb); 252 ++received; 253 } 254 255 if (!skb_queue_empty(&process_queue)) { 256 spin_lock(&queue->lock); 257 skb_queue_splice(&process_queue, queue); 258 spin_unlock(&queue->lock); 259 } 260 261 return received; 262 } 263 264 static int tun_napi_poll(struct napi_struct *napi, int budget) 265 { 266 unsigned int received; 267 268 received = tun_napi_receive(napi, budget); 269 270 if (received < budget) 271 napi_complete_done(napi, received); 272 273 return received; 274 } 275 276 static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, 277 bool napi_en) 278 { 279 if (napi_en) { 280 netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll, 281 NAPI_POLL_WEIGHT); 282 napi_enable(&tfile->napi); 283 mutex_init(&tfile->napi_mutex); 284 } 285 } 286 287 static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile) 288 { 289 if (tun->flags & IFF_NAPI) 290 napi_disable(&tfile->napi); 291 } 292 293 static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile) 294 { 295 if (tun->flags & IFF_NAPI) 296 netif_napi_del(&tfile->napi); 297 } 298 299 static bool tun_napi_frags_enabled(const struct tun_struct *tun) 300 { 301 return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS; 302 } 303 304 #ifdef CONFIG_TUN_VNET_CROSS_LE 305 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun) 306 { 307 return tun->flags & TUN_VNET_BE ? false : 308 virtio_legacy_is_little_endian(); 309 } 310 311 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp) 312 { 313 int be = !!(tun->flags & TUN_VNET_BE); 314 315 if (put_user(be, argp)) 316 return -EFAULT; 317 318 return 0; 319 } 320 321 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp) 322 { 323 int be; 324 325 if (get_user(be, argp)) 326 return -EFAULT; 327 328 if (be) 329 tun->flags |= TUN_VNET_BE; 330 else 331 tun->flags &= ~TUN_VNET_BE; 332 333 return 0; 334 } 335 #else 336 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun) 337 { 338 return virtio_legacy_is_little_endian(); 339 } 340 341 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp) 342 { 343 return -EINVAL; 344 } 345 346 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp) 347 { 348 return -EINVAL; 349 } 350 #endif /* CONFIG_TUN_VNET_CROSS_LE */ 351 352 static inline bool tun_is_little_endian(struct tun_struct *tun) 353 { 354 return tun->flags & TUN_VNET_LE || 355 tun_legacy_is_little_endian(tun); 356 } 357 358 static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val) 359 { 360 return __virtio16_to_cpu(tun_is_little_endian(tun), val); 361 } 362 363 static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val) 364 { 365 return __cpu_to_virtio16(tun_is_little_endian(tun), val); 366 } 367 368 static inline u32 tun_hashfn(u32 rxhash) 369 { 370 return rxhash & 0x3ff; 371 } 372 373 static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash) 374 { 375 struct tun_flow_entry *e; 376 377 hlist_for_each_entry_rcu(e, head, hash_link) { 378 if (e->rxhash == rxhash) 379 return e; 380 } 381 return NULL; 382 } 383 384 static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun, 385 struct hlist_head *head, 386 u32 rxhash, u16 queue_index) 387 { 388 struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC); 389 390 if (e) { 391 tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n", 392 rxhash, queue_index); 393 e->updated = jiffies; 394 e->rxhash = rxhash; 395 e->rps_rxhash = 0; 396 e->queue_index = queue_index; 397 e->tun = tun; 398 hlist_add_head_rcu(&e->hash_link, head); 399 ++tun->flow_count; 400 } 401 return e; 402 } 403 404 static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e) 405 { 406 tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n", 407 e->rxhash, e->queue_index); 408 hlist_del_rcu(&e->hash_link); 409 kfree_rcu(e, rcu); 410 --tun->flow_count; 411 } 412 413 static void tun_flow_flush(struct tun_struct *tun) 414 { 415 int i; 416 417 spin_lock_bh(&tun->lock); 418 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { 419 struct tun_flow_entry *e; 420 struct hlist_node *n; 421 422 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) 423 tun_flow_delete(tun, e); 424 } 425 spin_unlock_bh(&tun->lock); 426 } 427 428 static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index) 429 { 430 int i; 431 432 spin_lock_bh(&tun->lock); 433 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { 434 struct tun_flow_entry *e; 435 struct hlist_node *n; 436 437 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) { 438 if (e->queue_index == queue_index) 439 tun_flow_delete(tun, e); 440 } 441 } 442 spin_unlock_bh(&tun->lock); 443 } 444 445 static void tun_flow_cleanup(unsigned long data) 446 { 447 struct tun_struct *tun = (struct tun_struct *)data; 448 unsigned long delay = tun->ageing_time; 449 unsigned long next_timer = jiffies + delay; 450 unsigned long count = 0; 451 int i; 452 453 tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n"); 454 455 spin_lock_bh(&tun->lock); 456 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { 457 struct tun_flow_entry *e; 458 struct hlist_node *n; 459 460 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) { 461 unsigned long this_timer; 462 count++; 463 this_timer = e->updated + delay; 464 if (time_before_eq(this_timer, jiffies)) 465 tun_flow_delete(tun, e); 466 else if (time_before(this_timer, next_timer)) 467 next_timer = this_timer; 468 } 469 } 470 471 if (count) 472 mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer)); 473 spin_unlock_bh(&tun->lock); 474 } 475 476 static void tun_flow_update(struct tun_struct *tun, u32 rxhash, 477 struct tun_file *tfile) 478 { 479 struct hlist_head *head; 480 struct tun_flow_entry *e; 481 unsigned long delay = tun->ageing_time; 482 u16 queue_index = tfile->queue_index; 483 484 if (!rxhash) 485 return; 486 else 487 head = &tun->flows[tun_hashfn(rxhash)]; 488 489 rcu_read_lock(); 490 491 /* We may get a very small possibility of OOO during switching, not 492 * worth to optimize.*/ 493 if (tun->numqueues == 1 || tfile->detached) 494 goto unlock; 495 496 e = tun_flow_find(head, rxhash); 497 if (likely(e)) { 498 /* TODO: keep queueing to old queue until it's empty? */ 499 e->queue_index = queue_index; 500 e->updated = jiffies; 501 sock_rps_record_flow_hash(e->rps_rxhash); 502 } else { 503 spin_lock_bh(&tun->lock); 504 if (!tun_flow_find(head, rxhash) && 505 tun->flow_count < MAX_TAP_FLOWS) 506 tun_flow_create(tun, head, rxhash, queue_index); 507 508 if (!timer_pending(&tun->flow_gc_timer)) 509 mod_timer(&tun->flow_gc_timer, 510 round_jiffies_up(jiffies + delay)); 511 spin_unlock_bh(&tun->lock); 512 } 513 514 unlock: 515 rcu_read_unlock(); 516 } 517 518 /** 519 * Save the hash received in the stack receive path and update the 520 * flow_hash table accordingly. 521 */ 522 static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash) 523 { 524 if (unlikely(e->rps_rxhash != hash)) 525 e->rps_rxhash = hash; 526 } 527 528 /* We try to identify a flow through its rxhash first. The reason that 529 * we do not check rxq no. is because some cards(e.g 82599), chooses 530 * the rxq based on the txq where the last packet of the flow comes. As 531 * the userspace application move between processors, we may get a 532 * different rxq no. here. If we could not get rxhash, then we would 533 * hope the rxq no. may help here. 534 */ 535 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, 536 void *accel_priv, select_queue_fallback_t fallback) 537 { 538 struct tun_struct *tun = netdev_priv(dev); 539 struct tun_flow_entry *e; 540 u32 txq = 0; 541 u32 numqueues = 0; 542 543 rcu_read_lock(); 544 numqueues = ACCESS_ONCE(tun->numqueues); 545 546 txq = __skb_get_hash_symmetric(skb); 547 if (txq) { 548 e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); 549 if (e) { 550 tun_flow_save_rps_rxhash(e, txq); 551 txq = e->queue_index; 552 } else 553 /* use multiply and shift instead of expensive divide */ 554 txq = ((u64)txq * numqueues) >> 32; 555 } else if (likely(skb_rx_queue_recorded(skb))) { 556 txq = skb_get_rx_queue(skb); 557 while (unlikely(txq >= numqueues)) 558 txq -= numqueues; 559 } 560 561 rcu_read_unlock(); 562 return txq; 563 } 564 565 static inline bool tun_not_capable(struct tun_struct *tun) 566 { 567 const struct cred *cred = current_cred(); 568 struct net *net = dev_net(tun->dev); 569 570 return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) || 571 (gid_valid(tun->group) && !in_egroup_p(tun->group))) && 572 !ns_capable(net->user_ns, CAP_NET_ADMIN); 573 } 574 575 static void tun_set_real_num_queues(struct tun_struct *tun) 576 { 577 netif_set_real_num_tx_queues(tun->dev, tun->numqueues); 578 netif_set_real_num_rx_queues(tun->dev, tun->numqueues); 579 } 580 581 static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile) 582 { 583 tfile->detached = tun; 584 list_add_tail(&tfile->next, &tun->disabled); 585 ++tun->numdisabled; 586 } 587 588 static struct tun_struct *tun_enable_queue(struct tun_file *tfile) 589 { 590 struct tun_struct *tun = tfile->detached; 591 592 tfile->detached = NULL; 593 list_del_init(&tfile->next); 594 --tun->numdisabled; 595 return tun; 596 } 597 598 static void tun_queue_purge(struct tun_file *tfile) 599 { 600 struct sk_buff *skb; 601 602 while ((skb = skb_array_consume(&tfile->tx_array)) != NULL) 603 kfree_skb(skb); 604 605 skb_queue_purge(&tfile->sk.sk_write_queue); 606 skb_queue_purge(&tfile->sk.sk_error_queue); 607 } 608 609 static void __tun_detach(struct tun_file *tfile, bool clean) 610 { 611 struct tun_file *ntfile; 612 struct tun_struct *tun; 613 614 tun = rtnl_dereference(tfile->tun); 615 616 if (tun && clean) { 617 tun_napi_disable(tun, tfile); 618 tun_napi_del(tun, tfile); 619 } 620 621 if (tun && !tfile->detached) { 622 u16 index = tfile->queue_index; 623 BUG_ON(index >= tun->numqueues); 624 625 rcu_assign_pointer(tun->tfiles[index], 626 tun->tfiles[tun->numqueues - 1]); 627 ntfile = rtnl_dereference(tun->tfiles[index]); 628 ntfile->queue_index = index; 629 630 --tun->numqueues; 631 if (clean) { 632 RCU_INIT_POINTER(tfile->tun, NULL); 633 sock_put(&tfile->sk); 634 } else 635 tun_disable_queue(tun, tfile); 636 637 synchronize_net(); 638 tun_flow_delete_by_queue(tun, tun->numqueues + 1); 639 /* Drop read queue */ 640 tun_queue_purge(tfile); 641 tun_set_real_num_queues(tun); 642 } else if (tfile->detached && clean) { 643 tun = tun_enable_queue(tfile); 644 sock_put(&tfile->sk); 645 } 646 647 if (clean) { 648 if (tun && tun->numqueues == 0 && tun->numdisabled == 0) { 649 netif_carrier_off(tun->dev); 650 651 if (!(tun->flags & IFF_PERSIST) && 652 tun->dev->reg_state == NETREG_REGISTERED) 653 unregister_netdevice(tun->dev); 654 } 655 if (tun) 656 skb_array_cleanup(&tfile->tx_array); 657 sock_put(&tfile->sk); 658 } 659 } 660 661 static void tun_detach(struct tun_file *tfile, bool clean) 662 { 663 rtnl_lock(); 664 __tun_detach(tfile, clean); 665 rtnl_unlock(); 666 } 667 668 static void tun_detach_all(struct net_device *dev) 669 { 670 struct tun_struct *tun = netdev_priv(dev); 671 struct bpf_prog *xdp_prog = rtnl_dereference(tun->xdp_prog); 672 struct tun_file *tfile, *tmp; 673 int i, n = tun->numqueues; 674 675 for (i = 0; i < n; i++) { 676 tfile = rtnl_dereference(tun->tfiles[i]); 677 BUG_ON(!tfile); 678 tun_napi_disable(tun, tfile); 679 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; 680 tfile->socket.sk->sk_data_ready(tfile->socket.sk); 681 RCU_INIT_POINTER(tfile->tun, NULL); 682 --tun->numqueues; 683 } 684 list_for_each_entry(tfile, &tun->disabled, next) { 685 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; 686 tfile->socket.sk->sk_data_ready(tfile->socket.sk); 687 RCU_INIT_POINTER(tfile->tun, NULL); 688 } 689 BUG_ON(tun->numqueues != 0); 690 691 synchronize_net(); 692 for (i = 0; i < n; i++) { 693 tfile = rtnl_dereference(tun->tfiles[i]); 694 tun_napi_del(tun, tfile); 695 /* Drop read queue */ 696 tun_queue_purge(tfile); 697 sock_put(&tfile->sk); 698 } 699 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { 700 tun_enable_queue(tfile); 701 tun_queue_purge(tfile); 702 sock_put(&tfile->sk); 703 } 704 BUG_ON(tun->numdisabled != 0); 705 706 if (xdp_prog) 707 bpf_prog_put(xdp_prog); 708 709 if (tun->flags & IFF_PERSIST) 710 module_put(THIS_MODULE); 711 } 712 713 static int tun_attach(struct tun_struct *tun, struct file *file, 714 bool skip_filter, bool napi) 715 { 716 struct tun_file *tfile = file->private_data; 717 struct net_device *dev = tun->dev; 718 int err; 719 720 err = security_tun_dev_attach(tfile->socket.sk, tun->security); 721 if (err < 0) 722 goto out; 723 724 err = -EINVAL; 725 if (rtnl_dereference(tfile->tun) && !tfile->detached) 726 goto out; 727 728 err = -EBUSY; 729 if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1) 730 goto out; 731 732 err = -E2BIG; 733 if (!tfile->detached && 734 tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES) 735 goto out; 736 737 err = 0; 738 739 /* Re-attach the filter to persist device */ 740 if (!skip_filter && (tun->filter_attached == true)) { 741 lock_sock(tfile->socket.sk); 742 err = sk_attach_filter(&tun->fprog, tfile->socket.sk); 743 release_sock(tfile->socket.sk); 744 if (!err) 745 goto out; 746 } 747 748 if (!tfile->detached && 749 skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) { 750 err = -ENOMEM; 751 goto out; 752 } 753 754 tfile->queue_index = tun->numqueues; 755 tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN; 756 rcu_assign_pointer(tfile->tun, tun); 757 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); 758 tun->numqueues++; 759 760 if (tfile->detached) { 761 tun_enable_queue(tfile); 762 } else { 763 sock_hold(&tfile->sk); 764 tun_napi_init(tun, tfile, napi); 765 } 766 767 tun_set_real_num_queues(tun); 768 769 /* device is allowed to go away first, so no need to hold extra 770 * refcnt. 771 */ 772 773 out: 774 return err; 775 } 776 777 static struct tun_struct *tun_get(struct tun_file *tfile) 778 { 779 struct tun_struct *tun; 780 781 rcu_read_lock(); 782 tun = rcu_dereference(tfile->tun); 783 if (tun) 784 dev_hold(tun->dev); 785 rcu_read_unlock(); 786 787 return tun; 788 } 789 790 static void tun_put(struct tun_struct *tun) 791 { 792 dev_put(tun->dev); 793 } 794 795 /* TAP filtering */ 796 static void addr_hash_set(u32 *mask, const u8 *addr) 797 { 798 int n = ether_crc(ETH_ALEN, addr) >> 26; 799 mask[n >> 5] |= (1 << (n & 31)); 800 } 801 802 static unsigned int addr_hash_test(const u32 *mask, const u8 *addr) 803 { 804 int n = ether_crc(ETH_ALEN, addr) >> 26; 805 return mask[n >> 5] & (1 << (n & 31)); 806 } 807 808 static int update_filter(struct tap_filter *filter, void __user *arg) 809 { 810 struct { u8 u[ETH_ALEN]; } *addr; 811 struct tun_filter uf; 812 int err, alen, n, nexact; 813 814 if (copy_from_user(&uf, arg, sizeof(uf))) 815 return -EFAULT; 816 817 if (!uf.count) { 818 /* Disabled */ 819 filter->count = 0; 820 return 0; 821 } 822 823 alen = ETH_ALEN * uf.count; 824 addr = memdup_user(arg + sizeof(uf), alen); 825 if (IS_ERR(addr)) 826 return PTR_ERR(addr); 827 828 /* The filter is updated without holding any locks. Which is 829 * perfectly safe. We disable it first and in the worst 830 * case we'll accept a few undesired packets. */ 831 filter->count = 0; 832 wmb(); 833 834 /* Use first set of addresses as an exact filter */ 835 for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++) 836 memcpy(filter->addr[n], addr[n].u, ETH_ALEN); 837 838 nexact = n; 839 840 /* Remaining multicast addresses are hashed, 841 * unicast will leave the filter disabled. */ 842 memset(filter->mask, 0, sizeof(filter->mask)); 843 for (; n < uf.count; n++) { 844 if (!is_multicast_ether_addr(addr[n].u)) { 845 err = 0; /* no filter */ 846 goto free_addr; 847 } 848 addr_hash_set(filter->mask, addr[n].u); 849 } 850 851 /* For ALLMULTI just set the mask to all ones. 852 * This overrides the mask populated above. */ 853 if ((uf.flags & TUN_FLT_ALLMULTI)) 854 memset(filter->mask, ~0, sizeof(filter->mask)); 855 856 /* Now enable the filter */ 857 wmb(); 858 filter->count = nexact; 859 860 /* Return the number of exact filters */ 861 err = nexact; 862 free_addr: 863 kfree(addr); 864 return err; 865 } 866 867 /* Returns: 0 - drop, !=0 - accept */ 868 static int run_filter(struct tap_filter *filter, const struct sk_buff *skb) 869 { 870 /* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect 871 * at this point. */ 872 struct ethhdr *eh = (struct ethhdr *) skb->data; 873 int i; 874 875 /* Exact match */ 876 for (i = 0; i < filter->count; i++) 877 if (ether_addr_equal(eh->h_dest, filter->addr[i])) 878 return 1; 879 880 /* Inexact match (multicast only) */ 881 if (is_multicast_ether_addr(eh->h_dest)) 882 return addr_hash_test(filter->mask, eh->h_dest); 883 884 return 0; 885 } 886 887 /* 888 * Checks whether the packet is accepted or not. 889 * Returns: 0 - drop, !=0 - accept 890 */ 891 static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) 892 { 893 if (!filter->count) 894 return 1; 895 896 return run_filter(filter, skb); 897 } 898 899 /* Network device part of the driver */ 900 901 static const struct ethtool_ops tun_ethtool_ops; 902 903 /* Net device detach from fd. */ 904 static void tun_net_uninit(struct net_device *dev) 905 { 906 tun_detach_all(dev); 907 } 908 909 /* Net device open. */ 910 static int tun_net_open(struct net_device *dev) 911 { 912 struct tun_struct *tun = netdev_priv(dev); 913 int i; 914 915 netif_tx_start_all_queues(dev); 916 917 for (i = 0; i < tun->numqueues; i++) { 918 struct tun_file *tfile; 919 920 tfile = rtnl_dereference(tun->tfiles[i]); 921 tfile->socket.sk->sk_write_space(tfile->socket.sk); 922 } 923 924 return 0; 925 } 926 927 /* Net device close. */ 928 static int tun_net_close(struct net_device *dev) 929 { 930 netif_tx_stop_all_queues(dev); 931 return 0; 932 } 933 934 /* Net device start xmit */ 935 static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) 936 { 937 struct tun_struct *tun = netdev_priv(dev); 938 int txq = skb->queue_mapping; 939 struct tun_file *tfile; 940 u32 numqueues = 0; 941 942 rcu_read_lock(); 943 tfile = rcu_dereference(tun->tfiles[txq]); 944 numqueues = ACCESS_ONCE(tun->numqueues); 945 946 /* Drop packet if interface is not attached */ 947 if (txq >= numqueues) 948 goto drop; 949 950 #ifdef CONFIG_RPS 951 if (numqueues == 1 && static_key_false(&rps_needed)) { 952 /* Select queue was not called for the skbuff, so we extract the 953 * RPS hash and save it into the flow_table here. 954 */ 955 __u32 rxhash; 956 957 rxhash = __skb_get_hash_symmetric(skb); 958 if (rxhash) { 959 struct tun_flow_entry *e; 960 e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], 961 rxhash); 962 if (e) 963 tun_flow_save_rps_rxhash(e, rxhash); 964 } 965 } 966 #endif 967 968 tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len); 969 970 BUG_ON(!tfile); 971 972 /* Drop if the filter does not like it. 973 * This is a noop if the filter is disabled. 974 * Filter can be enabled only for the TAP devices. */ 975 if (!check_filter(&tun->txflt, skb)) 976 goto drop; 977 978 if (tfile->socket.sk->sk_filter && 979 sk_filter(tfile->socket.sk, skb)) 980 goto drop; 981 982 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) 983 goto drop; 984 985 skb_tx_timestamp(skb); 986 987 /* Orphan the skb - required as we might hang on to it 988 * for indefinite time. 989 */ 990 skb_orphan(skb); 991 992 nf_reset(skb); 993 994 if (skb_array_produce(&tfile->tx_array, skb)) 995 goto drop; 996 997 /* Notify and wake up reader process */ 998 if (tfile->flags & TUN_FASYNC) 999 kill_fasync(&tfile->fasync, SIGIO, POLL_IN); 1000 tfile->socket.sk->sk_data_ready(tfile->socket.sk); 1001 1002 rcu_read_unlock(); 1003 return NETDEV_TX_OK; 1004 1005 drop: 1006 this_cpu_inc(tun->pcpu_stats->tx_dropped); 1007 skb_tx_error(skb); 1008 kfree_skb(skb); 1009 rcu_read_unlock(); 1010 return NET_XMIT_DROP; 1011 } 1012 1013 static void tun_net_mclist(struct net_device *dev) 1014 { 1015 /* 1016 * This callback is supposed to deal with mc filter in 1017 * _rx_ path and has nothing to do with the _tx_ path. 1018 * In rx path we always accept everything userspace gives us. 1019 */ 1020 } 1021 1022 static netdev_features_t tun_net_fix_features(struct net_device *dev, 1023 netdev_features_t features) 1024 { 1025 struct tun_struct *tun = netdev_priv(dev); 1026 1027 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); 1028 } 1029 #ifdef CONFIG_NET_POLL_CONTROLLER 1030 static void tun_poll_controller(struct net_device *dev) 1031 { 1032 /* 1033 * Tun only receives frames when: 1034 * 1) the char device endpoint gets data from user space 1035 * 2) the tun socket gets a sendmsg call from user space 1036 * If NAPI is not enabled, since both of those are synchronous 1037 * operations, we are guaranteed never to have pending data when we poll 1038 * for it so there is nothing to do here but return. 1039 * We need this though so netpoll recognizes us as an interface that 1040 * supports polling, which enables bridge devices in virt setups to 1041 * still use netconsole 1042 * If NAPI is enabled, however, we need to schedule polling for all 1043 * queues unless we are using napi_gro_frags(), which we call in 1044 * process context and not in NAPI context. 1045 */ 1046 struct tun_struct *tun = netdev_priv(dev); 1047 1048 if (tun->flags & IFF_NAPI) { 1049 struct tun_file *tfile; 1050 int i; 1051 1052 if (tun_napi_frags_enabled(tun)) 1053 return; 1054 1055 rcu_read_lock(); 1056 for (i = 0; i < tun->numqueues; i++) { 1057 tfile = rcu_dereference(tun->tfiles[i]); 1058 napi_schedule(&tfile->napi); 1059 } 1060 rcu_read_unlock(); 1061 } 1062 return; 1063 } 1064 #endif 1065 1066 static void tun_set_headroom(struct net_device *dev, int new_hr) 1067 { 1068 struct tun_struct *tun = netdev_priv(dev); 1069 1070 if (new_hr < NET_SKB_PAD) 1071 new_hr = NET_SKB_PAD; 1072 1073 tun->align = new_hr; 1074 } 1075 1076 static void 1077 tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 1078 { 1079 u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0; 1080 struct tun_struct *tun = netdev_priv(dev); 1081 struct tun_pcpu_stats *p; 1082 int i; 1083 1084 for_each_possible_cpu(i) { 1085 u64 rxpackets, rxbytes, txpackets, txbytes; 1086 unsigned int start; 1087 1088 p = per_cpu_ptr(tun->pcpu_stats, i); 1089 do { 1090 start = u64_stats_fetch_begin(&p->syncp); 1091 rxpackets = p->rx_packets; 1092 rxbytes = p->rx_bytes; 1093 txpackets = p->tx_packets; 1094 txbytes = p->tx_bytes; 1095 } while (u64_stats_fetch_retry(&p->syncp, start)); 1096 1097 stats->rx_packets += rxpackets; 1098 stats->rx_bytes += rxbytes; 1099 stats->tx_packets += txpackets; 1100 stats->tx_bytes += txbytes; 1101 1102 /* u32 counters */ 1103 rx_dropped += p->rx_dropped; 1104 rx_frame_errors += p->rx_frame_errors; 1105 tx_dropped += p->tx_dropped; 1106 } 1107 stats->rx_dropped = rx_dropped; 1108 stats->rx_frame_errors = rx_frame_errors; 1109 stats->tx_dropped = tx_dropped; 1110 } 1111 1112 static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, 1113 struct netlink_ext_ack *extack) 1114 { 1115 struct tun_struct *tun = netdev_priv(dev); 1116 struct bpf_prog *old_prog; 1117 1118 old_prog = rtnl_dereference(tun->xdp_prog); 1119 rcu_assign_pointer(tun->xdp_prog, prog); 1120 if (old_prog) 1121 bpf_prog_put(old_prog); 1122 1123 return 0; 1124 } 1125 1126 static u32 tun_xdp_query(struct net_device *dev) 1127 { 1128 struct tun_struct *tun = netdev_priv(dev); 1129 const struct bpf_prog *xdp_prog; 1130 1131 xdp_prog = rtnl_dereference(tun->xdp_prog); 1132 if (xdp_prog) 1133 return xdp_prog->aux->id; 1134 1135 return 0; 1136 } 1137 1138 static int tun_xdp(struct net_device *dev, struct netdev_xdp *xdp) 1139 { 1140 switch (xdp->command) { 1141 case XDP_SETUP_PROG: 1142 return tun_xdp_set(dev, xdp->prog, xdp->extack); 1143 case XDP_QUERY_PROG: 1144 xdp->prog_id = tun_xdp_query(dev); 1145 xdp->prog_attached = !!xdp->prog_id; 1146 return 0; 1147 default: 1148 return -EINVAL; 1149 } 1150 } 1151 1152 static const struct net_device_ops tun_netdev_ops = { 1153 .ndo_uninit = tun_net_uninit, 1154 .ndo_open = tun_net_open, 1155 .ndo_stop = tun_net_close, 1156 .ndo_start_xmit = tun_net_xmit, 1157 .ndo_fix_features = tun_net_fix_features, 1158 .ndo_select_queue = tun_select_queue, 1159 #ifdef CONFIG_NET_POLL_CONTROLLER 1160 .ndo_poll_controller = tun_poll_controller, 1161 #endif 1162 .ndo_set_rx_headroom = tun_set_headroom, 1163 .ndo_get_stats64 = tun_net_get_stats64, 1164 }; 1165 1166 static const struct net_device_ops tap_netdev_ops = { 1167 .ndo_uninit = tun_net_uninit, 1168 .ndo_open = tun_net_open, 1169 .ndo_stop = tun_net_close, 1170 .ndo_start_xmit = tun_net_xmit, 1171 .ndo_fix_features = tun_net_fix_features, 1172 .ndo_set_rx_mode = tun_net_mclist, 1173 .ndo_set_mac_address = eth_mac_addr, 1174 .ndo_validate_addr = eth_validate_addr, 1175 .ndo_select_queue = tun_select_queue, 1176 #ifdef CONFIG_NET_POLL_CONTROLLER 1177 .ndo_poll_controller = tun_poll_controller, 1178 #endif 1179 .ndo_features_check = passthru_features_check, 1180 .ndo_set_rx_headroom = tun_set_headroom, 1181 .ndo_get_stats64 = tun_net_get_stats64, 1182 .ndo_xdp = tun_xdp, 1183 }; 1184 1185 static void tun_flow_init(struct tun_struct *tun) 1186 { 1187 int i; 1188 1189 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) 1190 INIT_HLIST_HEAD(&tun->flows[i]); 1191 1192 tun->ageing_time = TUN_FLOW_EXPIRE; 1193 setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun); 1194 mod_timer(&tun->flow_gc_timer, 1195 round_jiffies_up(jiffies + tun->ageing_time)); 1196 } 1197 1198 static void tun_flow_uninit(struct tun_struct *tun) 1199 { 1200 del_timer_sync(&tun->flow_gc_timer); 1201 tun_flow_flush(tun); 1202 } 1203 1204 #define MIN_MTU 68 1205 #define MAX_MTU 65535 1206 1207 /* Initialize net device. */ 1208 static void tun_net_init(struct net_device *dev) 1209 { 1210 struct tun_struct *tun = netdev_priv(dev); 1211 1212 switch (tun->flags & TUN_TYPE_MASK) { 1213 case IFF_TUN: 1214 dev->netdev_ops = &tun_netdev_ops; 1215 1216 /* Point-to-Point TUN Device */ 1217 dev->hard_header_len = 0; 1218 dev->addr_len = 0; 1219 dev->mtu = 1500; 1220 1221 /* Zero header length */ 1222 dev->type = ARPHRD_NONE; 1223 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 1224 break; 1225 1226 case IFF_TAP: 1227 dev->netdev_ops = &tap_netdev_ops; 1228 /* Ethernet TAP Device */ 1229 ether_setup(dev); 1230 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1231 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1232 1233 eth_hw_addr_random(dev); 1234 1235 break; 1236 } 1237 1238 dev->min_mtu = MIN_MTU; 1239 dev->max_mtu = MAX_MTU - dev->hard_header_len; 1240 } 1241 1242 /* Character device part */ 1243 1244 /* Poll */ 1245 static unsigned int tun_chr_poll(struct file *file, poll_table *wait) 1246 { 1247 struct tun_file *tfile = file->private_data; 1248 struct tun_struct *tun = tun_get(tfile); 1249 struct sock *sk; 1250 unsigned int mask = 0; 1251 1252 if (!tun) 1253 return POLLERR; 1254 1255 sk = tfile->socket.sk; 1256 1257 tun_debug(KERN_INFO, tun, "tun_chr_poll\n"); 1258 1259 poll_wait(file, sk_sleep(sk), wait); 1260 1261 if (!skb_array_empty(&tfile->tx_array)) 1262 mask |= POLLIN | POLLRDNORM; 1263 1264 if (tun->dev->flags & IFF_UP && 1265 (sock_writeable(sk) || 1266 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && 1267 sock_writeable(sk)))) 1268 mask |= POLLOUT | POLLWRNORM; 1269 1270 if (tun->dev->reg_state != NETREG_REGISTERED) 1271 mask = POLLERR; 1272 1273 tun_put(tun); 1274 return mask; 1275 } 1276 1277 static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, 1278 size_t len, 1279 const struct iov_iter *it) 1280 { 1281 struct sk_buff *skb; 1282 size_t linear; 1283 int err; 1284 int i; 1285 1286 if (it->nr_segs > MAX_SKB_FRAGS + 1) 1287 return ERR_PTR(-ENOMEM); 1288 1289 local_bh_disable(); 1290 skb = napi_get_frags(&tfile->napi); 1291 local_bh_enable(); 1292 if (!skb) 1293 return ERR_PTR(-ENOMEM); 1294 1295 linear = iov_iter_single_seg_count(it); 1296 err = __skb_grow(skb, linear); 1297 if (err) 1298 goto free; 1299 1300 skb->len = len; 1301 skb->data_len = len - linear; 1302 skb->truesize += skb->data_len; 1303 1304 for (i = 1; i < it->nr_segs; i++) { 1305 size_t fragsz = it->iov[i].iov_len; 1306 unsigned long offset; 1307 struct page *page; 1308 void *data; 1309 1310 if (fragsz == 0 || fragsz > PAGE_SIZE) { 1311 err = -EINVAL; 1312 goto free; 1313 } 1314 1315 local_bh_disable(); 1316 data = napi_alloc_frag(fragsz); 1317 local_bh_enable(); 1318 if (!data) { 1319 err = -ENOMEM; 1320 goto free; 1321 } 1322 1323 page = virt_to_head_page(data); 1324 offset = data - page_address(page); 1325 skb_fill_page_desc(skb, i - 1, page, offset, fragsz); 1326 } 1327 1328 return skb; 1329 free: 1330 /* frees skb and all frags allocated with napi_alloc_frag() */ 1331 napi_free_frags(&tfile->napi); 1332 return ERR_PTR(err); 1333 } 1334 1335 /* prepad is the amount to reserve at front. len is length after that. 1336 * linear is a hint as to how much to copy (usually headers). */ 1337 static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, 1338 size_t prepad, size_t len, 1339 size_t linear, int noblock) 1340 { 1341 struct sock *sk = tfile->socket.sk; 1342 struct sk_buff *skb; 1343 int err; 1344 1345 /* Under a page? Don't bother with paged skb. */ 1346 if (prepad + len < PAGE_SIZE || !linear) 1347 linear = len; 1348 1349 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, 1350 &err, 0); 1351 if (!skb) 1352 return ERR_PTR(err); 1353 1354 skb_reserve(skb, prepad); 1355 skb_put(skb, linear); 1356 skb->data_len = len - linear; 1357 skb->len += len - linear; 1358 1359 return skb; 1360 } 1361 1362 static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, 1363 struct sk_buff *skb, int more) 1364 { 1365 struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 1366 struct sk_buff_head process_queue; 1367 u32 rx_batched = tun->rx_batched; 1368 bool rcv = false; 1369 1370 if (!rx_batched || (!more && skb_queue_empty(queue))) { 1371 local_bh_disable(); 1372 netif_receive_skb(skb); 1373 local_bh_enable(); 1374 return; 1375 } 1376 1377 spin_lock(&queue->lock); 1378 if (!more || skb_queue_len(queue) == rx_batched) { 1379 __skb_queue_head_init(&process_queue); 1380 skb_queue_splice_tail_init(queue, &process_queue); 1381 rcv = true; 1382 } else { 1383 __skb_queue_tail(queue, skb); 1384 } 1385 spin_unlock(&queue->lock); 1386 1387 if (rcv) { 1388 struct sk_buff *nskb; 1389 1390 local_bh_disable(); 1391 while ((nskb = __skb_dequeue(&process_queue))) 1392 netif_receive_skb(nskb); 1393 netif_receive_skb(skb); 1394 local_bh_enable(); 1395 } 1396 } 1397 1398 static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, 1399 int len, int noblock, bool zerocopy) 1400 { 1401 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 1402 return false; 1403 1404 if (tfile->socket.sk->sk_sndbuf != INT_MAX) 1405 return false; 1406 1407 if (!noblock) 1408 return false; 1409 1410 if (zerocopy) 1411 return false; 1412 1413 if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + 1414 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) 1415 return false; 1416 1417 return true; 1418 } 1419 1420 static struct sk_buff *tun_build_skb(struct tun_struct *tun, 1421 struct tun_file *tfile, 1422 struct iov_iter *from, 1423 struct virtio_net_hdr *hdr, 1424 int len, int *skb_xdp) 1425 { 1426 struct page_frag *alloc_frag = ¤t->task_frag; 1427 struct sk_buff *skb; 1428 struct bpf_prog *xdp_prog; 1429 int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1430 unsigned int delta = 0; 1431 char *buf; 1432 size_t copied; 1433 bool xdp_xmit = false; 1434 int err, pad = TUN_RX_PAD; 1435 1436 rcu_read_lock(); 1437 xdp_prog = rcu_dereference(tun->xdp_prog); 1438 if (xdp_prog) 1439 pad += TUN_HEADROOM; 1440 buflen += SKB_DATA_ALIGN(len + pad); 1441 rcu_read_unlock(); 1442 1443 if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) 1444 return ERR_PTR(-ENOMEM); 1445 1446 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 1447 copied = copy_page_from_iter(alloc_frag->page, 1448 alloc_frag->offset + pad, 1449 len, from); 1450 if (copied != len) 1451 return ERR_PTR(-EFAULT); 1452 1453 /* There's a small window that XDP may be set after the check 1454 * of xdp_prog above, this should be rare and for simplicity 1455 * we do XDP on skb in case the headroom is not enough. 1456 */ 1457 if (hdr->gso_type || !xdp_prog) 1458 *skb_xdp = 1; 1459 else 1460 *skb_xdp = 0; 1461 1462 rcu_read_lock(); 1463 xdp_prog = rcu_dereference(tun->xdp_prog); 1464 if (xdp_prog && !*skb_xdp) { 1465 struct xdp_buff xdp; 1466 void *orig_data; 1467 u32 act; 1468 1469 xdp.data_hard_start = buf; 1470 xdp.data = buf + pad; 1471 xdp_set_data_meta_invalid(&xdp); 1472 xdp.data_end = xdp.data + len; 1473 orig_data = xdp.data; 1474 act = bpf_prog_run_xdp(xdp_prog, &xdp); 1475 1476 switch (act) { 1477 case XDP_REDIRECT: 1478 get_page(alloc_frag->page); 1479 alloc_frag->offset += buflen; 1480 err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); 1481 if (err) 1482 goto err_redirect; 1483 return NULL; 1484 case XDP_TX: 1485 xdp_xmit = true; 1486 /* fall through */ 1487 case XDP_PASS: 1488 delta = orig_data - xdp.data; 1489 break; 1490 default: 1491 bpf_warn_invalid_xdp_action(act); 1492 /* fall through */ 1493 case XDP_ABORTED: 1494 trace_xdp_exception(tun->dev, xdp_prog, act); 1495 /* fall through */ 1496 case XDP_DROP: 1497 goto err_xdp; 1498 } 1499 } 1500 1501 skb = build_skb(buf, buflen); 1502 if (!skb) { 1503 rcu_read_unlock(); 1504 return ERR_PTR(-ENOMEM); 1505 } 1506 1507 skb_reserve(skb, pad - delta); 1508 skb_put(skb, len + delta); 1509 get_page(alloc_frag->page); 1510 alloc_frag->offset += buflen; 1511 1512 if (xdp_xmit) { 1513 skb->dev = tun->dev; 1514 generic_xdp_tx(skb, xdp_prog); 1515 rcu_read_lock(); 1516 return NULL; 1517 } 1518 1519 rcu_read_unlock(); 1520 1521 return skb; 1522 1523 err_redirect: 1524 put_page(alloc_frag->page); 1525 err_xdp: 1526 rcu_read_unlock(); 1527 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1528 return NULL; 1529 } 1530 1531 /* Get packet from user space buffer */ 1532 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, 1533 void *msg_control, struct iov_iter *from, 1534 int noblock, bool more) 1535 { 1536 struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; 1537 struct sk_buff *skb; 1538 size_t total_len = iov_iter_count(from); 1539 size_t len = total_len, align = tun->align, linear; 1540 struct virtio_net_hdr gso = { 0 }; 1541 struct tun_pcpu_stats *stats; 1542 int good_linear; 1543 int copylen; 1544 bool zerocopy = false; 1545 int err; 1546 u32 rxhash; 1547 int skb_xdp = 1; 1548 bool frags = tun_napi_frags_enabled(tun); 1549 1550 if (!(tun->dev->flags & IFF_UP)) 1551 return -EIO; 1552 1553 if (!(tun->flags & IFF_NO_PI)) { 1554 if (len < sizeof(pi)) 1555 return -EINVAL; 1556 len -= sizeof(pi); 1557 1558 if (!copy_from_iter_full(&pi, sizeof(pi), from)) 1559 return -EFAULT; 1560 } 1561 1562 if (tun->flags & IFF_VNET_HDR) { 1563 int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 1564 1565 if (len < vnet_hdr_sz) 1566 return -EINVAL; 1567 len -= vnet_hdr_sz; 1568 1569 if (!copy_from_iter_full(&gso, sizeof(gso), from)) 1570 return -EFAULT; 1571 1572 if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 1573 tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len)) 1574 gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2); 1575 1576 if (tun16_to_cpu(tun, gso.hdr_len) > len) 1577 return -EINVAL; 1578 iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); 1579 } 1580 1581 if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { 1582 align += NET_IP_ALIGN; 1583 if (unlikely(len < ETH_HLEN || 1584 (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN))) 1585 return -EINVAL; 1586 } 1587 1588 good_linear = SKB_MAX_HEAD(align); 1589 1590 if (msg_control) { 1591 struct iov_iter i = *from; 1592 1593 /* There are 256 bytes to be copied in skb, so there is 1594 * enough room for skb expand head in case it is used. 1595 * The rest of the buffer is mapped from userspace. 1596 */ 1597 copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN; 1598 if (copylen > good_linear) 1599 copylen = good_linear; 1600 linear = copylen; 1601 iov_iter_advance(&i, copylen); 1602 if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) 1603 zerocopy = true; 1604 } 1605 1606 if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { 1607 /* For the packet that is not easy to be processed 1608 * (e.g gso or jumbo packet), we will do it at after 1609 * skb was created with generic XDP routine. 1610 */ 1611 skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); 1612 if (IS_ERR(skb)) { 1613 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1614 return PTR_ERR(skb); 1615 } 1616 if (!skb) 1617 return total_len; 1618 } else { 1619 if (!zerocopy) { 1620 copylen = len; 1621 if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) 1622 linear = good_linear; 1623 else 1624 linear = tun16_to_cpu(tun, gso.hdr_len); 1625 } 1626 1627 if (frags) { 1628 mutex_lock(&tfile->napi_mutex); 1629 skb = tun_napi_alloc_frags(tfile, copylen, from); 1630 /* tun_napi_alloc_frags() enforces a layout for the skb. 1631 * If zerocopy is enabled, then this layout will be 1632 * overwritten by zerocopy_sg_from_iter(). 1633 */ 1634 zerocopy = false; 1635 } else { 1636 skb = tun_alloc_skb(tfile, align, copylen, linear, 1637 noblock); 1638 } 1639 1640 if (IS_ERR(skb)) { 1641 if (PTR_ERR(skb) != -EAGAIN) 1642 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1643 if (frags) 1644 mutex_unlock(&tfile->napi_mutex); 1645 return PTR_ERR(skb); 1646 } 1647 1648 if (zerocopy) 1649 err = zerocopy_sg_from_iter(skb, from); 1650 else 1651 err = skb_copy_datagram_from_iter(skb, 0, from, len); 1652 1653 if (err) { 1654 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1655 kfree_skb(skb); 1656 if (frags) { 1657 tfile->napi.skb = NULL; 1658 mutex_unlock(&tfile->napi_mutex); 1659 } 1660 1661 return -EFAULT; 1662 } 1663 } 1664 1665 if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { 1666 this_cpu_inc(tun->pcpu_stats->rx_frame_errors); 1667 kfree_skb(skb); 1668 if (frags) { 1669 tfile->napi.skb = NULL; 1670 mutex_unlock(&tfile->napi_mutex); 1671 } 1672 1673 return -EINVAL; 1674 } 1675 1676 switch (tun->flags & TUN_TYPE_MASK) { 1677 case IFF_TUN: 1678 if (tun->flags & IFF_NO_PI) { 1679 switch (skb->data[0] & 0xf0) { 1680 case 0x40: 1681 pi.proto = htons(ETH_P_IP); 1682 break; 1683 case 0x60: 1684 pi.proto = htons(ETH_P_IPV6); 1685 break; 1686 default: 1687 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1688 kfree_skb(skb); 1689 return -EINVAL; 1690 } 1691 } 1692 1693 skb_reset_mac_header(skb); 1694 skb->protocol = pi.proto; 1695 skb->dev = tun->dev; 1696 break; 1697 case IFF_TAP: 1698 if (!frags) 1699 skb->protocol = eth_type_trans(skb, tun->dev); 1700 break; 1701 } 1702 1703 /* copy skb_ubuf_info for callback when skb has no error */ 1704 if (zerocopy) { 1705 skb_shinfo(skb)->destructor_arg = msg_control; 1706 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 1707 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1708 } else if (msg_control) { 1709 struct ubuf_info *uarg = msg_control; 1710 uarg->callback(uarg, false); 1711 } 1712 1713 skb_reset_network_header(skb); 1714 skb_probe_transport_header(skb, 0); 1715 1716 if (skb_xdp) { 1717 struct bpf_prog *xdp_prog; 1718 int ret; 1719 1720 rcu_read_lock(); 1721 xdp_prog = rcu_dereference(tun->xdp_prog); 1722 if (xdp_prog) { 1723 ret = do_xdp_generic(xdp_prog, skb); 1724 if (ret != XDP_PASS) { 1725 rcu_read_unlock(); 1726 return total_len; 1727 } 1728 } 1729 rcu_read_unlock(); 1730 } 1731 1732 rxhash = __skb_get_hash_symmetric(skb); 1733 1734 if (frags) { 1735 /* Exercise flow dissector code path. */ 1736 u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb)); 1737 1738 if (headlen > skb_headlen(skb) || headlen < ETH_HLEN) { 1739 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1740 napi_free_frags(&tfile->napi); 1741 mutex_unlock(&tfile->napi_mutex); 1742 WARN_ON(1); 1743 return -ENOMEM; 1744 } 1745 1746 local_bh_disable(); 1747 napi_gro_frags(&tfile->napi); 1748 local_bh_enable(); 1749 mutex_unlock(&tfile->napi_mutex); 1750 } else if (tun->flags & IFF_NAPI) { 1751 struct sk_buff_head *queue = &tfile->sk.sk_write_queue; 1752 int queue_len; 1753 1754 spin_lock_bh(&queue->lock); 1755 __skb_queue_tail(queue, skb); 1756 queue_len = skb_queue_len(queue); 1757 spin_unlock(&queue->lock); 1758 1759 if (!more || queue_len > NAPI_POLL_WEIGHT) 1760 napi_schedule(&tfile->napi); 1761 1762 local_bh_enable(); 1763 } else if (!IS_ENABLED(CONFIG_4KSTACKS)) { 1764 tun_rx_batched(tun, tfile, skb, more); 1765 } else { 1766 netif_rx_ni(skb); 1767 } 1768 1769 stats = get_cpu_ptr(tun->pcpu_stats); 1770 u64_stats_update_begin(&stats->syncp); 1771 stats->rx_packets++; 1772 stats->rx_bytes += len; 1773 u64_stats_update_end(&stats->syncp); 1774 put_cpu_ptr(stats); 1775 1776 tun_flow_update(tun, rxhash, tfile); 1777 return total_len; 1778 } 1779 1780 static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) 1781 { 1782 struct file *file = iocb->ki_filp; 1783 struct tun_file *tfile = file->private_data; 1784 struct tun_struct *tun = tun_get(tfile); 1785 ssize_t result; 1786 1787 if (!tun) 1788 return -EBADFD; 1789 1790 result = tun_get_user(tun, tfile, NULL, from, 1791 file->f_flags & O_NONBLOCK, false); 1792 1793 tun_put(tun); 1794 return result; 1795 } 1796 1797 /* Put packet to the user space buffer */ 1798 static ssize_t tun_put_user(struct tun_struct *tun, 1799 struct tun_file *tfile, 1800 struct sk_buff *skb, 1801 struct iov_iter *iter) 1802 { 1803 struct tun_pi pi = { 0, skb->protocol }; 1804 struct tun_pcpu_stats *stats; 1805 ssize_t total; 1806 int vlan_offset = 0; 1807 int vlan_hlen = 0; 1808 int vnet_hdr_sz = 0; 1809 1810 if (skb_vlan_tag_present(skb)) 1811 vlan_hlen = VLAN_HLEN; 1812 1813 if (tun->flags & IFF_VNET_HDR) 1814 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); 1815 1816 total = skb->len + vlan_hlen + vnet_hdr_sz; 1817 1818 if (!(tun->flags & IFF_NO_PI)) { 1819 if (iov_iter_count(iter) < sizeof(pi)) 1820 return -EINVAL; 1821 1822 total += sizeof(pi); 1823 if (iov_iter_count(iter) < total) { 1824 /* Packet will be striped */ 1825 pi.flags |= TUN_PKT_STRIP; 1826 } 1827 1828 if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi)) 1829 return -EFAULT; 1830 } 1831 1832 if (vnet_hdr_sz) { 1833 struct virtio_net_hdr gso; 1834 1835 if (iov_iter_count(iter) < vnet_hdr_sz) 1836 return -EINVAL; 1837 1838 if (virtio_net_hdr_from_skb(skb, &gso, 1839 tun_is_little_endian(tun), true)) { 1840 struct skb_shared_info *sinfo = skb_shinfo(skb); 1841 pr_err("unexpected GSO type: " 1842 "0x%x, gso_size %d, hdr_len %d\n", 1843 sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), 1844 tun16_to_cpu(tun, gso.hdr_len)); 1845 print_hex_dump(KERN_ERR, "tun: ", 1846 DUMP_PREFIX_NONE, 1847 16, 1, skb->head, 1848 min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); 1849 WARN_ON_ONCE(1); 1850 return -EINVAL; 1851 } 1852 1853 if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) 1854 return -EFAULT; 1855 1856 iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); 1857 } 1858 1859 if (vlan_hlen) { 1860 int ret; 1861 struct { 1862 __be16 h_vlan_proto; 1863 __be16 h_vlan_TCI; 1864 } veth; 1865 1866 veth.h_vlan_proto = skb->vlan_proto; 1867 veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); 1868 1869 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 1870 1871 ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); 1872 if (ret || !iov_iter_count(iter)) 1873 goto done; 1874 1875 ret = copy_to_iter(&veth, sizeof(veth), iter); 1876 if (ret != sizeof(veth) || !iov_iter_count(iter)) 1877 goto done; 1878 } 1879 1880 skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); 1881 1882 done: 1883 /* caller is in process context, */ 1884 stats = get_cpu_ptr(tun->pcpu_stats); 1885 u64_stats_update_begin(&stats->syncp); 1886 stats->tx_packets++; 1887 stats->tx_bytes += skb->len + vlan_hlen; 1888 u64_stats_update_end(&stats->syncp); 1889 put_cpu_ptr(tun->pcpu_stats); 1890 1891 return total; 1892 } 1893 1894 static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock, 1895 int *err) 1896 { 1897 DECLARE_WAITQUEUE(wait, current); 1898 struct sk_buff *skb = NULL; 1899 int error = 0; 1900 1901 skb = skb_array_consume(&tfile->tx_array); 1902 if (skb) 1903 goto out; 1904 if (noblock) { 1905 error = -EAGAIN; 1906 goto out; 1907 } 1908 1909 add_wait_queue(&tfile->wq.wait, &wait); 1910 current->state = TASK_INTERRUPTIBLE; 1911 1912 while (1) { 1913 skb = skb_array_consume(&tfile->tx_array); 1914 if (skb) 1915 break; 1916 if (signal_pending(current)) { 1917 error = -ERESTARTSYS; 1918 break; 1919 } 1920 if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { 1921 error = -EFAULT; 1922 break; 1923 } 1924 1925 schedule(); 1926 } 1927 1928 current->state = TASK_RUNNING; 1929 remove_wait_queue(&tfile->wq.wait, &wait); 1930 1931 out: 1932 *err = error; 1933 return skb; 1934 } 1935 1936 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, 1937 struct iov_iter *to, 1938 int noblock, struct sk_buff *skb) 1939 { 1940 ssize_t ret; 1941 int err; 1942 1943 tun_debug(KERN_INFO, tun, "tun_do_read\n"); 1944 1945 if (!iov_iter_count(to)) 1946 return 0; 1947 1948 if (!skb) { 1949 /* Read frames from ring */ 1950 skb = tun_ring_recv(tfile, noblock, &err); 1951 if (!skb) 1952 return err; 1953 } 1954 1955 ret = tun_put_user(tun, tfile, skb, to); 1956 if (unlikely(ret < 0)) 1957 kfree_skb(skb); 1958 else 1959 consume_skb(skb); 1960 1961 return ret; 1962 } 1963 1964 static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) 1965 { 1966 struct file *file = iocb->ki_filp; 1967 struct tun_file *tfile = file->private_data; 1968 struct tun_struct *tun = tun_get(tfile); 1969 ssize_t len = iov_iter_count(to), ret; 1970 1971 if (!tun) 1972 return -EBADFD; 1973 ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL); 1974 ret = min_t(ssize_t, ret, len); 1975 if (ret > 0) 1976 iocb->ki_pos = ret; 1977 tun_put(tun); 1978 return ret; 1979 } 1980 1981 static void tun_free_netdev(struct net_device *dev) 1982 { 1983 struct tun_struct *tun = netdev_priv(dev); 1984 1985 BUG_ON(!(list_empty(&tun->disabled))); 1986 free_percpu(tun->pcpu_stats); 1987 tun_flow_uninit(tun); 1988 security_tun_dev_free_security(tun->security); 1989 } 1990 1991 static void tun_setup(struct net_device *dev) 1992 { 1993 struct tun_struct *tun = netdev_priv(dev); 1994 1995 tun->owner = INVALID_UID; 1996 tun->group = INVALID_GID; 1997 1998 dev->ethtool_ops = &tun_ethtool_ops; 1999 dev->needs_free_netdev = true; 2000 dev->priv_destructor = tun_free_netdev; 2001 /* We prefer our own queue length */ 2002 dev->tx_queue_len = TUN_READQ_SIZE; 2003 } 2004 2005 /* Trivial set of netlink ops to allow deleting tun or tap 2006 * device with netlink. 2007 */ 2008 static int tun_validate(struct nlattr *tb[], struct nlattr *data[], 2009 struct netlink_ext_ack *extack) 2010 { 2011 return -EINVAL; 2012 } 2013 2014 static struct rtnl_link_ops tun_link_ops __read_mostly = { 2015 .kind = DRV_NAME, 2016 .priv_size = sizeof(struct tun_struct), 2017 .setup = tun_setup, 2018 .validate = tun_validate, 2019 }; 2020 2021 static void tun_sock_write_space(struct sock *sk) 2022 { 2023 struct tun_file *tfile; 2024 wait_queue_head_t *wqueue; 2025 2026 if (!sock_writeable(sk)) 2027 return; 2028 2029 if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) 2030 return; 2031 2032 wqueue = sk_sleep(sk); 2033 if (wqueue && waitqueue_active(wqueue)) 2034 wake_up_interruptible_sync_poll(wqueue, POLLOUT | 2035 POLLWRNORM | POLLWRBAND); 2036 2037 tfile = container_of(sk, struct tun_file, sk); 2038 kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); 2039 } 2040 2041 static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) 2042 { 2043 int ret; 2044 struct tun_file *tfile = container_of(sock, struct tun_file, socket); 2045 struct tun_struct *tun = tun_get(tfile); 2046 2047 if (!tun) 2048 return -EBADFD; 2049 2050 ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, 2051 m->msg_flags & MSG_DONTWAIT, 2052 m->msg_flags & MSG_MORE); 2053 tun_put(tun); 2054 return ret; 2055 } 2056 2057 static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, 2058 int flags) 2059 { 2060 struct tun_file *tfile = container_of(sock, struct tun_file, socket); 2061 struct tun_struct *tun = tun_get(tfile); 2062 int ret; 2063 2064 if (!tun) 2065 return -EBADFD; 2066 2067 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { 2068 ret = -EINVAL; 2069 goto out; 2070 } 2071 if (flags & MSG_ERRQUEUE) { 2072 ret = sock_recv_errqueue(sock->sk, m, total_len, 2073 SOL_PACKET, TUN_TX_TIMESTAMP); 2074 goto out; 2075 } 2076 ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, 2077 m->msg_control); 2078 if (ret > (ssize_t)total_len) { 2079 m->msg_flags |= MSG_TRUNC; 2080 ret = flags & MSG_TRUNC ? ret : total_len; 2081 } 2082 out: 2083 tun_put(tun); 2084 return ret; 2085 } 2086 2087 static int tun_peek_len(struct socket *sock) 2088 { 2089 struct tun_file *tfile = container_of(sock, struct tun_file, socket); 2090 struct tun_struct *tun; 2091 int ret = 0; 2092 2093 tun = tun_get(tfile); 2094 if (!tun) 2095 return 0; 2096 2097 ret = skb_array_peek_len(&tfile->tx_array); 2098 tun_put(tun); 2099 2100 return ret; 2101 } 2102 2103 /* Ops structure to mimic raw sockets with tun */ 2104 static const struct proto_ops tun_socket_ops = { 2105 .peek_len = tun_peek_len, 2106 .sendmsg = tun_sendmsg, 2107 .recvmsg = tun_recvmsg, 2108 }; 2109 2110 static struct proto tun_proto = { 2111 .name = "tun", 2112 .owner = THIS_MODULE, 2113 .obj_size = sizeof(struct tun_file), 2114 }; 2115 2116 static int tun_flags(struct tun_struct *tun) 2117 { 2118 return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); 2119 } 2120 2121 static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr, 2122 char *buf) 2123 { 2124 struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2125 return sprintf(buf, "0x%x\n", tun_flags(tun)); 2126 } 2127 2128 static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, 2129 char *buf) 2130 { 2131 struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2132 return uid_valid(tun->owner)? 2133 sprintf(buf, "%u\n", 2134 from_kuid_munged(current_user_ns(), tun->owner)): 2135 sprintf(buf, "-1\n"); 2136 } 2137 2138 static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, 2139 char *buf) 2140 { 2141 struct tun_struct *tun = netdev_priv(to_net_dev(dev)); 2142 return gid_valid(tun->group) ? 2143 sprintf(buf, "%u\n", 2144 from_kgid_munged(current_user_ns(), tun->group)): 2145 sprintf(buf, "-1\n"); 2146 } 2147 2148 static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); 2149 static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL); 2150 static DEVICE_ATTR(group, 0444, tun_show_group, NULL); 2151 2152 static struct attribute *tun_dev_attrs[] = { 2153 &dev_attr_tun_flags.attr, 2154 &dev_attr_owner.attr, 2155 &dev_attr_group.attr, 2156 NULL 2157 }; 2158 2159 static const struct attribute_group tun_attr_group = { 2160 .attrs = tun_dev_attrs 2161 }; 2162 2163 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 2164 { 2165 struct tun_struct *tun; 2166 struct tun_file *tfile = file->private_data; 2167 struct net_device *dev; 2168 int err; 2169 2170 if (tfile->detached) 2171 return -EINVAL; 2172 2173 if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { 2174 if (!capable(CAP_NET_ADMIN)) 2175 return -EPERM; 2176 2177 if (!(ifr->ifr_flags & IFF_NAPI) || 2178 (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) 2179 return -EINVAL; 2180 } 2181 2182 dev = __dev_get_by_name(net, ifr->ifr_name); 2183 if (dev) { 2184 if (ifr->ifr_flags & IFF_TUN_EXCL) 2185 return -EBUSY; 2186 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) 2187 tun = netdev_priv(dev); 2188 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) 2189 tun = netdev_priv(dev); 2190 else 2191 return -EINVAL; 2192 2193 if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != 2194 !!(tun->flags & IFF_MULTI_QUEUE)) 2195 return -EINVAL; 2196 2197 if (tun_not_capable(tun)) 2198 return -EPERM; 2199 err = security_tun_dev_open(tun->security); 2200 if (err < 0) 2201 return err; 2202 2203 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, 2204 ifr->ifr_flags & IFF_NAPI); 2205 if (err < 0) 2206 return err; 2207 2208 if (tun->flags & IFF_MULTI_QUEUE && 2209 (tun->numqueues + tun->numdisabled > 1)) { 2210 /* One or more queue has already been attached, no need 2211 * to initialize the device again. 2212 */ 2213 return 0; 2214 } 2215 } 2216 else { 2217 char *name; 2218 unsigned long flags = 0; 2219 int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? 2220 MAX_TAP_QUEUES : 1; 2221 2222 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2223 return -EPERM; 2224 err = security_tun_dev_create(); 2225 if (err < 0) 2226 return err; 2227 2228 /* Set dev type */ 2229 if (ifr->ifr_flags & IFF_TUN) { 2230 /* TUN device */ 2231 flags |= IFF_TUN; 2232 name = "tun%d"; 2233 } else if (ifr->ifr_flags & IFF_TAP) { 2234 /* TAP device */ 2235 flags |= IFF_TAP; 2236 name = "tap%d"; 2237 } else 2238 return -EINVAL; 2239 2240 if (*ifr->ifr_name) 2241 name = ifr->ifr_name; 2242 2243 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, 2244 NET_NAME_UNKNOWN, tun_setup, queues, 2245 queues); 2246 2247 if (!dev) 2248 return -ENOMEM; 2249 2250 dev_net_set(dev, net); 2251 dev->rtnl_link_ops = &tun_link_ops; 2252 dev->ifindex = tfile->ifindex; 2253 dev->sysfs_groups[0] = &tun_attr_group; 2254 2255 tun = netdev_priv(dev); 2256 tun->dev = dev; 2257 tun->flags = flags; 2258 tun->txflt.count = 0; 2259 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); 2260 2261 tun->align = NET_SKB_PAD; 2262 tun->filter_attached = false; 2263 tun->sndbuf = tfile->socket.sk->sk_sndbuf; 2264 tun->rx_batched = 0; 2265 2266 tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); 2267 if (!tun->pcpu_stats) { 2268 err = -ENOMEM; 2269 goto err_free_dev; 2270 } 2271 2272 spin_lock_init(&tun->lock); 2273 2274 err = security_tun_dev_alloc_security(&tun->security); 2275 if (err < 0) 2276 goto err_free_stat; 2277 2278 tun_net_init(dev); 2279 tun_flow_init(tun); 2280 2281 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | 2282 TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | 2283 NETIF_F_HW_VLAN_STAG_TX; 2284 dev->features = dev->hw_features | NETIF_F_LLTX; 2285 dev->vlan_features = dev->features & 2286 ~(NETIF_F_HW_VLAN_CTAG_TX | 2287 NETIF_F_HW_VLAN_STAG_TX); 2288 2289 INIT_LIST_HEAD(&tun->disabled); 2290 err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI); 2291 if (err < 0) 2292 goto err_free_flow; 2293 2294 err = register_netdevice(tun->dev); 2295 if (err < 0) 2296 goto err_detach; 2297 } 2298 2299 netif_carrier_on(tun->dev); 2300 2301 tun_debug(KERN_INFO, tun, "tun_set_iff\n"); 2302 2303 tun->flags = (tun->flags & ~TUN_FEATURES) | 2304 (ifr->ifr_flags & TUN_FEATURES); 2305 2306 /* Make sure persistent devices do not get stuck in 2307 * xoff state. 2308 */ 2309 if (netif_running(tun->dev)) 2310 netif_tx_wake_all_queues(tun->dev); 2311 2312 strcpy(ifr->ifr_name, tun->dev->name); 2313 return 0; 2314 2315 err_detach: 2316 tun_detach_all(dev); 2317 /* register_netdevice() already called tun_free_netdev() */ 2318 goto err_free_dev; 2319 2320 err_free_flow: 2321 tun_flow_uninit(tun); 2322 security_tun_dev_free_security(tun->security); 2323 err_free_stat: 2324 free_percpu(tun->pcpu_stats); 2325 err_free_dev: 2326 free_netdev(dev); 2327 return err; 2328 } 2329 2330 static void tun_get_iff(struct net *net, struct tun_struct *tun, 2331 struct ifreq *ifr) 2332 { 2333 tun_debug(KERN_INFO, tun, "tun_get_iff\n"); 2334 2335 strcpy(ifr->ifr_name, tun->dev->name); 2336 2337 ifr->ifr_flags = tun_flags(tun); 2338 2339 } 2340 2341 /* This is like a cut-down ethtool ops, except done via tun fd so no 2342 * privs required. */ 2343 static int set_offload(struct tun_struct *tun, unsigned long arg) 2344 { 2345 netdev_features_t features = 0; 2346 2347 if (arg & TUN_F_CSUM) { 2348 features |= NETIF_F_HW_CSUM; 2349 arg &= ~TUN_F_CSUM; 2350 2351 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { 2352 if (arg & TUN_F_TSO_ECN) { 2353 features |= NETIF_F_TSO_ECN; 2354 arg &= ~TUN_F_TSO_ECN; 2355 } 2356 if (arg & TUN_F_TSO4) 2357 features |= NETIF_F_TSO; 2358 if (arg & TUN_F_TSO6) 2359 features |= NETIF_F_TSO6; 2360 arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 2361 } 2362 } 2363 2364 /* This gives the user a way to test for new features in future by 2365 * trying to set them. */ 2366 if (arg) 2367 return -EINVAL; 2368 2369 tun->set_features = features; 2370 tun->dev->wanted_features &= ~TUN_USER_FEATURES; 2371 tun->dev->wanted_features |= features; 2372 netdev_update_features(tun->dev); 2373 2374 return 0; 2375 } 2376 2377 static void tun_detach_filter(struct tun_struct *tun, int n) 2378 { 2379 int i; 2380 struct tun_file *tfile; 2381 2382 for (i = 0; i < n; i++) { 2383 tfile = rtnl_dereference(tun->tfiles[i]); 2384 lock_sock(tfile->socket.sk); 2385 sk_detach_filter(tfile->socket.sk); 2386 release_sock(tfile->socket.sk); 2387 } 2388 2389 tun->filter_attached = false; 2390 } 2391 2392 static int tun_attach_filter(struct tun_struct *tun) 2393 { 2394 int i, ret = 0; 2395 struct tun_file *tfile; 2396 2397 for (i = 0; i < tun->numqueues; i++) { 2398 tfile = rtnl_dereference(tun->tfiles[i]); 2399 lock_sock(tfile->socket.sk); 2400 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); 2401 release_sock(tfile->socket.sk); 2402 if (ret) { 2403 tun_detach_filter(tun, i); 2404 return ret; 2405 } 2406 } 2407 2408 tun->filter_attached = true; 2409 return ret; 2410 } 2411 2412 static void tun_set_sndbuf(struct tun_struct *tun) 2413 { 2414 struct tun_file *tfile; 2415 int i; 2416 2417 for (i = 0; i < tun->numqueues; i++) { 2418 tfile = rtnl_dereference(tun->tfiles[i]); 2419 tfile->socket.sk->sk_sndbuf = tun->sndbuf; 2420 } 2421 } 2422 2423 static int tun_set_queue(struct file *file, struct ifreq *ifr) 2424 { 2425 struct tun_file *tfile = file->private_data; 2426 struct tun_struct *tun; 2427 int ret = 0; 2428 2429 rtnl_lock(); 2430 2431 if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { 2432 tun = tfile->detached; 2433 if (!tun) { 2434 ret = -EINVAL; 2435 goto unlock; 2436 } 2437 ret = security_tun_dev_attach_queue(tun->security); 2438 if (ret < 0) 2439 goto unlock; 2440 ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI); 2441 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 2442 tun = rtnl_dereference(tfile->tun); 2443 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) 2444 ret = -EINVAL; 2445 else 2446 __tun_detach(tfile, false); 2447 } else 2448 ret = -EINVAL; 2449 2450 unlock: 2451 rtnl_unlock(); 2452 return ret; 2453 } 2454 2455 static long __tun_chr_ioctl(struct file *file, unsigned int cmd, 2456 unsigned long arg, int ifreq_len) 2457 { 2458 struct tun_file *tfile = file->private_data; 2459 struct tun_struct *tun; 2460 void __user* argp = (void __user*)arg; 2461 struct ifreq ifr; 2462 kuid_t owner; 2463 kgid_t group; 2464 int sndbuf; 2465 int vnet_hdr_sz; 2466 unsigned int ifindex; 2467 int le; 2468 int ret; 2469 2470 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == SOCK_IOC_TYPE) { 2471 if (copy_from_user(&ifr, argp, ifreq_len)) 2472 return -EFAULT; 2473 } else { 2474 memset(&ifr, 0, sizeof(ifr)); 2475 } 2476 if (cmd == TUNGETFEATURES) { 2477 /* Currently this just means: "what IFF flags are valid?". 2478 * This is needed because we never checked for invalid flags on 2479 * TUNSETIFF. 2480 */ 2481 return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES, 2482 (unsigned int __user*)argp); 2483 } else if (cmd == TUNSETQUEUE) 2484 return tun_set_queue(file, &ifr); 2485 2486 ret = 0; 2487 rtnl_lock(); 2488 2489 tun = tun_get(tfile); 2490 if (cmd == TUNSETIFF) { 2491 ret = -EEXIST; 2492 if (tun) 2493 goto unlock; 2494 2495 ifr.ifr_name[IFNAMSIZ-1] = '\0'; 2496 2497 ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr); 2498 2499 if (ret) 2500 goto unlock; 2501 2502 if (copy_to_user(argp, &ifr, ifreq_len)) 2503 ret = -EFAULT; 2504 goto unlock; 2505 } 2506 if (cmd == TUNSETIFINDEX) { 2507 ret = -EPERM; 2508 if (tun) 2509 goto unlock; 2510 2511 ret = -EFAULT; 2512 if (copy_from_user(&ifindex, argp, sizeof(ifindex))) 2513 goto unlock; 2514 2515 ret = 0; 2516 tfile->ifindex = ifindex; 2517 goto unlock; 2518 } 2519 2520 ret = -EBADFD; 2521 if (!tun) 2522 goto unlock; 2523 2524 tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd); 2525 2526 ret = 0; 2527 switch (cmd) { 2528 case TUNGETIFF: 2529 tun_get_iff(current->nsproxy->net_ns, tun, &ifr); 2530 2531 if (tfile->detached) 2532 ifr.ifr_flags |= IFF_DETACH_QUEUE; 2533 if (!tfile->socket.sk->sk_filter) 2534 ifr.ifr_flags |= IFF_NOFILTER; 2535 2536 if (copy_to_user(argp, &ifr, ifreq_len)) 2537 ret = -EFAULT; 2538 break; 2539 2540 case TUNSETNOCSUM: 2541 /* Disable/Enable checksum */ 2542 2543 /* [unimplemented] */ 2544 tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n", 2545 arg ? "disabled" : "enabled"); 2546 break; 2547 2548 case TUNSETPERSIST: 2549 /* Disable/Enable persist mode. Keep an extra reference to the 2550 * module to prevent the module being unprobed. 2551 */ 2552 if (arg && !(tun->flags & IFF_PERSIST)) { 2553 tun->flags |= IFF_PERSIST; 2554 __module_get(THIS_MODULE); 2555 } 2556 if (!arg && (tun->flags & IFF_PERSIST)) { 2557 tun->flags &= ~IFF_PERSIST; 2558 module_put(THIS_MODULE); 2559 } 2560 2561 tun_debug(KERN_INFO, tun, "persist %s\n", 2562 arg ? "enabled" : "disabled"); 2563 break; 2564 2565 case TUNSETOWNER: 2566 /* Set owner of the device */ 2567 owner = make_kuid(current_user_ns(), arg); 2568 if (!uid_valid(owner)) { 2569 ret = -EINVAL; 2570 break; 2571 } 2572 tun->owner = owner; 2573 tun_debug(KERN_INFO, tun, "owner set to %u\n", 2574 from_kuid(&init_user_ns, tun->owner)); 2575 break; 2576 2577 case TUNSETGROUP: 2578 /* Set group of the device */ 2579 group = make_kgid(current_user_ns(), arg); 2580 if (!gid_valid(group)) { 2581 ret = -EINVAL; 2582 break; 2583 } 2584 tun->group = group; 2585 tun_debug(KERN_INFO, tun, "group set to %u\n", 2586 from_kgid(&init_user_ns, tun->group)); 2587 break; 2588 2589 case TUNSETLINK: 2590 /* Only allow setting the type when the interface is down */ 2591 if (tun->dev->flags & IFF_UP) { 2592 tun_debug(KERN_INFO, tun, 2593 "Linktype set failed because interface is up\n"); 2594 ret = -EBUSY; 2595 } else { 2596 tun->dev->type = (int) arg; 2597 tun_debug(KERN_INFO, tun, "linktype set to %d\n", 2598 tun->dev->type); 2599 ret = 0; 2600 } 2601 break; 2602 2603 #ifdef TUN_DEBUG 2604 case TUNSETDEBUG: 2605 tun->debug = arg; 2606 break; 2607 #endif 2608 case TUNSETOFFLOAD: 2609 ret = set_offload(tun, arg); 2610 break; 2611 2612 case TUNSETTXFILTER: 2613 /* Can be set only for TAPs */ 2614 ret = -EINVAL; 2615 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 2616 break; 2617 ret = update_filter(&tun->txflt, (void __user *)arg); 2618 break; 2619 2620 case SIOCGIFHWADDR: 2621 /* Get hw address */ 2622 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); 2623 ifr.ifr_hwaddr.sa_family = tun->dev->type; 2624 if (copy_to_user(argp, &ifr, ifreq_len)) 2625 ret = -EFAULT; 2626 break; 2627 2628 case SIOCSIFHWADDR: 2629 /* Set hw address */ 2630 tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n", 2631 ifr.ifr_hwaddr.sa_data); 2632 2633 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 2634 break; 2635 2636 case TUNGETSNDBUF: 2637 sndbuf = tfile->socket.sk->sk_sndbuf; 2638 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) 2639 ret = -EFAULT; 2640 break; 2641 2642 case TUNSETSNDBUF: 2643 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { 2644 ret = -EFAULT; 2645 break; 2646 } 2647 2648 tun->sndbuf = sndbuf; 2649 tun_set_sndbuf(tun); 2650 break; 2651 2652 case TUNGETVNETHDRSZ: 2653 vnet_hdr_sz = tun->vnet_hdr_sz; 2654 if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) 2655 ret = -EFAULT; 2656 break; 2657 2658 case TUNSETVNETHDRSZ: 2659 if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { 2660 ret = -EFAULT; 2661 break; 2662 } 2663 if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { 2664 ret = -EINVAL; 2665 break; 2666 } 2667 2668 tun->vnet_hdr_sz = vnet_hdr_sz; 2669 break; 2670 2671 case TUNGETVNETLE: 2672 le = !!(tun->flags & TUN_VNET_LE); 2673 if (put_user(le, (int __user *)argp)) 2674 ret = -EFAULT; 2675 break; 2676 2677 case TUNSETVNETLE: 2678 if (get_user(le, (int __user *)argp)) { 2679 ret = -EFAULT; 2680 break; 2681 } 2682 if (le) 2683 tun->flags |= TUN_VNET_LE; 2684 else 2685 tun->flags &= ~TUN_VNET_LE; 2686 break; 2687 2688 case TUNGETVNETBE: 2689 ret = tun_get_vnet_be(tun, argp); 2690 break; 2691 2692 case TUNSETVNETBE: 2693 ret = tun_set_vnet_be(tun, argp); 2694 break; 2695 2696 case TUNATTACHFILTER: 2697 /* Can be set only for TAPs */ 2698 ret = -EINVAL; 2699 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 2700 break; 2701 ret = -EFAULT; 2702 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) 2703 break; 2704 2705 ret = tun_attach_filter(tun); 2706 break; 2707 2708 case TUNDETACHFILTER: 2709 /* Can be set only for TAPs */ 2710 ret = -EINVAL; 2711 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 2712 break; 2713 ret = 0; 2714 tun_detach_filter(tun, tun->numqueues); 2715 break; 2716 2717 case TUNGETFILTER: 2718 ret = -EINVAL; 2719 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) 2720 break; 2721 ret = -EFAULT; 2722 if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) 2723 break; 2724 ret = 0; 2725 break; 2726 2727 default: 2728 ret = -EINVAL; 2729 break; 2730 } 2731 2732 unlock: 2733 rtnl_unlock(); 2734 if (tun) 2735 tun_put(tun); 2736 return ret; 2737 } 2738 2739 static long tun_chr_ioctl(struct file *file, 2740 unsigned int cmd, unsigned long arg) 2741 { 2742 return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); 2743 } 2744 2745 #ifdef CONFIG_COMPAT 2746 static long tun_chr_compat_ioctl(struct file *file, 2747 unsigned int cmd, unsigned long arg) 2748 { 2749 switch (cmd) { 2750 case TUNSETIFF: 2751 case TUNGETIFF: 2752 case TUNSETTXFILTER: 2753 case TUNGETSNDBUF: 2754 case TUNSETSNDBUF: 2755 case SIOCGIFHWADDR: 2756 case SIOCSIFHWADDR: 2757 arg = (unsigned long)compat_ptr(arg); 2758 break; 2759 default: 2760 arg = (compat_ulong_t)arg; 2761 break; 2762 } 2763 2764 /* 2765 * compat_ifreq is shorter than ifreq, so we must not access beyond 2766 * the end of that structure. All fields that are used in this 2767 * driver are compatible though, we don't need to convert the 2768 * contents. 2769 */ 2770 return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); 2771 } 2772 #endif /* CONFIG_COMPAT */ 2773 2774 static int tun_chr_fasync(int fd, struct file *file, int on) 2775 { 2776 struct tun_file *tfile = file->private_data; 2777 int ret; 2778 2779 if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) 2780 goto out; 2781 2782 if (on) { 2783 __f_setown(file, task_pid(current), PIDTYPE_PID, 0); 2784 tfile->flags |= TUN_FASYNC; 2785 } else 2786 tfile->flags &= ~TUN_FASYNC; 2787 ret = 0; 2788 out: 2789 return ret; 2790 } 2791 2792 static int tun_chr_open(struct inode *inode, struct file * file) 2793 { 2794 struct net *net = current->nsproxy->net_ns; 2795 struct tun_file *tfile; 2796 2797 DBG1(KERN_INFO, "tunX: tun_chr_open\n"); 2798 2799 tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, 2800 &tun_proto, 0); 2801 if (!tfile) 2802 return -ENOMEM; 2803 RCU_INIT_POINTER(tfile->tun, NULL); 2804 tfile->flags = 0; 2805 tfile->ifindex = 0; 2806 2807 init_waitqueue_head(&tfile->wq.wait); 2808 RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq); 2809 2810 tfile->socket.file = file; 2811 tfile->socket.ops = &tun_socket_ops; 2812 2813 sock_init_data(&tfile->socket, &tfile->sk); 2814 2815 tfile->sk.sk_write_space = tun_sock_write_space; 2816 tfile->sk.sk_sndbuf = INT_MAX; 2817 2818 file->private_data = tfile; 2819 INIT_LIST_HEAD(&tfile->next); 2820 2821 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); 2822 2823 return 0; 2824 } 2825 2826 static int tun_chr_close(struct inode *inode, struct file *file) 2827 { 2828 struct tun_file *tfile = file->private_data; 2829 2830 tun_detach(tfile, true); 2831 2832 return 0; 2833 } 2834 2835 #ifdef CONFIG_PROC_FS 2836 static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) 2837 { 2838 struct tun_file *tfile = file->private_data; 2839 struct tun_struct *tun; 2840 struct ifreq ifr; 2841 2842 memset(&ifr, 0, sizeof(ifr)); 2843 2844 rtnl_lock(); 2845 tun = tun_get(tfile); 2846 if (tun) 2847 tun_get_iff(current->nsproxy->net_ns, tun, &ifr); 2848 rtnl_unlock(); 2849 2850 if (tun) 2851 tun_put(tun); 2852 2853 seq_printf(m, "iff:\t%s\n", ifr.ifr_name); 2854 } 2855 #endif 2856 2857 static const struct file_operations tun_fops = { 2858 .owner = THIS_MODULE, 2859 .llseek = no_llseek, 2860 .read_iter = tun_chr_read_iter, 2861 .write_iter = tun_chr_write_iter, 2862 .poll = tun_chr_poll, 2863 .unlocked_ioctl = tun_chr_ioctl, 2864 #ifdef CONFIG_COMPAT 2865 .compat_ioctl = tun_chr_compat_ioctl, 2866 #endif 2867 .open = tun_chr_open, 2868 .release = tun_chr_close, 2869 .fasync = tun_chr_fasync, 2870 #ifdef CONFIG_PROC_FS 2871 .show_fdinfo = tun_chr_show_fdinfo, 2872 #endif 2873 }; 2874 2875 static struct miscdevice tun_miscdev = { 2876 .minor = TUN_MINOR, 2877 .name = "tun", 2878 .nodename = "net/tun", 2879 .fops = &tun_fops, 2880 }; 2881 2882 /* ethtool interface */ 2883 2884 static int tun_get_link_ksettings(struct net_device *dev, 2885 struct ethtool_link_ksettings *cmd) 2886 { 2887 ethtool_link_ksettings_zero_link_mode(cmd, supported); 2888 ethtool_link_ksettings_zero_link_mode(cmd, advertising); 2889 cmd->base.speed = SPEED_10; 2890 cmd->base.duplex = DUPLEX_FULL; 2891 cmd->base.port = PORT_TP; 2892 cmd->base.phy_address = 0; 2893 cmd->base.autoneg = AUTONEG_DISABLE; 2894 return 0; 2895 } 2896 2897 static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2898 { 2899 struct tun_struct *tun = netdev_priv(dev); 2900 2901 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 2902 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 2903 2904 switch (tun->flags & TUN_TYPE_MASK) { 2905 case IFF_TUN: 2906 strlcpy(info->bus_info, "tun", sizeof(info->bus_info)); 2907 break; 2908 case IFF_TAP: 2909 strlcpy(info->bus_info, "tap", sizeof(info->bus_info)); 2910 break; 2911 } 2912 } 2913 2914 static u32 tun_get_msglevel(struct net_device *dev) 2915 { 2916 #ifdef TUN_DEBUG 2917 struct tun_struct *tun = netdev_priv(dev); 2918 return tun->debug; 2919 #else 2920 return -EOPNOTSUPP; 2921 #endif 2922 } 2923 2924 static void tun_set_msglevel(struct net_device *dev, u32 value) 2925 { 2926 #ifdef TUN_DEBUG 2927 struct tun_struct *tun = netdev_priv(dev); 2928 tun->debug = value; 2929 #endif 2930 } 2931 2932 static int tun_get_coalesce(struct net_device *dev, 2933 struct ethtool_coalesce *ec) 2934 { 2935 struct tun_struct *tun = netdev_priv(dev); 2936 2937 ec->rx_max_coalesced_frames = tun->rx_batched; 2938 2939 return 0; 2940 } 2941 2942 static int tun_set_coalesce(struct net_device *dev, 2943 struct ethtool_coalesce *ec) 2944 { 2945 struct tun_struct *tun = netdev_priv(dev); 2946 2947 if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) 2948 tun->rx_batched = NAPI_POLL_WEIGHT; 2949 else 2950 tun->rx_batched = ec->rx_max_coalesced_frames; 2951 2952 return 0; 2953 } 2954 2955 static const struct ethtool_ops tun_ethtool_ops = { 2956 .get_drvinfo = tun_get_drvinfo, 2957 .get_msglevel = tun_get_msglevel, 2958 .set_msglevel = tun_set_msglevel, 2959 .get_link = ethtool_op_get_link, 2960 .get_ts_info = ethtool_op_get_ts_info, 2961 .get_coalesce = tun_get_coalesce, 2962 .set_coalesce = tun_set_coalesce, 2963 .get_link_ksettings = tun_get_link_ksettings, 2964 }; 2965 2966 static int tun_queue_resize(struct tun_struct *tun) 2967 { 2968 struct net_device *dev = tun->dev; 2969 struct tun_file *tfile; 2970 struct skb_array **arrays; 2971 int n = tun->numqueues + tun->numdisabled; 2972 int ret, i; 2973 2974 arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL); 2975 if (!arrays) 2976 return -ENOMEM; 2977 2978 for (i = 0; i < tun->numqueues; i++) { 2979 tfile = rtnl_dereference(tun->tfiles[i]); 2980 arrays[i] = &tfile->tx_array; 2981 } 2982 list_for_each_entry(tfile, &tun->disabled, next) 2983 arrays[i++] = &tfile->tx_array; 2984 2985 ret = skb_array_resize_multiple(arrays, n, 2986 dev->tx_queue_len, GFP_KERNEL); 2987 2988 kfree(arrays); 2989 return ret; 2990 } 2991 2992 static int tun_device_event(struct notifier_block *unused, 2993 unsigned long event, void *ptr) 2994 { 2995 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 2996 struct tun_struct *tun = netdev_priv(dev); 2997 2998 if (dev->rtnl_link_ops != &tun_link_ops) 2999 return NOTIFY_DONE; 3000 3001 switch (event) { 3002 case NETDEV_CHANGE_TX_QUEUE_LEN: 3003 if (tun_queue_resize(tun)) 3004 return NOTIFY_BAD; 3005 break; 3006 default: 3007 break; 3008 } 3009 3010 return NOTIFY_DONE; 3011 } 3012 3013 static struct notifier_block tun_notifier_block __read_mostly = { 3014 .notifier_call = tun_device_event, 3015 }; 3016 3017 static int __init tun_init(void) 3018 { 3019 int ret = 0; 3020 3021 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 3022 3023 ret = rtnl_link_register(&tun_link_ops); 3024 if (ret) { 3025 pr_err("Can't register link_ops\n"); 3026 goto err_linkops; 3027 } 3028 3029 ret = misc_register(&tun_miscdev); 3030 if (ret) { 3031 pr_err("Can't register misc device %d\n", TUN_MINOR); 3032 goto err_misc; 3033 } 3034 3035 ret = register_netdevice_notifier(&tun_notifier_block); 3036 if (ret) { 3037 pr_err("Can't register netdevice notifier\n"); 3038 goto err_notifier; 3039 } 3040 3041 return 0; 3042 3043 err_notifier: 3044 misc_deregister(&tun_miscdev); 3045 err_misc: 3046 rtnl_link_unregister(&tun_link_ops); 3047 err_linkops: 3048 return ret; 3049 } 3050 3051 static void tun_cleanup(void) 3052 { 3053 misc_deregister(&tun_miscdev); 3054 rtnl_link_unregister(&tun_link_ops); 3055 unregister_netdevice_notifier(&tun_notifier_block); 3056 } 3057 3058 /* Get an underlying socket object from tun file. Returns error unless file is 3059 * attached to a device. The returned object works like a packet socket, it 3060 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for 3061 * holding a reference to the file for as long as the socket is in use. */ 3062 struct socket *tun_get_socket(struct file *file) 3063 { 3064 struct tun_file *tfile; 3065 if (file->f_op != &tun_fops) 3066 return ERR_PTR(-EINVAL); 3067 tfile = file->private_data; 3068 if (!tfile) 3069 return ERR_PTR(-EBADFD); 3070 return &tfile->socket; 3071 } 3072 EXPORT_SYMBOL_GPL(tun_get_socket); 3073 3074 struct skb_array *tun_get_skb_array(struct file *file) 3075 { 3076 struct tun_file *tfile; 3077 3078 if (file->f_op != &tun_fops) 3079 return ERR_PTR(-EINVAL); 3080 tfile = file->private_data; 3081 if (!tfile) 3082 return ERR_PTR(-EBADFD); 3083 return &tfile->tx_array; 3084 } 3085 EXPORT_SYMBOL_GPL(tun_get_skb_array); 3086 3087 module_init(tun_init); 3088 module_exit(tun_cleanup); 3089 MODULE_DESCRIPTION(DRV_DESCRIPTION); 3090 MODULE_AUTHOR(DRV_COPYRIGHT); 3091 MODULE_LICENSE("GPL"); 3092 MODULE_ALIAS_MISCDEV(TUN_MINOR); 3093 MODULE_ALIAS("devname:net/tun"); 3094