#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>

#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>

/*
 * A macvtap queue is the central object of this driver, it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 */
struct macvtap_queue {
	struct sock sk;
	struct socket sock;
	struct socket_wq wq;
	int vnet_hdr_sz;
	struct macvlan_dev __rcu *vlan;
	struct file *file;
	unsigned int flags;
	u16 queue_index;
	bool enabled;
	struct list_head next;
};

static struct proto macvtap_proto = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct macvtap_queue),
};

/*
 * Variables for dealing with macvtap device numbers.
 */
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);

#define GOODCOPY_LEN 128
static struct class *macvtap_class;
static struct cdev macvtap_cdev;

static const struct proto_ops macvtap_socket_ops;

#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
		      NETIF_F_TSO6)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)

static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */
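
/*
 * The typical reader-side pattern, as used on the transmit path in
 * macvtap_get_user() below, is therefore (illustrative sketch only):
 *
 *	rcu_read_lock();
 *	vlan = rcu_dereference(q->vlan);
 *	if (vlan)
 *		... use vlan->dev ...
 *	rcu_read_unlock();
 */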

static int macvtap_enable_queue(struct net_device *dev, struct file *file,
				struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
	q->enabled = true;

	vlan->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int macvtap_set_queue(struct net_device *dev, struct file *file,
			     struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->numqueues == MAX_MACVTAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = vlan->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &vlan->queue_list);

	vlan->numvtaps++;
	vlan->numqueues++;

	return 0;
}
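
/*
 * Take a queue out of the active taps[] array without detaching it from
 * its file: the last active queue is moved into the freed slot so the
 * array stays dense, and only numvtaps shrinks. Requires RTNL.
 */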
static int macvtap_disable_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;
	struct macvtap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		int index = q->queue_index;
		BUG_ON(index >= vlan->numvtaps);
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
		q->enabled = false;

		vlan->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Holding the rtnl lock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	rtnl_lock();
	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		if (q->enabled)
			BUG_ON(macvtap_disable_queue(q));

		vlan->numqueues--;
		RCU_INIT_POINTER(q->vlan, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the flow hash of the skb; if there is no
 * hash, fall back to the rx queue recorded on the skb, wrapped into
 * range. If all fails, use the first queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *tap = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
		goto out;
	}

	tap = rcu_dereference(vlan->taps[0]);
out:
	return tap;
}

/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];
	int i, j = 0;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);
		qlist[j++] = q;
		RCU_INIT_POINTER(q->vlan, NULL);
		if (q->enabled)
			vlan->numvtaps--;
		vlan->numqueues--;
	}
	for (i = 0; i < vlan->numvtaps; i++)
		RCU_INIT_POINTER(vlan->taps[i], NULL);
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
	/* guarantee that any future macvtap_set_queue will fail */
	vlan->numvtaps = MAX_MACVTAP_QUEUES;

	for (--j; j >= 0; j--)
		sock_put(&qlist[j]->sk);
}

static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macvlan_dev *vlan;
	struct macvtap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	vlan = macvtap_get_vlan_rcu(dev);
	if (!vlan)
		return RX_HANDLER_PASS;

	q = macvtap_get_queue(dev, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
		goto drop;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= vlan->tap_features;
	if (netif_needs_gso(dev, skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			skb_queue_tail(&q->sk.sk_receive_queue, skb);
			goto wake_up;
		}

		kfree_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			skb_queue_tail(&q->sk.sk_receive_queue, segs);
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_ALL_CSUM) &&
		    skb_checksum_help(skb))
			goto drop;
		skb_queue_tail(&q->sk.sk_receive_queue, skb);
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	macvlan_count_rx(vlan, 0, 0, 0);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}
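
/*
 * Minor number bookkeeping: minors are handed out from minor_idr
 * starting at 1, so a vlan->minor of 0 means "no minor allocated"
 * (see macvtap_free_minor() below).
 */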
static int macvtap_get_minor(struct macvlan_dev *vlan)
{
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
		printk(KERN_ERR "too many macvtap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval < 0 ? retval : 0;
}

static void macvtap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&minor_lock);
	if (vlan->minor) {
		idr_remove(&minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&minor_lock);
}

static struct net_device *dev_get_by_macvtap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&minor_lock);
	return dev;
}

static int macvtap_newlink(struct net *src_net,
			   struct net_device *dev,
			   struct nlattr *tb[],
			   struct nlattr *data[])
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err;

	INIT_LIST_HEAD(&vlan->queue_list);

	/* Since macvlan supports all offloads by default, make
	 * tap support all offloads also.
	 */
	vlan->tap_features = TUN_OFFLOADS;

	err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
	if (err)
		return err;

	/* Don't put anything that may fail after macvlan_common_newlink
	 * because we can't undo what it does.
	 */
	return macvlan_common_newlink(src_net, dev, tb, data);
}

static void macvtap_dellink(struct net_device *dev,
			    struct list_head *head)
{
	netdev_rx_handler_unregister(dev);
	macvtap_del_queues(dev);
	macvlan_dellink(dev, head);
}

static void macvtap_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len = TUN_READQ_SIZE;
}

static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
	.kind		= "macvtap",
	.setup		= macvtap_setup,
	.newlink	= macvtap_newlink,
	.dellink	= macvtap_dellink,
};

static void macvtap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void macvtap_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}
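
/*
 * open() on the character device: look up the macvlan device behind
 * this minor, allocate a queue backed by a struct sock and attach it.
 */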
static int macvtap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev;
	struct macvtap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	dev = dev_get_by_macvtap_minor(iminor(inode));
	if (!dev)
		goto out;

	err = -ENOMEM;
	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &macvtap_proto);
	if (!q)
		goto out;

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &macvtap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = macvtap_sock_write_space;
	q->sk.sk_destruct = macvtap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM virtio_net uses macvtap; enable zero copy between
	 * guest kernel and host kernel when the lower device supports it.
	 *
	 * macvlan supports zerocopy iff the lower device supports zero
	 * copy, so we don't have to look at the lower device directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = macvtap_set_queue(dev, file, q);
	if (err)
		sock_put(&q->sk);

out:
	if (dev)
		dev_put(dev);

	rtnl_unlock();
	return err;
}

static int macvtap_release(struct inode *inode, struct file *file)
{
	struct macvtap_queue *q = file->private_data;

	macvtap_put_queue(q);
	return 0;
}

static unsigned int macvtap_poll(struct file *file, poll_table *wait)
{
	struct macvtap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_queue_empty(&q->sk.sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}
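
/*
 * Allocate an skb for a packet of @len bytes, with @linear bytes in the
 * linear head (everything, for small packets) and the rest in pages,
 * leaving @prepad bytes of headroom.
 */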
static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
						size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

/*
 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
 * be shared with the tun/tap driver.
 */
static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
				     struct virtio_net_hdr *vnet_hdr)
{
	unsigned short gso_type = 0;

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
				     current->comm);
			gso_type = SKB_GSO_UDP;
			if (skb->protocol == htons(ETH_P_IPV6))
				ipv6_proxy_select_ident(skb);
			break;
		default:
			return -EINVAL;
		}

		if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			gso_type |= SKB_GSO_TCP_ECN;

		if (vnet_hdr->gso_size == 0)
			return -EINVAL;
	}

	if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
					  vnet_hdr->csum_offset))
			return -EINVAL;
	}

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}
	return 0;
}

static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
				    struct virtio_net_hdr *vnet_hdr)
{
	memset(vnet_hdr, 0, sizeof(*vnet_hdr));

	if (skb_is_gso(skb)) {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		/* This is a hint as to how much should be linear. */
		vnet_hdr->hdr_len = skb_headlen(skb);
		vnet_hdr->gso_size = sinfo->gso_size;
		if (sinfo->gso_type & SKB_GSO_TCPV4)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (sinfo->gso_type & SKB_GSO_TCPV6)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else
			BUG();
		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
			vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else
		vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		vnet_hdr->csum_start = skb_checksum_start_offset(skb);
		if (vlan_tx_tag_present(skb))
			vnet_hdr->csum_start += VLAN_HLEN;
		vnet_hdr->csum_offset = skb->csum_offset;
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
	} /* else everything is zero */
}
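
/*
 * For reference, the header the two helpers above translate to and from
 * (the legacy struct virtio_net_hdr in <linux/virtio_net.h>, without the
 * mergeable-rxbuf num_buffers field) is laid out as:
 *
 *	__u8  flags;		VIRTIO_NET_HDR_F_NEEDS_CSUM / F_DATA_VALID
 *	__u8  gso_type;		VIRTIO_NET_HDR_GSO_{NONE,TCPV4,TCPV6,UDP,ECN}
 *	__u16 hdr_len;		hint: bytes to keep linear
 *	__u16 gso_size;		segment payload size for GSO packets
 *	__u16 csum_start;	start of the region to checksum
 *	__u16 csum_offset;	offset of the checksum field within it
 *
 * It precedes every frame on the fd while IFF_VNET_HDR is set; larger
 * header sizes can be negotiated via TUNSETVNETHDRSZ below.
 */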

/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
				const struct iovec *iv, unsigned long total_len,
				size_t count, int noblock)
{
	int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
	struct sk_buff *skb;
	struct macvlan_dev *vlan;
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	bool zerocopy = false;
	size_t linear;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = q->vnet_hdr_sz;

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
					  sizeof(vnet_hdr));
		if (err < 0)
			goto err;
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
							vnet_hdr.hdr_len)
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						vnet_hdr.csum_offset + 2;
		err = -EINVAL;
		if (vnet_hdr.hdr_len > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	err = -EMSGSIZE;
	if (unlikely(count > UIO_MAXIOV))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		if (iov_pages(iv, vnet_hdr_len + copylen, count)
		    <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		if (vnet_hdr.hdr_len > good_linear)
			linear = good_linear;
		else
			linear = vnet_hdr.hdr_len;
	}

	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
				linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
	else {
		err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
						   len);
		if (!err && m && m->msg_control) {
			struct ubuf_info *uarg = m->msg_control;
			uarg->callback(uarg, false);
		}
	}

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb, ETH_HLEN);

	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	}
	if (vlan) {
		skb->dev = vlan->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	if (vlan)
		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
	rcu_read_unlock();

	return err;
}
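
/*
 * write() / writev() on the tap fd: there is no msghdr on this path,
 * so the zerocopy branch above is never taken; everything else is
 * macvtap_get_user().
 */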
static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
				 unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	ssize_t result = -ENOLINK;
	struct macvtap_queue *q = file->private_data;

	result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
				  file->f_flags & O_NONBLOCK);
	return result;
}

/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
				const struct sk_buff *skb,
				const struct iovec *iv, int len)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int copied, total;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;

		vnet_hdr_len = q->vnet_hdr_sz;
		if ((len -= vnet_hdr_len) < 0)
			return -EINVAL;

		macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);

		if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
			return -EFAULT;
	}
	total = copied = vnet_hdr_len;
	total += skb->len;

	if (!vlan_tx_tag_present(skb))
		len = min_t(int, skb->len, len);
	else {
		int copy;
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		len = min_t(int, skb->len + VLAN_HLEN, len);
		total += VLAN_HLEN;

		copy = min_t(int, vlan_offset, len);
		ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
		len -= copy;
		copied += copy;
		if (ret || !len)
			goto done;

		copy = min_t(int, sizeof(veth), len);
		ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
		len -= copy;
		copied += copy;
		if (ret || !len)
			goto done;
	}

	ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);

done:
	return ret ? ret : total;
}

static ssize_t macvtap_do_read(struct macvtap_queue *q,
			       const struct iovec *iv, unsigned long len,
			       int noblock)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb;
	ssize_t ret = 0;

	while (len) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = skb_dequeue(&q->sk.sk_receive_queue);
		if (!skb) {
			if (noblock) {
				ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			/* Nothing to read, let's sleep */
			schedule();
			continue;
		}
		ret = macvtap_put_user(q, skb, iv, len);
		kfree_skb(skb);
		break;
	}

	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);
	return ret;
}

static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
				unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;
	ssize_t len, ret = 0;

	len = iov_length(iv, count);
	if (len < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
out:
	return ret;
}

static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	ASSERT_RTNL();
	vlan = rtnl_dereference(q->vlan);
	if (vlan)
		dev_hold(vlan->dev);

	return vlan;
}

static void macvtap_put_vlan(struct macvlan_dev *vlan)
{
	dev_put(vlan->dev);
}

static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	int ret;

	vlan = macvtap_get_vlan(q);
	if (!vlan)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = macvtap_enable_queue(vlan->dev, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = macvtap_disable_queue(q);
	else
		ret = -EINVAL;

	macvtap_put_vlan(vlan);
	return ret;
}
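
/*
 * Translate the TUN_F_* offload flags from TUNSETOFFLOAD into netdev
 * feature bits for the macvlan device. Called with RTNL held.
 */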
static int set_offload(struct macvtap_queue *q, unsigned long arg)
{
	struct macvlan_dev *vlan;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	vlan = rtnl_dereference(q->vlan);
	if (!vlan)
		return -ENOLINK;

	features = vlan->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}
	}

	/* The tun/tap driver inverts the usage of the TSO offloads:
	 * setting the TSO bit means that user space is willing to accept
	 * TSO frames, and clearing it means that user space does not
	 * support TSO.
	 * For macvtap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	vlan->tap_features = feature_mask;
	vlan->set_features = features;
	netdev_update_features(vlan->dev);

	return 0;
}

/*
 * provide compatibility with generic tun/tap interface
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned int u;
	int __user *sp = argp;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~(IFF_VNET_HDR | IFF_MULTI_QUEUE)) !=
		    (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    put_user(q->flags, &ifr->ifr_flags))
			ret = -EFAULT;
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = macvtap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR |
			     IFF_MULTI_QUEUE, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(u, up))
			return -EFAULT;

		q->sk.sk_sndbuf = u;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}
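
/*
 * Illustrative user-space usage (a sketch, not part of this driver):
 * once a macvtap link exists, a hypothetical helper could open the
 * matching device node and query it like so:
 *
 *	int fd = open("/dev/tap5", O_RDWR);	// "tap%d" is the ifindex
 *	struct ifreq ifr;
 *	if (fd >= 0 && ioctl(fd, TUNGETIFF, &ifr) == 0)
 *		printf("attached to %s\n", ifr.ifr_name);
 *
 * Every read()/write() on the fd then starts with a virtio_net_hdr,
 * unless IFF_VNET_HDR is cleared via TUNSETIFF.
 */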

#ifdef CONFIG_COMPAT
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations macvtap_fops = {
	.owner		= THIS_MODULE,
	.open		= macvtap_open,
	.release	= macvtap_release,
	.aio_read	= macvtap_aio_read,
	.aio_write	= macvtap_aio_write,
	.poll		= macvtap_poll,
	.llseek		= no_llseek,
	.unlocked_ioctl	= macvtap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= macvtap_compat_ioctl,
#endif
};

static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);

	return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen,
				m->msg_flags & MSG_DONTWAIT);
}

static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len,
			   int flags)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	int ret;

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	ret = macvtap_do_read(q, m->msg_iov, total_len,
			      flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops macvtap_socket_ops = {
	.sendmsg = macvtap_sendmsg,
	.recvmsg = macvtap_recvmsg,
};

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use.
 */
struct socket *macvtap_get_socket(struct file *file)
{
	struct macvtap_queue *q;

	if (file->f_op != &macvtap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);
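
/*
 * Netdevice notifier: create the "tap%d" character device node when a
 * macvtap link registers, and tear it down again when the link goes away.
 */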
static int macvtap_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct macvlan_dev *vlan;
	struct device *classdev;
	dev_t devt;
	int err;

	if (dev->rtnl_link_ops != &macvtap_link_ops)
		return NOTIFY_DONE;

	vlan = netdev_priv(dev);

	switch (event) {
	case NETDEV_REGISTER:
		/* Create the device node here after the network device has
		 * been registered but before register_netdevice has
		 * finished running.
		 */
		err = macvtap_get_minor(vlan);
		if (err)
			return notifier_from_errno(err);

		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		classdev = device_create(macvtap_class, &dev->dev, devt,
					 dev, "tap%d", dev->ifindex);
		if (IS_ERR(classdev)) {
			macvtap_free_minor(vlan);
			return notifier_from_errno(PTR_ERR(classdev));
		}
		break;
	case NETDEV_UNREGISTER:
		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		device_destroy(macvtap_class, devt);
		macvtap_free_minor(vlan);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block macvtap_notifier_block __read_mostly = {
	.notifier_call	= macvtap_device_event,
};

static int macvtap_init(void)
{
	int err;

	err = alloc_chrdev_region(&macvtap_major, 0,
				  MACVTAP_NUM_DEVS, "macvtap");
	if (err)
		goto out1;

	cdev_init(&macvtap_cdev, &macvtap_fops);
	err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
	if (err)
		goto out2;

	macvtap_class = class_create(THIS_MODULE, "macvtap");
	if (IS_ERR(macvtap_class)) {
		err = PTR_ERR(macvtap_class);
		goto out3;
	}

	err = register_netdevice_notifier(&macvtap_notifier_block);
	if (err)
		goto out4;

	err = macvlan_link_register(&macvtap_link_ops);
	if (err)
		goto out5;

	return 0;

out5:
	unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
	class_unregister(macvtap_class);
out3:
	cdev_del(&macvtap_cdev);
out2:
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
out1:
	return err;
}
module_init(macvtap_init);

static void macvtap_exit(void)
{
	rtnl_link_unregister(&macvtap_link_ops);
	unregister_netdevice_notifier(&macvtap_notifier_block);
	class_unregister(macvtap_class);
	cdev_del(&macvtap_cdev);
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
}
module_exit(macvtap_exit);

MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");