/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/xdp.h>

#include "vhost.h"

static int experimental_zcopytx = 1;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
		 " 1 - Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with small
 * pkts.
 */
#define VHOST_NET_PKT_WEIGHT 256

/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	((__force __virtio32)3)
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	((__force __virtio32)2)
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	((__force __virtio32)1)
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN	((__force __virtio32)0)

#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)

enum {
	VHOST_NET_FEATURES = VHOST_FEATURES |
			     (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			     (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			     (1ULL << VIRTIO_F_IOMMU_PLATFORM)
};

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

struct vhost_net_ubuf_ref {
	/* refcount follows semantics similar to kref:
	 *  0: object is released
	 *  1: no outstanding ubufs
	 * >1: outstanding ubufs
	 */
	atomic_t refcount;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};

#define VHOST_RX_BATCH 64
struct vhost_net_buf {
	void **queue;
	int tail;
	int head;
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;
	size_t vhost_hlen;
	size_t sock_hlen;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* For TX, first used idx for DMA done zerocopy buffers
	 * For RX, number of batched heads
	 */
	int done_idx;
	/* an array of userspace buffers info */
	struct ubuf_info *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_net_ubuf_ref *ubufs;
	struct ptr_ring *rx_ring;
	struct vhost_net_buf rxq;
};
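/* The TX zerocopy bookkeeping treats vq->heads[] as a circular buffer of
 * UIO_MAXIOV slots: descriptors in [done_idx, upend_idx) have been handed
 * to the lower device and await DMA completion.  For example, with
 * done_idx == 5 and upend_idx == 8, slots 5..7 are in flight; once slot 5
 * reaches VHOST_DMA_DONE_LEN (or VHOST_DMA_FAILED_LEN) it can be returned
 * to the guest and done_idx advances modulo UIO_MAXIOV.
 */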
struct vhost_net {
	struct vhost_dev dev;
	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Number of TX recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
};

static unsigned vhost_net_zcopy_mask __read_mostly;

static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
{
	if (rxq->tail != rxq->head)
		return rxq->queue[rxq->head];
	else
		return NULL;
}

static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
{
	return rxq->tail - rxq->head;
}

static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
{
	return rxq->tail == rxq->head;
}

static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
{
	void *ret = vhost_net_buf_get_ptr(rxq);
	++rxq->head;
	return ret;
}

static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	rxq->head = 0;
	rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
					     VHOST_RX_BATCH);
	return rxq->tail;
}

static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
		ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
				   vhost_net_buf_get_size(rxq),
				   tun_ptr_free);
		rxq->head = rxq->tail = 0;
	}
}

static int vhost_net_buf_peek_len(void *ptr)
{
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		return xdpf->len;
	}

	return __skb_array_len_with_tag(ptr);
}

static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (!vhost_net_buf_is_empty(rxq))
		goto out;

	if (!vhost_net_buf_produce(nvq))
		return 0;

out:
	return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
}

static void vhost_net_buf_init(struct vhost_net_buf *rxq)
{
	rxq->head = rxq->tail = 0;
}

static void vhost_net_enable_zcopy(int vq)
{
	vhost_net_zcopy_mask |= 0x1 << vq;
}
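/* Lifetime of a vhost_net_ubuf_ref: allocated below with refcount == 1 when
 * a zerocopy-capable backend is attached; each in-flight zerocopy packet
 * takes a reference and the completion callback drops it.
 * vhost_net_ubuf_put_and_wait() drops the initial reference and sleeps
 * until every outstanding ubuf has completed (refcount reaches 0), after
 * which the structure may be freed.
 */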
static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
	struct vhost_net_ubuf_ref *ubufs;
	/* No zero copy backend? Nothing to count. */
	if (!zcopy)
		return NULL;
	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
	if (!ubufs)
		return ERR_PTR(-ENOMEM);
	atomic_set(&ubufs->refcount, 1);
	init_waitqueue_head(&ubufs->wait);
	ubufs->vq = vq;
	return ubufs;
}

static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
	int r = atomic_sub_return(1, &ubufs->refcount);
	if (unlikely(!r))
		wake_up(&ubufs->wait);
	return r;
}

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put(ubufs);
	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}

static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put_and_wait(ubufs);
	kfree(ubufs);
}

static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		kfree(n->vqs[i].ubuf_info);
		n->vqs[i].ubuf_info = NULL;
	}
}

static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
	bool zcopy;
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		zcopy = vhost_net_zcopy_mask & (0x1 << i);
		if (!zcopy)
			continue;
		n->vqs[i].ubuf_info =
			kmalloc_array(UIO_MAXIOV,
				      sizeof(*n->vqs[i].ubuf_info),
				      GFP_KERNEL);
		if (!n->vqs[i].ubuf_info)
			goto err;
	}
	return 0;

err:
	vhost_net_clear_ubuf_info(n);
	return -ENOMEM;
}

static void vhost_net_vq_reset(struct vhost_net *n)
{
	int i;

	vhost_net_clear_ubuf_info(n);

	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].done_idx = 0;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].ubufs = NULL;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
}

static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}

static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}
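/* vhost_net_tx_select_zcopy() throttles zerocopy adaptively: within each
 * window of 1024 packets (the counters reset in vhost_net_tx_packet()),
 * zerocopy stays enabled only while no more than roughly 1 in 64
 * transmissions has failed DMA.  E.g. after 128 packets, a third zerocopy
 * failure disables the path until the counters reset.
 */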
/* In case of DMA done not in order in lower device driver for some reason.
 * upend_idx is used to track end of used idx, done_idx is used to track head
 * of used idx. Once lower device DMA done contiguously, we will signal KVM
 * guest used idx.
 */
static void vhost_zerocopy_signal_used(struct vhost_net *net,
				       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	int i, add;
	int j = 0;

	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			++j;
		} else
			break;
	}
	while (j) {
		add = min(UIO_MAXIOV - nvq->done_idx, j);
		vhost_add_used_and_signal_n(vq->dev, vq,
					    &vq->heads[nvq->done_idx], add);
		nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
		j -= add;
	}
}

static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt;

	rcu_read_lock_bh();

	/* set len to mark this desc buffers done DMA */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	cnt = vhost_net_ubuf_put(ubufs);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of times).
	 */
	if (cnt <= 1 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);

	rcu_read_unlock_bh();
}

static inline unsigned long busy_clock(void)
{
	return local_clock() >> 10;
}

static bool vhost_can_busy_poll(unsigned long endtime)
{
	return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
		      !signal_pending(current));
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	if (!vq->private_data)
		return;
	vhost_poll_stop(poll);
}

static int vhost_net_enable_vq(struct vhost_net *n,
			       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock;

	sock = vq->private_data;
	if (!sock)
		return 0;

	return vhost_poll_start(poll, sock->file);
}

static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
				    struct vhost_virtqueue *vq,
				    struct iovec iov[], unsigned int iov_size,
				    unsigned int *out_num, unsigned int *in_num,
				    bool *busyloop_intr)
{
	unsigned long uninitialized_var(endtime);
	int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
				  out_num, in_num, NULL, NULL);

	if (r == vq->num && vq->busyloop_timeout) {
		preempt_disable();
		endtime = busy_clock() + vq->busyloop_timeout;
		while (vhost_can_busy_poll(endtime)) {
			if (vhost_has_work(vq->dev)) {
				*busyloop_intr = true;
				break;
			}
			if (!vhost_vq_avail_empty(vq->dev, vq))
				break;
			cpu_relax();
		}
		preempt_enable();
		r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
				      out_num, in_num, NULL, NULL);
	}

	return r;
}
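/* The busy loop above turns a ring-empty condition into a bounded spin
 * instead of an immediate sleep: busyloop_timeout is configured per vq by
 * userspace via the VHOST_SET_VRING_BUSYLOOP_TIMEOUT ioctl, in units that
 * match busy_clock() (local_clock() >> 10, roughly microseconds), trading
 * CPU time for lower wakeup latency.
 */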
static bool vhost_exceeds_maxpend(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;

	return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
	       min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err;
	size_t hdr_size;
	struct socket *sock;
	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
	bool zcopy, zcopy_used;
	int sent_pkts = 0;

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	if (!sock)
		goto out;

	if (!vq_iotlb_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	hdr_size = nvq->vhost_hlen;
	zcopy = nvq->ubufs;

	for (;;) {
		bool busyloop_intr;

		/* Release DMAs done buffers first */
		if (zcopy)
			vhost_zerocopy_signal_used(net, vq);

		busyloop_intr = false;
		head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
						ARRAY_SIZE(vq->iov),
						&out, &in, &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		len = iov_length(vq->iov, out);
		iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
		iov_iter_advance(&msg.msg_iter, hdr_size);
		/* Sanity check */
		if (!msg_data_left(&msg)) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       len, hdr_size);
			break;
		}
		len = msg_data_left(&msg);

		zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
				   && !vhost_exceeds_maxpend(net)
				   && vhost_net_tx_select_zcopy(net);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			struct ubuf_info *ubuf;
			ubuf = nvq->ubuf_info + nvq->upend_idx;

			vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
			ubuf->callback = vhost_zerocopy_callback;
			ubuf->ctx = nvq->ubufs;
			ubuf->desc = nvq->upend_idx;
			refcount_set(&ubuf->refcnt, 1);
			msg.msg_control = ubuf;
			msg.msg_controllen = sizeof(ubuf);
			ubufs = nvq->ubufs;
			atomic_inc(&ubufs->refcount);
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		} else {
			msg.msg_control = NULL;
			ubufs = NULL;
		}

		total_len += len;
		if (total_len < VHOST_NET_WEIGHT &&
		    !vhost_vq_avail_empty(&net->dev, vq) &&
		    likely(!vhost_exceeds_maxpend(net))) {
			msg.msg_flags |= MSG_MORE;
		} else {
			msg.msg_flags &= ~MSG_MORE;
		}
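		/* MSG_MORE hints the lower device (e.g. tun) that more data
		 * is on the way, so it may defer delivery and batch packets;
		 * the hint is dropped when the weight limits are near or the
		 * ring is empty, so the last packet of a burst is flushed
		 * out promptly.
		 */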
		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy_used) {
				vhost_net_ubuf_put(ubufs);
				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
					% UIO_MAXIOV;
			}
			vhost_discard_vq_desc(vq, 1);
			vhost_net_enable_vq(net, vq);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		vhost_net_tx_packet(net);
		if (unlikely(total_len >= VHOST_NET_WEIGHT) ||
		    unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}
out:
	mutex_unlock(&vq->mutex);
}

static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	if (rvq->rx_ring)
		return vhost_net_buf_peek(rvq);

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (skb_vlan_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

static int sk_has_rx_data(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (sock->ops->peek_len)
		return sock->ops->peek_len(sock);

	return !skb_queue_empty(&sk->sk_receive_queue);
}

static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_dev *dev = vq->dev;

	if (!nvq->done_idx)
		return;

	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
	nvq->done_idx = 0;
}

static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
				      bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;
	unsigned long uninitialized_var(endtime);
	int len = peek_head_len(rnvq, sk);

	if (!len && tvq->busyloop_timeout) {
		/* Flush batched heads first */
		vhost_rx_signal_used(rnvq);
		/* Both tx vq and rx socket were polled here */
		mutex_lock_nested(&tvq->mutex, 1);
		vhost_disable_notify(&net->dev, tvq);

		preempt_disable();
		endtime = busy_clock() + tvq->busyloop_timeout;

		while (vhost_can_busy_poll(endtime)) {
			if (vhost_has_work(&net->dev)) {
				*busyloop_intr = true;
				break;
			}
			if ((sk_has_rx_data(sk) &&
			     !vhost_vq_avail_empty(&net->dev, rvq)) ||
			    !vhost_vq_avail_empty(&net->dev, tvq))
				break;
			cpu_relax();
		}

		preempt_enable();

		if (!vhost_vq_avail_empty(&net->dev, tvq)) {
			vhost_poll_queue(&tvq->poll);
		} else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
			vhost_disable_notify(&net->dev, tvq);
			vhost_poll_queue(&tvq->poll);
		}

		mutex_unlock(&tvq->mutex);

		len = peek_head_len(rnvq, sk);
	}

	return len;
}
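/* With VIRTIO_NET_F_MRG_RXBUF a single received packet may span several
 * descriptor chains; get_rx_bufs() below gathers enough heads to cover
 * datalen, and handle_rx() later patches num_buffers in the virtio-net
 * header accordingly.  Without mergeable buffers the quota is 1, i.e. one
 * (big) buffer per packet.
 */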
/* This is a multi-buffer version of vhost_get_vq_desc, that works if
 * vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota	- headcount quota, 1 for big buffer
 *	 returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;
	/* len is always initialized before use since we are always called with
	 * datalen > 0.
	 */
	u32 uninitialized_var(len);

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		r = vhost_get_vq_desc(vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (unlikely(r < 0))
			goto err;

		d = r;
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
			       "out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = cpu_to_vhost32(vq, d);
		len = iov_length(vq->iov + seg, in);
		heads[headcount].len = cpu_to_vhost32(vq, len);
		datalen -= len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;

	/* Detect overrun */
	if (unlikely(datalen > 0)) {
		r = UIO_MAXIOV + 1;
		goto err;
	}
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}
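/* Note the overrun convention above: when even the full quota cannot hold
 * datalen, get_rx_bufs() returns UIO_MAXIOV + 1 rather than an errno, so
 * handle_rx() can tell "truncate and discard this packet" apart from hard
 * failures that stop the whole poll cycle.
 */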
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned uninitialized_var(in), log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, mergeable;
	s16 headcount;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	bool busyloop_intr = false;
	struct socket *sock;
	struct iov_iter fixup;
	__virtio16 num_buffers;
	int recv_pkts = 0;

	mutex_lock_nested(&vq->mutex, 0);
	sock = vq->private_data;
	if (!sock)
		goto out;

	if (!vq_iotlb_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	vhost_hlen = nvq->vhost_hlen;
	sock_hlen = nvq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);

	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
						      &busyloop_intr))) {
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
					vhost_len, &in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			goto out;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new? Wait for eventfd to tell us
			 * they refilled. */
			goto out;
		}
		busyloop_intr = false;
		if (nvq->rx_ring)
			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
		/* On overrun, truncate and discard */
		if (unlikely(headcount > UIO_MAXIOV)) {
			iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
			err = sock->ops->recvmsg(sock, &msg,
						 1, MSG_DONTWAIT | MSG_TRUNC);
			pr_debug("Discarded rx packet: len %zd\n", sock_len);
			continue;
		}
		/* We don't need to be notified again. */
		iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
		fixup = msg.msg_iter;
		if (unlikely((vhost_hlen))) {
			/* We will supply the header ourselves
			 * TODO: support TSO.
			 */
			iov_iter_advance(&msg.msg_iter, vhost_hlen);
		}
		err = sock->ops->recvmsg(sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 " len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
		if (unlikely(vhost_hlen)) {
			if (copy_to_iter(&hdr, sizeof(hdr),
					 &fixup) != sizeof(hdr)) {
				vq_err(vq, "Unable to write vnet_hdr "
				       "at addr %p\n", vq->iov->iov_base);
				goto out;
			}
		} else {
			/* Header came from socket; we'll need to patch
			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
			 */
			iov_iter_advance(&fixup, sizeof(hdr));
		}
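		/* Either way, 'fixup' now points just past struct
		 * virtio_net_hdr in the guest buffer, which is exactly the
		 * num_buffers field of virtio_net_hdr_mrg_rxbuf; the copy
		 * below patches it in place after the payload has already
		 * been received around it.
		 */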
		/* TODO: Should check and handle checksum. */

		num_buffers = cpu_to_vhost16(vq, headcount);
		if (likely(mergeable) &&
		    copy_to_iter(&num_buffers, sizeof num_buffers,
				 &fixup) != sizeof num_buffers) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			goto out;
		}
		nvq->done_idx += headcount;
		if (nvq->done_idx > VHOST_RX_BATCH)
			vhost_rx_signal_used(nvq);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len);
		total_len += vhost_len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT) ||
		    unlikely(++recv_pkts >= VHOST_NET_PKT_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			goto out;
		}
	}
	if (unlikely(busyloop_intr))
		vhost_poll_queue(&vq->poll);
	else
		vhost_net_enable_vq(net, vq);
out:
	vhost_rx_signal_used(nvq);
	mutex_unlock(&vq->mutex);
}

static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}
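/* A vhost-net instance is driven entirely from userspace through the
 * /dev/vhost-net character device.  A typical (hypothetical, QEMU-style)
 * setup sequence looks like:
 *
 *	fd = open("/dev/vhost-net", O_RDWR);
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	ioctl(fd, VHOST_SET_FEATURES, &features);
 *	... VHOST_SET_VRING_* calls to wire up each virtqueue ...
 *	ioctl(fd, VHOST_NET_SET_BACKEND,
 *	      &(struct vhost_vring_file){ .index = idx, .fd = tap_fd });
 *
 * vhost_net_open() below only allocates and initializes device state;
 * nothing runs until a backend is attached.
 */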
static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	void **queue;
	int i;

	n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kvfree(n);
		return -ENOMEM;
	}

	queue = kmalloc_array(VHOST_RX_BATCH, sizeof(void *),
			      GFP_KERNEL);
	if (!queue) {
		kfree(vqs);
		kvfree(n);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;

	dev = &n->dev;
	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].ubufs = NULL;
		n->vqs[i].ubuf_info = NULL;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].done_idx = 0;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		n->vqs[i].rx_ring = NULL;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);

	f->private_data = n;

	return 0;
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	vhost_net_disable_vq(n, vq);
	vq->private_data = NULL;
	vhost_net_buf_unproduce(nvq);
	nvq->rx_ring = NULL;
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->vqs[index].vq.poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = true;
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		/* Wait for all lower device DMAs done. */
		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = false;
		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
	}
}
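/* Note the flush protocol above: tx_flush is raised under the tx vq mutex
 * so handle_tx() stops selecting zerocopy, the wait then drains every DMA
 * still in flight, and the refcount is re-armed to 1 so the same ubufs
 * structure can keep counting after the flush completes.
 */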
static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev);
	vhost_net_vq_reset(n);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	/* Make sure no callbacks are outstanding */
	synchronize_rcu_bh();
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
	kfree(n->dev.vqs);
	kvfree(n);
	return 0;
}

static struct socket *get_raw_socket(int fd)
{
	struct {
		struct sockaddr_ll sa;
		char buf[MAX_ADDR_LEN];
	} uaddr;
	int r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, 0);
	if (r < 0)
		goto err;

	if (uaddr.sa.sll_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	sockfd_put(sock);
	return ERR_PTR(r);
}

static struct ptr_ring *get_tap_ptr_ring(int fd)
{
	struct ptr_ring *ring;
	struct file *file = fget(fd);

	if (!file)
		return NULL;
	ring = tun_get_tx_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = tap_get_ptr_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = NULL;
out:
	fput(file);
	return ring;
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = tap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}
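/* Three backend flavors are accepted by get_socket() above: a raw packet
 * socket (AF_PACKET/SOCK_RAW), a tun/tap file, or a macvtap file; fd == -1
 * detaches the current backend.  vhost_net_set_backend() below swaps the
 * socket under both the device and vq mutexes so the old backend can be
 * flushed and released safely.
 */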
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_net_virtqueue *nvq;
	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index].vq;
	nvq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = vq->private_data;
	if (sock != oldsock) {
		ubufs = vhost_net_ubuf_alloc(vq,
					     sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}

		vhost_net_disable_vq(n, vq);
		vq->private_data = sock;
		vhost_net_buf_unproduce(nvq);
		r = vhost_vq_init_access(vq);
		if (r)
			goto err_used;
		r = vhost_net_enable_vq(n, vq);
		if (r)
			goto err_used;
		if (index == VHOST_NET_VQ_RX)
			nvq->rx_ring = get_tap_ptr_ring(fd);

		oldubufs = nvq->ubufs;
		nvq->ubufs = ubufs;

		n->tx_packets = 0;
		n->tx_zcopy_err = 0;
		n->tx_flush = false;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_net_ubuf_put_wait_and_free(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(n, vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_net_flush_vq(n, index);
		sockfd_put(oldsock);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_used:
	vq->private_data = oldsock;
	vhost_net_enable_vq(n, vq);
	if (ubufs)
		vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
	if (sock)
		sockfd_put(sock);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;
	struct vhost_umem *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_reset_owner(&n->dev, umem);
	vhost_net_vq_reset(n);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	return err;
}
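/* The negotiated features determine the vnet header layout seen on the
 * ring: with VIRTIO_NET_F_MRG_RXBUF or VIRTIO_F_VERSION_1 the 12-byte
 * struct virtio_net_hdr_mrg_rxbuf is used, otherwise the 10-byte
 * struct virtio_net_hdr.  VHOST_NET_F_VIRTIO_NET_HDR then decides whether
 * vhost supplies that header itself (vhost_hlen) or the backend socket
 * already carries it (sock_hlen).
 */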
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			       (1ULL << VIRTIO_F_VERSION_1))) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev))
		goto out_unlock;

	if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) {
		if (vhost_init_device_iotlb(&n->dev, true))
			goto out_unlock;
	}

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].vq.mutex);
		n->vqs[i].vq.acked_features = features;
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].vq.mutex);
	}
	mutex_unlock(&n->dev.mutex);
	return 0;

out_unlock:
	mutex_unlock(&n->dev.mutex);
	return -EFAULT;
}

static long vhost_net_set_owner(struct vhost_net *n)
{
	int r;

	mutex_lock(&n->dev.mutex);
	if (vhost_dev_has_owner(&n->dev)) {
		r = -EBUSY;
		goto out;
	}
	r = vhost_net_set_ubuf_info(n);
	if (r)
		goto out;
	r = vhost_dev_set_owner(&n->dev);
	if (r)
		vhost_net_clear_ubuf_info(n);
	vhost_net_flush(n);
out:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_NET_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_NET_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	case VHOST_SET_OWNER:
		return vhost_net_set_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		else
			vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
				   unsigned long arg)
{
	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}
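/* The read_iter/write_iter pair above and below is not a packet data path:
 * when VIRTIO_F_IOMMU_PLATFORM is negotiated it carries vhost IOTLB
 * messages, with reads delivering translation-miss requests to userspace
 * and writes feeding IOTLB updates back into the kernel.
 */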
static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_net_fops = {
	.owner		= THIS_MODULE,
	.release	= vhost_net_release,
	.read_iter	= vhost_net_chr_read_iter,
	.write_iter	= vhost_net_chr_write_iter,
	.poll		= vhost_net_chr_poll,
	.unlocked_ioctl	= vhost_net_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vhost_net_compat_ioctl,
#endif
	.open		= vhost_net_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};

static int vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");