/*
 * Vhost User library
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Marc-André Lureau <mlureau@redhat.com>
 *  Victor Kaplansky <victork@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

/* this code avoids GLib dependency */
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <stdarg.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/eventfd.h>
#include <sys/mman.h>
#include <endian.h>

#if defined(__linux__)
#include <sys/syscall.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

#ifdef __NR_userfaultfd
#include <linux/userfaultfd.h>
#endif

#endif

#include "include/atomic.h"

#include "libvhost-user.h"

/* usually provided by GLib */
#ifndef MIN
#define MIN(x, y) ({                            \
    typeof(x) _min1 = (x);                      \
    typeof(y) _min2 = (y);                      \
    (void) (&_min1 == &_min2);                  \
    _min1 < _min2 ? _min1 : _min2; })
#endif

/* Round number down to multiple */
#define ALIGN_DOWN(n, m) ((n) / (m) * (m))

/* Round number up to multiple */
#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))

#ifndef unlikely
#define unlikely(x)   __builtin_expect(!!(x), 0)
#endif

/* Align each region to cache line size in inflight buffer */
#define INFLIGHT_ALIGNMENT 64

/* The version of inflight buffer */
#define INFLIGHT_VERSION 1

/* The version of the protocol we support */
#define VHOST_USER_VERSION 1
#define LIBVHOST_USER_DEBUG 0

#define DPRINT(...)                             \
    do {                                        \
        if (LIBVHOST_USER_DEBUG) {              \
            fprintf(stderr, __VA_ARGS__);       \
        }                                       \
    } while (0)

static inline
bool has_feature(uint64_t features, unsigned int fbit)
{
    assert(fbit < 64);
    return !!(features & (1ULL << fbit));
}

static inline
bool vu_has_feature(VuDev *dev,
                    unsigned int fbit)
{
    return has_feature(dev->features, fbit);
}

static inline bool vu_has_protocol_feature(VuDev *dev, unsigned int fbit)
{
    return has_feature(dev->protocol_features, fbit);
}

const char *
vu_request_to_string(unsigned int req)
{
#define REQ(req) [req] = #req
    static const char *vu_request_str[] = {
        REQ(VHOST_USER_NONE),
        REQ(VHOST_USER_GET_FEATURES),
        REQ(VHOST_USER_SET_FEATURES),
        REQ(VHOST_USER_SET_OWNER),
        REQ(VHOST_USER_RESET_OWNER),
        REQ(VHOST_USER_SET_MEM_TABLE),
        REQ(VHOST_USER_SET_LOG_BASE),
        REQ(VHOST_USER_SET_LOG_FD),
        REQ(VHOST_USER_SET_VRING_NUM),
        REQ(VHOST_USER_SET_VRING_ADDR),
        REQ(VHOST_USER_SET_VRING_BASE),
        REQ(VHOST_USER_GET_VRING_BASE),
        REQ(VHOST_USER_SET_VRING_KICK),
        REQ(VHOST_USER_SET_VRING_CALL),
        REQ(VHOST_USER_SET_VRING_ERR),
        REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_GET_QUEUE_NUM),
        REQ(VHOST_USER_SET_VRING_ENABLE),
        REQ(VHOST_USER_SEND_RARP),
        REQ(VHOST_USER_NET_SET_MTU),
        REQ(VHOST_USER_SET_SLAVE_REQ_FD),
        REQ(VHOST_USER_IOTLB_MSG),
        REQ(VHOST_USER_SET_VRING_ENDIAN),
        REQ(VHOST_USER_GET_CONFIG),
        REQ(VHOST_USER_SET_CONFIG),
        REQ(VHOST_USER_POSTCOPY_ADVISE),
        REQ(VHOST_USER_POSTCOPY_LISTEN),
        REQ(VHOST_USER_POSTCOPY_END),
        REQ(VHOST_USER_GET_INFLIGHT_FD),
        REQ(VHOST_USER_SET_INFLIGHT_FD),
        REQ(VHOST_USER_GPU_SET_SOCKET),
        REQ(VHOST_USER_VRING_KICK),
        REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
        REQ(VHOST_USER_ADD_MEM_REG),
        REQ(VHOST_USER_REM_MEM_REG),
        REQ(VHOST_USER_MAX),
    };
#undef REQ

    if (req < VHOST_USER_MAX) {
        return vu_request_str[req];
    } else {
        return "unknown";
    }
}

static void
vu_panic(VuDev *dev, const char *msg, ...)
{
    char *buf = NULL;
    va_list ap;

    va_start(ap, msg);
    if (vasprintf(&buf, msg, ap) < 0) {
        buf = NULL;
    }
    va_end(ap);

    dev->broken = true;
    dev->panic(dev, buf);
    free(buf);

    /*
     * FIXME:
     * find a way to call virtio_error, or perhaps close the connection?
     */
}

/* Translate guest physical address to our virtual address. */
void *
vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
{
    int i;

    if (*plen == 0) {
        return NULL;
    }

    /* Find matching memory region. */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
            if ((guest_addr + *plen) > (r->gpa + r->size)) {
                *plen = r->gpa + r->size - guest_addr;
            }
            return (void *)(uintptr_t)
                guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
        }
    }

    return NULL;
}

/* Translate qemu virtual address to our virtual address. */
static void *
qva_to_va(VuDev *dev, uint64_t qemu_addr)
{
    int i;

    /* Find matching memory region. */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
            return (void *)(uintptr_t)
                qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
        }
    }

    return NULL;
}

static void
vmsg_close_fds(VhostUserMsg *vmsg)
{
    int i;

    for (i = 0; i < vmsg->fd_num; i++) {
        close(vmsg->fds[i]);
    }
}

/* Set reply payload.u64 and clear request flags and fd_num */
static void vmsg_set_reply_u64(VhostUserMsg *vmsg, uint64_t val)
{
    vmsg->flags = 0; /* defaults will be set by vu_send_reply() */
    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->payload.u64 = val;
    vmsg->fd_num = 0;
}

/* A test to see if we have userfault available */
static bool
have_userfault(void)
{
#if defined(__linux__) && defined(__NR_userfaultfd) &&\
        defined(UFFD_FEATURE_MISSING_SHMEM) &&\
        defined(UFFD_FEATURE_MISSING_HUGETLBFS)
    /* Now test the kernel we're running on really has the features */
    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    struct uffdio_api api_struct;
    if (ufd < 0) {
        return false;
    }

    api_struct.api = UFFD_API;
    api_struct.features = UFFD_FEATURE_MISSING_SHMEM |
                          UFFD_FEATURE_MISSING_HUGETLBFS;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        close(ufd);
        return false;
    }
    close(ufd);
    return true;

#else
    return false;
#endif
}

static bool
vu_message_read_default(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
        .msg_controllen = sizeof(control),
    };
    size_t fd_size;
    struct cmsghdr *cmsg;
    int rc;

    do {
        rc = recvmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (rc < 0) {
        vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
        return false;
    }

    vmsg->fd_num = 0;
    for (cmsg = CMSG_FIRSTHDR(&msg);
         cmsg != NULL;
         cmsg = CMSG_NXTHDR(&msg, cmsg))
    {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            fd_size = cmsg->cmsg_len - CMSG_LEN(0);
            vmsg->fd_num = fd_size / sizeof(int);
            memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
            break;
        }
    }

    if (vmsg->size > sizeof(vmsg->payload)) {
        vu_panic(dev,
                 "Error: too big message request: %d, size: vmsg->size: %u, "
                 "while sizeof(vmsg->payload) = %zu\n",
                 vmsg->request, vmsg->size, sizeof(vmsg->payload));
        goto fail;
    }

    if (vmsg->size) {
        do {
            rc = read(conn_fd, &vmsg->payload, vmsg->size);
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

        if (rc <= 0) {
            vu_panic(dev, "Error while reading: %s", strerror(errno));
            goto fail;
        }

        assert(rc == vmsg->size);
    }

    return true;

fail:
    vmsg_close_fds(vmsg);

    return false;
}

static bool
vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    int rc;
    uint8_t *p = (uint8_t *)vmsg;
    char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
    };
    struct cmsghdr *cmsg;

    memset(control, 0, sizeof(control));
    assert(vmsg->fd_num <= VHOST_MEMORY_BASELINE_NREGIONS);
    if (vmsg->fd_num > 0) {
        size_t fdsize = vmsg->fd_num * sizeof(int);
        msg.msg_controllen = CMSG_SPACE(fdsize);
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(fdsize);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
    } else {
        msg.msg_controllen = 0;
    }

    do {
        rc = sendmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (vmsg->size) {
        do {
            if (vmsg->data) {
                rc = write(conn_fd, vmsg->data, vmsg->size);
            } else {
                rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
            }
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
    }

    if (rc <= 0) {
        vu_panic(dev, "Error while writing: %s", strerror(errno));
        return false;
    }

    return true;
}

static bool
vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    /* Set the version in the flags when sending the reply */
    vmsg->flags &= ~VHOST_USER_VERSION_MASK;
    vmsg->flags |= VHOST_USER_VERSION;
    vmsg->flags |= VHOST_USER_REPLY_MASK;

    return vu_message_write(dev, conn_fd, vmsg);
}

/*
 * Processes a reply on the slave channel.
 * Entered with slave_mutex held and releases it before exit.
 * Returns true on success.
 */
static bool
vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
{
    VhostUserMsg msg_reply;
    bool result = false;

    if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        result = true;
        goto out;
    }

    if (!vu_message_read_default(dev, dev->slave_fd, &msg_reply)) {
        goto out;
    }

    if (msg_reply.request != vmsg->request) {
        DPRINT("Received unexpected msg type. Expected %d received %d",
               vmsg->request, msg_reply.request);
        goto out;
    }

    result = msg_reply.payload.u64 == 0;

out:
    pthread_mutex_unlock(&dev->slave_mutex);
    return result;
}

/* Kick the log_call_fd if required. */
static void
vu_log_kick(VuDev *dev)
{
    if (dev->log_call_fd != -1) {
        DPRINT("Kicking the QEMU's log...\n");
        if (eventfd_write(dev->log_call_fd, 1) < 0) {
            vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
        }
    }
}

static void
vu_log_page(uint8_t *log_table, uint64_t page)
{
    DPRINT("Logged dirty guest page: %"PRId64"\n", page);
    qatomic_or(&log_table[page / 8], 1 << (page % 8));
}

static void
vu_log_write(VuDev *dev, uint64_t address, uint64_t length)
{
    uint64_t page;

    if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
        !dev->log_table || !length) {
        return;
    }

    assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));

    page = address / VHOST_LOG_PAGE;
    while (page * VHOST_LOG_PAGE < address + length) {
        vu_log_page(dev->log_table, page);
        page += 1;
    }

    vu_log_kick(dev);
}

static void
vu_kick_cb(VuDev *dev, int condition, void *data)
{
    int index = (intptr_t)data;
    VuVirtq *vq = &dev->vq[index];
    int sock = vq->kick_fd;
    eventfd_t kick_data;
    ssize_t rc;

    rc = eventfd_read(sock, &kick_data);
    if (rc == -1) {
        vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
        dev->remove_watch(dev, dev->vq[index].kick_fd);
    } else {
        DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
               kick_data, vq->handler, index);
        if (vq->handler) {
            vq->handler(dev, index);
        }
    }
}

static bool
vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg->payload.u64 =
        /*
         * The following VIRTIO feature bits are supported by our virtqueue
         * implementation:
         */
        1ULL << VIRTIO_F_NOTIFY_ON_EMPTY |
        1ULL << VIRTIO_RING_F_INDIRECT_DESC |
        1ULL << VIRTIO_RING_F_EVENT_IDX |
        1ULL << VIRTIO_F_VERSION_1 |

        /* vhost-user feature bits */
        1ULL << VHOST_F_LOG_ALL |
        1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    if (dev->iface->get_features) {
        vmsg->payload.u64 |= dev->iface->get_features(dev);
    }

    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    return true;
}

static void
vu_set_enable_all_rings(VuDev *dev, bool enabled)
{
    uint16_t i;

    for (i = 0; i < dev->max_queues; i++) {
        dev->vq[i].enable = enabled;
    }
}

static bool
vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    dev->features = vmsg->payload.u64;
    if (!vu_has_feature(dev, VIRTIO_F_VERSION_1)) {
        /*
         * We only support devices conforming to VIRTIO 1.0 or
         * later
         */
        vu_panic(dev, "virtio legacy devices aren't supported by libvhost-user");
        return false;
    }

    if (!(dev->features & VHOST_USER_F_PROTOCOL_FEATURES)) {
        vu_set_enable_all_rings(dev, true);
    }

    if (dev->iface->set_features) {
        dev->iface->set_features(dev, dev->features);
    }

    return false;
}

static bool
vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    return false;
}

static void
vu_close_log(VuDev *dev)
{
    if (dev->log_table) {
        if (munmap(dev->log_table, dev->log_size) != 0) {
            perror("close log munmap() error");
        }

        dev->log_table = NULL;
    }
    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
        dev->log_call_fd = -1;
    }
}

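/*
 * Note added for clarity (not in the original source): the message handlers
 * in this file return true when vu_process_message() should send the
 * (possibly modified) vmsg back to the master as a reply, and false when no
 * reply is needed. vu_dispatch() additionally replies with u64 = 0 when the
 * master set VHOST_USER_NEED_REPLY_MASK and no reply was produced.
 */
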
static bool
vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vu_set_enable_all_rings(dev, false);

    return false;
}

static bool
map_ring(VuDev *dev, VuVirtq *vq)
{
    vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr);
    vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr);
    vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr);

    DPRINT("Setting virtq addresses:\n");
    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
    DPRINT("    vring_used  at %p\n", vq->vring.used);
    DPRINT("    vring_avail at %p\n", vq->vring.avail);

    return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
}

static bool
generate_faults(VuDev *dev) {
    int i;
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *dev_region = &dev->regions[i];
        int ret;
#ifdef UFFDIO_REGISTER
        /*
         * We should already have an open ufd. Mark each memory
         * range as ufd.
         * Discard any mapping we have here; note I can't use MADV_REMOVE
         * or fallocate to make the hole since I don't want to lose
         * data that's already arrived in the shared process.
         * TODO: How to do hugepage
         */
        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_DONTNEED);
        if (ret) {
            fprintf(stderr,
                    "%s: Failed to madvise(DONTNEED) region %d: %s\n",
                    __func__, i, strerror(errno));
        }
        /*
         * Turn off transparent hugepages so we don't get lost wakeups
         * in neighbouring pages.
         * TODO: Turn this back on later.
         */
        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_NOHUGEPAGE);
        if (ret) {
            /*
             * Note: This can happen legally on kernels that are configured
             * without madvise'able hugepages
             */
            fprintf(stderr,
                    "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
                    __func__, i, strerror(errno));
        }
        struct uffdio_register reg_struct;
        reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
        reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
        reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

        if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
            vu_panic(dev, "%s: Failed to userfault region %d "
                          "@%p + size:%zx offset: %zx: (ufd=%d)%s\n",
                     __func__, i,
                     dev_region->mmap_addr,
                     dev_region->size, dev_region->mmap_offset,
                     dev->postcopy_ufd, strerror(errno));
            return false;
        }
        if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
            vu_panic(dev, "%s Region (%d) doesn't support COPY",
                     __func__, i);
            return false;
        }
        DPRINT("%s: region %d: Registered userfault for %"
               PRIx64 " + %" PRIx64 "\n", __func__, i,
               (uint64_t)reg_struct.range.start,
               (uint64_t)reg_struct.range.len);
        /* Now it's registered we can let the client at it */
        if (mprotect((void *)(uintptr_t)dev_region->mmap_addr,
                     dev_region->size + dev_region->mmap_offset,
                     PROT_READ | PROT_WRITE)) {
            vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
                     i, strerror(errno));
            return false;
        }
        /* TODO: Stash 'zero' support flags somewhere */
#endif
    }

    return true;
}

static bool
vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
    int i;
    bool track_ramblocks = dev->postcopy_listening;
    VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
    VuDevRegion *dev_region = &dev->regions[dev->nregions];
    void *mmap_addr;

    if (vmsg->fd_num != 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "VHOST_USER_ADD_MEM_REG received %d fds - only 1 fd "
                      "should be sent for this message type", vmsg->fd_num);
        return false;
    }

    if (vmsg->size < VHOST_USER_MEM_REG_SIZE) {
        close(vmsg->fds[0]);
        vu_panic(dev, "VHOST_USER_ADD_MEM_REG requires a message size of at "
                      "least %d bytes and only %d bytes were received",
                 VHOST_USER_MEM_REG_SIZE, vmsg->size);
        return false;
    }

    if (dev->nregions == VHOST_USER_MAX_RAM_SLOTS) {
        close(vmsg->fds[0]);
        vu_panic(dev, "failing attempt to hot add memory via "
                      "VHOST_USER_ADD_MEM_REG message because the backend has "
                      "no free ram slots available");
        return false;
    }

    /*
     * If we are in postcopy mode and we receive a u64 payload with a 0 value
     * we know all the postcopy client bases have been received, and we
     * should start generating faults.
     */
    if (track_ramblocks &&
        vmsg->size == sizeof(vmsg->payload.u64) &&
        vmsg->payload.u64 == 0) {
        (void)generate_faults(dev);
        return false;
    }

    DPRINT("Adding region: %u\n", dev->nregions);
    DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
           msg_region->guest_phys_addr);
    DPRINT("    memory_size:     0x%016"PRIx64"\n",
           msg_region->memory_size);
    DPRINT("    userspace_addr   0x%016"PRIx64"\n",
           msg_region->userspace_addr);
    DPRINT("    mmap_offset      0x%016"PRIx64"\n",
           msg_region->mmap_offset);

    dev_region->gpa = msg_region->guest_phys_addr;
    dev_region->size = msg_region->memory_size;
    dev_region->qva = msg_region->userspace_addr;
    dev_region->mmap_offset = msg_region->mmap_offset;

    /*
     * We don't use offset argument of mmap() since the
     * mapped address has to be page aligned, and we use huge
     * pages.
     */
    if (track_ramblocks) {
        /*
         * In postcopy we're using PROT_NONE here to catch anyone
         * accessing it before we userfault.
         */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_NONE, MAP_SHARED | MAP_NORESERVE,
                         vmsg->fds[0], 0);
    } else {
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE,
                         vmsg->fds[0], 0);
    }

    if (mmap_addr == MAP_FAILED) {
        vu_panic(dev, "region mmap error: %s", strerror(errno));
    } else {
        dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
        DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
               dev_region->mmap_addr);
    }

    close(vmsg->fds[0]);

    if (track_ramblocks) {
        /*
         * Return the address to QEMU so that it can translate the ufd
         * fault addresses back.
         */
        msg_region->userspace_addr = (uintptr_t)(mmap_addr +
                                                 dev_region->mmap_offset);

        /* Send the message back to qemu with the addresses filled in. */
        vmsg->fd_num = 0;
        if (!vu_send_reply(dev, dev->sock, vmsg)) {
            vu_panic(dev, "failed to respond to add-mem-region for postcopy");
            return false;
        }

        DPRINT("Successfully added new region in postcopy\n");
        dev->nregions++;
        return false;

    } else {
        for (i = 0; i < dev->max_queues; i++) {
            if (dev->vq[i].vring.desc) {
                if (map_ring(dev, &dev->vq[i])) {
                    vu_panic(dev, "remapping queue %d for new memory region",
                             i);
                }
            }
        }

        DPRINT("Successfully added new region\n");
        dev->nregions++;
        return false;
    }
}

static inline bool reg_equal(VuDevRegion *vudev_reg,
                             VhostUserMemoryRegion *msg_reg)
{
    if (vudev_reg->gpa == msg_reg->guest_phys_addr &&
        vudev_reg->qva == msg_reg->userspace_addr &&
        vudev_reg->size == msg_reg->memory_size) {
        return true;
    }

    return false;
}

static bool
vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
    VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
    int i;
    bool found = false;

    if (vmsg->fd_num > 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "VHOST_USER_REM_MEM_REG received %d fds - at most 1 fd "
                      "should be sent for this message type", vmsg->fd_num);
        return false;
    }

    if (vmsg->size < VHOST_USER_MEM_REG_SIZE) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "VHOST_USER_REM_MEM_REG requires a message size of at "
                      "least %d bytes and only %d bytes were received",
                 VHOST_USER_MEM_REG_SIZE, vmsg->size);
        return false;
    }

    DPRINT("Removing region:\n");
    DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
           msg_region->guest_phys_addr);
    DPRINT("    memory_size:     0x%016"PRIx64"\n",
           msg_region->memory_size);
    DPRINT("    userspace_addr   0x%016"PRIx64"\n",
           msg_region->userspace_addr);
    DPRINT("    mmap_offset      0x%016"PRIx64"\n",
           msg_region->mmap_offset);

    for (i = 0; i < dev->nregions; i++) {
        if (reg_equal(&dev->regions[i], msg_region)) {
            VuDevRegion *r = &dev->regions[i];
            void *m = (void *) (uintptr_t) r->mmap_addr;

            if (m) {
                munmap(m, r->size + r->mmap_offset);
            }

            /*
             * Shift all affected entries by 1 to close the hole at index i and
             * zero out the last entry.
             */
            memmove(dev->regions + i, dev->regions + i + 1,
                    sizeof(VuDevRegion) * (dev->nregions - i - 1));
            memset(dev->regions + dev->nregions - 1, 0, sizeof(VuDevRegion));
            DPRINT("Successfully removed a region\n");
            dev->nregions--;
            i--;

            found = true;

            /* Continue the search for eventual duplicates. */
        }
    }

    if (!found) {
        vu_panic(dev, "Specified region not found\n");
    }

    vmsg_close_fds(vmsg);

    return false;
}

static bool
vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
{
    int i;
    VhostUserMemory m = vmsg->payload.memory, *memory = &m;
    dev->nregions = memory->nregions;

    DPRINT("Nregions: %u\n", memory->nregions);
    for (i = 0; i < dev->nregions; i++) {
        void *mmap_addr;
        VhostUserMemoryRegion *msg_region = &memory->regions[i];
        VuDevRegion *dev_region = &dev->regions[i];

        DPRINT("Region %d\n", i);
        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
               msg_region->guest_phys_addr);
        DPRINT("    memory_size:     0x%016"PRIx64"\n",
               msg_region->memory_size);
        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
               msg_region->userspace_addr);
        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
               msg_region->mmap_offset);

        dev_region->gpa = msg_region->guest_phys_addr;
        dev_region->size = msg_region->memory_size;
        dev_region->qva = msg_region->userspace_addr;
        dev_region->mmap_offset = msg_region->mmap_offset;

        /* We don't use offset argument of mmap() since the
         * mapped address has to be page aligned, and we use huge
         * pages.
         * In postcopy we're using PROT_NONE here to catch anyone
         * accessing it before we userfault
         */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_NONE, MAP_SHARED | MAP_NORESERVE,
                         vmsg->fds[i], 0);

        if (mmap_addr == MAP_FAILED) {
            vu_panic(dev, "region mmap error: %s", strerror(errno));
        } else {
            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
                   dev_region->mmap_addr);
        }

        /* Return the address to QEMU so that it can translate the ufd
         * fault addresses back.
         */
        msg_region->userspace_addr = (uintptr_t)(mmap_addr +
                                                 dev_region->mmap_offset);
        close(vmsg->fds[i]);
    }

    /* Send the message back to qemu with the addresses filled in */
    vmsg->fd_num = 0;
    if (!vu_send_reply(dev, dev->sock, vmsg)) {
        vu_panic(dev, "failed to respond to set-mem-table for postcopy");
        return false;
    }

    /* Wait for QEMU to confirm that it's registered the handler for the
     * faults.
     */
    if (!dev->read_msg(dev, dev->sock, vmsg) ||
        vmsg->size != sizeof(vmsg->payload.u64) ||
        vmsg->payload.u64 != 0) {
        vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
        return false;
    }

    /* OK, now we can go and register the memory and generate faults */
    (void)generate_faults(dev);

    return false;
}

static bool
vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int i;
    VhostUserMemory m = vmsg->payload.memory, *memory = &m;

    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];
        void *m = (void *) (uintptr_t) r->mmap_addr;

        if (m) {
            munmap(m, r->size + r->mmap_offset);
        }
    }
    dev->nregions = memory->nregions;

    if (dev->postcopy_listening) {
        return vu_set_mem_table_exec_postcopy(dev, vmsg);
    }

    DPRINT("Nregions: %u\n", memory->nregions);
    for (i = 0; i < dev->nregions; i++) {
        void *mmap_addr;
        VhostUserMemoryRegion *msg_region = &memory->regions[i];
        VuDevRegion *dev_region = &dev->regions[i];

        DPRINT("Region %d\n", i);
        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
               msg_region->guest_phys_addr);
        DPRINT("    memory_size:     0x%016"PRIx64"\n",
               msg_region->memory_size);
        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
               msg_region->userspace_addr);
        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
               msg_region->mmap_offset);

        dev_region->gpa = msg_region->guest_phys_addr;
        dev_region->size = msg_region->memory_size;
        dev_region->qva = msg_region->userspace_addr;
        dev_region->mmap_offset = msg_region->mmap_offset;

        /* We don't use offset argument of mmap() since the
         * mapped address has to be page aligned, and we use huge
         * pages.
         */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE,
                         vmsg->fds[i], 0);

        if (mmap_addr == MAP_FAILED) {
            vu_panic(dev, "region mmap error: %s", strerror(errno));
        } else {
            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
                   dev_region->mmap_addr);
        }

        close(vmsg->fds[i]);
    }

    for (i = 0; i < dev->max_queues; i++) {
        if (dev->vq[i].vring.desc) {
            if (map_ring(dev, &dev->vq[i])) {
                vu_panic(dev, "remapping queue %d during setmemtable", i);
            }
        }
    }

    return false;
}

static bool
vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd;
    uint64_t log_mmap_size, log_mmap_offset;
    void *rc;

    if (vmsg->fd_num != 1 ||
        vmsg->size != sizeof(vmsg->payload.log)) {
        vu_panic(dev, "Invalid log_base message");
        return true;
    }

    fd = vmsg->fds[0];
    log_mmap_offset = vmsg->payload.log.mmap_offset;
    log_mmap_size = vmsg->payload.log.mmap_size;
    DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
    DPRINT("Log mmap_size:   %"PRId64"\n", log_mmap_size);

    rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
              log_mmap_offset);
    close(fd);
    if (rc == MAP_FAILED) {
        perror("log mmap error");
    }

    if (dev->log_table) {
        munmap(dev->log_table, dev->log_size);
    }
    dev->log_table = rc;
    dev->log_size = log_mmap_size;

    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    return true;
}

static bool
vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid log_fd message");
        return false;
    }

    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
    }
    dev->log_call_fd = vmsg->fds[0];
    DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);

    return false;
}

static bool
vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %u\n", index);
    DPRINT("State.num:   %u\n", num);
    dev->vq[index].vring.num = num;

    return false;
}

static bool
vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr;
    unsigned int index = vra->index;
    VuVirtq *vq = &dev->vq[index];

    DPRINT("vhost_vring_addr:\n");
    DPRINT("    index:  %d\n", vra->index);
    DPRINT("    flags:  %d\n", vra->flags);
    DPRINT("    desc_user_addr:   0x%016" PRIx64 "\n", (uint64_t)vra->desc_user_addr);
    DPRINT("    used_user_addr:   0x%016" PRIx64 "\n", (uint64_t)vra->used_user_addr);
    DPRINT("    avail_user_addr:  0x%016" PRIx64 "\n", (uint64_t)vra->avail_user_addr);
    DPRINT("    log_guest_addr:   0x%016" PRIx64 "\n", (uint64_t)vra->log_guest_addr);

    vq->vra = *vra;
    vq->vring.flags = vra->flags;
    vq->vring.log_guest_addr = vra->log_guest_addr;


    if (map_ring(dev, vq)) {
        vu_panic(dev, "Invalid vring_addr message");
        return false;
    }

    vq->used_idx = le16toh(vq->vring.used->idx);

    if (vq->last_avail_idx != vq->used_idx) {
        bool resume = dev->iface->queue_is_processed_in_order &&
                      dev->iface->queue_is_processed_in_order(dev, index);

        DPRINT("Last avail index != used index: %u != %u%s\n",
               vq->last_avail_idx, vq->used_idx,
               resume ? ", resuming" : "");

        if (resume) {
            vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx;
        }
    }

    return false;
}

static bool
vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %u\n", index);
    DPRINT("State.num:   %u\n", num);
    dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num;

    return false;
}

static bool
vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    DPRINT("State.index: %u\n", index);
    vmsg->payload.state.num = dev->vq[index].last_avail_idx;
    vmsg->size = sizeof(vmsg->payload.state);

    dev->vq[index].started = false;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, false);
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }
    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    return true;
}

static bool
vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    if (index >= dev->max_queues) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid queue index: %u", index);
        return false;
    }

    if (nofd) {
        vmsg_close_fds(vmsg);
        return true;
    }

    if (vmsg->fd_num != 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid fds in request: %d", vmsg->request);
        return false;
    }

    return true;
}

static int
inflight_desc_compare(const void *a, const void *b)
{
    VuVirtqInflightDesc *desc0 = (VuVirtqInflightDesc *)a,
                        *desc1 = (VuVirtqInflightDesc *)b;

    if (desc1->counter > desc0->counter &&
        (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) {
        return 1;
    }

    return -1;
}

static int
vu_check_queue_inflights(VuDev *dev, VuVirtq *vq)
{
    int i = 0;

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    if (unlikely(!vq->inflight->version)) {
        /* initialize the buffer */
        vq->inflight->version = INFLIGHT_VERSION;
        return 0;
    }

    vq->used_idx = le16toh(vq->vring.used->idx);
    vq->resubmit_num = 0;
    vq->resubmit_list = NULL;
    vq->counter = 0;

    if (unlikely(vq->inflight->used_idx != vq->used_idx)) {
        vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0;

        barrier();

        vq->inflight->used_idx = vq->used_idx;
    }

    for (i = 0; i < vq->inflight->desc_num; i++) {
        if (vq->inflight->desc[i].inflight == 1) {
            vq->inuse++;
        }
    }

    vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx;

    if (vq->inuse) {
        vq->resubmit_list = calloc(vq->inuse, sizeof(VuVirtqInflightDesc));
        if (!vq->resubmit_list) {
            return -1;
        }

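        /*
         * Added comment (not in the original source): the loop below
         * collects every descriptor that was still in flight when the
         * previous backend instance disconnected, and the qsort() further
         * down orders them by their submission counter so they can be
         * resubmitted in the order the guest originally made them available.
         */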
        for (i = 0; i < vq->inflight->desc_num; i++) {
            if (vq->inflight->desc[i].inflight) {
                vq->resubmit_list[vq->resubmit_num].index = i;
                vq->resubmit_list[vq->resubmit_num].counter =
                    vq->inflight->desc[i].counter;
                vq->resubmit_num++;
            }
        }

        if (vq->resubmit_num > 1) {
            qsort(vq->resubmit_list, vq->resubmit_num,
                  sizeof(VuVirtqInflightDesc), inflight_desc_compare);
        }
        vq->counter = vq->resubmit_list[0].counter + 1;
    }

    /* in case of I/O hang after reconnecting */
    if (eventfd_write(vq->kick_fd, 1)) {
        return -1;
    }

    return 0;
}

static bool
vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    dev->vq[index].kick_fd = nofd ? -1 : vmsg->fds[0];
    DPRINT("Got kick_fd: %d for vq: %d\n", dev->vq[index].kick_fd, index);

    dev->vq[index].started = true;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, true);
    }

    if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) {
        dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN,
                       vu_kick_cb, (void *)(long)index);

        DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
               dev->vq[index].kick_fd, index);
    }

    if (vu_check_queue_inflights(dev, &dev->vq[index])) {
        vu_panic(dev, "Failed to check inflights for vq: %d\n", index);
    }

    return false;
}

void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
                          vu_queue_handler_cb handler)
{
    int qidx = vq - dev->vq;

    vq->handler = handler;
    if (vq->kick_fd >= 0) {
        if (handler) {
            dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN,
                           vu_kick_cb, (void *)(long)qidx);
        } else {
            dev->remove_watch(dev, vq->kick_fd);
        }
    }
}

bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
                                int size, int offset)
{
    int qidx = vq - dev->vq;
    int fd_num = 0;
    VhostUserMsg vmsg = {
        .request = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
        .size = sizeof(vmsg.payload.area),
        .payload.area = {
            .u64 = qidx & VHOST_USER_VRING_IDX_MASK,
            .size = size,
            .offset = offset,
        },
    };

    if (fd == -1) {
        vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
    } else {
        vmsg.fds[fd_num++] = fd;
    }

    vmsg.fd_num = fd_num;

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) {
        return false;
    }

    pthread_mutex_lock(&dev->slave_mutex);
    if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
        pthread_mutex_unlock(&dev->slave_mutex);
        return false;
    }

    /* Also unlocks the slave_mutex */
    return vu_process_message_reply(dev, &vmsg);
}

static bool
vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }

    dev->vq[index].call_fd = nofd ? -1 : vmsg->fds[0];

    /* in case of I/O hang after reconnecting */
    if (dev->vq[index].call_fd != -1 && eventfd_write(vmsg->fds[0], 1)) {
        return -1;
    }

    DPRINT("Got call_fd: %d for vq: %d\n", dev->vq[index].call_fd, index);

    return false;
}

static bool
vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].err_fd != -1) {
        close(dev->vq[index].err_fd);
        dev->vq[index].err_fd = -1;
    }

    dev->vq[index].err_fd = nofd ? -1 : vmsg->fds[0];

    return false;
}

static bool
vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    /*
     * Note that we support, but intentionally do not set,
     * VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. This means that
     * a device implementation can return it in its callback
     * (get_protocol_features) if it wants to use this for
     * simulation, but it is otherwise not desirable (if even
     * implemented by the master.)
     */
    uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
                        1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ |
                        1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
                        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD |
                        1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |
                        1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS;

    if (have_userfault()) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
    }

    if (dev->iface->get_config && dev->iface->set_config) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
    }

    if (dev->iface->get_protocol_features) {
        features |= dev->iface->get_protocol_features(dev);
    }

    vmsg_set_reply_u64(vmsg, features);
    return true;
}

static bool
vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    uint64_t features = vmsg->payload.u64;

    DPRINT("u64: 0x%016"PRIx64"\n", features);

    dev->protocol_features = vmsg->payload.u64;

    if (vu_has_protocol_feature(dev,
                                VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
        (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_REQ) ||
         !vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
        /*
         * The use case for using messages for kick/call is simulation, to make
         * the kick and call synchronous. To actually get that behaviour, both
         * of the other features are required.
         * Theoretically, one could use only kick messages, or do them without
         * having F_REPLY_ACK, but too many (possibly pending) messages on the
         * socket will eventually cause the master to hang. To avoid this in
         * scenarios where it is not desired, enforce that the settings
         * actually enable the simulation case.
         */
        vu_panic(dev,
                 "F_IN_BAND_NOTIFICATIONS requires F_SLAVE_REQ && F_REPLY_ACK");
        return false;
    }

    if (dev->iface->set_protocol_features) {
        dev->iface->set_protocol_features(dev, features);
    }

    return false;
}

static bool
vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg_set_reply_u64(vmsg, dev->max_queues);
    return true;
}

static bool
vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int enable = vmsg->payload.state.num;

    DPRINT("State.index:  %u\n", index);
    DPRINT("State.enable: %u\n", enable);

    if (index >= dev->max_queues) {
        vu_panic(dev, "Invalid vring_enable index: %u", index);
        return false;
    }

    dev->vq[index].enable = enable;
    return false;
}

static bool
vu_set_slave_req_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid slave_req_fd message (%d fd's)", vmsg->fd_num);
        return false;
    }

    if (dev->slave_fd != -1) {
        close(dev->slave_fd);
    }
    dev->slave_fd = vmsg->fds[0];
    DPRINT("Got slave_fd: %d\n", vmsg->fds[0]);

    return false;
}

static bool
vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->get_config) {
        ret = dev->iface->get_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.size);
    }

    if (ret) {
        /* resize to zero to indicate an error to master */
        vmsg->size = 0;
    }

    return true;
}

static bool
vu_set_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->set_config) {
        ret = dev->iface->set_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.offset,
                                     vmsg->payload.config.size,
                                     vmsg->payload.config.flags);
        if (ret) {
            vu_panic(dev, "Set virtio configuration space failed");
        }
    }

    return false;
}

static bool
vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg)
{
    dev->postcopy_ufd = -1;
#ifdef UFFDIO_API
    struct uffdio_api api_struct;

    dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    vmsg->size = 0;
#endif

    if (dev->postcopy_ufd == -1) {
        vu_panic(dev, "Userfaultfd not available: %s", strerror(errno));
        goto out;
    }

#ifdef UFFDIO_API
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
        vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno));
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        goto out;
    }
    /* TODO: Stash feature flags somewhere */
#endif

out:
    /* Return a ufd to the QEMU */
    vmsg->fd_num = 1;
    vmsg->fds[0] = dev->postcopy_ufd;
    return true; /* = send a reply */
}

static bool
vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg)
{
    if (dev->nregions) {
        vu_panic(dev, "Regions already registered at postcopy-listen");
        vmsg_set_reply_u64(vmsg, -1);
        return true;
    }
    dev->postcopy_listening = true;

    vmsg_set_reply_u64(vmsg, 0);
    return true;
}

static bool
vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("%s: Entry\n", __func__);
    dev->postcopy_listening = false;
    if (dev->postcopy_ufd > 0) {
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        DPRINT("%s: Done close\n", __func__);
    }

    vmsg_set_reply_u64(vmsg, 0);
    DPRINT("%s: exit\n", __func__);
    return true;
}

static inline uint64_t
vu_inflight_queue_size(uint16_t queue_size)
{
    return ALIGN_UP(sizeof(VuDescStateSplit) * queue_size +
           sizeof(uint16_t), INFLIGHT_ALIGNMENT);
}

#ifdef MFD_ALLOW_SEALING
static void *
memfd_alloc(const char *name, size_t size, unsigned int flags, int *fd)
{
    void *ptr;
    int ret;

    *fd = memfd_create(name, MFD_ALLOW_SEALING);
    if (*fd < 0) {
        return NULL;
    }

    ret = ftruncate(*fd, size);
    if (ret < 0) {
        close(*fd);
        return NULL;
    }

    ret = fcntl(*fd, F_ADD_SEALS, flags);
    if (ret < 0) {
        close(*fd);
        return NULL;
    }

    ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, *fd, 0);
    if (ptr == MAP_FAILED) {
        close(*fd);
        return NULL;
    }

    return ptr;
}
#endif

static bool
vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd = -1;
    void *addr = NULL;
    uint64_t mmap_size;
    uint16_t num_queues, queue_size;

    if (vmsg->size != sizeof(vmsg->payload.inflight)) {
        vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size);
        vmsg->payload.inflight.mmap_size = 0;
        return true;
    }

    num_queues = vmsg->payload.inflight.num_queues;
    queue_size = vmsg->payload.inflight.queue_size;

    DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
    DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);

    mmap_size = vu_inflight_queue_size(queue_size) * num_queues;

#ifdef MFD_ALLOW_SEALING
    addr = memfd_alloc("vhost-inflight", mmap_size,
                       F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                       &fd);
#else
    vu_panic(dev, "Not implemented: memfd support is missing");
#endif

    if (!addr) {
        vu_panic(dev, "Failed to alloc vhost inflight area");
        vmsg->payload.inflight.mmap_size = 0;
        return true;
    }

    memset(addr, 0, mmap_size);

    dev->inflight_info.addr = addr;
    dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size;
    dev->inflight_info.fd = vmsg->fds[0] = fd;
    vmsg->fd_num = 1;
    vmsg->payload.inflight.mmap_offset = 0;

    DPRINT("send inflight mmap_size: %"PRId64"\n",
           vmsg->payload.inflight.mmap_size);
    DPRINT("send inflight mmap offset: %"PRId64"\n",
           vmsg->payload.inflight.mmap_offset);

    return true;
}

static bool
vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd, i;
    uint64_t mmap_size, mmap_offset;
    uint16_t num_queues, queue_size;
    void *rc;

    if (vmsg->fd_num != 1 ||
        vmsg->size != sizeof(vmsg->payload.inflight)) {
        vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d",
                 vmsg->size, vmsg->fd_num);
        return false;
    }

    fd = vmsg->fds[0];
    mmap_size = vmsg->payload.inflight.mmap_size;
    mmap_offset = vmsg->payload.inflight.mmap_offset;
    num_queues = vmsg->payload.inflight.num_queues;
    queue_size = vmsg->payload.inflight.queue_size;

    DPRINT("set_inflight_fd mmap_size: %"PRId64"\n", mmap_size);
    DPRINT("set_inflight_fd mmap_offset: %"PRId64"\n", mmap_offset);
    DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
    DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);

    rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
              fd, mmap_offset);

    if (rc == MAP_FAILED) {
        vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno));
        return false;
    }

    if (dev->inflight_info.fd) {
        close(dev->inflight_info.fd);
    }

    if (dev->inflight_info.addr) {
        munmap(dev->inflight_info.addr, dev->inflight_info.size);
    }

    dev->inflight_info.fd = fd;
    dev->inflight_info.addr = rc;
    dev->inflight_info.size = mmap_size;

    for (i = 0; i < num_queues; i++) {
        dev->vq[i].inflight = (VuVirtqInflight *)rc;
        dev->vq[i].inflight->desc_num = queue_size;
        rc = (void *)((char *)rc + vu_inflight_queue_size(queue_size));
    }

    return false;
}

static bool
vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    if (index >= dev->max_queues) {
        vu_panic(dev, "Invalid queue index: %u", index);
        return false;
    }

    DPRINT("Got kick message: handler:%p idx:%u\n",
           dev->vq[index].handler, index);

    if (!dev->vq[index].started) {
        dev->vq[index].started = true;

        if (dev->iface->queue_set_started) {
            dev->iface->queue_set_started(dev, index, true);
        }
    }

    if (dev->vq[index].handler) {
        dev->vq[index].handler(dev, index);
    }

    return false;
}

static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg->flags = VHOST_USER_REPLY_MASK | VHOST_USER_VERSION;
    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->payload.u64 = VHOST_USER_MAX_RAM_SLOTS;
    vmsg->fd_num = 0;

    if (!vu_message_write(dev, dev->sock, vmsg)) {
        vu_panic(dev, "Failed to send max ram slots: %s\n", strerror(errno));
    }

    DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_USER_MAX_RAM_SLOTS);

    return false;
}

static bool
vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
{
    int do_reply = 0;

    /* Print out generic part of the request. */
    DPRINT("================ Vhost user message ================\n");
    DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request),
           vmsg->request);
    DPRINT("Flags:   0x%x\n", vmsg->flags);
    DPRINT("Size:    %u\n", vmsg->size);

    if (vmsg->fd_num) {
        int i;
        DPRINT("Fds:");
        for (i = 0; i < vmsg->fd_num; i++) {
            DPRINT(" %d", vmsg->fds[i]);
        }
        DPRINT("\n");
    }

    if (dev->iface->process_msg &&
        dev->iface->process_msg(dev, vmsg, &do_reply)) {
        return do_reply;
    }

    switch (vmsg->request) {
    case VHOST_USER_GET_FEATURES:
        return vu_get_features_exec(dev, vmsg);
    case VHOST_USER_SET_FEATURES:
        return vu_set_features_exec(dev, vmsg);
    case VHOST_USER_GET_PROTOCOL_FEATURES:
        return vu_get_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_PROTOCOL_FEATURES:
        return vu_set_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_OWNER:
        return vu_set_owner_exec(dev, vmsg);
    case VHOST_USER_RESET_OWNER:
        return vu_reset_device_exec(dev, vmsg);
    case VHOST_USER_SET_MEM_TABLE:
        return vu_set_mem_table_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_BASE:
        return vu_set_log_base_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_FD:
        return vu_set_log_fd_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_NUM:
        return vu_set_vring_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ADDR:
        return vu_set_vring_addr_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_BASE:
        return vu_set_vring_base_exec(dev, vmsg);
    case VHOST_USER_GET_VRING_BASE:
        return vu_get_vring_base_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_KICK:
        return vu_set_vring_kick_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_CALL:
        return vu_set_vring_call_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ERR:
        return vu_set_vring_err_exec(dev, vmsg);
    case VHOST_USER_GET_QUEUE_NUM:
        return vu_get_queue_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ENABLE:
        return vu_set_vring_enable_exec(dev, vmsg);
    case VHOST_USER_SET_SLAVE_REQ_FD:
        return vu_set_slave_req_fd(dev, vmsg);
    case VHOST_USER_GET_CONFIG:
        return vu_get_config(dev, vmsg);
    case VHOST_USER_SET_CONFIG:
        return vu_set_config(dev, vmsg);
    case VHOST_USER_NONE:
        /* if you need processing before exit, override iface->process_msg */
        exit(0);
    case VHOST_USER_POSTCOPY_ADVISE:
        return vu_set_postcopy_advise(dev, vmsg);
    case VHOST_USER_POSTCOPY_LISTEN:
        return vu_set_postcopy_listen(dev, vmsg);
    case VHOST_USER_POSTCOPY_END:
        return vu_set_postcopy_end(dev, vmsg);
    case VHOST_USER_GET_INFLIGHT_FD:
        return vu_get_inflight_fd(dev, vmsg);
    case VHOST_USER_SET_INFLIGHT_FD:
        return vu_set_inflight_fd(dev, vmsg);
    case VHOST_USER_VRING_KICK:
        return vu_handle_vring_kick(dev, vmsg);
    case VHOST_USER_GET_MAX_MEM_SLOTS:
        return vu_handle_get_max_memslots(dev, vmsg);
    case VHOST_USER_ADD_MEM_REG:
        return vu_add_mem_reg(dev, vmsg);
    case VHOST_USER_REM_MEM_REG:
        return vu_rem_mem_reg(dev, vmsg);
    default:
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Unhandled request: %d", vmsg->request);
    }

    return false;
}

bool
vu_dispatch(VuDev *dev)
{
    VhostUserMsg vmsg = { 0, };
    int reply_requested;
    bool need_reply, success = false;

    if (!dev->read_msg(dev, dev->sock, &vmsg)) {
        goto end;
    }

    need_reply = vmsg.flags & VHOST_USER_NEED_REPLY_MASK;

    reply_requested = vu_process_message(dev, &vmsg);
    if (!reply_requested && need_reply) {
        vmsg_set_reply_u64(&vmsg, 0);
        reply_requested = 1;
    }

    if (!reply_requested) {
        success = true;
        goto end;
    }

    if (!vu_send_reply(dev, dev->sock, &vmsg)) {
        goto end;
    }

    success = true;

end:
    free(vmsg.data);
    return success;
}

void
vu_deinit(VuDev *dev)
{
    int i;

    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];
        void *m = (void *) (uintptr_t) r->mmap_addr;
        if (m != MAP_FAILED) {
            munmap(m, r->size + r->mmap_offset);
        }
    }
    dev->nregions = 0;

    for (i = 0; i < dev->max_queues; i++) {
        VuVirtq *vq = &dev->vq[i];

        if (vq->call_fd != -1) {
            close(vq->call_fd);
            vq->call_fd = -1;
        }

        if (vq->kick_fd != -1) {
            dev->remove_watch(dev, vq->kick_fd);
            close(vq->kick_fd);
            vq->kick_fd = -1;
        }

        if (vq->err_fd != -1) {
            close(vq->err_fd);
            vq->err_fd = -1;
        }

        if (vq->resubmit_list) {
            free(vq->resubmit_list);
            vq->resubmit_list = NULL;
        }

        vq->inflight = NULL;
    }

    if (dev->inflight_info.addr) {
        munmap(dev->inflight_info.addr, dev->inflight_info.size);
        dev->inflight_info.addr = NULL;
    }

    if (dev->inflight_info.fd > 0) {
        close(dev->inflight_info.fd);
        dev->inflight_info.fd = -1;
    }

    vu_close_log(dev);
    if (dev->slave_fd != -1) {
        close(dev->slave_fd);
        dev->slave_fd = -1;
    }
    pthread_mutex_destroy(&dev->slave_mutex);

    if (dev->sock != -1) {
        close(dev->sock);
    }

    free(dev->vq);
    dev->vq = NULL;
}

bool
vu_init(VuDev *dev,
        uint16_t max_queues,
        int socket,
        vu_panic_cb panic,
        vu_read_msg_cb read_msg,
        vu_set_watch_cb set_watch,
        vu_remove_watch_cb remove_watch,
        const VuDevIface *iface)
{
    uint16_t i;

    assert(max_queues > 0);
    assert(socket >= 0);
    assert(set_watch);
    assert(remove_watch);
    assert(iface);
    assert(panic);

    memset(dev, 0, sizeof(*dev));

    dev->sock = socket;
    dev->panic = panic;
    dev->read_msg = read_msg ? read_msg : vu_message_read_default;
    dev->set_watch = set_watch;
    dev->remove_watch = remove_watch;
    dev->iface = iface;
    dev->log_call_fd = -1;
    pthread_mutex_init(&dev->slave_mutex, NULL);
    dev->slave_fd = -1;
    dev->max_queues = max_queues;

    dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
    if (!dev->vq) {
        DPRINT("%s: failed to malloc virtqueues\n", __func__);
        return false;
    }

    for (i = 0; i < max_queues; i++) {
        dev->vq[i] = (VuVirtq) {
            .call_fd = -1, .kick_fd = -1, .err_fd = -1,
            .notification = true,
        };
    }

    return true;
}
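
/*
 * Illustrative usage sketch (added by the editor, not part of the original
 * source): a backend typically calls vu_init() once per connection and then
 * runs vu_dispatch() whenever dev->sock becomes readable, e.g.:
 *
 *     if (!vu_init(&dev, nqueues, conn_fd, panic_cb, NULL,
 *                  set_watch_cb, remove_watch_cb, &iface)) {
 *         ...handle error...
 *     }
 *     while (vu_dispatch(&dev)) {
 *         ...
 *     }
 *     vu_deinit(&dev);
 *
 * Passing NULL for read_msg selects vu_message_read_default(); panic_cb,
 * set_watch_cb, remove_watch_cb and iface are hypothetical caller-provided
 * callbacks.
 */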
read_msg : vu_message_read_default; 2067 dev->set_watch = set_watch; 2068 dev->remove_watch = remove_watch; 2069 dev->iface = iface; 2070 dev->log_call_fd = -1; 2071 pthread_mutex_init(&dev->slave_mutex, NULL); 2072 dev->slave_fd = -1; 2073 dev->max_queues = max_queues; 2074 2075 dev->vq = malloc(max_queues * sizeof(dev->vq[0])); 2076 if (!dev->vq) { 2077 DPRINT("%s: failed to malloc virtqueues\n", __func__); 2078 return false; 2079 } 2080 2081 for (i = 0; i < max_queues; i++) { 2082 dev->vq[i] = (VuVirtq) { 2083 .call_fd = -1, .kick_fd = -1, .err_fd = -1, 2084 .notification = true, 2085 }; 2086 } 2087 2088 return true; 2089 } 2090 2091 VuVirtq * 2092 vu_get_queue(VuDev *dev, int qidx) 2093 { 2094 assert(qidx < dev->max_queues); 2095 return &dev->vq[qidx]; 2096 } 2097 2098 bool 2099 vu_queue_enabled(VuDev *dev, VuVirtq *vq) 2100 { 2101 return vq->enable; 2102 } 2103 2104 bool 2105 vu_queue_started(const VuDev *dev, const VuVirtq *vq) 2106 { 2107 return vq->started; 2108 } 2109 2110 static inline uint16_t 2111 vring_avail_flags(VuVirtq *vq) 2112 { 2113 return le16toh(vq->vring.avail->flags); 2114 } 2115 2116 static inline uint16_t 2117 vring_avail_idx(VuVirtq *vq) 2118 { 2119 vq->shadow_avail_idx = le16toh(vq->vring.avail->idx); 2120 2121 return vq->shadow_avail_idx; 2122 } 2123 2124 static inline uint16_t 2125 vring_avail_ring(VuVirtq *vq, int i) 2126 { 2127 return le16toh(vq->vring.avail->ring[i]); 2128 } 2129 2130 static inline uint16_t 2131 vring_get_used_event(VuVirtq *vq) 2132 { 2133 return vring_avail_ring(vq, vq->vring.num); 2134 } 2135 2136 static int 2137 virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx) 2138 { 2139 uint16_t num_heads = vring_avail_idx(vq) - idx; 2140 2141 /* Check it isn't doing very strange things with descriptor numbers. */ 2142 if (num_heads > vq->vring.num) { 2143 vu_panic(dev, "Guest moved used index from %u to %u", 2144 idx, vq->shadow_avail_idx); 2145 return -1; 2146 } 2147 if (num_heads) { 2148 /* On success, callers read a descriptor at vq->last_avail_idx. 2149 * Make sure descriptor read does not bypass avail index read. */ 2150 smp_rmb(); 2151 } 2152 2153 return num_heads; 2154 } 2155 2156 static bool 2157 virtqueue_get_head(VuDev *dev, VuVirtq *vq, 2158 unsigned int idx, unsigned int *head) 2159 { 2160 /* Grab the next descriptor number they're advertising, and increment 2161 * the index we've seen. */ 2162 *head = vring_avail_ring(vq, idx % vq->vring.num); 2163 2164 /* If their number is silly, that's a fatal mistake. 
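 * The head index comes straight from the driver-controlled avail ring, so
 * it is untrusted input: it must index into the descriptor table, i.e. be
 * below vq->vring.num.  With a 256-entry ring, for example, any value of
 * 256 or above would point past the end of the table, so we mark the
 * device broken instead of reading a bogus descriptor.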
*/ 2165 if (*head >= vq->vring.num) { 2166 vu_panic(dev, "Guest says index %u is available", *head); 2167 return false; 2168 } 2169 2170 return true; 2171 } 2172 2173 static int 2174 virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc, 2175 uint64_t addr, size_t len) 2176 { 2177 struct vring_desc *ori_desc; 2178 uint64_t read_len; 2179 2180 if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) { 2181 return -1; 2182 } 2183 2184 if (len == 0) { 2185 return -1; 2186 } 2187 2188 while (len) { 2189 read_len = len; 2190 ori_desc = vu_gpa_to_va(dev, &read_len, addr); 2191 if (!ori_desc) { 2192 return -1; 2193 } 2194 2195 memcpy(desc, ori_desc, read_len); 2196 len -= read_len; 2197 addr += read_len; 2198 desc += read_len; 2199 } 2200 2201 return 0; 2202 } 2203 2204 enum { 2205 VIRTQUEUE_READ_DESC_ERROR = -1, 2206 VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */ 2207 VIRTQUEUE_READ_DESC_MORE = 1, /* more buffers in chain */ 2208 }; 2209 2210 static int 2211 virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc, 2212 int i, unsigned int max, unsigned int *next) 2213 { 2214 /* If this descriptor says it doesn't chain, we're done. */ 2215 if (!(le16toh(desc[i].flags) & VRING_DESC_F_NEXT)) { 2216 return VIRTQUEUE_READ_DESC_DONE; 2217 } 2218 2219 /* Check they're not leading us off end of descriptors. */ 2220 *next = le16toh(desc[i].next); 2221 /* Make sure compiler knows to grab that: we don't want it changing! */ 2222 smp_wmb(); 2223 2224 if (*next >= max) { 2225 vu_panic(dev, "Desc next is %u", *next); 2226 return VIRTQUEUE_READ_DESC_ERROR; 2227 } 2228 2229 return VIRTQUEUE_READ_DESC_MORE; 2230 } 2231 2232 void 2233 vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes, 2234 unsigned int *out_bytes, 2235 unsigned max_in_bytes, unsigned max_out_bytes) 2236 { 2237 unsigned int idx; 2238 unsigned int total_bufs, in_total, out_total; 2239 int rc; 2240 2241 idx = vq->last_avail_idx; 2242 2243 total_bufs = in_total = out_total = 0; 2244 if (unlikely(dev->broken) || 2245 unlikely(!vq->vring.avail)) { 2246 goto done; 2247 } 2248 2249 while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) { 2250 unsigned int max, desc_len, num_bufs, indirect = 0; 2251 uint64_t desc_addr, read_len; 2252 struct vring_desc *desc; 2253 struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE]; 2254 unsigned int i; 2255 2256 max = vq->vring.num; 2257 num_bufs = total_bufs; 2258 if (!virtqueue_get_head(dev, vq, idx++, &i)) { 2259 goto err; 2260 } 2261 desc = vq->vring.desc; 2262 2263 if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) { 2264 if (le32toh(desc[i].len) % sizeof(struct vring_desc)) { 2265 vu_panic(dev, "Invalid size for indirect buffer table"); 2266 goto err; 2267 } 2268 2269 /* If we've got too many, that implies a descriptor loop. 
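 * Every iteration of the walk below consumes one descriptor, so a
 * well-formed chain can never contain more entries than the table it
 * lives in (max).  Going past that bound means the "next" links form a
 * cycle, and without this check a buggy or malicious driver could keep us
 * spinning here forever.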
*/ 2270 if (num_bufs >= max) { 2271 vu_panic(dev, "Looped descriptor"); 2272 goto err; 2273 } 2274 2275 /* loop over the indirect descriptor table */ 2276 indirect = 1; 2277 desc_addr = le64toh(desc[i].addr); 2278 desc_len = le32toh(desc[i].len); 2279 max = desc_len / sizeof(struct vring_desc); 2280 read_len = desc_len; 2281 desc = vu_gpa_to_va(dev, &read_len, desc_addr); 2282 if (unlikely(desc && read_len != desc_len)) { 2283 /* Failed to use zero copy */ 2284 desc = NULL; 2285 if (!virtqueue_read_indirect_desc(dev, desc_buf, 2286 desc_addr, 2287 desc_len)) { 2288 desc = desc_buf; 2289 } 2290 } 2291 if (!desc) { 2292 vu_panic(dev, "Invalid indirect buffer table"); 2293 goto err; 2294 } 2295 num_bufs = i = 0; 2296 } 2297 2298 do { 2299 /* If we've got too many, that implies a descriptor loop. */ 2300 if (++num_bufs > max) { 2301 vu_panic(dev, "Looped descriptor"); 2302 goto err; 2303 } 2304 2305 if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) { 2306 in_total += le32toh(desc[i].len); 2307 } else { 2308 out_total += le32toh(desc[i].len); 2309 } 2310 if (in_total >= max_in_bytes && out_total >= max_out_bytes) { 2311 goto done; 2312 } 2313 rc = virtqueue_read_next_desc(dev, desc, i, max, &i); 2314 } while (rc == VIRTQUEUE_READ_DESC_MORE); 2315 2316 if (rc == VIRTQUEUE_READ_DESC_ERROR) { 2317 goto err; 2318 } 2319 2320 if (!indirect) { 2321 total_bufs = num_bufs; 2322 } else { 2323 total_bufs++; 2324 } 2325 } 2326 if (rc < 0) { 2327 goto err; 2328 } 2329 done: 2330 if (in_bytes) { 2331 *in_bytes = in_total; 2332 } 2333 if (out_bytes) { 2334 *out_bytes = out_total; 2335 } 2336 return; 2337 2338 err: 2339 in_total = out_total = 0; 2340 goto done; 2341 } 2342 2343 bool 2344 vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes, 2345 unsigned int out_bytes) 2346 { 2347 unsigned int in_total, out_total; 2348 2349 vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total, 2350 in_bytes, out_bytes); 2351 2352 return in_bytes <= in_total && out_bytes <= out_total; 2353 } 2354 2355 /* Fetch avail_idx from VQ memory only when we really need to know if 2356 * guest has added some buffers. */ 2357 bool 2358 vu_queue_empty(VuDev *dev, VuVirtq *vq) 2359 { 2360 if (unlikely(dev->broken) || 2361 unlikely(!vq->vring.avail)) { 2362 return true; 2363 } 2364 2365 if (vq->shadow_avail_idx != vq->last_avail_idx) { 2366 return false; 2367 } 2368 2369 return vring_avail_idx(vq) == vq->last_avail_idx; 2370 } 2371 2372 static bool 2373 vring_notify(VuDev *dev, VuVirtq *vq) 2374 { 2375 uint16_t old, new; 2376 bool v; 2377 2378 /* We need to expose used array entries before checking used event. 
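 * The full barrier below pairs our writes of the used ring entries and of
 * used->idx with the read of the driver's used_event field in
 * vring_get_used_event().  Without it we could read a stale used_event and
 * decide that no interrupt is needed while the driver, still seeing the
 * old used index, also goes to sleep: a classic lost wakeup.  With
 * VIRTIO_RING_F_EVENT_IDX negotiated, vring_need_event() below only
 * signals when used->idx has just stepped past the driver's used_event
 * value.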
*/ 2379 smp_mb(); 2380 2381 /* Always notify when queue is empty (when feature acknowledge) */ 2382 if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) && 2383 !vq->inuse && vu_queue_empty(dev, vq)) { 2384 return true; 2385 } 2386 2387 if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { 2388 return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT); 2389 } 2390 2391 v = vq->signalled_used_valid; 2392 vq->signalled_used_valid = true; 2393 old = vq->signalled_used; 2394 new = vq->signalled_used = vq->used_idx; 2395 return !v || vring_need_event(vring_get_used_event(vq), new, old); 2396 } 2397 2398 static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync) 2399 { 2400 if (unlikely(dev->broken) || 2401 unlikely(!vq->vring.avail)) { 2402 return; 2403 } 2404 2405 if (!vring_notify(dev, vq)) { 2406 DPRINT("skipped notify...\n"); 2407 return; 2408 } 2409 2410 if (vq->call_fd < 0 && 2411 vu_has_protocol_feature(dev, 2412 VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) && 2413 vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_REQ)) { 2414 VhostUserMsg vmsg = { 2415 .request = VHOST_USER_SLAVE_VRING_CALL, 2416 .flags = VHOST_USER_VERSION, 2417 .size = sizeof(vmsg.payload.state), 2418 .payload.state = { 2419 .index = vq - dev->vq, 2420 }, 2421 }; 2422 bool ack = sync && 2423 vu_has_protocol_feature(dev, 2424 VHOST_USER_PROTOCOL_F_REPLY_ACK); 2425 2426 if (ack) { 2427 vmsg.flags |= VHOST_USER_NEED_REPLY_MASK; 2428 } 2429 2430 vu_message_write(dev, dev->slave_fd, &vmsg); 2431 if (ack) { 2432 vu_message_read_default(dev, dev->slave_fd, &vmsg); 2433 } 2434 return; 2435 } 2436 2437 if (eventfd_write(vq->call_fd, 1) < 0) { 2438 vu_panic(dev, "Error writing eventfd: %s", strerror(errno)); 2439 } 2440 } 2441 2442 void vu_queue_notify(VuDev *dev, VuVirtq *vq) 2443 { 2444 _vu_queue_notify(dev, vq, false); 2445 } 2446 2447 void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq) 2448 { 2449 _vu_queue_notify(dev, vq, true); 2450 } 2451 2452 static inline void 2453 vring_used_flags_set_bit(VuVirtq *vq, int mask) 2454 { 2455 uint16_t *flags; 2456 2457 flags = (uint16_t *)((char*)vq->vring.used + 2458 offsetof(struct vring_used, flags)); 2459 *flags = htole16(le16toh(*flags) | mask); 2460 } 2461 2462 static inline void 2463 vring_used_flags_unset_bit(VuVirtq *vq, int mask) 2464 { 2465 uint16_t *flags; 2466 2467 flags = (uint16_t *)((char*)vq->vring.used + 2468 offsetof(struct vring_used, flags)); 2469 *flags = htole16(le16toh(*flags) & ~mask); 2470 } 2471 2472 static inline void 2473 vring_set_avail_event(VuVirtq *vq, uint16_t val) 2474 { 2475 uint16_t *avail; 2476 2477 if (!vq->notification) { 2478 return; 2479 } 2480 2481 avail = (uint16_t *)&vq->vring.used->ring[vq->vring.num]; 2482 *avail = htole16(val); 2483 } 2484 2485 void 2486 vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable) 2487 { 2488 vq->notification = enable; 2489 if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { 2490 vring_set_avail_event(vq, vring_avail_idx(vq)); 2491 } else if (enable) { 2492 vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY); 2493 } else { 2494 vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY); 2495 } 2496 if (enable) { 2497 /* Expose avail event/used flags before caller checks the avail idx. 
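 * Re-enabling notifications races with the driver adding a buffer: the
 * driver may have queued it while notifications were still suppressed and
 * therefore never kicked us.  The barrier below orders our update of the
 * avail event / used flags against the caller's subsequent read of
 * avail->idx, so either we see the new buffer when we re-check the ring,
 * or the driver sees notifications enabled and sends a kick.  The usual
 * pattern is: disable, drain with vu_queue_pop(), re-enable, then re-check
 * with vu_queue_empty() and go around again if it returns false (see the
 * illustrative sketch at the end of this file).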
*/ 2498 smp_mb(); 2499 } 2500 } 2501 2502 static bool 2503 virtqueue_map_desc(VuDev *dev, 2504 unsigned int *p_num_sg, struct iovec *iov, 2505 unsigned int max_num_sg, bool is_write, 2506 uint64_t pa, size_t sz) 2507 { 2508 unsigned num_sg = *p_num_sg; 2509 2510 assert(num_sg <= max_num_sg); 2511 2512 if (!sz) { 2513 vu_panic(dev, "virtio: zero sized buffers are not allowed"); 2514 return false; 2515 } 2516 2517 while (sz) { 2518 uint64_t len = sz; 2519 2520 if (num_sg == max_num_sg) { 2521 vu_panic(dev, "virtio: too many descriptors in indirect table"); 2522 return false; 2523 } 2524 2525 iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa); 2526 if (iov[num_sg].iov_base == NULL) { 2527 vu_panic(dev, "virtio: invalid address for buffers"); 2528 return false; 2529 } 2530 iov[num_sg].iov_len = len; 2531 num_sg++; 2532 sz -= len; 2533 pa += len; 2534 } 2535 2536 *p_num_sg = num_sg; 2537 return true; 2538 } 2539 2540 static void * 2541 virtqueue_alloc_element(size_t sz, 2542 unsigned out_num, unsigned in_num) 2543 { 2544 VuVirtqElement *elem; 2545 size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0])); 2546 size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]); 2547 size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]); 2548 2549 assert(sz >= sizeof(VuVirtqElement)); 2550 elem = malloc(out_sg_end); 2551 elem->out_num = out_num; 2552 elem->in_num = in_num; 2553 elem->in_sg = (void *)elem + in_sg_ofs; 2554 elem->out_sg = (void *)elem + out_sg_ofs; 2555 return elem; 2556 } 2557 2558 static void * 2559 vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz) 2560 { 2561 struct vring_desc *desc = vq->vring.desc; 2562 uint64_t desc_addr, read_len; 2563 unsigned int desc_len; 2564 unsigned int max = vq->vring.num; 2565 unsigned int i = idx; 2566 VuVirtqElement *elem; 2567 unsigned int out_num = 0, in_num = 0; 2568 struct iovec iov[VIRTQUEUE_MAX_SIZE]; 2569 struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE]; 2570 int rc; 2571 2572 if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) { 2573 if (le32toh(desc[i].len) % sizeof(struct vring_desc)) { 2574 vu_panic(dev, "Invalid size for indirect buffer table"); 2575 return NULL; 2576 } 2577 2578 /* loop over the indirect descriptor table */ 2579 desc_addr = le64toh(desc[i].addr); 2580 desc_len = le32toh(desc[i].len); 2581 max = desc_len / sizeof(struct vring_desc); 2582 read_len = desc_len; 2583 desc = vu_gpa_to_va(dev, &read_len, desc_addr); 2584 if (unlikely(desc && read_len != desc_len)) { 2585 /* Failed to use zero copy */ 2586 desc = NULL; 2587 if (!virtqueue_read_indirect_desc(dev, desc_buf, 2588 desc_addr, 2589 desc_len)) { 2590 desc = desc_buf; 2591 } 2592 } 2593 if (!desc) { 2594 vu_panic(dev, "Invalid indirect buffer table"); 2595 return NULL; 2596 } 2597 i = 0; 2598 } 2599 2600 /* Collect all the descriptors */ 2601 do { 2602 if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) { 2603 if (!virtqueue_map_desc(dev, &in_num, iov + out_num, 2604 VIRTQUEUE_MAX_SIZE - out_num, true, 2605 le64toh(desc[i].addr), 2606 le32toh(desc[i].len))) { 2607 return NULL; 2608 } 2609 } else { 2610 if (in_num) { 2611 vu_panic(dev, "Incorrect order for descriptors"); 2612 return NULL; 2613 } 2614 if (!virtqueue_map_desc(dev, &out_num, iov, 2615 VIRTQUEUE_MAX_SIZE, false, 2616 le64toh(desc[i].addr), 2617 le32toh(desc[i].len))) { 2618 return NULL; 2619 } 2620 } 2621 2622 /* If we've got too many, that implies a descriptor loop. 
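 * As in vu_queue_get_avail_bytes(), walking more entries than the
 * descriptor table holds can only happen if the chain loops back on
 * itself.  Overflowing the on-stack iov[] is prevented separately: the
 * max_num_sg argument passed to virtqueue_map_desc() above caps the total
 * number of mapped segments at VIRTQUEUE_MAX_SIZE.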
*/ 2623 if ((in_num + out_num) > max) { 2624 vu_panic(dev, "Looped descriptor"); 2625 return NULL; 2626 } 2627 rc = virtqueue_read_next_desc(dev, desc, i, max, &i); 2628 } while (rc == VIRTQUEUE_READ_DESC_MORE); 2629 2630 if (rc == VIRTQUEUE_READ_DESC_ERROR) { 2631 vu_panic(dev, "read descriptor error"); 2632 return NULL; 2633 } 2634 2635 /* Now copy what we have collected and mapped */ 2636 elem = virtqueue_alloc_element(sz, out_num, in_num); 2637 elem->index = idx; 2638 for (i = 0; i < out_num; i++) { 2639 elem->out_sg[i] = iov[i]; 2640 } 2641 for (i = 0; i < in_num; i++) { 2642 elem->in_sg[i] = iov[out_num + i]; 2643 } 2644 2645 return elem; 2646 } 2647 2648 static int 2649 vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx) 2650 { 2651 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { 2652 return 0; 2653 } 2654 2655 if (unlikely(!vq->inflight)) { 2656 return -1; 2657 } 2658 2659 vq->inflight->desc[desc_idx].counter = vq->counter++; 2660 vq->inflight->desc[desc_idx].inflight = 1; 2661 2662 return 0; 2663 } 2664 2665 static int 2666 vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx) 2667 { 2668 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { 2669 return 0; 2670 } 2671 2672 if (unlikely(!vq->inflight)) { 2673 return -1; 2674 } 2675 2676 vq->inflight->last_batch_head = desc_idx; 2677 2678 return 0; 2679 } 2680 2681 static int 2682 vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx) 2683 { 2684 if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { 2685 return 0; 2686 } 2687 2688 if (unlikely(!vq->inflight)) { 2689 return -1; 2690 } 2691 2692 barrier(); 2693 2694 vq->inflight->desc[desc_idx].inflight = 0; 2695 2696 barrier(); 2697 2698 vq->inflight->used_idx = vq->used_idx; 2699 2700 return 0; 2701 } 2702 2703 void * 2704 vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz) 2705 { 2706 int i; 2707 unsigned int head; 2708 VuVirtqElement *elem; 2709 2710 if (unlikely(dev->broken) || 2711 unlikely(!vq->vring.avail)) { 2712 return NULL; 2713 } 2714 2715 if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) { 2716 i = (--vq->resubmit_num); 2717 elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz); 2718 2719 if (!vq->resubmit_num) { 2720 free(vq->resubmit_list); 2721 vq->resubmit_list = NULL; 2722 } 2723 2724 return elem; 2725 } 2726 2727 if (vu_queue_empty(dev, vq)) { 2728 return NULL; 2729 } 2730 /* 2731 * Needed after virtio_queue_empty(), see comment in 2732 * virtqueue_num_heads(). 
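 * vu_queue_empty() has just checked the avail index; the read barrier
 * keeps the subsequent loads of the avail ring entry and of the descriptor
 * itself, done by virtqueue_get_head() and vu_queue_map_desc() below, from
 * being reordered before that index load.  Otherwise we could fetch a
 * descriptor slot the driver has not finished publishing yet.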
2733 */ 2734 smp_rmb(); 2735 2736 if (vq->inuse >= vq->vring.num) { 2737 vu_panic(dev, "Virtqueue size exceeded"); 2738 return NULL; 2739 } 2740 2741 if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) { 2742 return NULL; 2743 } 2744 2745 if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) { 2746 vring_set_avail_event(vq, vq->last_avail_idx); 2747 } 2748 2749 elem = vu_queue_map_desc(dev, vq, head, sz); 2750 2751 if (!elem) { 2752 return NULL; 2753 } 2754 2755 vq->inuse++; 2756 2757 vu_queue_inflight_get(dev, vq, head); 2758 2759 return elem; 2760 } 2761 2762 static void 2763 vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem, 2764 size_t len) 2765 { 2766 vq->inuse--; 2767 /* unmap, when DMA support is added */ 2768 } 2769 2770 void 2771 vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem, 2772 size_t len) 2773 { 2774 vq->last_avail_idx--; 2775 vu_queue_detach_element(dev, vq, elem, len); 2776 } 2777 2778 bool 2779 vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num) 2780 { 2781 if (num > vq->inuse) { 2782 return false; 2783 } 2784 vq->last_avail_idx -= num; 2785 vq->inuse -= num; 2786 return true; 2787 } 2788 2789 static inline 2790 void vring_used_write(VuDev *dev, VuVirtq *vq, 2791 struct vring_used_elem *uelem, int i) 2792 { 2793 struct vring_used *used = vq->vring.used; 2794 2795 used->ring[i] = *uelem; 2796 vu_log_write(dev, vq->vring.log_guest_addr + 2797 offsetof(struct vring_used, ring[i]), 2798 sizeof(used->ring[i])); 2799 } 2800 2801 2802 static void 2803 vu_log_queue_fill(VuDev *dev, VuVirtq *vq, 2804 const VuVirtqElement *elem, 2805 unsigned int len) 2806 { 2807 struct vring_desc *desc = vq->vring.desc; 2808 unsigned int i, max, min, desc_len; 2809 uint64_t desc_addr, read_len; 2810 struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE]; 2811 unsigned num_bufs = 0; 2812 2813 max = vq->vring.num; 2814 i = elem->index; 2815 2816 if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) { 2817 if (le32toh(desc[i].len) % sizeof(struct vring_desc)) { 2818 vu_panic(dev, "Invalid size for indirect buffer table"); 2819 return; 2820 } 2821 2822 /* loop over the indirect descriptor table */ 2823 desc_addr = le64toh(desc[i].addr); 2824 desc_len = le32toh(desc[i].len); 2825 max = desc_len / sizeof(struct vring_desc); 2826 read_len = desc_len; 2827 desc = vu_gpa_to_va(dev, &read_len, desc_addr); 2828 if (unlikely(desc && read_len != desc_len)) { 2829 /* Failed to use zero copy */ 2830 desc = NULL; 2831 if (!virtqueue_read_indirect_desc(dev, desc_buf, 2832 desc_addr, 2833 desc_len)) { 2834 desc = desc_buf; 2835 } 2836 } 2837 if (!desc) { 2838 vu_panic(dev, "Invalid indirect buffer table"); 2839 return; 2840 } 2841 i = 0; 2842 } 2843 2844 do { 2845 if (++num_bufs > max) { 2846 vu_panic(dev, "Looped descriptor"); 2847 return; 2848 } 2849 2850 if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) { 2851 min = MIN(le32toh(desc[i].len), len); 2852 vu_log_write(dev, le64toh(desc[i].addr), min); 2853 len -= min; 2854 } 2855 2856 } while (len > 0 && 2857 (virtqueue_read_next_desc(dev, desc, i, max, &i) 2858 == VIRTQUEUE_READ_DESC_MORE)); 2859 } 2860 2861 void 2862 vu_queue_fill(VuDev *dev, VuVirtq *vq, 2863 const VuVirtqElement *elem, 2864 unsigned int len, unsigned int idx) 2865 { 2866 struct vring_used_elem uelem; 2867 2868 if (unlikely(dev->broken) || 2869 unlikely(!vq->vring.avail)) { 2870 return; 2871 } 2872 2873 vu_log_queue_fill(dev, vq, elem, len); 2874 2875 idx = (idx + vq->used_idx) % vq->vring.num; 2876 2877 uelem.id = htole32(elem->index); 2878 uelem.len = 
htole32(len); 2879 vring_used_write(dev, vq, &uelem, idx); 2880 } 2881 2882 static inline 2883 void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val) 2884 { 2885 vq->vring.used->idx = htole16(val); 2886 vu_log_write(dev, 2887 vq->vring.log_guest_addr + offsetof(struct vring_used, idx), 2888 sizeof(vq->vring.used->idx)); 2889 2890 vq->used_idx = val; 2891 } 2892 2893 void 2894 vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count) 2895 { 2896 uint16_t old, new; 2897 2898 if (unlikely(dev->broken) || 2899 unlikely(!vq->vring.avail)) { 2900 return; 2901 } 2902 2903 /* Make sure buffer is written before we update index. */ 2904 smp_wmb(); 2905 2906 old = vq->used_idx; 2907 new = old + count; 2908 vring_used_idx_set(dev, vq, new); 2909 vq->inuse -= count; 2910 if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) { 2911 vq->signalled_used_valid = false; 2912 } 2913 } 2914 2915 void 2916 vu_queue_push(VuDev *dev, VuVirtq *vq, 2917 const VuVirtqElement *elem, unsigned int len) 2918 { 2919 vu_queue_fill(dev, vq, elem, len, 0); 2920 vu_queue_inflight_pre_put(dev, vq, elem->index); 2921 vu_queue_flush(dev, vq, 1); 2922 vu_queue_inflight_post_put(dev, vq, elem->index); 2923 } 2924
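
/*
 * Illustrative sketch (not part of the library): one way a backend might
 * drive the API above -- vu_init() with application-supplied callbacks, a
 * poll() loop feeding vu_dispatch(), and a request loop built on
 * vu_queue_pop()/vu_queue_push()/vu_queue_notify().  The callback typedefs
 * (vu_panic_cb, vu_watch_cb, ...) and the VuDevIface field names are
 * assumed to match libvhost-user.h; the example_* names, the echo
 * behaviour and the already-connected socket are invented for the sake of
 * the example.  Kept under #if 0 so it is never compiled into the library.
 */
#if 0
#include <poll.h>

/* Drain one virtqueue using the disable/drain/re-enable/re-check pattern
 * described in vu_queue_set_notification() above. */
static void
example_process_queue(VuDev *dev, VuVirtq *vq)
{
    VuVirtqElement *elem;

    do {
        vu_queue_set_notification(dev, vq, 0);

        while ((elem = vu_queue_pop(dev, vq, sizeof(*elem))) != NULL) {
            unsigned int len = 0;

            /* A trivial "device": if the request has both a readable and a
             * writable buffer, echo the first readable segment into the
             * first writable one. */
            if (elem->out_num > 0 && elem->in_num > 0) {
                len = MIN(elem->in_sg[0].iov_len, elem->out_sg[0].iov_len);
                memcpy(elem->in_sg[0].iov_base, elem->out_sg[0].iov_base, len);
            }

            vu_queue_push(dev, vq, elem, len);   /* len = bytes written back */
            free(elem);             /* vu_queue_pop() malloc()s the element */
        }

        /* vu_queue_notify() applies the event-idx/flag suppression itself */
        vu_queue_notify(dev, vq);
        vu_queue_set_notification(dev, vq, 1);
    } while (!vu_queue_empty(dev, vq));
}

static void
example_panic(VuDev *dev, const char *msg)
{
    fprintf(stderr, "vhost-user panic: %s\n", msg ? msg : "(null)");
    exit(EXIT_FAILURE);
}

/* A real backend must hook these into its event loop so that kick fds and
 * the slave channel are serviced; they are left as stubs here. */
static void
example_set_watch(VuDev *dev, int fd, int condition, vu_watch_cb cb,
                  void *data)
{
    /* e.g. add fd to an epoll set and invoke cb(dev, condition, data)
     * whenever it becomes readable */
}

static void
example_remove_watch(VuDev *dev, int fd)
{
    /* e.g. remove fd from the epoll set */
}

static uint64_t
example_get_features(VuDev *dev)
{
    return 1ULL << VIRTIO_RING_F_EVENT_IDX;  /* whatever the device offers */
}

static void
example_set_features(VuDev *dev, uint64_t features)
{
    /* record negotiated features if the device cares */
}

static const VuDevIface example_iface = {
    .get_features = example_get_features,
    .set_features = example_set_features,
    /* .queue_set_started is where a backend typically starts or stops
     * watching a queue's kick fd and processing the queue */
};

static void
example_run(int connected_socket)       /* an already-accepted UNIX socket */
{
    VuDev dev;
    struct pollfd pfd = { .fd = connected_socket, .events = POLLIN };

    if (!vu_init(&dev, 1 /* max_queues */, connected_socket, example_panic,
                 NULL /* use vu_message_read_default */, example_set_watch,
                 example_remove_watch, &example_iface)) {
        return;
    }

    /* Hand each master message to vu_dispatch() until the connection dies;
     * kick fds registered through example_set_watch() would call
     * example_process_queue(&dev, vu_get_queue(&dev, qidx)). */
    while (poll(&pfd, 1, -1) > 0 && vu_dispatch(&dev)) {
        /* nothing else to do in this sketch */
    }

    vu_deinit(&dev);
}
#endif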