1 /* 2 * vhost support 3 * 4 * Copyright Red Hat, Inc. 2010 5 * 6 * Authors: 7 * Michael S. Tsirkin <mst@redhat.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 * Contributions after 2012-01-13 are licensed under the terms of the 13 * GNU GPL, version 2 or (at your option) any later version. 14 */ 15 16 #include "qemu/osdep.h" 17 #include "qapi/error.h" 18 #include "hw/virtio/vhost.h" 19 #include "hw/hw.h" 20 #include "qemu/atomic.h" 21 #include "qemu/range.h" 22 #include "qemu/error-report.h" 23 #include "qemu/memfd.h" 24 #include <linux/vhost.h> 25 #include "exec/address-spaces.h" 26 #include "hw/virtio/virtio-bus.h" 27 #include "hw/virtio/virtio-access.h" 28 #include "migration/blocker.h" 29 #include "sysemu/dma.h" 30 31 /* enabled until disconnected backend stabilizes */ 32 #define _VHOST_DEBUG 1 33 34 #ifdef _VHOST_DEBUG 35 #define VHOST_OPS_DEBUG(fmt, ...) \ 36 do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \ 37 strerror(errno), errno); } while (0) 38 #else 39 #define VHOST_OPS_DEBUG(fmt, ...) \ 40 do { } while (0) 41 #endif 42 43 static struct vhost_log *vhost_log; 44 static struct vhost_log *vhost_log_shm; 45 46 static unsigned int used_memslots; 47 static QLIST_HEAD(, vhost_dev) vhost_devices = 48 QLIST_HEAD_INITIALIZER(vhost_devices); 49 50 bool vhost_has_free_slot(void) 51 { 52 unsigned int slots_limit = ~0U; 53 struct vhost_dev *hdev; 54 55 QLIST_FOREACH(hdev, &vhost_devices, entry) { 56 unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev); 57 slots_limit = MIN(slots_limit, r); 58 } 59 return slots_limit > used_memslots; 60 } 61 62 static void vhost_dev_sync_region(struct vhost_dev *dev, 63 MemoryRegionSection *section, 64 uint64_t mfirst, uint64_t mlast, 65 uint64_t rfirst, uint64_t rlast) 66 { 67 vhost_log_chunk_t *log = dev->log->log; 68 69 uint64_t start = MAX(mfirst, rfirst); 70 uint64_t end = MIN(mlast, rlast); 71 vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK; 72 vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1; 73 uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK; 74 75 if (end < start) { 76 return; 77 } 78 assert(end / VHOST_LOG_CHUNK < dev->log_size); 79 assert(start / VHOST_LOG_CHUNK < dev->log_size); 80 81 for (;from < to; ++from) { 82 vhost_log_chunk_t log; 83 /* We first check with non-atomic: much cheaper, 84 * and we expect non-dirty to be the common case. */ 85 if (!*from) { 86 addr += VHOST_LOG_CHUNK; 87 continue; 88 } 89 /* Data must be read atomically. We don't really need barrier semantics 90 * but it's easier to use atomic_* than roll our own. */ 91 log = atomic_xchg(from, 0); 92 while (log) { 93 int bit = ctzl(log); 94 hwaddr page_addr; 95 hwaddr section_offset; 96 hwaddr mr_offset; 97 page_addr = addr + bit * VHOST_LOG_PAGE; 98 section_offset = page_addr - section->offset_within_address_space; 99 mr_offset = section_offset + section->offset_within_region; 100 memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE); 101 log &= ~(0x1ull << bit); 102 } 103 addr += VHOST_LOG_CHUNK; 104 } 105 } 106 107 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev, 108 MemoryRegionSection *section, 109 hwaddr first, 110 hwaddr last) 111 { 112 int i; 113 hwaddr start_addr; 114 hwaddr end_addr; 115 116 if (!dev->log_enabled || !dev->started) { 117 return 0; 118 } 119 start_addr = section->offset_within_address_space; 120 end_addr = range_get_last(start_addr, int128_get64(section->size)); 121 start_addr = MAX(first, start_addr); 122 end_addr = MIN(last, end_addr); 123 124 for (i = 0; i < dev->mem->nregions; ++i) { 125 struct vhost_memory_region *reg = dev->mem->regions + i; 126 vhost_dev_sync_region(dev, section, start_addr, end_addr, 127 reg->guest_phys_addr, 128 range_get_last(reg->guest_phys_addr, 129 reg->memory_size)); 130 } 131 for (i = 0; i < dev->nvqs; ++i) { 132 struct vhost_virtqueue *vq = dev->vqs + i; 133 vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys, 134 range_get_last(vq->used_phys, vq->used_size)); 135 } 136 return 0; 137 } 138 139 static void vhost_log_sync(MemoryListener *listener, 140 MemoryRegionSection *section) 141 { 142 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 143 memory_listener); 144 vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL); 145 } 146 147 static void vhost_log_sync_range(struct vhost_dev *dev, 148 hwaddr first, hwaddr last) 149 { 150 int i; 151 /* FIXME: this is N^2 in number of sections */ 152 for (i = 0; i < dev->n_mem_sections; ++i) { 153 MemoryRegionSection *section = &dev->mem_sections[i]; 154 vhost_sync_dirty_bitmap(dev, section, first, last); 155 } 156 } 157 158 /* Assign/unassign. Keep an unsorted array of non-overlapping 159 * memory regions in dev->mem. */ 160 static void vhost_dev_unassign_memory(struct vhost_dev *dev, 161 uint64_t start_addr, 162 uint64_t size) 163 { 164 int from, to, n = dev->mem->nregions; 165 /* Track overlapping/split regions for sanity checking. */ 166 int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0; 167 168 for (from = 0, to = 0; from < n; ++from, ++to) { 169 struct vhost_memory_region *reg = dev->mem->regions + to; 170 uint64_t reglast; 171 uint64_t memlast; 172 uint64_t change; 173 174 /* clone old region */ 175 if (to != from) { 176 memcpy(reg, dev->mem->regions + from, sizeof *reg); 177 } 178 179 /* No overlap is simple */ 180 if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size, 181 start_addr, size)) { 182 continue; 183 } 184 185 /* Split only happens if supplied region 186 * is in the middle of an existing one. Thus it can not 187 * overlap with any other existing region. */ 188 assert(!split); 189 190 reglast = range_get_last(reg->guest_phys_addr, reg->memory_size); 191 memlast = range_get_last(start_addr, size); 192 193 /* Remove whole region */ 194 if (start_addr <= reg->guest_phys_addr && memlast >= reglast) { 195 --dev->mem->nregions; 196 --to; 197 ++overlap_middle; 198 continue; 199 } 200 201 /* Shrink region */ 202 if (memlast >= reglast) { 203 reg->memory_size = start_addr - reg->guest_phys_addr; 204 assert(reg->memory_size); 205 assert(!overlap_end); 206 ++overlap_end; 207 continue; 208 } 209 210 /* Shift region */ 211 if (start_addr <= reg->guest_phys_addr) { 212 change = memlast + 1 - reg->guest_phys_addr; 213 reg->memory_size -= change; 214 reg->guest_phys_addr += change; 215 reg->userspace_addr += change; 216 assert(reg->memory_size); 217 assert(!overlap_start); 218 ++overlap_start; 219 continue; 220 } 221 222 /* This only happens if supplied region 223 * is in the middle of an existing one. Thus it can not 224 * overlap with any other existing region. */ 225 assert(!overlap_start); 226 assert(!overlap_end); 227 assert(!overlap_middle); 228 /* Split region: shrink first part, shift second part. */ 229 memcpy(dev->mem->regions + n, reg, sizeof *reg); 230 reg->memory_size = start_addr - reg->guest_phys_addr; 231 assert(reg->memory_size); 232 change = memlast + 1 - reg->guest_phys_addr; 233 reg = dev->mem->regions + n; 234 reg->memory_size -= change; 235 assert(reg->memory_size); 236 reg->guest_phys_addr += change; 237 reg->userspace_addr += change; 238 /* Never add more than 1 region */ 239 assert(dev->mem->nregions == n); 240 ++dev->mem->nregions; 241 ++split; 242 } 243 } 244 245 /* Called after unassign, so no regions overlap the given range. */ 246 static void vhost_dev_assign_memory(struct vhost_dev *dev, 247 uint64_t start_addr, 248 uint64_t size, 249 uint64_t uaddr) 250 { 251 int from, to; 252 struct vhost_memory_region *merged = NULL; 253 for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) { 254 struct vhost_memory_region *reg = dev->mem->regions + to; 255 uint64_t prlast, urlast; 256 uint64_t pmlast, umlast; 257 uint64_t s, e, u; 258 259 /* clone old region */ 260 if (to != from) { 261 memcpy(reg, dev->mem->regions + from, sizeof *reg); 262 } 263 prlast = range_get_last(reg->guest_phys_addr, reg->memory_size); 264 pmlast = range_get_last(start_addr, size); 265 urlast = range_get_last(reg->userspace_addr, reg->memory_size); 266 umlast = range_get_last(uaddr, size); 267 268 /* check for overlapping regions: should never happen. */ 269 assert(prlast < start_addr || pmlast < reg->guest_phys_addr); 270 /* Not an adjacent or overlapping region - do not merge. */ 271 if ((prlast + 1 != start_addr || urlast + 1 != uaddr) && 272 (pmlast + 1 != reg->guest_phys_addr || 273 umlast + 1 != reg->userspace_addr)) { 274 continue; 275 } 276 277 if (dev->vhost_ops->vhost_backend_can_merge && 278 !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size, 279 reg->userspace_addr, 280 reg->memory_size)) { 281 continue; 282 } 283 284 if (merged) { 285 --to; 286 assert(to >= 0); 287 } else { 288 merged = reg; 289 } 290 u = MIN(uaddr, reg->userspace_addr); 291 s = MIN(start_addr, reg->guest_phys_addr); 292 e = MAX(pmlast, prlast); 293 uaddr = merged->userspace_addr = u; 294 start_addr = merged->guest_phys_addr = s; 295 size = merged->memory_size = e - s + 1; 296 assert(merged->memory_size); 297 } 298 299 if (!merged) { 300 struct vhost_memory_region *reg = dev->mem->regions + to; 301 memset(reg, 0, sizeof *reg); 302 reg->memory_size = size; 303 assert(reg->memory_size); 304 reg->guest_phys_addr = start_addr; 305 reg->userspace_addr = uaddr; 306 ++to; 307 } 308 assert(to <= dev->mem->nregions + 1); 309 dev->mem->nregions = to; 310 } 311 312 static uint64_t vhost_get_log_size(struct vhost_dev *dev) 313 { 314 uint64_t log_size = 0; 315 int i; 316 for (i = 0; i < dev->mem->nregions; ++i) { 317 struct vhost_memory_region *reg = dev->mem->regions + i; 318 uint64_t last = range_get_last(reg->guest_phys_addr, 319 reg->memory_size); 320 log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1); 321 } 322 for (i = 0; i < dev->nvqs; ++i) { 323 struct vhost_virtqueue *vq = dev->vqs + i; 324 uint64_t last = vq->used_phys + vq->used_size - 1; 325 log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1); 326 } 327 return log_size; 328 } 329 330 static struct vhost_log *vhost_log_alloc(uint64_t size, bool share) 331 { 332 struct vhost_log *log; 333 uint64_t logsize = size * sizeof(*(log->log)); 334 int fd = -1; 335 336 log = g_new0(struct vhost_log, 1); 337 if (share) { 338 log->log = qemu_memfd_alloc("vhost-log", logsize, 339 F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL, 340 &fd); 341 memset(log->log, 0, logsize); 342 } else { 343 log->log = g_malloc0(logsize); 344 } 345 346 log->size = size; 347 log->refcnt = 1; 348 log->fd = fd; 349 350 return log; 351 } 352 353 static struct vhost_log *vhost_log_get(uint64_t size, bool share) 354 { 355 struct vhost_log *log = share ? vhost_log_shm : vhost_log; 356 357 if (!log || log->size != size) { 358 log = vhost_log_alloc(size, share); 359 if (share) { 360 vhost_log_shm = log; 361 } else { 362 vhost_log = log; 363 } 364 } else { 365 ++log->refcnt; 366 } 367 368 return log; 369 } 370 371 static void vhost_log_put(struct vhost_dev *dev, bool sync) 372 { 373 struct vhost_log *log = dev->log; 374 375 if (!log) { 376 return; 377 } 378 dev->log = NULL; 379 dev->log_size = 0; 380 381 --log->refcnt; 382 if (log->refcnt == 0) { 383 /* Sync only the range covered by the old log */ 384 if (dev->log_size && sync) { 385 vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1); 386 } 387 388 if (vhost_log == log) { 389 g_free(log->log); 390 vhost_log = NULL; 391 } else if (vhost_log_shm == log) { 392 qemu_memfd_free(log->log, log->size * sizeof(*(log->log)), 393 log->fd); 394 vhost_log_shm = NULL; 395 } 396 397 g_free(log); 398 } 399 } 400 401 static bool vhost_dev_log_is_shared(struct vhost_dev *dev) 402 { 403 return dev->vhost_ops->vhost_requires_shm_log && 404 dev->vhost_ops->vhost_requires_shm_log(dev); 405 } 406 407 static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size) 408 { 409 struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev)); 410 uint64_t log_base = (uintptr_t)log->log; 411 int r; 412 413 /* inform backend of log switching, this must be done before 414 releasing the current log, to ensure no logging is lost */ 415 r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log); 416 if (r < 0) { 417 VHOST_OPS_DEBUG("vhost_set_log_base failed"); 418 } 419 420 vhost_log_put(dev, true); 421 dev->log = log; 422 dev->log_size = size; 423 } 424 425 static int vhost_dev_has_iommu(struct vhost_dev *dev) 426 { 427 VirtIODevice *vdev = dev->vdev; 428 429 return virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); 430 } 431 432 static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr, 433 hwaddr *plen, int is_write) 434 { 435 if (!vhost_dev_has_iommu(dev)) { 436 return cpu_physical_memory_map(addr, plen, is_write); 437 } else { 438 return (void *)(uintptr_t)addr; 439 } 440 } 441 442 static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer, 443 hwaddr len, int is_write, 444 hwaddr access_len) 445 { 446 if (!vhost_dev_has_iommu(dev)) { 447 cpu_physical_memory_unmap(buffer, len, is_write, access_len); 448 } 449 } 450 451 static int vhost_verify_ring_part_mapping(struct vhost_dev *dev, 452 void *part, 453 uint64_t part_addr, 454 uint64_t part_size, 455 uint64_t start_addr, 456 uint64_t size) 457 { 458 hwaddr l; 459 void *p; 460 int r = 0; 461 462 if (!ranges_overlap(start_addr, size, part_addr, part_size)) { 463 return 0; 464 } 465 l = part_size; 466 p = vhost_memory_map(dev, part_addr, &l, 1); 467 if (!p || l != part_size) { 468 r = -ENOMEM; 469 } 470 if (p != part) { 471 r = -EBUSY; 472 } 473 vhost_memory_unmap(dev, p, l, 0, 0); 474 return r; 475 } 476 477 static int vhost_verify_ring_mappings(struct vhost_dev *dev, 478 uint64_t start_addr, 479 uint64_t size) 480 { 481 int i, j; 482 int r = 0; 483 const char *part_name[] = { 484 "descriptor table", 485 "available ring", 486 "used ring" 487 }; 488 489 for (i = 0; i < dev->nvqs; ++i) { 490 struct vhost_virtqueue *vq = dev->vqs + i; 491 492 j = 0; 493 r = vhost_verify_ring_part_mapping(dev, vq->desc, vq->desc_phys, 494 vq->desc_size, start_addr, size); 495 if (!r) { 496 break; 497 } 498 499 j++; 500 r = vhost_verify_ring_part_mapping(dev, vq->avail, vq->avail_phys, 501 vq->avail_size, start_addr, size); 502 if (!r) { 503 break; 504 } 505 506 j++; 507 r = vhost_verify_ring_part_mapping(dev, vq->used, vq->used_phys, 508 vq->used_size, start_addr, size); 509 if (!r) { 510 break; 511 } 512 } 513 514 if (r == -ENOMEM) { 515 error_report("Unable to map %s for ring %d", part_name[j], i); 516 } else if (r == -EBUSY) { 517 error_report("%s relocated for ring %d", part_name[j], i); 518 } 519 return r; 520 } 521 522 static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev, 523 uint64_t start_addr, 524 uint64_t size) 525 { 526 int i, n = dev->mem->nregions; 527 for (i = 0; i < n; ++i) { 528 struct vhost_memory_region *reg = dev->mem->regions + i; 529 if (ranges_overlap(reg->guest_phys_addr, reg->memory_size, 530 start_addr, size)) { 531 return reg; 532 } 533 } 534 return NULL; 535 } 536 537 static bool vhost_dev_cmp_memory(struct vhost_dev *dev, 538 uint64_t start_addr, 539 uint64_t size, 540 uint64_t uaddr) 541 { 542 struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size); 543 uint64_t reglast; 544 uint64_t memlast; 545 546 if (!reg) { 547 return true; 548 } 549 550 reglast = range_get_last(reg->guest_phys_addr, reg->memory_size); 551 memlast = range_get_last(start_addr, size); 552 553 /* Need to extend region? */ 554 if (start_addr < reg->guest_phys_addr || memlast > reglast) { 555 return true; 556 } 557 /* userspace_addr changed? */ 558 return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr; 559 } 560 561 static void vhost_set_memory(MemoryListener *listener, 562 MemoryRegionSection *section, 563 bool add) 564 { 565 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 566 memory_listener); 567 hwaddr start_addr = section->offset_within_address_space; 568 ram_addr_t size = int128_get64(section->size); 569 bool log_dirty = 570 memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION); 571 int s = offsetof(struct vhost_memory, regions) + 572 (dev->mem->nregions + 1) * sizeof dev->mem->regions[0]; 573 void *ram; 574 575 dev->mem = g_realloc(dev->mem, s); 576 577 if (log_dirty) { 578 add = false; 579 } 580 581 assert(size); 582 583 /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */ 584 ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region; 585 if (add) { 586 if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) { 587 /* Region exists with same address. Nothing to do. */ 588 return; 589 } 590 } else { 591 if (!vhost_dev_find_reg(dev, start_addr, size)) { 592 /* Removing region that we don't access. Nothing to do. */ 593 return; 594 } 595 } 596 597 vhost_dev_unassign_memory(dev, start_addr, size); 598 if (add) { 599 /* Add given mapping, merging adjacent regions if any */ 600 vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram); 601 } else { 602 /* Remove old mapping for this memory, if any. */ 603 vhost_dev_unassign_memory(dev, start_addr, size); 604 } 605 dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr); 606 dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1); 607 dev->memory_changed = true; 608 used_memslots = dev->mem->nregions; 609 } 610 611 static bool vhost_section(MemoryRegionSection *section) 612 { 613 return memory_region_is_ram(section->mr) && 614 !memory_region_is_rom(section->mr); 615 } 616 617 static void vhost_begin(MemoryListener *listener) 618 { 619 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 620 memory_listener); 621 dev->mem_changed_end_addr = 0; 622 dev->mem_changed_start_addr = -1; 623 } 624 625 static void vhost_commit(MemoryListener *listener) 626 { 627 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 628 memory_listener); 629 hwaddr start_addr = 0; 630 ram_addr_t size = 0; 631 uint64_t log_size; 632 int r; 633 634 if (!dev->memory_changed) { 635 return; 636 } 637 if (!dev->started) { 638 return; 639 } 640 if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) { 641 return; 642 } 643 644 if (dev->started) { 645 start_addr = dev->mem_changed_start_addr; 646 size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1; 647 648 r = vhost_verify_ring_mappings(dev, start_addr, size); 649 assert(r >= 0); 650 } 651 652 if (!dev->log_enabled) { 653 r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem); 654 if (r < 0) { 655 VHOST_OPS_DEBUG("vhost_set_mem_table failed"); 656 } 657 dev->memory_changed = false; 658 return; 659 } 660 log_size = vhost_get_log_size(dev); 661 /* We allocate an extra 4K bytes to log, 662 * to reduce the * number of reallocations. */ 663 #define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log) 664 /* To log more, must increase log size before table update. */ 665 if (dev->log_size < log_size) { 666 vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER); 667 } 668 r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem); 669 if (r < 0) { 670 VHOST_OPS_DEBUG("vhost_set_mem_table failed"); 671 } 672 /* To log less, can only decrease log size after table update. */ 673 if (dev->log_size > log_size + VHOST_LOG_BUFFER) { 674 vhost_dev_log_resize(dev, log_size); 675 } 676 dev->memory_changed = false; 677 } 678 679 static void vhost_region_add(MemoryListener *listener, 680 MemoryRegionSection *section) 681 { 682 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 683 memory_listener); 684 685 if (!vhost_section(section)) { 686 return; 687 } 688 689 ++dev->n_mem_sections; 690 dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections, 691 dev->n_mem_sections); 692 dev->mem_sections[dev->n_mem_sections - 1] = *section; 693 memory_region_ref(section->mr); 694 vhost_set_memory(listener, section, true); 695 } 696 697 static void vhost_region_del(MemoryListener *listener, 698 MemoryRegionSection *section) 699 { 700 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 701 memory_listener); 702 int i; 703 704 if (!vhost_section(section)) { 705 return; 706 } 707 708 vhost_set_memory(listener, section, false); 709 memory_region_unref(section->mr); 710 for (i = 0; i < dev->n_mem_sections; ++i) { 711 if (dev->mem_sections[i].offset_within_address_space 712 == section->offset_within_address_space) { 713 --dev->n_mem_sections; 714 memmove(&dev->mem_sections[i], &dev->mem_sections[i+1], 715 (dev->n_mem_sections - i) * sizeof(*dev->mem_sections)); 716 break; 717 } 718 } 719 } 720 721 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) 722 { 723 struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n); 724 struct vhost_dev *hdev = iommu->hdev; 725 hwaddr iova = iotlb->iova + iommu->iommu_offset; 726 727 if (vhost_backend_invalidate_device_iotlb(hdev, iova, 728 iotlb->addr_mask + 1)) { 729 error_report("Fail to invalidate device iotlb"); 730 } 731 } 732 733 static void vhost_iommu_region_add(MemoryListener *listener, 734 MemoryRegionSection *section) 735 { 736 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 737 iommu_listener); 738 struct vhost_iommu *iommu; 739 Int128 end; 740 741 if (!memory_region_is_iommu(section->mr)) { 742 return; 743 } 744 745 iommu = g_malloc0(sizeof(*iommu)); 746 end = int128_add(int128_make64(section->offset_within_region), 747 section->size); 748 end = int128_sub(end, int128_one()); 749 iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify, 750 IOMMU_NOTIFIER_UNMAP, 751 section->offset_within_region, 752 int128_get64(end)); 753 iommu->mr = section->mr; 754 iommu->iommu_offset = section->offset_within_address_space - 755 section->offset_within_region; 756 iommu->hdev = dev; 757 memory_region_register_iommu_notifier(section->mr, &iommu->n); 758 QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next); 759 /* TODO: can replay help performance here? */ 760 } 761 762 static void vhost_iommu_region_del(MemoryListener *listener, 763 MemoryRegionSection *section) 764 { 765 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 766 iommu_listener); 767 struct vhost_iommu *iommu; 768 769 if (!memory_region_is_iommu(section->mr)) { 770 return; 771 } 772 773 QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) { 774 if (iommu->mr == section->mr && 775 iommu->n.start == section->offset_within_region) { 776 memory_region_unregister_iommu_notifier(iommu->mr, 777 &iommu->n); 778 QLIST_REMOVE(iommu, iommu_next); 779 g_free(iommu); 780 break; 781 } 782 } 783 } 784 785 static void vhost_region_nop(MemoryListener *listener, 786 MemoryRegionSection *section) 787 { 788 } 789 790 static int vhost_virtqueue_set_addr(struct vhost_dev *dev, 791 struct vhost_virtqueue *vq, 792 unsigned idx, bool enable_log) 793 { 794 struct vhost_vring_addr addr = { 795 .index = idx, 796 .desc_user_addr = (uint64_t)(unsigned long)vq->desc, 797 .avail_user_addr = (uint64_t)(unsigned long)vq->avail, 798 .used_user_addr = (uint64_t)(unsigned long)vq->used, 799 .log_guest_addr = vq->used_phys, 800 .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0, 801 }; 802 int r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr); 803 if (r < 0) { 804 VHOST_OPS_DEBUG("vhost_set_vring_addr failed"); 805 return -errno; 806 } 807 return 0; 808 } 809 810 static int vhost_dev_set_features(struct vhost_dev *dev, 811 bool enable_log) 812 { 813 uint64_t features = dev->acked_features; 814 int r; 815 if (enable_log) { 816 features |= 0x1ULL << VHOST_F_LOG_ALL; 817 } 818 r = dev->vhost_ops->vhost_set_features(dev, features); 819 if (r < 0) { 820 VHOST_OPS_DEBUG("vhost_set_features failed"); 821 } 822 return r < 0 ? -errno : 0; 823 } 824 825 static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log) 826 { 827 int r, i, idx; 828 r = vhost_dev_set_features(dev, enable_log); 829 if (r < 0) { 830 goto err_features; 831 } 832 for (i = 0; i < dev->nvqs; ++i) { 833 idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i); 834 r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx, 835 enable_log); 836 if (r < 0) { 837 goto err_vq; 838 } 839 } 840 return 0; 841 err_vq: 842 for (; i >= 0; --i) { 843 idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i); 844 vhost_virtqueue_set_addr(dev, dev->vqs + i, idx, 845 dev->log_enabled); 846 } 847 vhost_dev_set_features(dev, dev->log_enabled); 848 err_features: 849 return r; 850 } 851 852 static int vhost_migration_log(MemoryListener *listener, int enable) 853 { 854 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 855 memory_listener); 856 int r; 857 if (!!enable == dev->log_enabled) { 858 return 0; 859 } 860 if (!dev->started) { 861 dev->log_enabled = enable; 862 return 0; 863 } 864 if (!enable) { 865 r = vhost_dev_set_log(dev, false); 866 if (r < 0) { 867 return r; 868 } 869 vhost_log_put(dev, false); 870 } else { 871 vhost_dev_log_resize(dev, vhost_get_log_size(dev)); 872 r = vhost_dev_set_log(dev, true); 873 if (r < 0) { 874 return r; 875 } 876 } 877 dev->log_enabled = enable; 878 return 0; 879 } 880 881 static void vhost_log_global_start(MemoryListener *listener) 882 { 883 int r; 884 885 r = vhost_migration_log(listener, true); 886 if (r < 0) { 887 abort(); 888 } 889 } 890 891 static void vhost_log_global_stop(MemoryListener *listener) 892 { 893 int r; 894 895 r = vhost_migration_log(listener, false); 896 if (r < 0) { 897 abort(); 898 } 899 } 900 901 static void vhost_log_start(MemoryListener *listener, 902 MemoryRegionSection *section, 903 int old, int new) 904 { 905 /* FIXME: implement */ 906 } 907 908 static void vhost_log_stop(MemoryListener *listener, 909 MemoryRegionSection *section, 910 int old, int new) 911 { 912 /* FIXME: implement */ 913 } 914 915 /* The vhost driver natively knows how to handle the vrings of non 916 * cross-endian legacy devices and modern devices. Only legacy devices 917 * exposed to a bi-endian guest may require the vhost driver to use a 918 * specific endianness. 919 */ 920 static inline bool vhost_needs_vring_endian(VirtIODevice *vdev) 921 { 922 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { 923 return false; 924 } 925 #ifdef HOST_WORDS_BIGENDIAN 926 return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE; 927 #else 928 return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG; 929 #endif 930 } 931 932 static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev, 933 bool is_big_endian, 934 int vhost_vq_index) 935 { 936 struct vhost_vring_state s = { 937 .index = vhost_vq_index, 938 .num = is_big_endian 939 }; 940 941 if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) { 942 return 0; 943 } 944 945 VHOST_OPS_DEBUG("vhost_set_vring_endian failed"); 946 if (errno == ENOTTY) { 947 error_report("vhost does not support cross-endian"); 948 return -ENOSYS; 949 } 950 951 return -errno; 952 } 953 954 static int vhost_memory_region_lookup(struct vhost_dev *hdev, 955 uint64_t gpa, uint64_t *uaddr, 956 uint64_t *len) 957 { 958 int i; 959 960 for (i = 0; i < hdev->mem->nregions; i++) { 961 struct vhost_memory_region *reg = hdev->mem->regions + i; 962 963 if (gpa >= reg->guest_phys_addr && 964 reg->guest_phys_addr + reg->memory_size > gpa) { 965 *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr; 966 *len = reg->guest_phys_addr + reg->memory_size - gpa; 967 return 0; 968 } 969 } 970 971 return -EFAULT; 972 } 973 974 int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write) 975 { 976 IOMMUTLBEntry iotlb; 977 uint64_t uaddr, len; 978 int ret = -EFAULT; 979 980 rcu_read_lock(); 981 982 iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as, 983 iova, write); 984 if (iotlb.target_as != NULL) { 985 ret = vhost_memory_region_lookup(dev, iotlb.translated_addr, 986 &uaddr, &len); 987 if (ret) { 988 error_report("Fail to lookup the translated address " 989 "%"PRIx64, iotlb.translated_addr); 990 goto out; 991 } 992 993 len = MIN(iotlb.addr_mask + 1, len); 994 iova = iova & ~iotlb.addr_mask; 995 996 ret = vhost_backend_update_device_iotlb(dev, iova, uaddr, 997 len, iotlb.perm); 998 if (ret) { 999 error_report("Fail to update device iotlb"); 1000 goto out; 1001 } 1002 } 1003 out: 1004 rcu_read_unlock(); 1005 1006 return ret; 1007 } 1008 1009 static int vhost_virtqueue_start(struct vhost_dev *dev, 1010 struct VirtIODevice *vdev, 1011 struct vhost_virtqueue *vq, 1012 unsigned idx) 1013 { 1014 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); 1015 VirtioBusState *vbus = VIRTIO_BUS(qbus); 1016 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus); 1017 hwaddr s, l, a; 1018 int r; 1019 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx); 1020 struct vhost_vring_file file = { 1021 .index = vhost_vq_index 1022 }; 1023 struct vhost_vring_state state = { 1024 .index = vhost_vq_index 1025 }; 1026 struct VirtQueue *vvq = virtio_get_queue(vdev, idx); 1027 1028 1029 vq->num = state.num = virtio_queue_get_num(vdev, idx); 1030 r = dev->vhost_ops->vhost_set_vring_num(dev, &state); 1031 if (r) { 1032 VHOST_OPS_DEBUG("vhost_set_vring_num failed"); 1033 return -errno; 1034 } 1035 1036 state.num = virtio_queue_get_last_avail_idx(vdev, idx); 1037 r = dev->vhost_ops->vhost_set_vring_base(dev, &state); 1038 if (r) { 1039 VHOST_OPS_DEBUG("vhost_set_vring_base failed"); 1040 return -errno; 1041 } 1042 1043 if (vhost_needs_vring_endian(vdev)) { 1044 r = vhost_virtqueue_set_vring_endian_legacy(dev, 1045 virtio_is_big_endian(vdev), 1046 vhost_vq_index); 1047 if (r) { 1048 return -errno; 1049 } 1050 } 1051 1052 vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx); 1053 vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx); 1054 vq->desc = vhost_memory_map(dev, a, &l, 0); 1055 if (!vq->desc || l != s) { 1056 r = -ENOMEM; 1057 goto fail_alloc_desc; 1058 } 1059 vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx); 1060 vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx); 1061 vq->avail = vhost_memory_map(dev, a, &l, 0); 1062 if (!vq->avail || l != s) { 1063 r = -ENOMEM; 1064 goto fail_alloc_avail; 1065 } 1066 vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx); 1067 vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx); 1068 vq->used = vhost_memory_map(dev, a, &l, 1); 1069 if (!vq->used || l != s) { 1070 r = -ENOMEM; 1071 goto fail_alloc_used; 1072 } 1073 1074 r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled); 1075 if (r < 0) { 1076 r = -errno; 1077 goto fail_alloc; 1078 } 1079 1080 file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq)); 1081 r = dev->vhost_ops->vhost_set_vring_kick(dev, &file); 1082 if (r) { 1083 VHOST_OPS_DEBUG("vhost_set_vring_kick failed"); 1084 r = -errno; 1085 goto fail_kick; 1086 } 1087 1088 /* Clear and discard previous events if any. */ 1089 event_notifier_test_and_clear(&vq->masked_notifier); 1090 1091 /* Init vring in unmasked state, unless guest_notifier_mask 1092 * will do it later. 1093 */ 1094 if (!vdev->use_guest_notifier_mask) { 1095 /* TODO: check and handle errors. */ 1096 vhost_virtqueue_mask(dev, vdev, idx, false); 1097 } 1098 1099 if (k->query_guest_notifiers && 1100 k->query_guest_notifiers(qbus->parent) && 1101 virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) { 1102 file.fd = -1; 1103 r = dev->vhost_ops->vhost_set_vring_call(dev, &file); 1104 if (r) { 1105 goto fail_vector; 1106 } 1107 } 1108 1109 return 0; 1110 1111 fail_vector: 1112 fail_kick: 1113 fail_alloc: 1114 vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx), 1115 0, 0); 1116 fail_alloc_used: 1117 vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx), 1118 0, 0); 1119 fail_alloc_avail: 1120 vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx), 1121 0, 0); 1122 fail_alloc_desc: 1123 return r; 1124 } 1125 1126 static void vhost_virtqueue_stop(struct vhost_dev *dev, 1127 struct VirtIODevice *vdev, 1128 struct vhost_virtqueue *vq, 1129 unsigned idx) 1130 { 1131 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx); 1132 struct vhost_vring_state state = { 1133 .index = vhost_vq_index, 1134 }; 1135 int r; 1136 1137 r = dev->vhost_ops->vhost_get_vring_base(dev, &state); 1138 if (r < 0) { 1139 VHOST_OPS_DEBUG("vhost VQ %d ring restore failed: %d", idx, r); 1140 } else { 1141 virtio_queue_set_last_avail_idx(vdev, idx, state.num); 1142 } 1143 virtio_queue_invalidate_signalled_used(vdev, idx); 1144 virtio_queue_update_used_idx(vdev, idx); 1145 1146 /* In the cross-endian case, we need to reset the vring endianness to 1147 * native as legacy devices expect so by default. 1148 */ 1149 if (vhost_needs_vring_endian(vdev)) { 1150 vhost_virtqueue_set_vring_endian_legacy(dev, 1151 !virtio_is_big_endian(vdev), 1152 vhost_vq_index); 1153 } 1154 1155 vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx), 1156 1, virtio_queue_get_used_size(vdev, idx)); 1157 vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx), 1158 0, virtio_queue_get_avail_size(vdev, idx)); 1159 vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx), 1160 0, virtio_queue_get_desc_size(vdev, idx)); 1161 } 1162 1163 static void vhost_eventfd_add(MemoryListener *listener, 1164 MemoryRegionSection *section, 1165 bool match_data, uint64_t data, EventNotifier *e) 1166 { 1167 } 1168 1169 static void vhost_eventfd_del(MemoryListener *listener, 1170 MemoryRegionSection *section, 1171 bool match_data, uint64_t data, EventNotifier *e) 1172 { 1173 } 1174 1175 static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev, 1176 int n, uint32_t timeout) 1177 { 1178 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n); 1179 struct vhost_vring_state state = { 1180 .index = vhost_vq_index, 1181 .num = timeout, 1182 }; 1183 int r; 1184 1185 if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) { 1186 return -EINVAL; 1187 } 1188 1189 r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state); 1190 if (r) { 1191 VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed"); 1192 return r; 1193 } 1194 1195 return 0; 1196 } 1197 1198 static int vhost_virtqueue_init(struct vhost_dev *dev, 1199 struct vhost_virtqueue *vq, int n) 1200 { 1201 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n); 1202 struct vhost_vring_file file = { 1203 .index = vhost_vq_index, 1204 }; 1205 int r = event_notifier_init(&vq->masked_notifier, 0); 1206 if (r < 0) { 1207 return r; 1208 } 1209 1210 file.fd = event_notifier_get_fd(&vq->masked_notifier); 1211 r = dev->vhost_ops->vhost_set_vring_call(dev, &file); 1212 if (r) { 1213 VHOST_OPS_DEBUG("vhost_set_vring_call failed"); 1214 r = -errno; 1215 goto fail_call; 1216 } 1217 1218 vq->dev = dev; 1219 1220 return 0; 1221 fail_call: 1222 event_notifier_cleanup(&vq->masked_notifier); 1223 return r; 1224 } 1225 1226 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq) 1227 { 1228 event_notifier_cleanup(&vq->masked_notifier); 1229 } 1230 1231 int vhost_dev_init(struct vhost_dev *hdev, void *opaque, 1232 VhostBackendType backend_type, uint32_t busyloop_timeout) 1233 { 1234 uint64_t features; 1235 int i, r, n_initialized_vqs = 0; 1236 Error *local_err = NULL; 1237 1238 hdev->vdev = NULL; 1239 hdev->migration_blocker = NULL; 1240 1241 r = vhost_set_backend_type(hdev, backend_type); 1242 assert(r >= 0); 1243 1244 r = hdev->vhost_ops->vhost_backend_init(hdev, opaque); 1245 if (r < 0) { 1246 goto fail; 1247 } 1248 1249 if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) { 1250 error_report("vhost backend memory slots limit is less" 1251 " than current number of present memory slots"); 1252 r = -1; 1253 goto fail; 1254 } 1255 1256 r = hdev->vhost_ops->vhost_set_owner(hdev); 1257 if (r < 0) { 1258 VHOST_OPS_DEBUG("vhost_set_owner failed"); 1259 goto fail; 1260 } 1261 1262 r = hdev->vhost_ops->vhost_get_features(hdev, &features); 1263 if (r < 0) { 1264 VHOST_OPS_DEBUG("vhost_get_features failed"); 1265 goto fail; 1266 } 1267 1268 for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) { 1269 r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i); 1270 if (r < 0) { 1271 goto fail; 1272 } 1273 } 1274 1275 if (busyloop_timeout) { 1276 for (i = 0; i < hdev->nvqs; ++i) { 1277 r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 1278 busyloop_timeout); 1279 if (r < 0) { 1280 goto fail_busyloop; 1281 } 1282 } 1283 } 1284 1285 hdev->features = features; 1286 1287 hdev->memory_listener = (MemoryListener) { 1288 .begin = vhost_begin, 1289 .commit = vhost_commit, 1290 .region_add = vhost_region_add, 1291 .region_del = vhost_region_del, 1292 .region_nop = vhost_region_nop, 1293 .log_start = vhost_log_start, 1294 .log_stop = vhost_log_stop, 1295 .log_sync = vhost_log_sync, 1296 .log_global_start = vhost_log_global_start, 1297 .log_global_stop = vhost_log_global_stop, 1298 .eventfd_add = vhost_eventfd_add, 1299 .eventfd_del = vhost_eventfd_del, 1300 .priority = 10 1301 }; 1302 1303 hdev->iommu_listener = (MemoryListener) { 1304 .region_add = vhost_iommu_region_add, 1305 .region_del = vhost_iommu_region_del, 1306 }; 1307 1308 if (hdev->migration_blocker == NULL) { 1309 if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) { 1310 error_setg(&hdev->migration_blocker, 1311 "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature."); 1312 } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) { 1313 error_setg(&hdev->migration_blocker, 1314 "Migration disabled: failed to allocate shared memory"); 1315 } 1316 } 1317 1318 if (hdev->migration_blocker != NULL) { 1319 r = migrate_add_blocker(hdev->migration_blocker, &local_err); 1320 if (local_err) { 1321 error_report_err(local_err); 1322 error_free(hdev->migration_blocker); 1323 goto fail_busyloop; 1324 } 1325 } 1326 1327 hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions)); 1328 hdev->n_mem_sections = 0; 1329 hdev->mem_sections = NULL; 1330 hdev->log = NULL; 1331 hdev->log_size = 0; 1332 hdev->log_enabled = false; 1333 hdev->started = false; 1334 hdev->memory_changed = false; 1335 memory_listener_register(&hdev->memory_listener, &address_space_memory); 1336 QLIST_INSERT_HEAD(&vhost_devices, hdev, entry); 1337 return 0; 1338 1339 fail_busyloop: 1340 while (--i >= 0) { 1341 vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0); 1342 } 1343 fail: 1344 hdev->nvqs = n_initialized_vqs; 1345 vhost_dev_cleanup(hdev); 1346 return r; 1347 } 1348 1349 void vhost_dev_cleanup(struct vhost_dev *hdev) 1350 { 1351 int i; 1352 1353 for (i = 0; i < hdev->nvqs; ++i) { 1354 vhost_virtqueue_cleanup(hdev->vqs + i); 1355 } 1356 if (hdev->mem) { 1357 /* those are only safe after successful init */ 1358 memory_listener_unregister(&hdev->memory_listener); 1359 QLIST_REMOVE(hdev, entry); 1360 } 1361 if (hdev->migration_blocker) { 1362 migrate_del_blocker(hdev->migration_blocker); 1363 error_free(hdev->migration_blocker); 1364 } 1365 g_free(hdev->mem); 1366 g_free(hdev->mem_sections); 1367 if (hdev->vhost_ops) { 1368 hdev->vhost_ops->vhost_backend_cleanup(hdev); 1369 } 1370 assert(!hdev->log); 1371 1372 memset(hdev, 0, sizeof(struct vhost_dev)); 1373 } 1374 1375 /* Stop processing guest IO notifications in qemu. 1376 * Start processing them in vhost in kernel. 1377 */ 1378 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) 1379 { 1380 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); 1381 int i, r, e; 1382 1383 /* We will pass the notifiers to the kernel, make sure that QEMU 1384 * doesn't interfere. 1385 */ 1386 r = virtio_device_grab_ioeventfd(vdev); 1387 if (r < 0) { 1388 error_report("binding does not support host notifiers"); 1389 goto fail; 1390 } 1391 1392 for (i = 0; i < hdev->nvqs; ++i) { 1393 r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, 1394 true); 1395 if (r < 0) { 1396 error_report("vhost VQ %d notifier binding failed: %d", i, -r); 1397 goto fail_vq; 1398 } 1399 } 1400 1401 return 0; 1402 fail_vq: 1403 while (--i >= 0) { 1404 e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, 1405 false); 1406 if (e < 0) { 1407 error_report("vhost VQ %d notifier cleanup error: %d", i, -r); 1408 } 1409 assert (e >= 0); 1410 } 1411 virtio_device_release_ioeventfd(vdev); 1412 fail: 1413 return r; 1414 } 1415 1416 /* Stop processing guest IO notifications in vhost. 1417 * Start processing them in qemu. 1418 * This might actually run the qemu handlers right away, 1419 * so virtio in qemu must be completely setup when this is called. 1420 */ 1421 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) 1422 { 1423 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); 1424 int i, r; 1425 1426 for (i = 0; i < hdev->nvqs; ++i) { 1427 r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, 1428 false); 1429 if (r < 0) { 1430 error_report("vhost VQ %d notifier cleanup failed: %d", i, -r); 1431 } 1432 assert (r >= 0); 1433 } 1434 virtio_device_release_ioeventfd(vdev); 1435 } 1436 1437 /* Test and clear event pending status. 1438 * Should be called after unmask to avoid losing events. 1439 */ 1440 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n) 1441 { 1442 struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index; 1443 assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs); 1444 return event_notifier_test_and_clear(&vq->masked_notifier); 1445 } 1446 1447 /* Mask/unmask events from this vq. */ 1448 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n, 1449 bool mask) 1450 { 1451 struct VirtQueue *vvq = virtio_get_queue(vdev, n); 1452 int r, index = n - hdev->vq_index; 1453 struct vhost_vring_file file; 1454 1455 /* should only be called after backend is connected */ 1456 assert(hdev->vhost_ops); 1457 1458 if (mask) { 1459 assert(vdev->use_guest_notifier_mask); 1460 file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier); 1461 } else { 1462 file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq)); 1463 } 1464 1465 file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n); 1466 r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file); 1467 if (r < 0) { 1468 VHOST_OPS_DEBUG("vhost_set_vring_call failed"); 1469 } 1470 } 1471 1472 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits, 1473 uint64_t features) 1474 { 1475 const int *bit = feature_bits; 1476 while (*bit != VHOST_INVALID_FEATURE_BIT) { 1477 uint64_t bit_mask = (1ULL << *bit); 1478 if (!(hdev->features & bit_mask)) { 1479 features &= ~bit_mask; 1480 } 1481 bit++; 1482 } 1483 return features; 1484 } 1485 1486 void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits, 1487 uint64_t features) 1488 { 1489 const int *bit = feature_bits; 1490 while (*bit != VHOST_INVALID_FEATURE_BIT) { 1491 uint64_t bit_mask = (1ULL << *bit); 1492 if (features & bit_mask) { 1493 hdev->acked_features |= bit_mask; 1494 } 1495 bit++; 1496 } 1497 } 1498 1499 /* Host notifiers must be enabled at this point. */ 1500 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) 1501 { 1502 int i, r; 1503 1504 /* should only be called after backend is connected */ 1505 assert(hdev->vhost_ops); 1506 1507 hdev->started = true; 1508 hdev->vdev = vdev; 1509 1510 r = vhost_dev_set_features(hdev, hdev->log_enabled); 1511 if (r < 0) { 1512 goto fail_features; 1513 } 1514 1515 if (vhost_dev_has_iommu(hdev)) { 1516 memory_listener_register(&hdev->iommu_listener, vdev->dma_as); 1517 } 1518 1519 r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem); 1520 if (r < 0) { 1521 VHOST_OPS_DEBUG("vhost_set_mem_table failed"); 1522 r = -errno; 1523 goto fail_mem; 1524 } 1525 for (i = 0; i < hdev->nvqs; ++i) { 1526 r = vhost_virtqueue_start(hdev, 1527 vdev, 1528 hdev->vqs + i, 1529 hdev->vq_index + i); 1530 if (r < 0) { 1531 goto fail_vq; 1532 } 1533 } 1534 1535 if (hdev->log_enabled) { 1536 uint64_t log_base; 1537 1538 hdev->log_size = vhost_get_log_size(hdev); 1539 hdev->log = vhost_log_get(hdev->log_size, 1540 vhost_dev_log_is_shared(hdev)); 1541 log_base = (uintptr_t)hdev->log->log; 1542 r = hdev->vhost_ops->vhost_set_log_base(hdev, 1543 hdev->log_size ? log_base : 0, 1544 hdev->log); 1545 if (r < 0) { 1546 VHOST_OPS_DEBUG("vhost_set_log_base failed"); 1547 r = -errno; 1548 goto fail_log; 1549 } 1550 } 1551 1552 if (vhost_dev_has_iommu(hdev)) { 1553 hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true); 1554 1555 /* Update used ring information for IOTLB to work correctly, 1556 * vhost-kernel code requires for this.*/ 1557 for (i = 0; i < hdev->nvqs; ++i) { 1558 struct vhost_virtqueue *vq = hdev->vqs + i; 1559 vhost_device_iotlb_miss(hdev, vq->used_phys, true); 1560 } 1561 } 1562 return 0; 1563 fail_log: 1564 vhost_log_put(hdev, false); 1565 fail_vq: 1566 while (--i >= 0) { 1567 vhost_virtqueue_stop(hdev, 1568 vdev, 1569 hdev->vqs + i, 1570 hdev->vq_index + i); 1571 } 1572 i = hdev->nvqs; 1573 1574 fail_mem: 1575 fail_features: 1576 1577 hdev->started = false; 1578 return r; 1579 } 1580 1581 /* Host notifiers must be enabled at this point. */ 1582 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev) 1583 { 1584 int i; 1585 1586 /* should only be called after backend is connected */ 1587 assert(hdev->vhost_ops); 1588 1589 for (i = 0; i < hdev->nvqs; ++i) { 1590 vhost_virtqueue_stop(hdev, 1591 vdev, 1592 hdev->vqs + i, 1593 hdev->vq_index + i); 1594 } 1595 1596 if (vhost_dev_has_iommu(hdev)) { 1597 hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false); 1598 memory_listener_unregister(&hdev->iommu_listener); 1599 } 1600 vhost_log_put(hdev, true); 1601 hdev->started = false; 1602 hdev->vdev = NULL; 1603 } 1604 1605 int vhost_net_set_backend(struct vhost_dev *hdev, 1606 struct vhost_vring_file *file) 1607 { 1608 if (hdev->vhost_ops->vhost_net_set_backend) { 1609 return hdev->vhost_ops->vhost_net_set_backend(hdev, file); 1610 } 1611 1612 return -1; 1613 } 1614