1 /* 2 * vhost support 3 * 4 * Copyright Red Hat, Inc. 2010 5 * 6 * Authors: 7 * Michael S. Tsirkin <mst@redhat.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 * Contributions after 2012-01-13 are licensed under the terms of the 13 * GNU GPL, version 2 or (at your option) any later version. 14 */ 15 16 #include "qemu/osdep.h" 17 #include "qapi/error.h" 18 #include "hw/virtio/vhost.h" 19 #include "qemu/atomic.h" 20 #include "qemu/range.h" 21 #include "qemu/error-report.h" 22 #include "qemu/memfd.h" 23 #include "standard-headers/linux/vhost_types.h" 24 #include "exec/address-spaces.h" 25 #include "hw/virtio/virtio-bus.h" 26 #include "hw/virtio/virtio-access.h" 27 #include "migration/blocker.h" 28 #include "migration/qemu-file-types.h" 29 #include "sysemu/dma.h" 30 #include "sysemu/tcg.h" 31 #include "trace.h" 32 33 /* enabled until disconnected backend stabilizes */ 34 #define _VHOST_DEBUG 1 35 36 #ifdef _VHOST_DEBUG 37 #define VHOST_OPS_DEBUG(fmt, ...) \ 38 do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \ 39 strerror(errno), errno); } while (0) 40 #else 41 #define VHOST_OPS_DEBUG(fmt, ...) \ 42 do { } while (0) 43 #endif 44 45 static struct vhost_log *vhost_log; 46 static struct vhost_log *vhost_log_shm; 47 48 static unsigned int used_memslots; 49 static QLIST_HEAD(, vhost_dev) vhost_devices = 50 QLIST_HEAD_INITIALIZER(vhost_devices); 51 52 bool vhost_has_free_slot(void) 53 { 54 unsigned int slots_limit = ~0U; 55 struct vhost_dev *hdev; 56 57 QLIST_FOREACH(hdev, &vhost_devices, entry) { 58 unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev); 59 slots_limit = MIN(slots_limit, r); 60 } 61 return slots_limit > used_memslots; 62 } 63 64 static void vhost_dev_sync_region(struct vhost_dev *dev, 65 MemoryRegionSection *section, 66 uint64_t mfirst, uint64_t mlast, 67 uint64_t rfirst, uint64_t rlast) 68 { 69 vhost_log_chunk_t *log = dev->log->log; 70 71 uint64_t start = MAX(mfirst, rfirst); 72 uint64_t end = MIN(mlast, rlast); 73 vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK; 74 vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1; 75 uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK); 76 77 if (end < start) { 78 return; 79 } 80 assert(end / VHOST_LOG_CHUNK < dev->log_size); 81 assert(start / VHOST_LOG_CHUNK < dev->log_size); 82 83 for (;from < to; ++from) { 84 vhost_log_chunk_t log; 85 /* We first check with non-atomic: much cheaper, 86 * and we expect non-dirty to be the common case. */ 87 if (!*from) { 88 addr += VHOST_LOG_CHUNK; 89 continue; 90 } 91 /* Data must be read atomically. We don't really need barrier semantics 92 * but it's easier to use atomic_* than roll our own. */ 93 log = qatomic_xchg(from, 0); 94 while (log) { 95 int bit = ctzl(log); 96 hwaddr page_addr; 97 hwaddr section_offset; 98 hwaddr mr_offset; 99 page_addr = addr + bit * VHOST_LOG_PAGE; 100 section_offset = page_addr - section->offset_within_address_space; 101 mr_offset = section_offset + section->offset_within_region; 102 memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE); 103 log &= ~(0x1ull << bit); 104 } 105 addr += VHOST_LOG_CHUNK; 106 } 107 } 108 109 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev, 110 MemoryRegionSection *section, 111 hwaddr first, 112 hwaddr last) 113 { 114 int i; 115 hwaddr start_addr; 116 hwaddr end_addr; 117 118 if (!dev->log_enabled || !dev->started) { 119 return 0; 120 } 121 start_addr = section->offset_within_address_space; 122 end_addr = range_get_last(start_addr, int128_get64(section->size)); 123 start_addr = MAX(first, start_addr); 124 end_addr = MIN(last, end_addr); 125 126 for (i = 0; i < dev->mem->nregions; ++i) { 127 struct vhost_memory_region *reg = dev->mem->regions + i; 128 vhost_dev_sync_region(dev, section, start_addr, end_addr, 129 reg->guest_phys_addr, 130 range_get_last(reg->guest_phys_addr, 131 reg->memory_size)); 132 } 133 for (i = 0; i < dev->nvqs; ++i) { 134 struct vhost_virtqueue *vq = dev->vqs + i; 135 136 if (!vq->used_phys && !vq->used_size) { 137 continue; 138 } 139 140 vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys, 141 range_get_last(vq->used_phys, vq->used_size)); 142 } 143 return 0; 144 } 145 146 static void vhost_log_sync(MemoryListener *listener, 147 MemoryRegionSection *section) 148 { 149 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 150 memory_listener); 151 vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL); 152 } 153 154 static void vhost_log_sync_range(struct vhost_dev *dev, 155 hwaddr first, hwaddr last) 156 { 157 int i; 158 /* FIXME: this is N^2 in number of sections */ 159 for (i = 0; i < dev->n_mem_sections; ++i) { 160 MemoryRegionSection *section = &dev->mem_sections[i]; 161 vhost_sync_dirty_bitmap(dev, section, first, last); 162 } 163 } 164 165 static uint64_t vhost_get_log_size(struct vhost_dev *dev) 166 { 167 uint64_t log_size = 0; 168 int i; 169 for (i = 0; i < dev->mem->nregions; ++i) { 170 struct vhost_memory_region *reg = dev->mem->regions + i; 171 uint64_t last = range_get_last(reg->guest_phys_addr, 172 reg->memory_size); 173 log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1); 174 } 175 for (i = 0; i < dev->nvqs; ++i) { 176 struct vhost_virtqueue *vq = dev->vqs + i; 177 178 if (!vq->used_phys && !vq->used_size) { 179 continue; 180 } 181 182 uint64_t last = vq->used_phys + vq->used_size - 1; 183 log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1); 184 } 185 return log_size; 186 } 187 188 static struct vhost_log *vhost_log_alloc(uint64_t size, bool share) 189 { 190 Error *err = NULL; 191 struct vhost_log *log; 192 uint64_t logsize = size * sizeof(*(log->log)); 193 int fd = -1; 194 195 log = g_new0(struct vhost_log, 1); 196 if (share) { 197 log->log = qemu_memfd_alloc("vhost-log", logsize, 198 F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL, 199 &fd, &err); 200 if (err) { 201 error_report_err(err); 202 g_free(log); 203 return NULL; 204 } 205 memset(log->log, 0, logsize); 206 } else { 207 log->log = g_malloc0(logsize); 208 } 209 210 log->size = size; 211 log->refcnt = 1; 212 log->fd = fd; 213 214 return log; 215 } 216 217 static struct vhost_log *vhost_log_get(uint64_t size, bool share) 218 { 219 struct vhost_log *log = share ? vhost_log_shm : vhost_log; 220 221 if (!log || log->size != size) { 222 log = vhost_log_alloc(size, share); 223 if (share) { 224 vhost_log_shm = log; 225 } else { 226 vhost_log = log; 227 } 228 } else { 229 ++log->refcnt; 230 } 231 232 return log; 233 } 234 235 static void vhost_log_put(struct vhost_dev *dev, bool sync) 236 { 237 struct vhost_log *log = dev->log; 238 239 if (!log) { 240 return; 241 } 242 243 --log->refcnt; 244 if (log->refcnt == 0) { 245 /* Sync only the range covered by the old log */ 246 if (dev->log_size && sync) { 247 vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1); 248 } 249 250 if (vhost_log == log) { 251 g_free(log->log); 252 vhost_log = NULL; 253 } else if (vhost_log_shm == log) { 254 qemu_memfd_free(log->log, log->size * sizeof(*(log->log)), 255 log->fd); 256 vhost_log_shm = NULL; 257 } 258 259 g_free(log); 260 } 261 262 dev->log = NULL; 263 dev->log_size = 0; 264 } 265 266 static bool vhost_dev_log_is_shared(struct vhost_dev *dev) 267 { 268 return dev->vhost_ops->vhost_requires_shm_log && 269 dev->vhost_ops->vhost_requires_shm_log(dev); 270 } 271 272 static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size) 273 { 274 struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev)); 275 uint64_t log_base = (uintptr_t)log->log; 276 int r; 277 278 /* inform backend of log switching, this must be done before 279 releasing the current log, to ensure no logging is lost */ 280 r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log); 281 if (r < 0) { 282 VHOST_OPS_DEBUG("vhost_set_log_base failed"); 283 } 284 285 vhost_log_put(dev, true); 286 dev->log = log; 287 dev->log_size = size; 288 } 289 290 static int vhost_dev_has_iommu(struct vhost_dev *dev) 291 { 292 VirtIODevice *vdev = dev->vdev; 293 294 /* 295 * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support 296 * incremental memory mapping API via IOTLB API. For platform that 297 * does not have IOMMU, there's no need to enable this feature 298 * which may cause unnecessary IOTLB miss/update trnasactions. 299 */ 300 return vdev->dma_as != &address_space_memory && 301 virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); 302 } 303 304 static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr, 305 hwaddr *plen, bool is_write) 306 { 307 if (!vhost_dev_has_iommu(dev)) { 308 return cpu_physical_memory_map(addr, plen, is_write); 309 } else { 310 return (void *)(uintptr_t)addr; 311 } 312 } 313 314 static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer, 315 hwaddr len, int is_write, 316 hwaddr access_len) 317 { 318 if (!vhost_dev_has_iommu(dev)) { 319 cpu_physical_memory_unmap(buffer, len, is_write, access_len); 320 } 321 } 322 323 static int vhost_verify_ring_part_mapping(void *ring_hva, 324 uint64_t ring_gpa, 325 uint64_t ring_size, 326 void *reg_hva, 327 uint64_t reg_gpa, 328 uint64_t reg_size) 329 { 330 uint64_t hva_ring_offset; 331 uint64_t ring_last = range_get_last(ring_gpa, ring_size); 332 uint64_t reg_last = range_get_last(reg_gpa, reg_size); 333 334 if (ring_last < reg_gpa || ring_gpa > reg_last) { 335 return 0; 336 } 337 /* check that whole ring's is mapped */ 338 if (ring_last > reg_last) { 339 return -ENOMEM; 340 } 341 /* check that ring's MemoryRegion wasn't replaced */ 342 hva_ring_offset = ring_gpa - reg_gpa; 343 if (ring_hva != reg_hva + hva_ring_offset) { 344 return -EBUSY; 345 } 346 347 return 0; 348 } 349 350 static int vhost_verify_ring_mappings(struct vhost_dev *dev, 351 void *reg_hva, 352 uint64_t reg_gpa, 353 uint64_t reg_size) 354 { 355 int i, j; 356 int r = 0; 357 const char *part_name[] = { 358 "descriptor table", 359 "available ring", 360 "used ring" 361 }; 362 363 if (vhost_dev_has_iommu(dev)) { 364 return 0; 365 } 366 367 for (i = 0; i < dev->nvqs; ++i) { 368 struct vhost_virtqueue *vq = dev->vqs + i; 369 370 if (vq->desc_phys == 0) { 371 continue; 372 } 373 374 j = 0; 375 r = vhost_verify_ring_part_mapping( 376 vq->desc, vq->desc_phys, vq->desc_size, 377 reg_hva, reg_gpa, reg_size); 378 if (r) { 379 break; 380 } 381 382 j++; 383 r = vhost_verify_ring_part_mapping( 384 vq->avail, vq->avail_phys, vq->avail_size, 385 reg_hva, reg_gpa, reg_size); 386 if (r) { 387 break; 388 } 389 390 j++; 391 r = vhost_verify_ring_part_mapping( 392 vq->used, vq->used_phys, vq->used_size, 393 reg_hva, reg_gpa, reg_size); 394 if (r) { 395 break; 396 } 397 } 398 399 if (r == -ENOMEM) { 400 error_report("Unable to map %s for ring %d", part_name[j], i); 401 } else if (r == -EBUSY) { 402 error_report("%s relocated for ring %d", part_name[j], i); 403 } 404 return r; 405 } 406 407 /* 408 * vhost_section: identify sections needed for vhost access 409 * 410 * We only care about RAM sections here (where virtqueue and guest 411 * internals accessed by virtio might live). If we find one we still 412 * allow the backend to potentially filter it out of our list. 413 */ 414 static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section) 415 { 416 MemoryRegion *mr = section->mr; 417 418 if (memory_region_is_ram(mr) && !memory_region_is_rom(mr)) { 419 uint8_t dirty_mask = memory_region_get_dirty_log_mask(mr); 420 uint8_t handled_dirty; 421 422 /* 423 * Kernel based vhost doesn't handle any block which is doing 424 * dirty-tracking other than migration for which it has 425 * specific logging support. However for TCG the kernel never 426 * gets involved anyway so we can also ignore it's 427 * self-modiying code detection flags. However a vhost-user 428 * client could still confuse a TCG guest if it re-writes 429 * executable memory that has already been translated. 430 */ 431 handled_dirty = (1 << DIRTY_MEMORY_MIGRATION) | 432 (1 << DIRTY_MEMORY_CODE); 433 434 if (dirty_mask & ~handled_dirty) { 435 trace_vhost_reject_section(mr->name, 1); 436 return false; 437 } 438 439 if (dev->vhost_ops->vhost_backend_mem_section_filter && 440 !dev->vhost_ops->vhost_backend_mem_section_filter(dev, section)) { 441 trace_vhost_reject_section(mr->name, 2); 442 return false; 443 } 444 445 trace_vhost_section(mr->name); 446 return true; 447 } else { 448 trace_vhost_reject_section(mr->name, 3); 449 return false; 450 } 451 } 452 453 static void vhost_begin(MemoryListener *listener) 454 { 455 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 456 memory_listener); 457 dev->tmp_sections = NULL; 458 dev->n_tmp_sections = 0; 459 } 460 461 static void vhost_commit(MemoryListener *listener) 462 { 463 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 464 memory_listener); 465 MemoryRegionSection *old_sections; 466 int n_old_sections; 467 uint64_t log_size; 468 size_t regions_size; 469 int r; 470 int i; 471 bool changed = false; 472 473 /* Note we can be called before the device is started, but then 474 * starting the device calls set_mem_table, so we need to have 475 * built the data structures. 476 */ 477 old_sections = dev->mem_sections; 478 n_old_sections = dev->n_mem_sections; 479 dev->mem_sections = dev->tmp_sections; 480 dev->n_mem_sections = dev->n_tmp_sections; 481 482 if (dev->n_mem_sections != n_old_sections) { 483 changed = true; 484 } else { 485 /* Same size, lets check the contents */ 486 for (int i = 0; i < n_old_sections; i++) { 487 if (!MemoryRegionSection_eq(&old_sections[i], 488 &dev->mem_sections[i])) { 489 changed = true; 490 break; 491 } 492 } 493 } 494 495 trace_vhost_commit(dev->started, changed); 496 if (!changed) { 497 goto out; 498 } 499 500 /* Rebuild the regions list from the new sections list */ 501 regions_size = offsetof(struct vhost_memory, regions) + 502 dev->n_mem_sections * sizeof dev->mem->regions[0]; 503 dev->mem = g_realloc(dev->mem, regions_size); 504 dev->mem->nregions = dev->n_mem_sections; 505 used_memslots = dev->mem->nregions; 506 for (i = 0; i < dev->n_mem_sections; i++) { 507 struct vhost_memory_region *cur_vmr = dev->mem->regions + i; 508 struct MemoryRegionSection *mrs = dev->mem_sections + i; 509 510 cur_vmr->guest_phys_addr = mrs->offset_within_address_space; 511 cur_vmr->memory_size = int128_get64(mrs->size); 512 cur_vmr->userspace_addr = 513 (uintptr_t)memory_region_get_ram_ptr(mrs->mr) + 514 mrs->offset_within_region; 515 cur_vmr->flags_padding = 0; 516 } 517 518 if (!dev->started) { 519 goto out; 520 } 521 522 for (i = 0; i < dev->mem->nregions; i++) { 523 if (vhost_verify_ring_mappings(dev, 524 (void *)(uintptr_t)dev->mem->regions[i].userspace_addr, 525 dev->mem->regions[i].guest_phys_addr, 526 dev->mem->regions[i].memory_size)) { 527 error_report("Verify ring failure on region %d", i); 528 abort(); 529 } 530 } 531 532 if (!dev->log_enabled) { 533 r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem); 534 if (r < 0) { 535 VHOST_OPS_DEBUG("vhost_set_mem_table failed"); 536 } 537 goto out; 538 } 539 log_size = vhost_get_log_size(dev); 540 /* We allocate an extra 4K bytes to log, 541 * to reduce the * number of reallocations. */ 542 #define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log) 543 /* To log more, must increase log size before table update. */ 544 if (dev->log_size < log_size) { 545 vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER); 546 } 547 r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem); 548 if (r < 0) { 549 VHOST_OPS_DEBUG("vhost_set_mem_table failed"); 550 } 551 /* To log less, can only decrease log size after table update. */ 552 if (dev->log_size > log_size + VHOST_LOG_BUFFER) { 553 vhost_dev_log_resize(dev, log_size); 554 } 555 556 out: 557 /* Deref the old list of sections, this must happen _after_ the 558 * vhost_set_mem_table to ensure the client isn't still using the 559 * section we're about to unref. 560 */ 561 while (n_old_sections--) { 562 memory_region_unref(old_sections[n_old_sections].mr); 563 } 564 g_free(old_sections); 565 return; 566 } 567 568 /* Adds the section data to the tmp_section structure. 569 * It relies on the listener calling us in memory address order 570 * and for each region (via the _add and _nop methods) to 571 * join neighbours. 572 */ 573 static void vhost_region_add_section(struct vhost_dev *dev, 574 MemoryRegionSection *section) 575 { 576 bool need_add = true; 577 uint64_t mrs_size = int128_get64(section->size); 578 uint64_t mrs_gpa = section->offset_within_address_space; 579 uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) + 580 section->offset_within_region; 581 RAMBlock *mrs_rb = section->mr->ram_block; 582 583 trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size, 584 mrs_host); 585 586 if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) { 587 /* Round the section to it's page size */ 588 /* First align the start down to a page boundary */ 589 size_t mrs_page = qemu_ram_pagesize(mrs_rb); 590 uint64_t alignage = mrs_host & (mrs_page - 1); 591 if (alignage) { 592 mrs_host -= alignage; 593 mrs_size += alignage; 594 mrs_gpa -= alignage; 595 } 596 /* Now align the size up to a page boundary */ 597 alignage = mrs_size & (mrs_page - 1); 598 if (alignage) { 599 mrs_size += mrs_page - alignage; 600 } 601 trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa, 602 mrs_size, mrs_host); 603 } 604 605 if (dev->n_tmp_sections) { 606 /* Since we already have at least one section, lets see if 607 * this extends it; since we're scanning in order, we only 608 * have to look at the last one, and the FlatView that calls 609 * us shouldn't have overlaps. 610 */ 611 MemoryRegionSection *prev_sec = dev->tmp_sections + 612 (dev->n_tmp_sections - 1); 613 uint64_t prev_gpa_start = prev_sec->offset_within_address_space; 614 uint64_t prev_size = int128_get64(prev_sec->size); 615 uint64_t prev_gpa_end = range_get_last(prev_gpa_start, prev_size); 616 uint64_t prev_host_start = 617 (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) + 618 prev_sec->offset_within_region; 619 uint64_t prev_host_end = range_get_last(prev_host_start, prev_size); 620 621 if (mrs_gpa <= (prev_gpa_end + 1)) { 622 /* OK, looks like overlapping/intersecting - it's possible that 623 * the rounding to page sizes has made them overlap, but they should 624 * match up in the same RAMBlock if they do. 625 */ 626 if (mrs_gpa < prev_gpa_start) { 627 error_report("%s:Section '%s' rounded to %"PRIx64 628 " prior to previous '%s' %"PRIx64, 629 __func__, section->mr->name, mrs_gpa, 630 prev_sec->mr->name, prev_gpa_start); 631 /* A way to cleanly fail here would be better */ 632 return; 633 } 634 /* Offset from the start of the previous GPA to this GPA */ 635 size_t offset = mrs_gpa - prev_gpa_start; 636 637 if (prev_host_start + offset == mrs_host && 638 section->mr == prev_sec->mr && 639 (!dev->vhost_ops->vhost_backend_can_merge || 640 dev->vhost_ops->vhost_backend_can_merge(dev, 641 mrs_host, mrs_size, 642 prev_host_start, prev_size))) { 643 uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size); 644 need_add = false; 645 prev_sec->offset_within_address_space = 646 MIN(prev_gpa_start, mrs_gpa); 647 prev_sec->offset_within_region = 648 MIN(prev_host_start, mrs_host) - 649 (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr); 650 prev_sec->size = int128_make64(max_end - MIN(prev_host_start, 651 mrs_host)); 652 trace_vhost_region_add_section_merge(section->mr->name, 653 int128_get64(prev_sec->size), 654 prev_sec->offset_within_address_space, 655 prev_sec->offset_within_region); 656 } else { 657 /* adjoining regions are fine, but overlapping ones with 658 * different blocks/offsets shouldn't happen 659 */ 660 if (mrs_gpa != prev_gpa_end + 1) { 661 error_report("%s: Overlapping but not coherent sections " 662 "at %"PRIx64, 663 __func__, mrs_gpa); 664 return; 665 } 666 } 667 } 668 } 669 670 if (need_add) { 671 ++dev->n_tmp_sections; 672 dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections, 673 dev->n_tmp_sections); 674 dev->tmp_sections[dev->n_tmp_sections - 1] = *section; 675 /* The flatview isn't stable and we don't use it, making it NULL 676 * means we can memcmp the list. 677 */ 678 dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL; 679 memory_region_ref(section->mr); 680 } 681 } 682 683 /* Used for both add and nop callbacks */ 684 static void vhost_region_addnop(MemoryListener *listener, 685 MemoryRegionSection *section) 686 { 687 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 688 memory_listener); 689 690 if (!vhost_section(dev, section)) { 691 return; 692 } 693 vhost_region_add_section(dev, section); 694 } 695 696 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) 697 { 698 struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n); 699 struct vhost_dev *hdev = iommu->hdev; 700 hwaddr iova = iotlb->iova + iommu->iommu_offset; 701 702 if (vhost_backend_invalidate_device_iotlb(hdev, iova, 703 iotlb->addr_mask + 1)) { 704 error_report("Fail to invalidate device iotlb"); 705 } 706 } 707 708 static void vhost_iommu_region_add(MemoryListener *listener, 709 MemoryRegionSection *section) 710 { 711 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 712 iommu_listener); 713 struct vhost_iommu *iommu; 714 Int128 end; 715 int iommu_idx; 716 IOMMUMemoryRegion *iommu_mr; 717 718 if (!memory_region_is_iommu(section->mr)) { 719 return; 720 } 721 722 iommu_mr = IOMMU_MEMORY_REGION(section->mr); 723 724 iommu = g_malloc0(sizeof(*iommu)); 725 end = int128_add(int128_make64(section->offset_within_region), 726 section->size); 727 end = int128_sub(end, int128_one()); 728 iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr, 729 MEMTXATTRS_UNSPECIFIED); 730 iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify, 731 IOMMU_NOTIFIER_UNMAP, 732 section->offset_within_region, 733 int128_get64(end), 734 iommu_idx); 735 iommu->mr = section->mr; 736 iommu->iommu_offset = section->offset_within_address_space - 737 section->offset_within_region; 738 iommu->hdev = dev; 739 memory_region_register_iommu_notifier(section->mr, &iommu->n, 740 &error_fatal); 741 QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next); 742 /* TODO: can replay help performance here? */ 743 } 744 745 static void vhost_iommu_region_del(MemoryListener *listener, 746 MemoryRegionSection *section) 747 { 748 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 749 iommu_listener); 750 struct vhost_iommu *iommu; 751 752 if (!memory_region_is_iommu(section->mr)) { 753 return; 754 } 755 756 QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) { 757 if (iommu->mr == section->mr && 758 iommu->n.start == section->offset_within_region) { 759 memory_region_unregister_iommu_notifier(iommu->mr, 760 &iommu->n); 761 QLIST_REMOVE(iommu, iommu_next); 762 g_free(iommu); 763 break; 764 } 765 } 766 } 767 768 static int vhost_virtqueue_set_addr(struct vhost_dev *dev, 769 struct vhost_virtqueue *vq, 770 unsigned idx, bool enable_log) 771 { 772 struct vhost_vring_addr addr; 773 int r; 774 memset(&addr, 0, sizeof(struct vhost_vring_addr)); 775 776 if (dev->vhost_ops->vhost_vq_get_addr) { 777 r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq); 778 if (r < 0) { 779 VHOST_OPS_DEBUG("vhost_vq_get_addr failed"); 780 return -errno; 781 } 782 } else { 783 addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc; 784 addr.avail_user_addr = (uint64_t)(unsigned long)vq->avail; 785 addr.used_user_addr = (uint64_t)(unsigned long)vq->used; 786 } 787 addr.index = idx; 788 addr.log_guest_addr = vq->used_phys; 789 addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0; 790 r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr); 791 if (r < 0) { 792 VHOST_OPS_DEBUG("vhost_set_vring_addr failed"); 793 return -errno; 794 } 795 return 0; 796 } 797 798 static int vhost_dev_set_features(struct vhost_dev *dev, 799 bool enable_log) 800 { 801 uint64_t features = dev->acked_features; 802 int r; 803 if (enable_log) { 804 features |= 0x1ULL << VHOST_F_LOG_ALL; 805 } 806 if (!vhost_dev_has_iommu(dev)) { 807 features &= ~(0x1ULL << VIRTIO_F_IOMMU_PLATFORM); 808 } 809 if (dev->vhost_ops->vhost_force_iommu) { 810 if (dev->vhost_ops->vhost_force_iommu(dev) == true) { 811 features |= 0x1ULL << VIRTIO_F_IOMMU_PLATFORM; 812 } 813 } 814 r = dev->vhost_ops->vhost_set_features(dev, features); 815 if (r < 0) { 816 VHOST_OPS_DEBUG("vhost_set_features failed"); 817 goto out; 818 } 819 if (dev->vhost_ops->vhost_set_backend_cap) { 820 r = dev->vhost_ops->vhost_set_backend_cap(dev); 821 if (r < 0) { 822 VHOST_OPS_DEBUG("vhost_set_backend_cap failed"); 823 goto out; 824 } 825 } 826 827 out: 828 return r < 0 ? -errno : 0; 829 } 830 831 static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log) 832 { 833 int r, i, idx; 834 hwaddr addr; 835 836 r = vhost_dev_set_features(dev, enable_log); 837 if (r < 0) { 838 goto err_features; 839 } 840 for (i = 0; i < dev->nvqs; ++i) { 841 idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i); 842 addr = virtio_queue_get_desc_addr(dev->vdev, idx); 843 if (!addr) { 844 /* 845 * The queue might not be ready for start. If this 846 * is the case there is no reason to continue the process. 847 * The similar logic is used by the vhost_virtqueue_start() 848 * routine. 849 */ 850 continue; 851 } 852 r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx, 853 enable_log); 854 if (r < 0) { 855 goto err_vq; 856 } 857 } 858 return 0; 859 err_vq: 860 for (; i >= 0; --i) { 861 idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i); 862 vhost_virtqueue_set_addr(dev, dev->vqs + i, idx, 863 dev->log_enabled); 864 } 865 vhost_dev_set_features(dev, dev->log_enabled); 866 err_features: 867 return r; 868 } 869 870 static int vhost_migration_log(MemoryListener *listener, bool enable) 871 { 872 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 873 memory_listener); 874 int r; 875 if (enable == dev->log_enabled) { 876 return 0; 877 } 878 if (!dev->started) { 879 dev->log_enabled = enable; 880 return 0; 881 } 882 883 r = 0; 884 if (!enable) { 885 r = vhost_dev_set_log(dev, false); 886 if (r < 0) { 887 goto check_dev_state; 888 } 889 vhost_log_put(dev, false); 890 } else { 891 vhost_dev_log_resize(dev, vhost_get_log_size(dev)); 892 r = vhost_dev_set_log(dev, true); 893 if (r < 0) { 894 goto check_dev_state; 895 } 896 } 897 898 check_dev_state: 899 dev->log_enabled = enable; 900 /* 901 * vhost-user-* devices could change their state during log 902 * initialization due to disconnect. So check dev state after 903 * vhost communication. 904 */ 905 if (!dev->started) { 906 /* 907 * Since device is in the stopped state, it is okay for 908 * migration. Return success. 909 */ 910 r = 0; 911 } 912 if (r) { 913 /* An error is occured. */ 914 dev->log_enabled = false; 915 } 916 917 return r; 918 } 919 920 static void vhost_log_global_start(MemoryListener *listener) 921 { 922 int r; 923 924 r = vhost_migration_log(listener, true); 925 if (r < 0) { 926 abort(); 927 } 928 } 929 930 static void vhost_log_global_stop(MemoryListener *listener) 931 { 932 int r; 933 934 r = vhost_migration_log(listener, false); 935 if (r < 0) { 936 abort(); 937 } 938 } 939 940 static void vhost_log_start(MemoryListener *listener, 941 MemoryRegionSection *section, 942 int old, int new) 943 { 944 /* FIXME: implement */ 945 } 946 947 static void vhost_log_stop(MemoryListener *listener, 948 MemoryRegionSection *section, 949 int old, int new) 950 { 951 /* FIXME: implement */ 952 } 953 954 /* The vhost driver natively knows how to handle the vrings of non 955 * cross-endian legacy devices and modern devices. Only legacy devices 956 * exposed to a bi-endian guest may require the vhost driver to use a 957 * specific endianness. 958 */ 959 static inline bool vhost_needs_vring_endian(VirtIODevice *vdev) 960 { 961 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { 962 return false; 963 } 964 #ifdef HOST_WORDS_BIGENDIAN 965 return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE; 966 #else 967 return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG; 968 #endif 969 } 970 971 static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev, 972 bool is_big_endian, 973 int vhost_vq_index) 974 { 975 struct vhost_vring_state s = { 976 .index = vhost_vq_index, 977 .num = is_big_endian 978 }; 979 980 if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) { 981 return 0; 982 } 983 984 VHOST_OPS_DEBUG("vhost_set_vring_endian failed"); 985 if (errno == ENOTTY) { 986 error_report("vhost does not support cross-endian"); 987 return -ENOSYS; 988 } 989 990 return -errno; 991 } 992 993 static int vhost_memory_region_lookup(struct vhost_dev *hdev, 994 uint64_t gpa, uint64_t *uaddr, 995 uint64_t *len) 996 { 997 int i; 998 999 for (i = 0; i < hdev->mem->nregions; i++) { 1000 struct vhost_memory_region *reg = hdev->mem->regions + i; 1001 1002 if (gpa >= reg->guest_phys_addr && 1003 reg->guest_phys_addr + reg->memory_size > gpa) { 1004 *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr; 1005 *len = reg->guest_phys_addr + reg->memory_size - gpa; 1006 return 0; 1007 } 1008 } 1009 1010 return -EFAULT; 1011 } 1012 1013 int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write) 1014 { 1015 IOMMUTLBEntry iotlb; 1016 uint64_t uaddr, len; 1017 int ret = -EFAULT; 1018 1019 RCU_READ_LOCK_GUARD(); 1020 1021 trace_vhost_iotlb_miss(dev, 1); 1022 1023 iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as, 1024 iova, write, 1025 MEMTXATTRS_UNSPECIFIED); 1026 if (iotlb.target_as != NULL) { 1027 ret = vhost_memory_region_lookup(dev, iotlb.translated_addr, 1028 &uaddr, &len); 1029 if (ret) { 1030 trace_vhost_iotlb_miss(dev, 3); 1031 error_report("Fail to lookup the translated address " 1032 "%"PRIx64, iotlb.translated_addr); 1033 goto out; 1034 } 1035 1036 len = MIN(iotlb.addr_mask + 1, len); 1037 iova = iova & ~iotlb.addr_mask; 1038 1039 ret = vhost_backend_update_device_iotlb(dev, iova, uaddr, 1040 len, iotlb.perm); 1041 if (ret) { 1042 trace_vhost_iotlb_miss(dev, 4); 1043 error_report("Fail to update device iotlb"); 1044 goto out; 1045 } 1046 } 1047 1048 trace_vhost_iotlb_miss(dev, 2); 1049 1050 out: 1051 return ret; 1052 } 1053 1054 static int vhost_virtqueue_start(struct vhost_dev *dev, 1055 struct VirtIODevice *vdev, 1056 struct vhost_virtqueue *vq, 1057 unsigned idx) 1058 { 1059 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); 1060 VirtioBusState *vbus = VIRTIO_BUS(qbus); 1061 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus); 1062 hwaddr s, l, a; 1063 int r; 1064 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx); 1065 struct vhost_vring_file file = { 1066 .index = vhost_vq_index 1067 }; 1068 struct vhost_vring_state state = { 1069 .index = vhost_vq_index 1070 }; 1071 struct VirtQueue *vvq = virtio_get_queue(vdev, idx); 1072 1073 a = virtio_queue_get_desc_addr(vdev, idx); 1074 if (a == 0) { 1075 /* Queue might not be ready for start */ 1076 return 0; 1077 } 1078 1079 vq->num = state.num = virtio_queue_get_num(vdev, idx); 1080 r = dev->vhost_ops->vhost_set_vring_num(dev, &state); 1081 if (r) { 1082 VHOST_OPS_DEBUG("vhost_set_vring_num failed"); 1083 return -errno; 1084 } 1085 1086 state.num = virtio_queue_get_last_avail_idx(vdev, idx); 1087 r = dev->vhost_ops->vhost_set_vring_base(dev, &state); 1088 if (r) { 1089 VHOST_OPS_DEBUG("vhost_set_vring_base failed"); 1090 return -errno; 1091 } 1092 1093 if (vhost_needs_vring_endian(vdev)) { 1094 r = vhost_virtqueue_set_vring_endian_legacy(dev, 1095 virtio_is_big_endian(vdev), 1096 vhost_vq_index); 1097 if (r) { 1098 return -errno; 1099 } 1100 } 1101 1102 vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx); 1103 vq->desc_phys = a; 1104 vq->desc = vhost_memory_map(dev, a, &l, false); 1105 if (!vq->desc || l != s) { 1106 r = -ENOMEM; 1107 goto fail_alloc_desc; 1108 } 1109 vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx); 1110 vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx); 1111 vq->avail = vhost_memory_map(dev, a, &l, false); 1112 if (!vq->avail || l != s) { 1113 r = -ENOMEM; 1114 goto fail_alloc_avail; 1115 } 1116 vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx); 1117 vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx); 1118 vq->used = vhost_memory_map(dev, a, &l, true); 1119 if (!vq->used || l != s) { 1120 r = -ENOMEM; 1121 goto fail_alloc_used; 1122 } 1123 1124 r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled); 1125 if (r < 0) { 1126 r = -errno; 1127 goto fail_alloc; 1128 } 1129 1130 file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq)); 1131 r = dev->vhost_ops->vhost_set_vring_kick(dev, &file); 1132 if (r) { 1133 VHOST_OPS_DEBUG("vhost_set_vring_kick failed"); 1134 r = -errno; 1135 goto fail_kick; 1136 } 1137 1138 /* Clear and discard previous events if any. */ 1139 event_notifier_test_and_clear(&vq->masked_notifier); 1140 1141 /* Init vring in unmasked state, unless guest_notifier_mask 1142 * will do it later. 1143 */ 1144 if (!vdev->use_guest_notifier_mask) { 1145 /* TODO: check and handle errors. */ 1146 vhost_virtqueue_mask(dev, vdev, idx, false); 1147 } 1148 1149 if (k->query_guest_notifiers && 1150 k->query_guest_notifiers(qbus->parent) && 1151 virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) { 1152 file.fd = -1; 1153 r = dev->vhost_ops->vhost_set_vring_call(dev, &file); 1154 if (r) { 1155 goto fail_vector; 1156 } 1157 } 1158 1159 return 0; 1160 1161 fail_vector: 1162 fail_kick: 1163 fail_alloc: 1164 vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx), 1165 0, 0); 1166 fail_alloc_used: 1167 vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx), 1168 0, 0); 1169 fail_alloc_avail: 1170 vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx), 1171 0, 0); 1172 fail_alloc_desc: 1173 return r; 1174 } 1175 1176 static void vhost_virtqueue_stop(struct vhost_dev *dev, 1177 struct VirtIODevice *vdev, 1178 struct vhost_virtqueue *vq, 1179 unsigned idx) 1180 { 1181 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx); 1182 struct vhost_vring_state state = { 1183 .index = vhost_vq_index, 1184 }; 1185 int r; 1186 1187 if (virtio_queue_get_desc_addr(vdev, idx) == 0) { 1188 /* Don't stop the virtqueue which might have not been started */ 1189 return; 1190 } 1191 1192 r = dev->vhost_ops->vhost_get_vring_base(dev, &state); 1193 if (r < 0) { 1194 VHOST_OPS_DEBUG("vhost VQ %u ring restore failed: %d", idx, r); 1195 /* Connection to the backend is broken, so let's sync internal 1196 * last avail idx to the device used idx. 1197 */ 1198 virtio_queue_restore_last_avail_idx(vdev, idx); 1199 } else { 1200 virtio_queue_set_last_avail_idx(vdev, idx, state.num); 1201 } 1202 virtio_queue_invalidate_signalled_used(vdev, idx); 1203 virtio_queue_update_used_idx(vdev, idx); 1204 1205 /* In the cross-endian case, we need to reset the vring endianness to 1206 * native as legacy devices expect so by default. 1207 */ 1208 if (vhost_needs_vring_endian(vdev)) { 1209 vhost_virtqueue_set_vring_endian_legacy(dev, 1210 !virtio_is_big_endian(vdev), 1211 vhost_vq_index); 1212 } 1213 1214 vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx), 1215 1, virtio_queue_get_used_size(vdev, idx)); 1216 vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx), 1217 0, virtio_queue_get_avail_size(vdev, idx)); 1218 vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx), 1219 0, virtio_queue_get_desc_size(vdev, idx)); 1220 } 1221 1222 static void vhost_eventfd_add(MemoryListener *listener, 1223 MemoryRegionSection *section, 1224 bool match_data, uint64_t data, EventNotifier *e) 1225 { 1226 } 1227 1228 static void vhost_eventfd_del(MemoryListener *listener, 1229 MemoryRegionSection *section, 1230 bool match_data, uint64_t data, EventNotifier *e) 1231 { 1232 } 1233 1234 static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev, 1235 int n, uint32_t timeout) 1236 { 1237 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n); 1238 struct vhost_vring_state state = { 1239 .index = vhost_vq_index, 1240 .num = timeout, 1241 }; 1242 int r; 1243 1244 if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) { 1245 return -EINVAL; 1246 } 1247 1248 r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state); 1249 if (r) { 1250 VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed"); 1251 return r; 1252 } 1253 1254 return 0; 1255 } 1256 1257 static int vhost_virtqueue_init(struct vhost_dev *dev, 1258 struct vhost_virtqueue *vq, int n) 1259 { 1260 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n); 1261 struct vhost_vring_file file = { 1262 .index = vhost_vq_index, 1263 }; 1264 int r = event_notifier_init(&vq->masked_notifier, 0); 1265 if (r < 0) { 1266 return r; 1267 } 1268 1269 file.fd = event_notifier_get_fd(&vq->masked_notifier); 1270 r = dev->vhost_ops->vhost_set_vring_call(dev, &file); 1271 if (r) { 1272 VHOST_OPS_DEBUG("vhost_set_vring_call failed"); 1273 r = -errno; 1274 goto fail_call; 1275 } 1276 1277 vq->dev = dev; 1278 1279 return 0; 1280 fail_call: 1281 event_notifier_cleanup(&vq->masked_notifier); 1282 return r; 1283 } 1284 1285 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq) 1286 { 1287 event_notifier_cleanup(&vq->masked_notifier); 1288 } 1289 1290 int vhost_dev_init(struct vhost_dev *hdev, void *opaque, 1291 VhostBackendType backend_type, uint32_t busyloop_timeout) 1292 { 1293 uint64_t features; 1294 int i, r, n_initialized_vqs = 0; 1295 Error *local_err = NULL; 1296 1297 hdev->vdev = NULL; 1298 hdev->migration_blocker = NULL; 1299 1300 r = vhost_set_backend_type(hdev, backend_type); 1301 assert(r >= 0); 1302 1303 r = hdev->vhost_ops->vhost_backend_init(hdev, opaque); 1304 if (r < 0) { 1305 goto fail; 1306 } 1307 1308 r = hdev->vhost_ops->vhost_set_owner(hdev); 1309 if (r < 0) { 1310 VHOST_OPS_DEBUG("vhost_set_owner failed"); 1311 goto fail; 1312 } 1313 1314 r = hdev->vhost_ops->vhost_get_features(hdev, &features); 1315 if (r < 0) { 1316 VHOST_OPS_DEBUG("vhost_get_features failed"); 1317 goto fail; 1318 } 1319 1320 for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) { 1321 r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i); 1322 if (r < 0) { 1323 goto fail; 1324 } 1325 } 1326 1327 if (busyloop_timeout) { 1328 for (i = 0; i < hdev->nvqs; ++i) { 1329 r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 1330 busyloop_timeout); 1331 if (r < 0) { 1332 goto fail_busyloop; 1333 } 1334 } 1335 } 1336 1337 hdev->features = features; 1338 1339 hdev->memory_listener = (MemoryListener) { 1340 .begin = vhost_begin, 1341 .commit = vhost_commit, 1342 .region_add = vhost_region_addnop, 1343 .region_nop = vhost_region_addnop, 1344 .log_start = vhost_log_start, 1345 .log_stop = vhost_log_stop, 1346 .log_sync = vhost_log_sync, 1347 .log_global_start = vhost_log_global_start, 1348 .log_global_stop = vhost_log_global_stop, 1349 .eventfd_add = vhost_eventfd_add, 1350 .eventfd_del = vhost_eventfd_del, 1351 .priority = 10 1352 }; 1353 1354 hdev->iommu_listener = (MemoryListener) { 1355 .region_add = vhost_iommu_region_add, 1356 .region_del = vhost_iommu_region_del, 1357 }; 1358 1359 if (hdev->migration_blocker == NULL) { 1360 if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) { 1361 error_setg(&hdev->migration_blocker, 1362 "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature."); 1363 } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) { 1364 error_setg(&hdev->migration_blocker, 1365 "Migration disabled: failed to allocate shared memory"); 1366 } 1367 } 1368 1369 if (hdev->migration_blocker != NULL) { 1370 r = migrate_add_blocker(hdev->migration_blocker, &local_err); 1371 if (local_err) { 1372 error_report_err(local_err); 1373 error_free(hdev->migration_blocker); 1374 goto fail_busyloop; 1375 } 1376 } 1377 1378 hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions)); 1379 hdev->n_mem_sections = 0; 1380 hdev->mem_sections = NULL; 1381 hdev->log = NULL; 1382 hdev->log_size = 0; 1383 hdev->log_enabled = false; 1384 hdev->started = false; 1385 memory_listener_register(&hdev->memory_listener, &address_space_memory); 1386 QLIST_INSERT_HEAD(&vhost_devices, hdev, entry); 1387 1388 if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) { 1389 error_report("vhost backend memory slots limit is less" 1390 " than current number of present memory slots"); 1391 r = -1; 1392 if (busyloop_timeout) { 1393 goto fail_busyloop; 1394 } else { 1395 goto fail; 1396 } 1397 } 1398 1399 return 0; 1400 1401 fail_busyloop: 1402 while (--i >= 0) { 1403 vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0); 1404 } 1405 fail: 1406 hdev->nvqs = n_initialized_vqs; 1407 vhost_dev_cleanup(hdev); 1408 return r; 1409 } 1410 1411 void vhost_dev_cleanup(struct vhost_dev *hdev) 1412 { 1413 int i; 1414 1415 for (i = 0; i < hdev->nvqs; ++i) { 1416 vhost_virtqueue_cleanup(hdev->vqs + i); 1417 } 1418 if (hdev->mem) { 1419 /* those are only safe after successful init */ 1420 memory_listener_unregister(&hdev->memory_listener); 1421 QLIST_REMOVE(hdev, entry); 1422 } 1423 if (hdev->migration_blocker) { 1424 migrate_del_blocker(hdev->migration_blocker); 1425 error_free(hdev->migration_blocker); 1426 } 1427 g_free(hdev->mem); 1428 g_free(hdev->mem_sections); 1429 if (hdev->vhost_ops) { 1430 hdev->vhost_ops->vhost_backend_cleanup(hdev); 1431 } 1432 assert(!hdev->log); 1433 1434 memset(hdev, 0, sizeof(struct vhost_dev)); 1435 } 1436 1437 /* Stop processing guest IO notifications in qemu. 1438 * Start processing them in vhost in kernel. 1439 */ 1440 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) 1441 { 1442 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); 1443 int i, r, e; 1444 1445 /* We will pass the notifiers to the kernel, make sure that QEMU 1446 * doesn't interfere. 1447 */ 1448 r = virtio_device_grab_ioeventfd(vdev); 1449 if (r < 0) { 1450 error_report("binding does not support host notifiers"); 1451 goto fail; 1452 } 1453 1454 for (i = 0; i < hdev->nvqs; ++i) { 1455 r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, 1456 true); 1457 if (r < 0) { 1458 error_report("vhost VQ %d notifier binding failed: %d", i, -r); 1459 goto fail_vq; 1460 } 1461 } 1462 1463 return 0; 1464 fail_vq: 1465 while (--i >= 0) { 1466 e = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, 1467 false); 1468 if (e < 0) { 1469 error_report("vhost VQ %d notifier cleanup error: %d", i, -r); 1470 } 1471 assert (e >= 0); 1472 virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i); 1473 } 1474 virtio_device_release_ioeventfd(vdev); 1475 fail: 1476 return r; 1477 } 1478 1479 /* Stop processing guest IO notifications in vhost. 1480 * Start processing them in qemu. 1481 * This might actually run the qemu handlers right away, 1482 * so virtio in qemu must be completely setup when this is called. 1483 */ 1484 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) 1485 { 1486 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); 1487 int i, r; 1488 1489 for (i = 0; i < hdev->nvqs; ++i) { 1490 r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, 1491 false); 1492 if (r < 0) { 1493 error_report("vhost VQ %d notifier cleanup failed: %d", i, -r); 1494 } 1495 assert (r >= 0); 1496 virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i); 1497 } 1498 virtio_device_release_ioeventfd(vdev); 1499 } 1500 1501 /* Test and clear event pending status. 1502 * Should be called after unmask to avoid losing events. 1503 */ 1504 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n) 1505 { 1506 struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index; 1507 assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs); 1508 return event_notifier_test_and_clear(&vq->masked_notifier); 1509 } 1510 1511 /* Mask/unmask events from this vq. */ 1512 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n, 1513 bool mask) 1514 { 1515 struct VirtQueue *vvq = virtio_get_queue(vdev, n); 1516 int r, index = n - hdev->vq_index; 1517 struct vhost_vring_file file; 1518 1519 /* should only be called after backend is connected */ 1520 assert(hdev->vhost_ops); 1521 1522 if (mask) { 1523 assert(vdev->use_guest_notifier_mask); 1524 file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier); 1525 } else { 1526 file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq)); 1527 } 1528 1529 file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n); 1530 r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file); 1531 if (r < 0) { 1532 VHOST_OPS_DEBUG("vhost_set_vring_call failed"); 1533 } 1534 } 1535 1536 uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits, 1537 uint64_t features) 1538 { 1539 const int *bit = feature_bits; 1540 while (*bit != VHOST_INVALID_FEATURE_BIT) { 1541 uint64_t bit_mask = (1ULL << *bit); 1542 if (!(hdev->features & bit_mask)) { 1543 features &= ~bit_mask; 1544 } 1545 bit++; 1546 } 1547 return features; 1548 } 1549 1550 void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits, 1551 uint64_t features) 1552 { 1553 const int *bit = feature_bits; 1554 while (*bit != VHOST_INVALID_FEATURE_BIT) { 1555 uint64_t bit_mask = (1ULL << *bit); 1556 if (features & bit_mask) { 1557 hdev->acked_features |= bit_mask; 1558 } 1559 bit++; 1560 } 1561 } 1562 1563 int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config, 1564 uint32_t config_len) 1565 { 1566 assert(hdev->vhost_ops); 1567 1568 if (hdev->vhost_ops->vhost_get_config) { 1569 return hdev->vhost_ops->vhost_get_config(hdev, config, config_len); 1570 } 1571 1572 return -1; 1573 } 1574 1575 int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data, 1576 uint32_t offset, uint32_t size, uint32_t flags) 1577 { 1578 assert(hdev->vhost_ops); 1579 1580 if (hdev->vhost_ops->vhost_set_config) { 1581 return hdev->vhost_ops->vhost_set_config(hdev, data, offset, 1582 size, flags); 1583 } 1584 1585 return -1; 1586 } 1587 1588 void vhost_dev_set_config_notifier(struct vhost_dev *hdev, 1589 const VhostDevConfigOps *ops) 1590 { 1591 hdev->config_ops = ops; 1592 } 1593 1594 void vhost_dev_free_inflight(struct vhost_inflight *inflight) 1595 { 1596 if (inflight && inflight->addr) { 1597 qemu_memfd_free(inflight->addr, inflight->size, inflight->fd); 1598 inflight->addr = NULL; 1599 inflight->fd = -1; 1600 } 1601 } 1602 1603 static int vhost_dev_resize_inflight(struct vhost_inflight *inflight, 1604 uint64_t new_size) 1605 { 1606 Error *err = NULL; 1607 int fd = -1; 1608 void *addr = qemu_memfd_alloc("vhost-inflight", new_size, 1609 F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL, 1610 &fd, &err); 1611 1612 if (err) { 1613 error_report_err(err); 1614 return -1; 1615 } 1616 1617 vhost_dev_free_inflight(inflight); 1618 inflight->offset = 0; 1619 inflight->addr = addr; 1620 inflight->fd = fd; 1621 inflight->size = new_size; 1622 1623 return 0; 1624 } 1625 1626 void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f) 1627 { 1628 if (inflight->addr) { 1629 qemu_put_be64(f, inflight->size); 1630 qemu_put_be16(f, inflight->queue_size); 1631 qemu_put_buffer(f, inflight->addr, inflight->size); 1632 } else { 1633 qemu_put_be64(f, 0); 1634 } 1635 } 1636 1637 int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f) 1638 { 1639 uint64_t size; 1640 1641 size = qemu_get_be64(f); 1642 if (!size) { 1643 return 0; 1644 } 1645 1646 if (inflight->size != size) { 1647 if (vhost_dev_resize_inflight(inflight, size)) { 1648 return -1; 1649 } 1650 } 1651 inflight->queue_size = qemu_get_be16(f); 1652 1653 qemu_get_buffer(f, inflight->addr, size); 1654 1655 return 0; 1656 } 1657 1658 int vhost_dev_set_inflight(struct vhost_dev *dev, 1659 struct vhost_inflight *inflight) 1660 { 1661 int r; 1662 1663 if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) { 1664 r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight); 1665 if (r) { 1666 VHOST_OPS_DEBUG("vhost_set_inflight_fd failed"); 1667 return -errno; 1668 } 1669 } 1670 1671 return 0; 1672 } 1673 1674 int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size, 1675 struct vhost_inflight *inflight) 1676 { 1677 int r; 1678 1679 if (dev->vhost_ops->vhost_get_inflight_fd) { 1680 r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight); 1681 if (r) { 1682 VHOST_OPS_DEBUG("vhost_get_inflight_fd failed"); 1683 return -errno; 1684 } 1685 } 1686 1687 return 0; 1688 } 1689 1690 /* Host notifiers must be enabled at this point. */ 1691 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) 1692 { 1693 int i, r; 1694 1695 /* should only be called after backend is connected */ 1696 assert(hdev->vhost_ops); 1697 1698 hdev->started = true; 1699 hdev->vdev = vdev; 1700 1701 r = vhost_dev_set_features(hdev, hdev->log_enabled); 1702 if (r < 0) { 1703 goto fail_features; 1704 } 1705 1706 if (vhost_dev_has_iommu(hdev)) { 1707 memory_listener_register(&hdev->iommu_listener, vdev->dma_as); 1708 } 1709 1710 r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem); 1711 if (r < 0) { 1712 VHOST_OPS_DEBUG("vhost_set_mem_table failed"); 1713 r = -errno; 1714 goto fail_mem; 1715 } 1716 for (i = 0; i < hdev->nvqs; ++i) { 1717 r = vhost_virtqueue_start(hdev, 1718 vdev, 1719 hdev->vqs + i, 1720 hdev->vq_index + i); 1721 if (r < 0) { 1722 goto fail_vq; 1723 } 1724 } 1725 1726 if (hdev->log_enabled) { 1727 uint64_t log_base; 1728 1729 hdev->log_size = vhost_get_log_size(hdev); 1730 hdev->log = vhost_log_get(hdev->log_size, 1731 vhost_dev_log_is_shared(hdev)); 1732 log_base = (uintptr_t)hdev->log->log; 1733 r = hdev->vhost_ops->vhost_set_log_base(hdev, 1734 hdev->log_size ? log_base : 0, 1735 hdev->log); 1736 if (r < 0) { 1737 VHOST_OPS_DEBUG("vhost_set_log_base failed"); 1738 r = -errno; 1739 goto fail_log; 1740 } 1741 } 1742 if (hdev->vhost_ops->vhost_dev_start) { 1743 r = hdev->vhost_ops->vhost_dev_start(hdev, true); 1744 if (r) { 1745 goto fail_log; 1746 } 1747 } 1748 if (vhost_dev_has_iommu(hdev) && 1749 hdev->vhost_ops->vhost_set_iotlb_callback) { 1750 hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true); 1751 1752 /* Update used ring information for IOTLB to work correctly, 1753 * vhost-kernel code requires for this.*/ 1754 for (i = 0; i < hdev->nvqs; ++i) { 1755 struct vhost_virtqueue *vq = hdev->vqs + i; 1756 vhost_device_iotlb_miss(hdev, vq->used_phys, true); 1757 } 1758 } 1759 return 0; 1760 fail_log: 1761 vhost_log_put(hdev, false); 1762 fail_vq: 1763 while (--i >= 0) { 1764 vhost_virtqueue_stop(hdev, 1765 vdev, 1766 hdev->vqs + i, 1767 hdev->vq_index + i); 1768 } 1769 1770 fail_mem: 1771 fail_features: 1772 1773 hdev->started = false; 1774 return r; 1775 } 1776 1777 /* Host notifiers must be enabled at this point. */ 1778 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev) 1779 { 1780 int i; 1781 1782 /* should only be called after backend is connected */ 1783 assert(hdev->vhost_ops); 1784 1785 if (hdev->vhost_ops->vhost_dev_start) { 1786 hdev->vhost_ops->vhost_dev_start(hdev, false); 1787 } 1788 for (i = 0; i < hdev->nvqs; ++i) { 1789 vhost_virtqueue_stop(hdev, 1790 vdev, 1791 hdev->vqs + i, 1792 hdev->vq_index + i); 1793 } 1794 1795 if (vhost_dev_has_iommu(hdev)) { 1796 if (hdev->vhost_ops->vhost_set_iotlb_callback) { 1797 hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false); 1798 } 1799 memory_listener_unregister(&hdev->iommu_listener); 1800 } 1801 vhost_log_put(hdev, true); 1802 hdev->started = false; 1803 hdev->vdev = NULL; 1804 } 1805 1806 int vhost_net_set_backend(struct vhost_dev *hdev, 1807 struct vhost_vring_file *file) 1808 { 1809 if (hdev->vhost_ops->vhost_net_set_backend) { 1810 return hdev->vhost_ops->vhost_net_set_backend(hdev, file); 1811 } 1812 1813 return -1; 1814 } 1815