1 /* 2 * vhost support 3 * 4 * Copyright Red Hat, Inc. 2010 5 * 6 * Authors: 7 * Michael S. Tsirkin <mst@redhat.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 * Contributions after 2012-01-13 are licensed under the terms of the 13 * GNU GPL, version 2 or (at your option) any later version. 14 */ 15 16 #include <sys/ioctl.h> 17 #include "hw/virtio/vhost.h" 18 #include "hw/hw.h" 19 #include "qemu/atomic.h" 20 #include "qemu/range.h" 21 #include <linux/vhost.h> 22 #include "exec/address-spaces.h" 23 #include "hw/virtio/virtio-bus.h" 24 25 static void vhost_dev_sync_region(struct vhost_dev *dev, 26 MemoryRegionSection *section, 27 uint64_t mfirst, uint64_t mlast, 28 uint64_t rfirst, uint64_t rlast) 29 { 30 uint64_t start = MAX(mfirst, rfirst); 31 uint64_t end = MIN(mlast, rlast); 32 vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK; 33 vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1; 34 uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK; 35 36 if (end < start) { 37 return; 38 } 39 assert(end / VHOST_LOG_CHUNK < dev->log_size); 40 assert(start / VHOST_LOG_CHUNK < dev->log_size); 41 42 for (;from < to; ++from) { 43 vhost_log_chunk_t log; 44 int bit; 45 /* We first check with non-atomic: much cheaper, 46 * and we expect non-dirty to be the common case. */ 47 if (!*from) { 48 addr += VHOST_LOG_CHUNK; 49 continue; 50 } 51 /* Data must be read atomically. We don't really need barrier semantics 52 * but it's easier to use atomic_* than roll our own. */ 53 log = atomic_xchg(from, 0); 54 while ((bit = sizeof(log) > sizeof(int) ? 55 ffsll(log) : ffs(log))) { 56 hwaddr page_addr; 57 hwaddr section_offset; 58 hwaddr mr_offset; 59 bit -= 1; 60 page_addr = addr + bit * VHOST_LOG_PAGE; 61 section_offset = page_addr - section->offset_within_address_space; 62 mr_offset = section_offset + section->offset_within_region; 63 memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE); 64 log &= ~(0x1ull << bit); 65 } 66 addr += VHOST_LOG_CHUNK; 67 } 68 } 69 70 static int vhost_sync_dirty_bitmap(struct vhost_dev *dev, 71 MemoryRegionSection *section, 72 hwaddr first, 73 hwaddr last) 74 { 75 int i; 76 hwaddr start_addr; 77 hwaddr end_addr; 78 79 if (!dev->log_enabled || !dev->started) { 80 return 0; 81 } 82 start_addr = section->offset_within_address_space; 83 end_addr = range_get_last(start_addr, int128_get64(section->size)); 84 start_addr = MAX(first, start_addr); 85 end_addr = MIN(last, end_addr); 86 87 for (i = 0; i < dev->mem->nregions; ++i) { 88 struct vhost_memory_region *reg = dev->mem->regions + i; 89 vhost_dev_sync_region(dev, section, start_addr, end_addr, 90 reg->guest_phys_addr, 91 range_get_last(reg->guest_phys_addr, 92 reg->memory_size)); 93 } 94 for (i = 0; i < dev->nvqs; ++i) { 95 struct vhost_virtqueue *vq = dev->vqs + i; 96 vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys, 97 range_get_last(vq->used_phys, vq->used_size)); 98 } 99 return 0; 100 } 101 102 static void vhost_log_sync(MemoryListener *listener, 103 MemoryRegionSection *section) 104 { 105 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 106 memory_listener); 107 vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL); 108 } 109 110 static void vhost_log_sync_range(struct vhost_dev *dev, 111 hwaddr first, hwaddr last) 112 { 113 int i; 114 /* FIXME: this is N^2 in number of sections */ 115 for (i = 0; i < dev->n_mem_sections; ++i) { 116 MemoryRegionSection *section = &dev->mem_sections[i]; 117 vhost_sync_dirty_bitmap(dev, section, first, last); 118 } 119 } 120 121 /* Assign/unassign. Keep an unsorted array of non-overlapping 122 * memory regions in dev->mem. */ 123 static void vhost_dev_unassign_memory(struct vhost_dev *dev, 124 uint64_t start_addr, 125 uint64_t size) 126 { 127 int from, to, n = dev->mem->nregions; 128 /* Track overlapping/split regions for sanity checking. */ 129 int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0; 130 131 for (from = 0, to = 0; from < n; ++from, ++to) { 132 struct vhost_memory_region *reg = dev->mem->regions + to; 133 uint64_t reglast; 134 uint64_t memlast; 135 uint64_t change; 136 137 /* clone old region */ 138 if (to != from) { 139 memcpy(reg, dev->mem->regions + from, sizeof *reg); 140 } 141 142 /* No overlap is simple */ 143 if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size, 144 start_addr, size)) { 145 continue; 146 } 147 148 /* Split only happens if supplied region 149 * is in the middle of an existing one. Thus it can not 150 * overlap with any other existing region. */ 151 assert(!split); 152 153 reglast = range_get_last(reg->guest_phys_addr, reg->memory_size); 154 memlast = range_get_last(start_addr, size); 155 156 /* Remove whole region */ 157 if (start_addr <= reg->guest_phys_addr && memlast >= reglast) { 158 --dev->mem->nregions; 159 --to; 160 ++overlap_middle; 161 continue; 162 } 163 164 /* Shrink region */ 165 if (memlast >= reglast) { 166 reg->memory_size = start_addr - reg->guest_phys_addr; 167 assert(reg->memory_size); 168 assert(!overlap_end); 169 ++overlap_end; 170 continue; 171 } 172 173 /* Shift region */ 174 if (start_addr <= reg->guest_phys_addr) { 175 change = memlast + 1 - reg->guest_phys_addr; 176 reg->memory_size -= change; 177 reg->guest_phys_addr += change; 178 reg->userspace_addr += change; 179 assert(reg->memory_size); 180 assert(!overlap_start); 181 ++overlap_start; 182 continue; 183 } 184 185 /* This only happens if supplied region 186 * is in the middle of an existing one. Thus it can not 187 * overlap with any other existing region. */ 188 assert(!overlap_start); 189 assert(!overlap_end); 190 assert(!overlap_middle); 191 /* Split region: shrink first part, shift second part. */ 192 memcpy(dev->mem->regions + n, reg, sizeof *reg); 193 reg->memory_size = start_addr - reg->guest_phys_addr; 194 assert(reg->memory_size); 195 change = memlast + 1 - reg->guest_phys_addr; 196 reg = dev->mem->regions + n; 197 reg->memory_size -= change; 198 assert(reg->memory_size); 199 reg->guest_phys_addr += change; 200 reg->userspace_addr += change; 201 /* Never add more than 1 region */ 202 assert(dev->mem->nregions == n); 203 ++dev->mem->nregions; 204 ++split; 205 } 206 } 207 208 /* Called after unassign, so no regions overlap the given range. */ 209 static void vhost_dev_assign_memory(struct vhost_dev *dev, 210 uint64_t start_addr, 211 uint64_t size, 212 uint64_t uaddr) 213 { 214 int from, to; 215 struct vhost_memory_region *merged = NULL; 216 for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) { 217 struct vhost_memory_region *reg = dev->mem->regions + to; 218 uint64_t prlast, urlast; 219 uint64_t pmlast, umlast; 220 uint64_t s, e, u; 221 222 /* clone old region */ 223 if (to != from) { 224 memcpy(reg, dev->mem->regions + from, sizeof *reg); 225 } 226 prlast = range_get_last(reg->guest_phys_addr, reg->memory_size); 227 pmlast = range_get_last(start_addr, size); 228 urlast = range_get_last(reg->userspace_addr, reg->memory_size); 229 umlast = range_get_last(uaddr, size); 230 231 /* check for overlapping regions: should never happen. */ 232 assert(prlast < start_addr || pmlast < reg->guest_phys_addr); 233 /* Not an adjacent or overlapping region - do not merge. */ 234 if ((prlast + 1 != start_addr || urlast + 1 != uaddr) && 235 (pmlast + 1 != reg->guest_phys_addr || 236 umlast + 1 != reg->userspace_addr)) { 237 continue; 238 } 239 240 if (merged) { 241 --to; 242 assert(to >= 0); 243 } else { 244 merged = reg; 245 } 246 u = MIN(uaddr, reg->userspace_addr); 247 s = MIN(start_addr, reg->guest_phys_addr); 248 e = MAX(pmlast, prlast); 249 uaddr = merged->userspace_addr = u; 250 start_addr = merged->guest_phys_addr = s; 251 size = merged->memory_size = e - s + 1; 252 assert(merged->memory_size); 253 } 254 255 if (!merged) { 256 struct vhost_memory_region *reg = dev->mem->regions + to; 257 memset(reg, 0, sizeof *reg); 258 reg->memory_size = size; 259 assert(reg->memory_size); 260 reg->guest_phys_addr = start_addr; 261 reg->userspace_addr = uaddr; 262 ++to; 263 } 264 assert(to <= dev->mem->nregions + 1); 265 dev->mem->nregions = to; 266 } 267 268 static uint64_t vhost_get_log_size(struct vhost_dev *dev) 269 { 270 uint64_t log_size = 0; 271 int i; 272 for (i = 0; i < dev->mem->nregions; ++i) { 273 struct vhost_memory_region *reg = dev->mem->regions + i; 274 uint64_t last = range_get_last(reg->guest_phys_addr, 275 reg->memory_size); 276 log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1); 277 } 278 for (i = 0; i < dev->nvqs; ++i) { 279 struct vhost_virtqueue *vq = dev->vqs + i; 280 uint64_t last = vq->used_phys + vq->used_size - 1; 281 log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1); 282 } 283 return log_size; 284 } 285 286 static inline void vhost_dev_log_resize(struct vhost_dev* dev, uint64_t size) 287 { 288 vhost_log_chunk_t *log; 289 uint64_t log_base; 290 int r; 291 292 log = g_malloc0(size * sizeof *log); 293 log_base = (uint64_t)(unsigned long)log; 294 r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base); 295 assert(r >= 0); 296 /* Sync only the range covered by the old log */ 297 if (dev->log_size) { 298 vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1); 299 } 300 if (dev->log) { 301 g_free(dev->log); 302 } 303 dev->log = log; 304 dev->log_size = size; 305 } 306 307 static int vhost_verify_ring_mappings(struct vhost_dev *dev, 308 uint64_t start_addr, 309 uint64_t size) 310 { 311 int i; 312 for (i = 0; i < dev->nvqs; ++i) { 313 struct vhost_virtqueue *vq = dev->vqs + i; 314 hwaddr l; 315 void *p; 316 317 if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) { 318 continue; 319 } 320 l = vq->ring_size; 321 p = cpu_physical_memory_map(vq->ring_phys, &l, 1); 322 if (!p || l != vq->ring_size) { 323 fprintf(stderr, "Unable to map ring buffer for ring %d\n", i); 324 return -ENOMEM; 325 } 326 if (p != vq->ring) { 327 fprintf(stderr, "Ring buffer relocated for ring %d\n", i); 328 return -EBUSY; 329 } 330 cpu_physical_memory_unmap(p, l, 0, 0); 331 } 332 return 0; 333 } 334 335 static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev, 336 uint64_t start_addr, 337 uint64_t size) 338 { 339 int i, n = dev->mem->nregions; 340 for (i = 0; i < n; ++i) { 341 struct vhost_memory_region *reg = dev->mem->regions + i; 342 if (ranges_overlap(reg->guest_phys_addr, reg->memory_size, 343 start_addr, size)) { 344 return reg; 345 } 346 } 347 return NULL; 348 } 349 350 static bool vhost_dev_cmp_memory(struct vhost_dev *dev, 351 uint64_t start_addr, 352 uint64_t size, 353 uint64_t uaddr) 354 { 355 struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size); 356 uint64_t reglast; 357 uint64_t memlast; 358 359 if (!reg) { 360 return true; 361 } 362 363 reglast = range_get_last(reg->guest_phys_addr, reg->memory_size); 364 memlast = range_get_last(start_addr, size); 365 366 /* Need to extend region? */ 367 if (start_addr < reg->guest_phys_addr || memlast > reglast) { 368 return true; 369 } 370 /* userspace_addr changed? */ 371 return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr; 372 } 373 374 static void vhost_set_memory(MemoryListener *listener, 375 MemoryRegionSection *section, 376 bool add) 377 { 378 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 379 memory_listener); 380 hwaddr start_addr = section->offset_within_address_space; 381 ram_addr_t size = int128_get64(section->size); 382 bool log_dirty = memory_region_is_logging(section->mr); 383 int s = offsetof(struct vhost_memory, regions) + 384 (dev->mem->nregions + 1) * sizeof dev->mem->regions[0]; 385 void *ram; 386 387 dev->mem = g_realloc(dev->mem, s); 388 389 if (log_dirty) { 390 add = false; 391 } 392 393 assert(size); 394 395 /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */ 396 ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region; 397 if (add) { 398 if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) { 399 /* Region exists with same address. Nothing to do. */ 400 return; 401 } 402 } else { 403 if (!vhost_dev_find_reg(dev, start_addr, size)) { 404 /* Removing region that we don't access. Nothing to do. */ 405 return; 406 } 407 } 408 409 vhost_dev_unassign_memory(dev, start_addr, size); 410 if (add) { 411 /* Add given mapping, merging adjacent regions if any */ 412 vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram); 413 } else { 414 /* Remove old mapping for this memory, if any. */ 415 vhost_dev_unassign_memory(dev, start_addr, size); 416 } 417 dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr); 418 dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1); 419 dev->memory_changed = true; 420 } 421 422 static bool vhost_section(MemoryRegionSection *section) 423 { 424 return memory_region_is_ram(section->mr); 425 } 426 427 static void vhost_begin(MemoryListener *listener) 428 { 429 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 430 memory_listener); 431 dev->mem_changed_end_addr = 0; 432 dev->mem_changed_start_addr = -1; 433 } 434 435 static void vhost_commit(MemoryListener *listener) 436 { 437 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 438 memory_listener); 439 hwaddr start_addr = 0; 440 ram_addr_t size = 0; 441 uint64_t log_size; 442 int r; 443 444 if (!dev->memory_changed) { 445 return; 446 } 447 if (!dev->started) { 448 return; 449 } 450 if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) { 451 return; 452 } 453 454 if (dev->started) { 455 start_addr = dev->mem_changed_start_addr; 456 size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1; 457 458 r = vhost_verify_ring_mappings(dev, start_addr, size); 459 assert(r >= 0); 460 } 461 462 if (!dev->log_enabled) { 463 r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem); 464 assert(r >= 0); 465 dev->memory_changed = false; 466 return; 467 } 468 log_size = vhost_get_log_size(dev); 469 /* We allocate an extra 4K bytes to log, 470 * to reduce the * number of reallocations. */ 471 #define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log) 472 /* To log more, must increase log size before table update. */ 473 if (dev->log_size < log_size) { 474 vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER); 475 } 476 r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem); 477 assert(r >= 0); 478 /* To log less, can only decrease log size after table update. */ 479 if (dev->log_size > log_size + VHOST_LOG_BUFFER) { 480 vhost_dev_log_resize(dev, log_size); 481 } 482 dev->memory_changed = false; 483 } 484 485 static void vhost_region_add(MemoryListener *listener, 486 MemoryRegionSection *section) 487 { 488 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 489 memory_listener); 490 491 if (!vhost_section(section)) { 492 return; 493 } 494 495 ++dev->n_mem_sections; 496 dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections, 497 dev->n_mem_sections); 498 dev->mem_sections[dev->n_mem_sections - 1] = *section; 499 memory_region_ref(section->mr); 500 vhost_set_memory(listener, section, true); 501 } 502 503 static void vhost_region_del(MemoryListener *listener, 504 MemoryRegionSection *section) 505 { 506 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 507 memory_listener); 508 int i; 509 510 if (!vhost_section(section)) { 511 return; 512 } 513 514 vhost_set_memory(listener, section, false); 515 memory_region_unref(section->mr); 516 for (i = 0; i < dev->n_mem_sections; ++i) { 517 if (dev->mem_sections[i].offset_within_address_space 518 == section->offset_within_address_space) { 519 --dev->n_mem_sections; 520 memmove(&dev->mem_sections[i], &dev->mem_sections[i+1], 521 (dev->n_mem_sections - i) * sizeof(*dev->mem_sections)); 522 break; 523 } 524 } 525 } 526 527 static void vhost_region_nop(MemoryListener *listener, 528 MemoryRegionSection *section) 529 { 530 } 531 532 static int vhost_virtqueue_set_addr(struct vhost_dev *dev, 533 struct vhost_virtqueue *vq, 534 unsigned idx, bool enable_log) 535 { 536 struct vhost_vring_addr addr = { 537 .index = idx, 538 .desc_user_addr = (uint64_t)(unsigned long)vq->desc, 539 .avail_user_addr = (uint64_t)(unsigned long)vq->avail, 540 .used_user_addr = (uint64_t)(unsigned long)vq->used, 541 .log_guest_addr = vq->used_phys, 542 .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0, 543 }; 544 int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr); 545 if (r < 0) { 546 return -errno; 547 } 548 return 0; 549 } 550 551 static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log) 552 { 553 uint64_t features = dev->acked_features; 554 int r; 555 if (enable_log) { 556 features |= 0x1 << VHOST_F_LOG_ALL; 557 } 558 r = ioctl(dev->control, VHOST_SET_FEATURES, &features); 559 return r < 0 ? -errno : 0; 560 } 561 562 static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log) 563 { 564 int r, t, i; 565 r = vhost_dev_set_features(dev, enable_log); 566 if (r < 0) { 567 goto err_features; 568 } 569 for (i = 0; i < dev->nvqs; ++i) { 570 r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i, 571 enable_log); 572 if (r < 0) { 573 goto err_vq; 574 } 575 } 576 return 0; 577 err_vq: 578 for (; i >= 0; --i) { 579 t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i, 580 dev->log_enabled); 581 assert(t >= 0); 582 } 583 t = vhost_dev_set_features(dev, dev->log_enabled); 584 assert(t >= 0); 585 err_features: 586 return r; 587 } 588 589 static int vhost_migration_log(MemoryListener *listener, int enable) 590 { 591 struct vhost_dev *dev = container_of(listener, struct vhost_dev, 592 memory_listener); 593 int r; 594 if (!!enable == dev->log_enabled) { 595 return 0; 596 } 597 if (!dev->started) { 598 dev->log_enabled = enable; 599 return 0; 600 } 601 if (!enable) { 602 r = vhost_dev_set_log(dev, false); 603 if (r < 0) { 604 return r; 605 } 606 if (dev->log) { 607 g_free(dev->log); 608 } 609 dev->log = NULL; 610 dev->log_size = 0; 611 } else { 612 vhost_dev_log_resize(dev, vhost_get_log_size(dev)); 613 r = vhost_dev_set_log(dev, true); 614 if (r < 0) { 615 return r; 616 } 617 } 618 dev->log_enabled = enable; 619 return 0; 620 } 621 622 static void vhost_log_global_start(MemoryListener *listener) 623 { 624 int r; 625 626 r = vhost_migration_log(listener, true); 627 if (r < 0) { 628 abort(); 629 } 630 } 631 632 static void vhost_log_global_stop(MemoryListener *listener) 633 { 634 int r; 635 636 r = vhost_migration_log(listener, false); 637 if (r < 0) { 638 abort(); 639 } 640 } 641 642 static void vhost_log_start(MemoryListener *listener, 643 MemoryRegionSection *section) 644 { 645 /* FIXME: implement */ 646 } 647 648 static void vhost_log_stop(MemoryListener *listener, 649 MemoryRegionSection *section) 650 { 651 /* FIXME: implement */ 652 } 653 654 static int vhost_virtqueue_start(struct vhost_dev *dev, 655 struct VirtIODevice *vdev, 656 struct vhost_virtqueue *vq, 657 unsigned idx) 658 { 659 hwaddr s, l, a; 660 int r; 661 int vhost_vq_index = idx - dev->vq_index; 662 struct vhost_vring_file file = { 663 .index = vhost_vq_index 664 }; 665 struct vhost_vring_state state = { 666 .index = vhost_vq_index 667 }; 668 struct VirtQueue *vvq = virtio_get_queue(vdev, idx); 669 670 assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs); 671 672 vq->num = state.num = virtio_queue_get_num(vdev, idx); 673 r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state); 674 if (r) { 675 return -errno; 676 } 677 678 state.num = virtio_queue_get_last_avail_idx(vdev, idx); 679 r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state); 680 if (r) { 681 return -errno; 682 } 683 684 s = l = virtio_queue_get_desc_size(vdev, idx); 685 a = virtio_queue_get_desc_addr(vdev, idx); 686 vq->desc = cpu_physical_memory_map(a, &l, 0); 687 if (!vq->desc || l != s) { 688 r = -ENOMEM; 689 goto fail_alloc_desc; 690 } 691 s = l = virtio_queue_get_avail_size(vdev, idx); 692 a = virtio_queue_get_avail_addr(vdev, idx); 693 vq->avail = cpu_physical_memory_map(a, &l, 0); 694 if (!vq->avail || l != s) { 695 r = -ENOMEM; 696 goto fail_alloc_avail; 697 } 698 vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx); 699 vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx); 700 vq->used = cpu_physical_memory_map(a, &l, 1); 701 if (!vq->used || l != s) { 702 r = -ENOMEM; 703 goto fail_alloc_used; 704 } 705 706 vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx); 707 vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx); 708 vq->ring = cpu_physical_memory_map(a, &l, 1); 709 if (!vq->ring || l != s) { 710 r = -ENOMEM; 711 goto fail_alloc_ring; 712 } 713 714 r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled); 715 if (r < 0) { 716 r = -errno; 717 goto fail_alloc; 718 } 719 720 file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq)); 721 r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file); 722 if (r) { 723 r = -errno; 724 goto fail_kick; 725 } 726 727 /* Clear and discard previous events if any. */ 728 event_notifier_test_and_clear(&vq->masked_notifier); 729 730 return 0; 731 732 fail_kick: 733 fail_alloc: 734 cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx), 735 0, 0); 736 fail_alloc_ring: 737 cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx), 738 0, 0); 739 fail_alloc_used: 740 cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx), 741 0, 0); 742 fail_alloc_avail: 743 cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx), 744 0, 0); 745 fail_alloc_desc: 746 return r; 747 } 748 749 static void vhost_virtqueue_stop(struct vhost_dev *dev, 750 struct VirtIODevice *vdev, 751 struct vhost_virtqueue *vq, 752 unsigned idx) 753 { 754 struct vhost_vring_state state = { 755 .index = idx - dev->vq_index 756 }; 757 int r; 758 assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs); 759 r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state); 760 if (r < 0) { 761 fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r); 762 fflush(stderr); 763 } 764 virtio_queue_set_last_avail_idx(vdev, idx, state.num); 765 assert (r >= 0); 766 cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx), 767 0, virtio_queue_get_ring_size(vdev, idx)); 768 cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx), 769 1, virtio_queue_get_used_size(vdev, idx)); 770 cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx), 771 0, virtio_queue_get_avail_size(vdev, idx)); 772 cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx), 773 0, virtio_queue_get_desc_size(vdev, idx)); 774 } 775 776 static void vhost_eventfd_add(MemoryListener *listener, 777 MemoryRegionSection *section, 778 bool match_data, uint64_t data, EventNotifier *e) 779 { 780 } 781 782 static void vhost_eventfd_del(MemoryListener *listener, 783 MemoryRegionSection *section, 784 bool match_data, uint64_t data, EventNotifier *e) 785 { 786 } 787 788 static int vhost_virtqueue_init(struct vhost_dev *dev, 789 struct vhost_virtqueue *vq, int n) 790 { 791 struct vhost_vring_file file = { 792 .index = n, 793 }; 794 int r = event_notifier_init(&vq->masked_notifier, 0); 795 if (r < 0) { 796 return r; 797 } 798 799 file.fd = event_notifier_get_fd(&vq->masked_notifier); 800 r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file); 801 if (r) { 802 r = -errno; 803 goto fail_call; 804 } 805 return 0; 806 fail_call: 807 event_notifier_cleanup(&vq->masked_notifier); 808 return r; 809 } 810 811 static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq) 812 { 813 event_notifier_cleanup(&vq->masked_notifier); 814 } 815 816 int vhost_dev_init(struct vhost_dev *hdev, int devfd, const char *devpath, 817 bool force) 818 { 819 uint64_t features; 820 int i, r; 821 if (devfd >= 0) { 822 hdev->control = devfd; 823 } else { 824 hdev->control = open(devpath, O_RDWR); 825 if (hdev->control < 0) { 826 return -errno; 827 } 828 } 829 r = ioctl(hdev->control, VHOST_SET_OWNER, NULL); 830 if (r < 0) { 831 goto fail; 832 } 833 834 r = ioctl(hdev->control, VHOST_GET_FEATURES, &features); 835 if (r < 0) { 836 goto fail; 837 } 838 839 for (i = 0; i < hdev->nvqs; ++i) { 840 r = vhost_virtqueue_init(hdev, hdev->vqs + i, i); 841 if (r < 0) { 842 goto fail_vq; 843 } 844 } 845 hdev->features = features; 846 847 hdev->memory_listener = (MemoryListener) { 848 .begin = vhost_begin, 849 .commit = vhost_commit, 850 .region_add = vhost_region_add, 851 .region_del = vhost_region_del, 852 .region_nop = vhost_region_nop, 853 .log_start = vhost_log_start, 854 .log_stop = vhost_log_stop, 855 .log_sync = vhost_log_sync, 856 .log_global_start = vhost_log_global_start, 857 .log_global_stop = vhost_log_global_stop, 858 .eventfd_add = vhost_eventfd_add, 859 .eventfd_del = vhost_eventfd_del, 860 .priority = 10 861 }; 862 hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions)); 863 hdev->n_mem_sections = 0; 864 hdev->mem_sections = NULL; 865 hdev->log = NULL; 866 hdev->log_size = 0; 867 hdev->log_enabled = false; 868 hdev->started = false; 869 hdev->memory_changed = false; 870 memory_listener_register(&hdev->memory_listener, &address_space_memory); 871 hdev->force = force; 872 return 0; 873 fail_vq: 874 while (--i >= 0) { 875 vhost_virtqueue_cleanup(hdev->vqs + i); 876 } 877 fail: 878 r = -errno; 879 close(hdev->control); 880 return r; 881 } 882 883 void vhost_dev_cleanup(struct vhost_dev *hdev) 884 { 885 int i; 886 for (i = 0; i < hdev->nvqs; ++i) { 887 vhost_virtqueue_cleanup(hdev->vqs + i); 888 } 889 memory_listener_unregister(&hdev->memory_listener); 890 g_free(hdev->mem); 891 g_free(hdev->mem_sections); 892 close(hdev->control); 893 } 894 895 bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev) 896 { 897 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); 898 VirtioBusState *vbus = VIRTIO_BUS(qbus); 899 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus); 900 901 return !k->query_guest_notifiers || 902 k->query_guest_notifiers(qbus->parent) || 903 hdev->force; 904 } 905 906 /* Stop processing guest IO notifications in qemu. 907 * Start processing them in vhost in kernel. 908 */ 909 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) 910 { 911 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); 912 VirtioBusState *vbus = VIRTIO_BUS(qbus); 913 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus); 914 int i, r; 915 if (!k->set_host_notifier) { 916 fprintf(stderr, "binding does not support host notifiers\n"); 917 r = -ENOSYS; 918 goto fail; 919 } 920 921 for (i = 0; i < hdev->nvqs; ++i) { 922 r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true); 923 if (r < 0) { 924 fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r); 925 goto fail_vq; 926 } 927 } 928 929 return 0; 930 fail_vq: 931 while (--i >= 0) { 932 r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false); 933 if (r < 0) { 934 fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r); 935 fflush(stderr); 936 } 937 assert (r >= 0); 938 } 939 fail: 940 return r; 941 } 942 943 /* Stop processing guest IO notifications in vhost. 944 * Start processing them in qemu. 945 * This might actually run the qemu handlers right away, 946 * so virtio in qemu must be completely setup when this is called. 947 */ 948 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) 949 { 950 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); 951 VirtioBusState *vbus = VIRTIO_BUS(qbus); 952 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus); 953 int i, r; 954 955 for (i = 0; i < hdev->nvqs; ++i) { 956 r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false); 957 if (r < 0) { 958 fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r); 959 fflush(stderr); 960 } 961 assert (r >= 0); 962 } 963 } 964 965 /* Test and clear event pending status. 966 * Should be called after unmask to avoid losing events. 967 */ 968 bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n) 969 { 970 struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index; 971 assert(hdev->started); 972 assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs); 973 return event_notifier_test_and_clear(&vq->masked_notifier); 974 } 975 976 /* Mask/unmask events from this vq. */ 977 void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n, 978 bool mask) 979 { 980 struct VirtQueue *vvq = virtio_get_queue(vdev, n); 981 int r, index = n - hdev->vq_index; 982 983 assert(hdev->started); 984 assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs); 985 986 struct vhost_vring_file file = { 987 .index = index 988 }; 989 if (mask) { 990 file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier); 991 } else { 992 file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq)); 993 } 994 r = ioctl(hdev->control, VHOST_SET_VRING_CALL, &file); 995 assert(r >= 0); 996 } 997 998 /* Host notifiers must be enabled at this point. */ 999 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) 1000 { 1001 int i, r; 1002 1003 hdev->started = true; 1004 1005 r = vhost_dev_set_features(hdev, hdev->log_enabled); 1006 if (r < 0) { 1007 goto fail_features; 1008 } 1009 r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem); 1010 if (r < 0) { 1011 r = -errno; 1012 goto fail_mem; 1013 } 1014 for (i = 0; i < hdev->nvqs; ++i) { 1015 r = vhost_virtqueue_start(hdev, 1016 vdev, 1017 hdev->vqs + i, 1018 hdev->vq_index + i); 1019 if (r < 0) { 1020 goto fail_vq; 1021 } 1022 } 1023 1024 if (hdev->log_enabled) { 1025 hdev->log_size = vhost_get_log_size(hdev); 1026 hdev->log = hdev->log_size ? 1027 g_malloc0(hdev->log_size * sizeof *hdev->log) : NULL; 1028 r = ioctl(hdev->control, VHOST_SET_LOG_BASE, 1029 (uint64_t)(unsigned long)hdev->log); 1030 if (r < 0) { 1031 r = -errno; 1032 goto fail_log; 1033 } 1034 } 1035 1036 return 0; 1037 fail_log: 1038 fail_vq: 1039 while (--i >= 0) { 1040 vhost_virtqueue_stop(hdev, 1041 vdev, 1042 hdev->vqs + i, 1043 hdev->vq_index + i); 1044 } 1045 i = hdev->nvqs; 1046 fail_mem: 1047 fail_features: 1048 1049 hdev->started = false; 1050 return r; 1051 } 1052 1053 /* Host notifiers must be enabled at this point. */ 1054 void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev) 1055 { 1056 int i; 1057 1058 for (i = 0; i < hdev->nvqs; ++i) { 1059 vhost_virtqueue_stop(hdev, 1060 vdev, 1061 hdev->vqs + i, 1062 hdev->vq_index + i); 1063 } 1064 vhost_log_sync_range(hdev, 0, ~0x0ull); 1065 1066 hdev->started = false; 1067 g_free(hdev->log); 1068 hdev->log = NULL; 1069 hdev->log_size = 0; 1070 } 1071 1072