/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/hw.h"
#include "qemu/range.h"
#include <linux/vhost.h>
#include "exec/address-spaces.h"
#include "hw/virtio/virtio-bus.h"

static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev->log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev->log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = (start / VHOST_LOG_CHUNK) * VHOST_LOG_CHUNK;

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        int bit;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really
         * need the barrier semantics of __sync
         * builtins, but it's easier to use them than
         * roll our own. */
        log = __sync_fetch_and_and(from, 0);
        while ((bit = sizeof(log) > sizeof(int) ?
                ffsll(log) : ffs(log))) {
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            bit -= 1;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
                              range_get_last(vq->used_phys, vq->used_size));
    }
    return 0;
}

static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}
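/* The dirty log is an array of vhost_log_chunk_t words shared with the
 * kernel via VHOST_SET_LOG_BASE: each set bit marks one VHOST_LOG_PAGE
 * of guest memory as dirty, so a single chunk word covers
 * VHOST_LOG_CHUNK bytes.  vhost_dev_sync_region() above atomically
 * fetches-and-clears each word and replays the set bits into QEMU's
 * own dirty bitmap through memory_region_set_dirty(). */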
static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

/* Assign/unassign. Keep an unsorted array of non-overlapping
 * memory regions in dev->mem. */
static void vhost_dev_unassign_memory(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int from, to, n = dev->mem->nregions;
    /* Track overlapping/split regions for sanity checking. */
    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;

    for (from = 0, to = 0; from < n; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t reglast;
        uint64_t memlast;
        uint64_t change;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }

        /* No overlap is simple */
        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                            start_addr, size)) {
            continue;
        }

        /* Split only happens if supplied region
         * is in the middle of an existing one. Thus it cannot
         * overlap with any other existing region. */
        assert(!split);

        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        memlast = range_get_last(start_addr, size);

        /* Remove whole region */
        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
            --dev->mem->nregions;
            --to;
            ++overlap_middle;
            continue;
        }

        /* Shrink region */
        if (memlast >= reglast) {
            reg->memory_size = start_addr - reg->guest_phys_addr;
            assert(reg->memory_size);
            assert(!overlap_end);
            ++overlap_end;
            continue;
        }

        /* Shift region */
        if (start_addr <= reg->guest_phys_addr) {
            change = memlast + 1 - reg->guest_phys_addr;
            reg->memory_size -= change;
            reg->guest_phys_addr += change;
            reg->userspace_addr += change;
            assert(reg->memory_size);
            assert(!overlap_start);
            ++overlap_start;
            continue;
        }

        /* This only happens if supplied region
         * is in the middle of an existing one. Thus it cannot
         * overlap with any other existing region. */
        assert(!overlap_start);
        assert(!overlap_end);
        assert(!overlap_middle);
        /* Split region: shrink first part, shift second part. */
        memcpy(dev->mem->regions + n, reg, sizeof *reg);
        reg->memory_size = start_addr - reg->guest_phys_addr;
        assert(reg->memory_size);
        change = memlast + 1 - reg->guest_phys_addr;
        reg = dev->mem->regions + n;
        reg->memory_size -= change;
        assert(reg->memory_size);
        reg->guest_phys_addr += change;
        reg->userspace_addr += change;
        /* Never add more than 1 region */
        assert(dev->mem->nregions == n);
        ++dev->mem->nregions;
        ++split;
    }
}
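/* Worked example for the unassign cases above (illustrative numbers,
 * not from any real guest): given an existing region covering GPA
 * [0x0000, 0x3fff], unassigning [0x1000, 0x1fff] takes the final
 * "split" branch: the region is shrunk to [0x0000, 0x0fff] and a new
 * region [0x2000, 0x3fff] is appended, with userspace_addr shifted by
 * the same amount.  Unassigning [0x0000, 0x0fff] instead would take
 * the "shift" branch, and [0x3000, 0x3fff] the "shrink" branch. */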
/* Called after unassign, so no regions overlap the given range. */
static void vhost_dev_assign_memory(struct vhost_dev *dev,
                                    uint64_t start_addr,
                                    uint64_t size,
                                    uint64_t uaddr)
{
    int from, to;
    struct vhost_memory_region *merged = NULL;
    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        uint64_t prlast, urlast;
        uint64_t pmlast, umlast;
        uint64_t s, e, u;

        /* clone old region */
        if (to != from) {
            memcpy(reg, dev->mem->regions + from, sizeof *reg);
        }
        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
        pmlast = range_get_last(start_addr, size);
        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
        umlast = range_get_last(uaddr, size);

        /* check for overlapping regions: should never happen. */
        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
        /* Not an adjacent or overlapping region - do not merge. */
        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
            (pmlast + 1 != reg->guest_phys_addr ||
             umlast + 1 != reg->userspace_addr)) {
            continue;
        }

        if (merged) {
            --to;
            assert(to >= 0);
        } else {
            merged = reg;
        }
        u = MIN(uaddr, reg->userspace_addr);
        s = MIN(start_addr, reg->guest_phys_addr);
        e = MAX(pmlast, prlast);
        uaddr = merged->userspace_addr = u;
        start_addr = merged->guest_phys_addr = s;
        size = merged->memory_size = e - s + 1;
        assert(merged->memory_size);
    }

    if (!merged) {
        struct vhost_memory_region *reg = dev->mem->regions + to;
        memset(reg, 0, sizeof *reg);
        reg->memory_size = size;
        assert(reg->memory_size);
        reg->guest_phys_addr = start_addr;
        reg->userspace_addr = uaddr;
        ++to;
    }
    assert(to <= dev->mem->nregions + 1);
    dev->mem->nregions = to;
}

static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        uint64_t last = vq->used_phys + vq->used_size - 1;
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    vhost_log_chunk_t *log;
    uint64_t log_base;
    int r;

    log = g_malloc0(size * sizeof *log);
    log_base = (uint64_t)(unsigned long)log;
    r = ioctl(dev->control, VHOST_SET_LOG_BASE, &log_base);
    assert(r >= 0);
    /* Sync only the range covered by the old log */
    if (dev->log_size) {
        vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
    }
    if (dev->log) {
        g_free(dev->log);
    }
    dev->log = log;
    dev->log_size = size;
}
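/* Note the ordering in vhost_dev_log_resize() above: the kernel is
 * pointed at the new (zeroed) log before the old one is drained, so
 * any page dirtied during the switch lands in the new log and no
 * writes are lost; only then is the old buffer synced and freed. */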
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      uint64_t start_addr,
                                      uint64_t size)
{
    int i;
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;
        hwaddr l;
        void *p;

        if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
            continue;
        }
        l = vq->ring_size;
        p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
        if (!p || l != vq->ring_size) {
            fprintf(stderr, "Unable to map ring buffer for ring %d\n", i);
            return -ENOMEM;
        }
        if (p != vq->ring) {
            fprintf(stderr, "Ring buffer relocated for ring %d\n", i);
            return -EBUSY;
        }
        cpu_physical_memory_unmap(p, l, 0, 0);
    }
    return 0;
}

static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
                                                      uint64_t start_addr,
                                                      uint64_t size)
{
    int i, n = dev->mem->nregions;
    for (i = 0; i < n; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
                           start_addr, size)) {
            return reg;
        }
    }
    return NULL;
}

static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
                                 uint64_t start_addr,
                                 uint64_t size,
                                 uint64_t uaddr)
{
    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
    uint64_t reglast;
    uint64_t memlast;

    if (!reg) {
        return true;
    }

    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
    memlast = range_get_last(start_addr, size);

    /* Need to extend region? */
    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
        return true;
    }
    /* userspace_addr changed? */
    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
}
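/* vhost_set_memory() below keeps dev->mem in sync with the memory
 * listener: it bails out early when nothing the kernel cares about
 * changed (vhost_dev_cmp_memory/vhost_dev_find_reg), otherwise it
 * unassigns the range and, for an add, re-assigns it with merging.
 * The touched range is accumulated so vhost_commit() can verify ring
 * mappings and push a single VHOST_SET_MEM_TABLE update. */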
static void vhost_set_memory(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool add)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr);
    int s = offsetof(struct vhost_memory, regions) +
        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
    void *ram;

    dev->mem = g_realloc(dev->mem, s);

    if (log_dirty) {
        add = false;
    }

    assert(size);

    /* Optimize no-change case. At least cirrus_vga does this a lot
     * at this time. */
    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
    if (add) {
        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
            /* Region exists with same address. Nothing to do. */
            return;
        }
    } else {
        if (!vhost_dev_find_reg(dev, start_addr, size)) {
            /* Removing region that we don't access. Nothing to do. */
            return;
        }
    }

    vhost_dev_unassign_memory(dev, start_addr, size);
    if (add) {
        /* Add given mapping, merging adjacent regions if any */
        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
    } else {
        /* Remove old mapping for this memory, if any. */
        vhost_dev_unassign_memory(dev, start_addr, size);
    }
    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr,
                                    start_addr + size - 1);
    dev->memory_changed = true;
}

static bool vhost_section(MemoryRegionSection *section)
{
    return memory_region_is_ram(section->mr);
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->mem_changed_end_addr = 0;
    dev->mem_changed_start_addr = -1;
}
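/* begin/commit bracket a batch of region_add/region_del/region_nop
 * calls.  vhost_begin() resets the changed range to "empty"
 * (start > end), and vhost_commit() only talks to the kernel when
 * something in between actually changed dev->mem. */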
static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    hwaddr start_addr = 0;
    ram_addr_t size = 0;
    uint64_t log_size;
    int r;

    if (!dev->memory_changed) {
        return;
    }
    if (!dev->started) {
        return;
    }
    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
        return;
    }

    if (dev->started) {
        start_addr = dev->mem_changed_start_addr;
        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;

        r = vhost_verify_ring_mappings(dev, start_addr, size);
        assert(r >= 0);
    }

    if (!dev->log_enabled) {
        r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
        assert(r >= 0);
        dev->memory_changed = false;
        return;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = ioctl(dev->control, VHOST_SET_MEM_TABLE, dev->mem);
    assert(r >= 0);
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }
    dev->memory_changed = false;
}

static void vhost_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(section)) {
        return;
    }

    ++dev->n_mem_sections;
    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
                                dev->n_mem_sections);
    dev->mem_sections[dev->n_mem_sections - 1] = *section;
    vhost_set_memory(listener, section, true);
}

static void vhost_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int i;

    if (!vhost_section(section)) {
        return;
    }

    vhost_set_memory(listener, section, false);
    for (i = 0; i < dev->n_mem_sections; ++i) {
        if (dev->mem_sections[i].offset_within_address_space
            == section->offset_within_address_space) {
            --dev->n_mem_sections;
            memmove(&dev->mem_sections[i], &dev->mem_sections[i + 1],
                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
            break;
        }
    }
}

static void vhost_region_nop(MemoryListener *listener,
                             MemoryRegionSection *section)
{
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr = {
        .index = idx,
        .desc_user_addr = (uint64_t)(unsigned long)vq->desc,
        .avail_user_addr = (uint64_t)(unsigned long)vq->avail,
        .used_user_addr = (uint64_t)(unsigned long)vq->used,
        .log_guest_addr = vq->used_phys,
        .flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0,
    };
    int r = ioctl(dev->control, VHOST_SET_VRING_ADDR, &addr);
    if (r < 0) {
        return -errno;
    }
    return 0;
}
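/* Grow-before/shrink-after in vhost_commit() means the log always
 * covers every address the kernel might dirty while the memory table
 * is being swapped.  The helpers below toggle the same property per
 * device: VHOST_F_LOG_ALL turns dirty logging on in the kernel, and
 * each vring's VHOST_VRING_F_LOG flag is set from the caller's
 * enable_log, normally dev->log_enabled. */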
static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1 << VHOST_F_LOG_ALL;
    }
    r = ioctl(dev->control, VHOST_SET_FEATURES, &features);
    return r < 0 ? -errno : 0;
}

static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, t, i;
    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        t = vhost_virtqueue_set_addr(dev, dev->vqs + i, i,
                                     dev->log_enabled);
        assert(t >= 0);
    }
    t = vhost_dev_set_features(dev, dev->log_enabled);
    assert(t >= 0);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, int enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (!!enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            return r;
        }
        if (dev->log) {
            g_free(dev->log);
        }
        dev->log = NULL;
        dev->log_size = 0;
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            return r;
        }
    }
    dev->log_enabled = enable;
    return 0;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    /* FIXME: implement */
}
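/* Bring one virtqueue up in the kernel.  Rough sequence, as
 * implemented below: program the ring size and last-avail index,
 * map the desc/avail/used rings into QEMU's address space so the
 * pointers stay pinned for the device's lifetime, hand the resulting
 * user-space addresses to vhost with VHOST_SET_VRING_ADDR, and
 * finally wire the host notifier eventfd up as the kick fd with
 * VHOST_SET_VRING_KICK. */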
static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = idx - dev->vq_index;
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_NUM, &state);
    if (r) {
        return -errno;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = ioctl(dev->control, VHOST_SET_VRING_BASE, &state);
    if (r) {
        return -errno;
    }

    s = l = virtio_queue_get_desc_size(vdev, idx);
    a = virtio_queue_get_desc_addr(vdev, idx);
    vq->desc = cpu_physical_memory_map(a, &l, 0);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    s = l = virtio_queue_get_avail_size(vdev, idx);
    a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = cpu_physical_memory_map(a, &l, 0);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = cpu_physical_memory_map(a, &l, 1);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
    vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
    vq->ring = cpu_physical_memory_map(a, &l, 1);
    if (!vq->ring || l != s) {
        r = -ENOMEM;
        goto fail_alloc_ring;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        r = -errno;
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = ioctl(dev->control, VHOST_SET_VRING_KICK, &file);
    if (r) {
        r = -errno;
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    return 0;

fail_kick:
fail_alloc:
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, 0);
fail_alloc_ring:
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              0, 0);
fail_alloc_used:
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, 0);
fail_alloc_avail:
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, 0);
fail_alloc_desc:
    return r;
}

static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx - dev->vq_index
    };
    int r;
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
    r = ioctl(dev->control, VHOST_GET_VRING_BASE, &state);
    if (r < 0) {
        fprintf(stderr, "vhost VQ %d ring restore failed: %d\n", idx, r);
        fflush(stderr);
    }
    virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    assert(r >= 0);
    cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
                              0, virtio_queue_get_ring_size(vdev, idx));
    cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
                              1, virtio_queue_get_used_size(vdev, idx));
    cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
                              0, virtio_queue_get_avail_size(vdev, idx));
    cpu_physical_memory_unmap(vq->desc, virtio_queue_get_desc_size(vdev, idx),
                              0, virtio_queue_get_desc_size(vdev, idx));
}

static void vhost_eventfd_add(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static void vhost_eventfd_del(MemoryListener *listener,
                              MemoryRegionSection *section,
                              bool match_data, uint64_t data, EventNotifier *e)
{
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    struct vhost_vring_file file = {
        .index = n,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_fd(&vq->masked_notifier);
    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
    if (r) {
        r = -errno;
        goto fail_call;
    }
    return 0;
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
}
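/* Device bring-up, in the order the kernel expects: take ownership of
 * the vhost fd with VHOST_SET_OWNER, read the feature mask, point each
 * vring's call fd at our masked notifier, then register the memory
 * listener so dev->mem starts tracking guest RAM. */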
int vhost_dev_init(struct vhost_dev *hdev, int devfd, const char *devpath,
                   bool force)
{
    uint64_t features;
    int i, r;
    if (devfd >= 0) {
        hdev->control = devfd;
    } else {
        hdev->control = open(devpath, O_RDWR);
        if (hdev->control < 0) {
            return -errno;
        }
    }
    r = ioctl(hdev->control, VHOST_SET_OWNER, NULL);
    if (r < 0) {
        goto fail;
    }

    r = ioctl(hdev->control, VHOST_GET_FEATURES, &features);
    if (r < 0) {
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_add,
        .region_del = vhost_region_del,
        .region_nop = vhost_region_nop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .eventfd_add = vhost_eventfd_add,
        .eventfd_del = vhost_eventfd_del,
        .priority = 10
    };
    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    hdev->memory_changed = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    hdev->force = force;
    return 0;
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
fail:
    r = -errno;
    close(hdev->control);
    return r;
}

void vhost_dev_cleanup(struct vhost_dev *hdev)
{
    int i;
    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_cleanup(hdev->vqs + i);
    }
    memory_listener_unregister(&hdev->memory_listener);
    g_free(hdev->mem);
    g_free(hdev->mem_sections);
    close(hdev->control);
}

bool vhost_dev_query(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);

    return !k->query_guest_notifiers ||
           k->query_guest_notifiers(qbus->parent) ||
           hdev->force;
}

/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;
    if (!k->set_host_notifier) {
        fprintf(stderr, "binding does not support host notifiers\n");
        r = -ENOSYS;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, true);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier binding failed: %d\n", i, -r);
            goto fail_vq;
        }
    }

    return 0;
fail_vq:
    while (--i >= 0) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup error: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
fail:
    return r;
}
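/* Host notifier handoff: enabling reroutes the guest's kick eventfds
 * to the kernel's vhost worker, and disabling (below) hands them back
 * to qemu's event loop.  vhost_dev_start()/vhost_dev_stop() both
 * require notifiers to be enabled, so enable/disable bracket the
 * whole started period. */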
/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely set up when this is called.
 */
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    for (i = 0; i < hdev->nvqs; ++i) {
        r = k->set_host_notifier(qbus->parent, hdev->vq_index + i, false);
        if (r < 0) {
            fprintf(stderr, "vhost VQ %d notifier cleanup failed: %d\n", i, -r);
            fflush(stderr);
        }
        assert(r >= 0);
    }
}

/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
{
    struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
    assert(hdev->started);
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
    return event_notifier_test_and_clear(&vq->masked_notifier);
}

/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
                          bool mask)
{
    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
    int r, index = n - hdev->vq_index;

    assert(hdev->started);
    assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);

    struct vhost_vring_file file = {
        .index = index
    };
    if (mask) {
        file.fd = event_notifier_get_fd(&hdev->vqs[index].masked_notifier);
    } else {
        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
    }
    r = ioctl(hdev->control, VHOST_SET_VRING_CALL, &file);
    assert(r >= 0);
}

/* Host notifiers must be enabled at this point. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    hdev->started = true;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail_features;
    }
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail_mem;
    }
    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  hdev->vq_index + i);
        if (r < 0) {
            goto fail_vq;
        }
    }

    if (hdev->log_enabled) {
        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            g_malloc0(hdev->log_size * sizeof *hdev->log) : NULL;
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE,
                  (uint64_t)(unsigned long)hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail_log;
        }
    }

    return 0;
fail_log:
fail_vq:
    while (--i >= 0) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    i = hdev->nvqs;
fail_mem:
fail_features:

    hdev->started = false;
    return r;
}

/* Host notifiers must be enabled at this point. */
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; ++i) {
        vhost_virtqueue_stop(hdev,
                             vdev,
                             hdev->vqs + i,
                             hdev->vq_index + i);
    }
    vhost_log_sync_range(hdev, 0, ~0x0ull);

    hdev->started = false;
    g_free(hdev->log);
    hdev->log = NULL;
    hdev->log_size = 0;
}
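/* Rough lifecycle of a vhost-backed device, as a usage sketch (the
 * exact call sites live in the per-device code, e.g. vhost_net, and
 * are not part of this file):
 *
 *   vhost_dev_init(hdev, devfd, "/dev/vhost-net", force);
 *   vhost_dev_enable_notifiers(hdev, vdev);    kicks go to the kernel
 *   vhost_dev_start(hdev, vdev);               rings live in vhost
 *   ...
 *   vhost_dev_stop(hdev, vdev);                final dirty-log sync
 *   vhost_dev_disable_notifiers(hdev, vdev);   kicks back to qemu
 *   vhost_dev_cleanup(hdev);
 */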