/*
 * vhost-vdpa
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "migration/blocker.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qapi/error.h"

/*
 * Return one past the end of the section. Be careful with uint64_t
 * conversions!
 */
static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
{
    Int128 llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    return llend;
}

static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
                                                uint64_t iova_min,
                                                uint64_t iova_max)
{
    Int128 llend;

    if ((!memory_region_is_ram(section->mr) &&
         !memory_region_is_iommu(section->mr)) ||
        memory_region_is_protected(section->mr) ||
        /* vhost-vDPA doesn't allow MMIO to be mapped */
        memory_region_is_ram_device(section->mr)) {
        return true;
    }

    if (section->offset_within_address_space < iova_min) {
        error_report("RAM section out of device range (min=0x%" PRIx64
                     ", addr=0x%" HWADDR_PRIx ")",
                     iova_min, section->offset_within_address_space);
        return true;
    }

    llend = vhost_vdpa_section_end(section);
    if (int128_gt(llend, int128_make64(iova_max))) {
        error_report("RAM section out of device range (max=0x%" PRIx64
                     ", end addr=0x%" PRIx64 ")",
                     iova_max, int128_get64(llend));
        return true;
    }

    return false;
}

/*
 * The caller must set asid = 0 if the device does not support asid.
 * This is not an ABI break since it is set to 0 by the initializer anyway.
 */
int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                       hwaddr size, void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.asid = asid;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.asid, msg.iotlb.iova,
                             msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
                             msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

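/*
 * Typical usage (a minimal sketch, not a required pattern): 'v' is assumed
 * to be an initialized struct vhost_vdpa, and 'buf'/'iova' are placeholders
 * supplied by the caller.  vhost_vdpa_dma_unmap() is defined just below.
 *
 *     size_t sz = qemu_real_host_page_size();
 *     if (vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova, sz,
 *                            buf, false) == 0) {
 *         ... the device may now DMA to/from [iova, iova + sz) ...
 *         vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, sz);
 *     }
 */
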
/*
 * The caller must set asid = 0 if the device does not support asid.
 * This is not an ABI break since it is set to 0 by the initializer anyway.
 */
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                         hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.asid = asid;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.asid, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
{
    int fd = v->device_fd;
    struct vhost_msg_v2 msg = {
        .type = v->msg_type,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
    };

    trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type);
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
{
    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
        !v->iotlb_batch_begin_sent) {
        vhost_vdpa_listener_begin_batch(v);
    }

    v->iotlb_batch_begin_sent = true;
}

static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    if (!v->iotlb_batch_begin_sent) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.iotlb.type);
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }

    v->iotlb_batch_begin_sent = false;
}

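/*
 * When the backend advertises VHOST_BACKEND_F_IOTLB_BATCH, the IOTLB
 * updates issued by the listener callbacks are bracketed so the kernel can
 * apply them in one transaction:
 *
 *   VHOST_IOTLB_BATCH_BEGIN   (sent lazily by vhost_vdpa_iotlb_batch_begin_once())
 *   VHOST_IOTLB_UPDATE / VHOST_IOTLB_INVALIDATE, one per section
 *   VHOST_IOTLB_BATCH_END     (sent from the .commit callback above)
 */
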
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    DMAMap mem_region = {};
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);
    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));
    if (v->shadow_data) {
        int r;

        mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr;
        mem_region.size = int128_get64(llsize) - 1;
        mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly);

        r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region);
        if (unlikely(r != IOVA_OK)) {
            error_report("Can't allocate a mapping (%d)", r);
            goto fail;
        }

        iova = mem_region.iova;
    }

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                             int128_get64(llsize), vaddr, section->readonly);
    if (ret) {
        error_report("vhost vdpa map fail!");
        goto fail_map;
    }

    return;

fail_map:
    if (v->shadow_data) {
        vhost_iova_tree_remove(v->iova_tree, mem_region);
    }

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  At runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;

}

static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);

    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    if (v->shadow_data) {
        const DMAMap *result;
        const void *vaddr = memory_region_get_ram_ptr(section->mr) +
                            section->offset_within_region +
                            (iova - section->offset_within_address_space);
        DMAMap mem_region = {
            .translated_addr = (hwaddr)(uintptr_t)vaddr,
            .size = int128_get64(llsize) - 1,
        };

        result = vhost_iova_tree_find_iova(v->iova_tree, &mem_region);
        if (!result) {
            /* The memory listener map wasn't mapped */
            return;
        }
        iova = result->iova;
        vhost_iova_tree_remove(v->iova_tree, *result);
    }
    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                               int128_get64(llsize));
    if (ret) {
        error_report("vhost_vdpa dma unmap error!");
    }

    memory_region_unref(section->mr);
}
/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental updating
 * of the mapping. So we cannot use the generic vhost memory listener, which
 * depends on addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .name = "vhost-vdpa",
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};

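/*
 * The listener is registered against &address_space_memory from
 * vhost_vdpa_dev_start() when the last virtqueue group of the device is
 * started, and unregistered again in vhost_vdpa_reset_status() and
 * vhost_vdpa_cleanup().
 */
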
static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                           void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;
    int ret;

    trace_vhost_vdpa_add_status(dev, status);
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    s |= status;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    if (!(s & status)) {
        return -EIO;
    }

    return 0;
}

int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range)
{
    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);

    return ret < 0 ? -errno : 0;
}

/*
 * This function is for requests that only need to be applied once.
 * Typically such a request occurs at the beginning of operation, before
 * setting up the queues. It should not be used for requests that operate
 * on all queues, which would need to check dev->vq_index_end instead.
 */
static bool vhost_vdpa_first_dev(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    return v->index == 0;
}

static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
                                       uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}

static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v)
{
    g_autoptr(GPtrArray) shadow_vqs = NULL;

    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
    for (unsigned n = 0; n < hdev->nvqs; ++n) {
        VhostShadowVirtqueue *svq;

        svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque);
        g_ptr_array_add(shadow_vqs, svq);
    }

    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
}

static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    struct vhost_vdpa *v;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;
    vhost_vdpa_init_svq(dev, v);

    error_propagate(&dev->migration_blocker, v->migration_blocker);
    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    /*
     * If v->shadow_vqs_enabled at initialization, the device has been
     * started with x-svq=on, so don't block migration.
     */
    if (dev->migration_blocker == NULL && !v->shadow_vqs_enabled) {
        /* We don't have dev->features yet */
        uint64_t features;
        ret = vhost_vdpa_get_dev_features(dev, &features);
        if (unlikely(ret)) {
            error_setg_errno(errp, -ret, "Could not get device features");
            return ret;
        }
        vhost_svq_valid_features(features, &dev->migration_blocker);
    }

    /*
     * Similar to VFIO, we end up pinning all guest memory and have to
     * disable discarding of RAM.
     */
    ret = ram_block_discard_disable(true);
    if (ret) {
        error_report("Cannot set discarding of RAM broken");
        return ret;
    }

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

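/*
 * Host notifiers: the vhost-vdpa character device exposes one host page of
 * doorbell registers per virtqueue at offset queue_index * page_size.  The
 * helpers below mmap() that page write-only and expose it to the guest as a
 * ram-device memory region, so a queue notification becomes a direct write
 * to the device instead of an eventfd kick.
 */
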
static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size();
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size();
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        object_unparent(OBJECT(&n->mr));
        munmap(addr, page_size);
        goto err;
    }
    n->addr = addr;

    return 0;

err:
    return -1;
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    /*
     * Pack all the changes to the memory regions in a single
     * transaction to avoid multiple updates of the address space
     * topology.
     */
    memory_region_transaction_begin();

    for (i = dev->vq_index; i < dev->vq_index + n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }

    memory_region_transaction_commit();
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int i;

    if (v->shadow_vqs_enabled) {
        /* FIXME SVQ is not compatible with host notifiers mr */
        return;
    }

    /*
     * Pack all the changes to the memory regions in a single
     * transaction to avoid multiple updates of the address space
     * topology.
     */
    memory_region_transaction_begin();

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index);
            break;
        }
    }

    memory_region_transaction_commit();
}

static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    size_t idx;

    for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
        vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
    }
    g_ptr_array_free(v->shadow_vqs, true);
}

static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    if (vhost_vdpa_first_dev(dev)) {
        ram_block_discard_disable(false);
    }

    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);
    vhost_vdpa_svq_cleanup(dev);

    dev->opaque = NULL;

    return 0;
}

static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

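/*
 * vhost-vdpa does not consume the vhost memory table: guest memory is
 * mapped incrementally through the MemoryListener above.  The handler
 * below therefore only traces and validates the request.
 */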
static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -EINVAL;
    }

    return 0;
}

static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;

    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) {
            /*
             * QEMU is just trying to enable or disable logging. SVQ handles
             * this separately, so no need to forward this.
             */
            v->acked_features = features;
            return 0;
        }

        v->acked_features = features;

        /* We must not ack _F_LOG if SVQ is enabled */
        features &= ~BIT_ULL(VHOST_F_LOG_ALL);
    }

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }

    return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
}

static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
        0x1ULL << VHOST_BACKEND_F_IOTLB_ASID |
        0x1ULL << VHOST_BACKEND_F_SUSPEND;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return -EFAULT;
    }

    features &= f;

    if (vhost_vdpa_first_dev(dev)) {
        r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
        if (r) {
            return -EFAULT;
        }
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;
    uint8_t status = 0;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    v->suspended = false;
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx);
    return idx;
}

static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;
    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}

static int vhost_vdpa_set_config_call(struct vhost_dev *dev,
                                      int fd)
{
    trace_vhost_vdpa_set_config_call(dev, fd);
    return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, &fd);
}

static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}

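/*
 * Both config accessors go through struct vhost_vdpa_config, the
 * variable-length ioctl argument defined in the Linux UAPI headers:
 *
 *     struct vhost_vdpa_config {
 *         __u32 off;     offset into the device config space
 *         __u32 len;     number of bytes to transfer
 *         __u8  buf[];   payload
 *     };
 *
 * hence the g_malloc(len + offsetof(struct vhost_vdpa_config, buf)) in
 * both directions.
 */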
static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
                                         struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
                                         struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
                                         struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
                                         struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);

    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);

}

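/*
 * The helpers above (vhost_vdpa_set_dev_vring_base() and the
 * *_vring_dev_* setters) always talk directly to the device.  The public
 * vhost_vdpa_set_vring_{base,addr,kick,call} callbacks further below only
 * forward to them when shadow virtqueues are disabled; with SVQ enabled
 * the request is serviced by the shadow virtqueue instead.
 */
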
/**
 * Set the shadow virtqueue descriptors to the device
 *
 * @dev: The vhost device model
 * @svq: The shadow virtqueue
 * @idx: The index of the virtqueue in the vhost device
 * @errp: Error
 *
 * Note that this function does not rewind the kick file descriptor if it
 * cannot set the call one.
 */
static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
                                  VhostShadowVirtqueue *svq, unsigned idx,
                                  Error **errp)
{
    struct vhost_vring_file file = {
        .index = dev->vq_index + idx,
    };
    const EventNotifier *event_notifier = &svq->hdev_kick;
    int r;

    r = event_notifier_init(&svq->hdev_kick, 0);
    if (r != 0) {
        error_setg_errno(errp, -r, "Couldn't create kick event notifier");
        goto err_init_hdev_kick;
    }

    r = event_notifier_init(&svq->hdev_call, 0);
    if (r != 0) {
        error_setg_errno(errp, -r, "Couldn't create call event notifier");
        goto err_init_hdev_call;
    }

    file.fd = event_notifier_get_fd(event_notifier);
    r = vhost_vdpa_set_vring_dev_kick(dev, &file);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Can't set device kick fd");
        goto err_init_set_dev_fd;
    }

    event_notifier = &svq->hdev_call;
    file.fd = event_notifier_get_fd(event_notifier);
    r = vhost_vdpa_set_vring_dev_call(dev, &file);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Can't set device call fd");
        goto err_init_set_dev_fd;
    }

    return 0;

err_init_set_dev_fd:
    event_notifier_set_handler(&svq->hdev_call, NULL);

err_init_hdev_call:
    event_notifier_cleanup(&svq->hdev_kick);

err_init_hdev_kick:
    return r;
}

/**
 * Unmap a SVQ area in the device
 */
static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
{
    const DMAMap needle = {
        .translated_addr = addr,
    };
    const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, &needle);
    hwaddr size;
    int r;

    if (unlikely(!result)) {
        error_report("Unable to find SVQ address to unmap");
        return;
    }

    size = ROUND_UP(result->size, qemu_real_host_page_size());
    r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size);
    if (unlikely(r < 0)) {
        error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
        return;
    }

    vhost_iova_tree_remove(v->iova_tree, *result);
}

static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
                                       const VhostShadowVirtqueue *svq)
{
    struct vhost_vdpa *v = dev->opaque;
    struct vhost_vring_addr svq_addr;

    vhost_svq_get_vring_addr(svq, &svq_addr);

    vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);

    vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
}

/**
 * Map the SVQ area in the device
 *
 * @v: Vhost-vdpa device
 * @needle: The area for which to allocate an IOVA
 * @errp: Error pointer
 */
static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
                                    Error **errp)
{
    int r;

    r = vhost_iova_tree_map_alloc(v->iova_tree, needle);
    if (unlikely(r != IOVA_OK)) {
        error_setg(errp, "Cannot allocate iova (%d)", r);
        return false;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova,
                           needle->size + 1,
                           (void *)(uintptr_t)needle->translated_addr,
                           needle->perm == IOMMU_RO);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Cannot map region to device");
        vhost_iova_tree_remove(v->iova_tree, *needle);
    }

    return r == 0;
}

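/*
 * Note on permissions: the driver area (descriptor and avail rings) is
 * written by QEMU and only read by the device, so it is mapped IOMMU_RO;
 * the device area (used ring) must be writable by the device and is
 * mapped IOMMU_RW.
 */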
/**
 * Map the shadow virtqueue rings in the device
 *
 * @dev: The vhost device
 * @svq: The shadow virtqueue
 * @addr: Assigned IOVA addresses
 * @errp: Error pointer
 */
static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
                                     const VhostShadowVirtqueue *svq,
                                     struct vhost_vring_addr *addr,
                                     Error **errp)
{
    ERRP_GUARD();
    DMAMap device_region, driver_region;
    struct vhost_vring_addr svq_addr;
    struct vhost_vdpa *v = dev->opaque;
    size_t device_size = vhost_svq_device_area_size(svq);
    size_t driver_size = vhost_svq_driver_area_size(svq);
    size_t avail_offset;
    bool ok;

    vhost_svq_get_vring_addr(svq, &svq_addr);

    driver_region = (DMAMap) {
        .translated_addr = svq_addr.desc_user_addr,
        .size = driver_size - 1,
        .perm = IOMMU_RO,
    };
    ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp);
    if (unlikely(!ok)) {
        error_prepend(errp, "Cannot create vq driver region: ");
        return false;
    }
    addr->desc_user_addr = driver_region.iova;
    avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
    addr->avail_user_addr = driver_region.iova + avail_offset;

    device_region = (DMAMap) {
        .translated_addr = svq_addr.used_user_addr,
        .size = device_size - 1,
        .perm = IOMMU_RW,
    };
    ok = vhost_vdpa_svq_map_ring(v, &device_region, errp);
    if (unlikely(!ok)) {
        error_prepend(errp, "Cannot create vq device region: ");
        vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
    }
    addr->used_user_addr = device_region.iova;

    return ok;
}

static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
                                 VhostShadowVirtqueue *svq, unsigned idx,
                                 Error **errp)
{
    uint16_t vq_index = dev->vq_index + idx;
    struct vhost_vring_state s = {
        .index = vq_index,
    };
    int r;

    r = vhost_vdpa_set_dev_vring_base(dev, &s);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set vring base");
        return false;
    }

    r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp);
    return r == 0;
}

static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    Error *err = NULL;
    unsigned i;

    if (!v->shadow_vqs_enabled) {
        return true;
    }

    for (i = 0; i < v->shadow_vqs->len; ++i) {
        VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
        struct vhost_vring_addr addr = {
            .index = dev->vq_index + i,
        };
        int r;
        bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
        if (unlikely(!ok)) {
            goto err;
        }

        vhost_svq_start(svq, dev->vdev, vq, v->iova_tree);
        ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
        if (unlikely(!ok)) {
            goto err_map;
        }

        /* Override vring GPA set by vhost subsystem */
        r = vhost_vdpa_set_vring_dev_addr(dev, &addr);
        if (unlikely(r != 0)) {
            error_setg_errno(&err, -r, "Cannot set device address");
            goto err_set_addr;
        }
    }

    return true;

err_set_addr:
    vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i));

err_map:
    vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i));

err:
    error_reportf_err(err, "Cannot setup SVQ %u: ", i);
    for (unsigned j = 0; j < i; ++j) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);
        vhost_vdpa_svq_unmap_rings(dev, svq);
        vhost_svq_stop(svq);
    }

    return false;
}

static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    if (!v->shadow_vqs_enabled) {
        return;
    }

    for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);

        vhost_svq_stop(svq);
        vhost_vdpa_svq_unmap_rings(dev, svq);

        event_notifier_cleanup(&svq->hdev_kick);
        event_notifier_cleanup(&svq->hdev_call);
    }
}

static void vhost_vdpa_suspend(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int r;

    if (!vhost_vdpa_first_dev(dev)) {
        return;
    }

    if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) {
        trace_vhost_vdpa_suspend(dev);
        r = ioctl(v->device_fd, VHOST_VDPA_SUSPEND);
        if (unlikely(r)) {
            error_report("Cannot suspend: %s(%d)", g_strerror(errno), errno);
        } else {
            v->suspended = true;
            return;
        }
    }

    vhost_vdpa_reset_device(dev);
}

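/*
 * Start/stop ordering: on start, host notifiers and shadow virtqueues are
 * set up and the rings enabled per queue group; the memory listener is
 * registered and DRIVER_OK is set only once the last queue group of the
 * device reaches this point.  On stop, the device is suspended (or reset
 * if suspend is not supported) before the SVQs and host notifiers are
 * torn down, so the used indexes can still be read back.
 */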
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;
    bool ok;
    trace_vhost_vdpa_dev_start(dev, started);

    if (started) {
        vhost_vdpa_host_notifiers_init(dev);
        ok = vhost_vdpa_svqs_start(dev);
        if (unlikely(!ok)) {
            return -1;
        }
        vhost_vdpa_set_vring_ready(dev);
    } else {
        vhost_vdpa_suspend(dev);
        vhost_vdpa_svqs_stop(dev);
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    }

    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
        return 0;
    }

    if (started) {
        memory_listener_register(&v->listener, &address_space_memory);
        return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
    }

    return 0;
}

static void vhost_vdpa_reset_status(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
        return;
    }

    vhost_vdpa_reset_device(dev);
    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);
    memory_listener_unregister(&v->listener);
}

static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    struct vhost_vdpa *v = dev->opaque;
    if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    struct vhost_vdpa *v = dev->opaque;

    if (v->shadow_vqs_enabled) {
        /*
         * Device vring addr was set at device start. SVQ base is handled by
         * VirtQueue code.
         */
        return 0;
    }

    return vhost_vdpa_set_vring_dev_addr(dev, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    struct vhost_vdpa *v = dev->opaque;

    if (v->shadow_vqs_enabled) {
        /*
         * Device vring base was set at device start. SVQ base is handled by
         * VirtQueue code.
         */
        return 0;
    }

    return vhost_vdpa_set_dev_vring_base(dev, ring);
}

static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;

    if (v->shadow_vqs_enabled) {
        ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index);
        return 0;
    }

    if (!v->suspended) {
        /*
         * Cannot trust in value returned by device, let vhost recover used
         * idx from guest.
         */
        return -1;
    }

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    struct vhost_vdpa *v = dev->opaque;
    int vdpa_idx = file->index - dev->vq_index;

    if (v->shadow_vqs_enabled) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
        vhost_svq_set_svq_kick_fd(svq, file->fd);
        return 0;
    } else {
        return vhost_vdpa_set_vring_dev_kick(dev, file);
    }
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    struct vhost_vdpa *v = dev->opaque;
    int vdpa_idx = file->index - dev->vq_index;
    VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);

    /* Remember last call fd because we can switch to SVQ anytime. */
    vhost_svq_set_svq_call_fd(svq, file->fd);
    if (v->shadow_vqs_enabled) {
        return 0;
    }

    return vhost_vdpa_set_vring_dev_call(dev, file);
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    int ret = vhost_vdpa_get_dev_features(dev, features);

    if (ret == 0) {
        /* Add SVQ logging capabilities */
        *features |= BIT_ULL(VHOST_F_LOG_ALL);
    }

    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                                  struct vhost_vring_addr *addr,
                                  struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}

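/*
 * Dispatch table the vhost core uses for VHOST_BACKEND_TYPE_VDPA devices.
 * Callbacks that vhost-vdpa does not need are left NULL.
 */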
const VhostOps vdpa_ops = {
        .backend_type = VHOST_BACKEND_TYPE_VDPA,
        .vhost_backend_init = vhost_vdpa_init,
        .vhost_backend_cleanup = vhost_vdpa_cleanup,
        .vhost_set_log_base = vhost_vdpa_set_log_base,
        .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
        .vhost_set_vring_num = vhost_vdpa_set_vring_num,
        .vhost_set_vring_base = vhost_vdpa_set_vring_base,
        .vhost_get_vring_base = vhost_vdpa_get_vring_base,
        .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
        .vhost_set_vring_call = vhost_vdpa_set_vring_call,
        .vhost_get_features = vhost_vdpa_get_features,
        .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
        .vhost_set_owner = vhost_vdpa_set_owner,
        .vhost_set_vring_endian = NULL,
        .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
        .vhost_set_mem_table = vhost_vdpa_set_mem_table,
        .vhost_set_features = vhost_vdpa_set_features,
        .vhost_reset_device = vhost_vdpa_reset_device,
        .vhost_get_vq_index = vhost_vdpa_get_vq_index,
        .vhost_get_config = vhost_vdpa_get_config,
        .vhost_set_config = vhost_vdpa_set_config,
        .vhost_requires_shm_log = NULL,
        .vhost_migration_done = NULL,
        .vhost_backend_can_merge = NULL,
        .vhost_net_set_mtu = NULL,
        .vhost_set_iotlb_callback = NULL,
        .vhost_send_device_iotlb_msg = NULL,
        .vhost_dev_start = vhost_vdpa_dev_start,
        .vhost_get_device_id = vhost_vdpa_get_device_id,
        .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
        .vhost_force_iommu = vhost_vdpa_force_iommu,
        .vhost_set_config_call = vhost_vdpa_set_config_call,
        .vhost_reset_status = vhost_vdpa_reset_status,
};