/*
 * vhost-vdpa
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "exec/target_page.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "migration/blocker.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "qapi/error.h"

/*
 * Return one past the end of the section. Be careful with uint64_t
 * conversions!
 */
static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section,
                                     int page_mask)
{
    Int128 llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(page_mask));

    return llend;
}

static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
                                                uint64_t iova_min,
                                                uint64_t iova_max,
                                                int page_mask)
{
    Int128 llend;

    if ((!memory_region_is_ram(section->mr) &&
         !memory_region_is_iommu(section->mr)) ||
        memory_region_is_protected(section->mr) ||
        /* vhost-vDPA doesn't allow MMIO to be mapped */
        memory_region_is_ram_device(section->mr)) {
        return true;
    }

    if (section->offset_within_address_space < iova_min) {
        error_report("RAM section out of device range (min=0x%" PRIx64
                     ", addr=0x%" HWADDR_PRIx ")",
                     iova_min, section->offset_within_address_space);
        return true;
    }
    /*
     * While using vIOMMU, sometimes the section will be larger than iova_max,
     * but the memory that actually maps is smaller, so move the check to
     * vhost_vdpa_iommu_map_notify(). That function will use the actual size
     * that maps to the kernel.
     */

    if (!memory_region_is_iommu(section->mr)) {
        llend = vhost_vdpa_section_end(section, page_mask);
        if (int128_gt(llend, int128_make64(iova_max))) {
            error_report("RAM section out of device range (max=0x%" PRIx64
                         ", end addr=0x%" PRIx64 ")",
                         iova_max, int128_get64(llend));
            return true;
        }
    }

    return false;
}

/*
 * The caller must set asid = 0 if the device does not support asid.
 * This is not an ABI break since it is set to 0 by the initializer anyway.
 */
int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
                       hwaddr size, void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = s->device_fd;
    int ret = 0;

    msg.type = VHOST_IOTLB_MSG_V2;
    msg.asid = asid;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(s, fd, msg.type, msg.asid, msg.iotlb.iova,
                             msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
                             msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}
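
/*
 * Illustrative sketch (not part of the build): mapping one 4 KiB guest
 * page at IOVA 0x1000 in the default guest-PA address space is a single
 * IOTLB_UPDATE message written to the vhost-vdpa fd, roughly:
 *
 *     vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID,
 *                        0x1000, 0x1000, host_va, false);
 *
 * The IOVA, size and "host_va" are made-up example values; the kernel
 * keeps the translation (and typically the page pinning) until a matching
 * invalidation is sent with vhost_vdpa_dma_unmap().
 */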
/*
 * The caller must set asid = 0 if the device does not support asid.
 * This is not an ABI break since it is set to 0 by the initializer anyway.
 */
int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
                         hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = s->device_fd;
    int ret = 0;

    msg.type = VHOST_IOTLB_MSG_V2;
    msg.asid = asid;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(s, fd, msg.type, msg.asid, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

static void vhost_vdpa_listener_begin_batch(VhostVDPAShared *s)
{
    int fd = s->device_fd;
    struct vhost_msg_v2 msg = {
        .type = VHOST_IOTLB_MSG_V2,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
    };

    trace_vhost_vdpa_listener_begin_batch(s, fd, msg.type, msg.iotlb.type);
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

static void vhost_vdpa_iotlb_batch_begin_once(VhostVDPAShared *s)
{
    if (s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
        !s->iotlb_batch_begin_sent) {
        vhost_vdpa_listener_begin_batch(s);
    }

    s->iotlb_batch_begin_sent = true;
}

static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_msg_v2 msg = {};
    int fd = v->shared->device_fd;

    if (!(v->shared->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    if (!v->shared->iotlb_batch_begin_sent) {
        return;
    }

    msg.type = VHOST_IOTLB_MSG_V2;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    trace_vhost_vdpa_listener_commit(v->shared, fd, msg.type, msg.iotlb.type);
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }

    v->shared->iotlb_batch_begin_sent = false;
}
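
/*
 * Sketch of the message flow for one memory-listener transaction when the
 * backend advertises VHOST_BACKEND_F_IOTLB_BATCH (assumed capability,
 * illustrative only):
 *
 *     VHOST_IOTLB_BATCH_BEGIN                       (first map/unmap only)
 *     VHOST_IOTLB_UPDATE / VHOST_IOTLB_INVALIDATE   (one per region change)
 *     VHOST_IOTLB_BATCH_END                         (from .commit above)
 *
 * Without the batch capability each update is applied individually.
 */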

static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n);

    hwaddr iova = iotlb->iova + iommu->iommu_offset;
    struct vhost_vdpa *v = iommu->dev;
    void *vaddr;
    int ret;
    Int128 llend;

    if (iotlb->target_as != &address_space_memory) {
        error_report("Wrong target AS \"%s\", only system memory is allowed",
                     iotlb->target_as->name ? iotlb->target_as->name : "none");
        return;
    }
    RCU_READ_LOCK_GUARD();
    /* check if RAM section out of device range */
    llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova));
    if (int128_gt(llend, int128_make64(v->shared->iova_range.last))) {
        error_report("RAM section out of device range (max=0x%" PRIx64
                     ", end addr=0x%" PRIx64 ")",
                     v->shared->iova_range.last, int128_get64(llend));
        return;
    }

    if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
        bool read_only;

        if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) {
            return;
        }
        ret = vhost_vdpa_dma_map(v->shared, VHOST_VDPA_GUEST_PA_ASID, iova,
                                 iotlb->addr_mask + 1, vaddr, read_only);
        if (ret) {
            error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", "
                         "0x%" HWADDR_PRIx ", %p) = %d (%m)",
                         v, iova, iotlb->addr_mask + 1, vaddr, ret);
        }
    } else {
        ret = vhost_vdpa_dma_unmap(v->shared, VHOST_VDPA_GUEST_PA_ASID, iova,
                                   iotlb->addr_mask + 1);
        if (ret) {
            error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
                         "0x%" HWADDR_PRIx ") = %d (%m)",
                         v, iova, iotlb->addr_mask + 1, ret);
        }
    }
}

static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
                                        MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);

    struct vdpa_iommu *iommu;
    Int128 end;
    int iommu_idx;
    IOMMUMemoryRegion *iommu_mr;
    int ret;

    iommu_mr = IOMMU_MEMORY_REGION(section->mr);

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                   MEMTXATTRS_UNSPECIFIED);
    iommu->iommu_mr = iommu_mr;
    iommu_notifier_init(&iommu->n, vhost_vdpa_iommu_map_notify,
                        IOMMU_NOTIFIER_IOTLB_EVENTS,
                        section->offset_within_region,
                        int128_get64(end),
                        iommu_idx);
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->dev = v;

    ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL);
    if (ret) {
        g_free(iommu);
        return;
    }

    QLIST_INSERT_HEAD(&v->shared->iommu_list, iommu, iommu_next);
    memory_region_iommu_replay(iommu->iommu_mr, &iommu->n);

    return;
}

static void vhost_vdpa_iommu_region_del(MemoryListener *listener,
                                        MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);

    struct vdpa_iommu *iommu;

    QLIST_FOREACH(iommu, &v->shared->iommu_list, iommu_next)
    {
        if (MEMORY_REGION(iommu->iommu_mr) == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(section->mr, &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}

static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    DMAMap mem_region = {};
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;
    int page_size = qemu_target_page_size();
    int page_mask = -page_size;

    if (vhost_vdpa_listener_skipped_section(section,
                                            v->shared->iova_range.first,
                                            v->shared->iova_range.last,
                                            page_mask)) {
        return;
    }
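    /*
     * vIOMMU-backed sections are not mapped here; they are tracked through
     * a per-region IOMMU notifier and mapped from
     * vhost_vdpa_iommu_map_notify() as translations are created.
     */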
    if (memory_region_is_iommu(section->mr)) {
        vhost_vdpa_iommu_region_add(listener, section);
        return;
    }

    if (unlikely((section->offset_within_address_space & ~page_mask) !=
                 (section->offset_within_region & ~page_mask))) {
        trace_vhost_vdpa_listener_region_add_unaligned(v->shared,
                        section->mr->name,
                        section->offset_within_address_space & ~page_mask,
                        section->offset_within_region & ~page_mask);
        return;
    }

    iova = ROUND_UP(section->offset_within_address_space, page_size);
    llend = vhost_vdpa_section_end(section, page_mask);
    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v->shared, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));
    if (v->shared->shadow_data) {
        int r;

        mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr;
        mem_region.size = int128_get64(llsize) - 1;
        mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly);

        r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &mem_region);
        if (unlikely(r != IOVA_OK)) {
            error_report("Can't allocate a mapping (%d)", r);
            goto fail;
        }

        iova = mem_region.iova;
    }

    vhost_vdpa_iotlb_batch_begin_once(v->shared);
    ret = vhost_vdpa_dma_map(v->shared, VHOST_VDPA_GUEST_PA_ASID, iova,
                             int128_get64(llsize), vaddr, section->readonly);
    if (ret) {
        error_report("vhost vdpa map fail!");
        goto fail_map;
    }

    return;

fail_map:
    if (v->shared->shadow_data) {
        vhost_iova_tree_remove(v->shared->iova_tree, mem_region);
    }

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail. At runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;
}
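
/*
 * Rough picture of the shadow-data path above (illustrative): with
 * shadow_data enabled the device is not given guest physical addresses
 * directly. The listener first resolves the section to a host virtual
 * address (vaddr), then asks vhost_iova_tree_map_alloc() for a free
 * device IOVA, and only then maps that IOVA to vaddr with an
 * IOTLB_UPDATE. Without shadow_data the IOVA is simply the GPA.
 */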

static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;
    int page_size = qemu_target_page_size();
    int page_mask = -page_size;

    if (vhost_vdpa_listener_skipped_section(section,
                                            v->shared->iova_range.first,
                                            v->shared->iova_range.last,
                                            page_mask)) {
        return;
    }
    if (memory_region_is_iommu(section->mr)) {
        vhost_vdpa_iommu_region_del(listener, section);
    }

    if (unlikely((section->offset_within_address_space & ~page_mask) !=
                 (section->offset_within_region & ~page_mask))) {
        trace_vhost_vdpa_listener_region_del_unaligned(v->shared,
                        section->mr->name,
                        section->offset_within_address_space & ~page_mask,
                        section->offset_within_region & ~page_mask);
        return;
    }

    iova = ROUND_UP(section->offset_within_address_space, page_size);
    llend = vhost_vdpa_section_end(section, page_mask);

    trace_vhost_vdpa_listener_region_del(v->shared, iova,
        int128_get64(int128_sub(llend, int128_one())));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    if (v->shared->shadow_data) {
        const DMAMap *result;
        const void *vaddr = memory_region_get_ram_ptr(section->mr) +
                            section->offset_within_region +
                            (iova - section->offset_within_address_space);
        DMAMap mem_region = {
            .translated_addr = (hwaddr)(uintptr_t)vaddr,
            .size = int128_get64(llsize) - 1,
        };

        result = vhost_iova_tree_find_iova(v->shared->iova_tree, &mem_region);
        if (!result) {
            /* The memory listener map wasn't mapped */
            return;
        }
        iova = result->iova;
        vhost_iova_tree_remove(v->shared->iova_tree, *result);
    }
    vhost_vdpa_iotlb_batch_begin_once(v->shared);
    /*
     * The unmap ioctl doesn't accept a full 64-bit span, so split the
     * invalidation in two when that is the case.
     */
    if (int128_eq(llsize, int128_2_64())) {
        llsize = int128_rshift(llsize, 1);
        ret = vhost_vdpa_dma_unmap(v->shared, VHOST_VDPA_GUEST_PA_ASID, iova,
                                   int128_get64(llsize));

        if (ret) {
            error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
                         "0x%" HWADDR_PRIx ") = %d (%m)",
                         v, iova, int128_get64(llsize), ret);
        }
        iova += int128_get64(llsize);
    }
    ret = vhost_vdpa_dma_unmap(v->shared, VHOST_VDPA_GUEST_PA_ASID, iova,
                               int128_get64(llsize));

    if (ret) {
        error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
                     "0x%" HWADDR_PRIx ") = %d (%m)",
                     v, iova, int128_get64(llsize), ret);
    }

    memory_region_unref(section->mr);
}
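
/*
 * Worked example for the 2^64 split above (illustrative): removing a
 * section that spans the whole 64-bit space cannot be expressed as one
 * message because the size field would wrap to 0, so two invalidations
 * of 2^63 bytes are sent instead, at iova and at iova + 2^63, covering
 * the same range together.
 */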
/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental updating
 * of the mapping. So we cannot use the generic vhost memory listener, which
 * depends on addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .name = "vhost-vdpa",
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};

static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                           void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->shared->device_fd;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;
    int ret;

    trace_vhost_vdpa_add_status(dev, status);
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }
    if ((s & status) == status) {
        /* Don't set bits already set */
        return 0;
    }

    s |= status;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    if (!(s & status)) {
        return -EIO;
    }

    return 0;
}

int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range)
{
    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);

    return ret < 0 ? -errno : 0;
}
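
/*
 * Example (illustrative, error handling elided): querying the usable IOVA
 * window right after opening the vhost-vdpa character device, before any
 * mapping is attempted:
 *
 *     struct vhost_vdpa_iova_range range;
 *     if (vhost_vdpa_get_iova_range(fd, &range) == 0) {
 *         ... every mapping must satisfy range.first <= iova <= range.last
 *     }
 */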
/*
 * This function is used for requests that only need to be applied once.
 * Typically such a request occurs at the beginning of operation, before
 * setting up queues. It should not be used for requests that perform
 * operations until all queues are set, which would need to check
 * dev->vq_index_end instead.
 */
static bool vhost_vdpa_first_dev(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    return v->index == 0;
}

static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
                                       uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}

static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v)
{
    g_autoptr(GPtrArray) shadow_vqs = NULL;

    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
    for (unsigned n = 0; n < hdev->nvqs; ++n) {
        VhostShadowVirtqueue *svq;

        svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque);
        g_ptr_array_add(shadow_vqs, svq);
    }

    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
}

static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    struct vhost_vdpa *v = opaque;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, v->shared, opaque);
    int ret;

    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    vhost_vdpa_init_svq(dev, v);

    error_propagate(&dev->migration_blocker, v->migration_blocker);
    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    /*
     * If dev->shadow_vqs_enabled is set at initialization, the device has
     * been started with x-svq=on, so don't block migration.
     */
    if (dev->migration_blocker == NULL && !v->shadow_vqs_enabled) {
        /* We don't have dev->features yet */
        uint64_t features;
        ret = vhost_vdpa_get_dev_features(dev, &features);
        if (unlikely(ret)) {
            error_setg_errno(errp, -ret, "Could not get device features");
            return ret;
        }
        vhost_svq_valid_features(features, &dev->migration_blocker);
    }

    /*
     * Similar to VFIO, we end up pinning all guest memory and have to
     * disable discarding of RAM.
     */
    ret = ram_block_discard_disable(true);
    if (ret) {
        error_report("Cannot disable discarding of RAM");
        return ret;
    }

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size();
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size();
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->shared->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        object_unparent(OBJECT(&n->mr));
        munmap(addr, page_size);
        goto err;
    }
    n->addr = addr;

    return 0;

err:
    return -1;
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    /*
     * Pack all the changes to the memory regions in a single
     * transaction to avoid a few updates of the address space
     * topology.
     */
    memory_region_transaction_begin();

    for (i = dev->vq_index; i < dev->vq_index + n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }

    memory_region_transaction_commit();
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int i;

    if (v->shadow_vqs_enabled) {
        /* FIXME SVQ is not compatible with host notifiers mr */
        return;
    }

    /*
     * Pack all the changes to the memory regions in a single
     * transaction to avoid a few updates of the address space
     * topology.
     */
    memory_region_transaction_begin();

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index);
            break;
        }
    }

    memory_region_transaction_commit();
}

static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    size_t idx;

    for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
        vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
    }
    g_ptr_array_free(v->shadow_vqs, true);
}

static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    if (vhost_vdpa_first_dev(dev)) {
        ram_block_discard_disable(false);
    }

    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);
    vhost_vdpa_svq_cleanup(dev);

    dev->opaque = NULL;

    return 0;
}

static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -EINVAL;
    }

    return 0;
}

static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;

    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) {
            /*
             * QEMU is just trying to enable or disable logging. SVQ handles
             * this separately, so there is no need to forward it.
             */
            v->acked_features = features;
            return 0;
        }

        v->acked_features = features;

        /* We must not ack _F_LOG if SVQ is enabled */
        features &= ~BIT_ULL(VHOST_F_LOG_ALL);
    }

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }

    return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
}

static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
        0x1ULL << VHOST_BACKEND_F_IOTLB_ASID |
        0x1ULL << VHOST_BACKEND_F_SUSPEND;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return -EFAULT;
    }

    features &= f;

    if (vhost_vdpa_first_dev(dev)) {
        r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
        if (r) {
            return -EFAULT;
        }
    }

    dev->backend_cap = features;
    v->shared->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;
    uint8_t status = 0;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev);
    v->suspended = false;
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx);
    return idx;
}

int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx)
{
    struct vhost_dev *dev = v->dev;
    struct vhost_vring_state state = {
        .index = idx,
        .num = 1,
    };
    int r = vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);

    trace_vhost_vdpa_set_vring_ready(dev, idx, r);
    return r;
}

static int vhost_vdpa_set_config_call(struct vhost_dev *dev,
                                      int fd)
{
    trace_vhost_vdpa_set_config_call(dev, fd);
    return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, &fd);
}

static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG,
                          config);
    g_free(config);
    return ret;
}

static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
                                         struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
                                         struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
                                         struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
                                         struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);

    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}
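
/*
 * Notifier plumbing with SVQ enabled (sketch): the guest-facing kick and
 * call eventfds terminate in the shadow virtqueue, while the device only
 * ever sees the hdev_* notifiers created below:
 *
 *     guest kick  -> SVQ (svq kick fd) -> hdev_kick -> device
 *     device call -> hdev_call -> SVQ  -> guest call fd
 */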
/**
 * Set the shadow virtqueue descriptors to the device
 *
 * @dev: The vhost device model
 * @svq: The shadow virtqueue
 * @idx: The index of the virtqueue in the vhost device
 * @errp: Error
 *
 * Note that this function does not rewind the kick file descriptor if it
 * cannot set the call one.
 */
static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
                                  VhostShadowVirtqueue *svq, unsigned idx,
                                  Error **errp)
{
    struct vhost_vring_file file = {
        .index = dev->vq_index + idx,
    };
    const EventNotifier *event_notifier = &svq->hdev_kick;
    int r;

    r = event_notifier_init(&svq->hdev_kick, 0);
    if (r != 0) {
        error_setg_errno(errp, -r, "Couldn't create kick event notifier");
        goto err_init_hdev_kick;
    }

    r = event_notifier_init(&svq->hdev_call, 0);
    if (r != 0) {
        error_setg_errno(errp, -r, "Couldn't create call event notifier");
        goto err_init_hdev_call;
    }

    file.fd = event_notifier_get_fd(event_notifier);
    r = vhost_vdpa_set_vring_dev_kick(dev, &file);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Can't set device kick fd");
        goto err_init_set_dev_fd;
    }

    event_notifier = &svq->hdev_call;
    file.fd = event_notifier_get_fd(event_notifier);
    r = vhost_vdpa_set_vring_dev_call(dev, &file);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Can't set device call fd");
        goto err_init_set_dev_fd;
    }

    return 0;

err_init_set_dev_fd:
    event_notifier_set_handler(&svq->hdev_call, NULL);

err_init_hdev_call:
    event_notifier_cleanup(&svq->hdev_kick);

err_init_hdev_kick:
    return r;
}

/**
 * Unmap a SVQ area in the device
 */
static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
{
    const DMAMap needle = {
        .translated_addr = addr,
    };
    const DMAMap *result = vhost_iova_tree_find_iova(v->shared->iova_tree,
                                                     &needle);
    hwaddr size;
    int r;

    if (unlikely(!result)) {
        error_report("Unable to find SVQ address to unmap");
        return;
    }

    size = ROUND_UP(result->size, qemu_real_host_page_size());
    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, result->iova,
                             size);
    if (unlikely(r < 0)) {
        error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
        return;
    }

    vhost_iova_tree_remove(v->shared->iova_tree, *result);
}

static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
                                       const VhostShadowVirtqueue *svq)
{
    struct vhost_vdpa *v = dev->opaque;
    struct vhost_vring_addr svq_addr;

    vhost_svq_get_vring_addr(svq, &svq_addr);

    vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);

    vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
}

/**
 * Map the SVQ area in the device
 *
 * @v: Vhost-vdpa device
 * @needle: The area to search iova
 * @errp: Error pointer
 */
static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
                                    Error **errp)
{
    int r;

    r = vhost_iova_tree_map_alloc(v->shared->iova_tree, needle);
    if (unlikely(r != IOVA_OK)) {
        error_setg(errp, "Cannot allocate iova (%d)", r);
        return false;
    }

    r = vhost_vdpa_dma_map(v->shared, v->address_space_id, needle->iova,
                           needle->size + 1,
                           (void *)(uintptr_t)needle->translated_addr,
                           needle->perm == IOMMU_RO);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Cannot map region to device");
        vhost_iova_tree_remove(v->shared->iova_tree, *needle);
    }

    return r == 0;
}
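
/*
 * Illustrative layout of the two SVQ mappings created below, assuming a
 * split virtqueue: the "driver" area (descriptor table plus avail ring)
 * is mapped read-only for the device, and the "used" ring gets a separate
 * read-write mapping; each one lives at an IOVA allocated from the iova
 * tree rather than at its host virtual address.
 */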
/**
 * Map the shadow virtqueue rings in the device
 *
 * @dev: The vhost device
 * @svq: The shadow virtqueue
 * @addr: Assigned IOVA addresses
 * @errp: Error pointer
 */
static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
                                     const VhostShadowVirtqueue *svq,
                                     struct vhost_vring_addr *addr,
                                     Error **errp)
{
    ERRP_GUARD();
    DMAMap device_region, driver_region;
    struct vhost_vring_addr svq_addr;
    struct vhost_vdpa *v = dev->opaque;
    size_t device_size = vhost_svq_device_area_size(svq);
    size_t driver_size = vhost_svq_driver_area_size(svq);
    size_t avail_offset;
    bool ok;

    vhost_svq_get_vring_addr(svq, &svq_addr);

    driver_region = (DMAMap) {
        .translated_addr = svq_addr.desc_user_addr,
        .size = driver_size - 1,
        .perm = IOMMU_RO,
    };
    ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp);
    if (unlikely(!ok)) {
        error_prepend(errp, "Cannot create vq driver region: ");
        return false;
    }
    addr->desc_user_addr = driver_region.iova;
    avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
    addr->avail_user_addr = driver_region.iova + avail_offset;

    device_region = (DMAMap) {
        .translated_addr = svq_addr.used_user_addr,
        .size = device_size - 1,
        .perm = IOMMU_RW,
    };
    ok = vhost_vdpa_svq_map_ring(v, &device_region, errp);
    if (unlikely(!ok)) {
        error_prepend(errp, "Cannot create vq device region: ");
        vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
    }
    addr->used_user_addr = device_region.iova;

    return ok;
}

static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
                                 VhostShadowVirtqueue *svq, unsigned idx,
                                 Error **errp)
{
    uint16_t vq_index = dev->vq_index + idx;
    struct vhost_vring_state s = {
        .index = vq_index,
    };
    int r;

    r = vhost_vdpa_set_dev_vring_base(dev, &s);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set vring base");
        return false;
    }

    r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp);
    return r == 0;
}

static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    Error *err = NULL;
    unsigned i;

    if (!v->shadow_vqs_enabled) {
        return true;
    }

    for (i = 0; i < v->shadow_vqs->len; ++i) {
        VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
        struct vhost_vring_addr addr = {
            .index = dev->vq_index + i,
        };
        int r;
        bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
        if (unlikely(!ok)) {
            goto err;
        }

        vhost_svq_start(svq, dev->vdev, vq, v->shared->iova_tree);
        ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
        if (unlikely(!ok)) {
            goto err_map;
        }

        /* Override vring GPA set by vhost subsystem */
        r = vhost_vdpa_set_vring_dev_addr(dev, &addr);
        if (unlikely(r != 0)) {
            error_setg_errno(&err, -r, "Cannot set device address");
            goto err_set_addr;
        }
    }

    return true;

err_set_addr:
    vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i));

err_map:
    vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i));

err:
    error_reportf_err(err, "Cannot setup SVQ %u: ", i);
    for (unsigned j = 0; j < i; ++j) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);
        vhost_vdpa_svq_unmap_rings(dev, svq);
        vhost_svq_stop(svq);
    }

    return false;
}

static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    if (!v->shadow_vqs_enabled) {
        return;
    }

    for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);

        vhost_svq_stop(svq);
        vhost_vdpa_svq_unmap_rings(dev, svq);

        event_notifier_cleanup(&svq->hdev_kick);
        event_notifier_cleanup(&svq->hdev_call);
    }
}

static void vhost_vdpa_suspend(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int r;

    if (!vhost_vdpa_first_dev(dev)) {
        return;
    }

    if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) {
        trace_vhost_vdpa_suspend(dev);
        r = ioctl(v->shared->device_fd, VHOST_VDPA_SUSPEND);
        if (unlikely(r)) {
            error_report("Cannot suspend: %s (%d)", g_strerror(errno), errno);
        } else {
            v->suspended = true;
            return;
        }
    }

    vhost_vdpa_reset_device(dev);
}

static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;
    bool ok;
    trace_vhost_vdpa_dev_start(dev, started);

    if (started) {
        vhost_vdpa_host_notifiers_init(dev);
        ok = vhost_vdpa_svqs_start(dev);
        if (unlikely(!ok)) {
            return -1;
        }
    } else {
        vhost_vdpa_suspend(dev);
        vhost_vdpa_svqs_stop(dev);
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    }

    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
        return 0;
    }

    if (started) {
        if (vhost_dev_has_iommu(dev) && (v->shadow_vqs_enabled)) {
            error_report("SVQ can not work with vIOMMU enabled, please "
                         "disable the IOMMU and try again");
            return -1;
        }
        memory_listener_register(&v->listener, dev->vdev->dma_as);

        return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
    }

    return 0;
}

static void vhost_vdpa_reset_status(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
        return;
    }

    vhost_vdpa_reset_device(dev);
    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);
    memory_listener_unregister(&v->listener);
}

static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    struct vhost_vdpa *v = dev->opaque;
    if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    struct vhost_vdpa *v = dev->opaque;

    if (v->shadow_vqs_enabled) {
        /*
         * Device vring addr was set at device start. SVQ base is handled by
         * VirtQueue code.
         */
        return 0;
    }

    return vhost_vdpa_set_vring_dev_addr(dev, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    struct vhost_vdpa *v = dev->opaque;

    if (v->shadow_vqs_enabled) {
        /*
         * Device vring base was set at device start. SVQ base is handled by
         * VirtQueue code.
         */
        return 0;
    }

    return vhost_vdpa_set_dev_vring_base(dev, ring);
}

static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;

    if (v->shadow_vqs_enabled) {
        ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index);
        return 0;
    }

    if (!v->suspended) {
        /*
         * Cannot trust the value returned by the device; let vhost recover
         * the used idx from the guest.
         */
        return -1;
    }

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    struct vhost_vdpa *v = dev->opaque;
    int vdpa_idx = file->index - dev->vq_index;

    if (v->shadow_vqs_enabled) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
        vhost_svq_set_svq_kick_fd(svq, file->fd);
        return 0;
    } else {
        return vhost_vdpa_set_vring_dev_kick(dev, file);
    }
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    struct vhost_vdpa *v = dev->opaque;
    int vdpa_idx = file->index - dev->vq_index;
    VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);

    /* Remember the last call fd because we can switch to SVQ anytime. */
    vhost_svq_set_svq_call_fd(svq, file->fd);
    if (v->shadow_vqs_enabled) {
        return 0;
    }

    return vhost_vdpa_set_vring_dev_call(dev, file);
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    int ret = vhost_vdpa_get_dev_features(dev, features);

    if (ret == 0) {
        /* Add SVQ logging capabilities */
        *features |= BIT_ULL(VHOST_F_LOG_ALL);
    }

    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                    struct vhost_vring_addr *addr, struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}

const VhostOps vdpa_ops = {
    .backend_type = VHOST_BACKEND_TYPE_VDPA,
    .vhost_backend_init = vhost_vdpa_init,
    .vhost_backend_cleanup = vhost_vdpa_cleanup,
    .vhost_set_log_base = vhost_vdpa_set_log_base,
    .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
    .vhost_set_vring_num = vhost_vdpa_set_vring_num,
    .vhost_set_vring_base = vhost_vdpa_set_vring_base,
    .vhost_get_vring_base = vhost_vdpa_get_vring_base,
    .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
    .vhost_set_vring_call = vhost_vdpa_set_vring_call,
    .vhost_get_features = vhost_vdpa_get_features,
    .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
    .vhost_set_owner = vhost_vdpa_set_owner,
    .vhost_set_vring_endian = NULL,
    .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
    .vhost_set_mem_table = vhost_vdpa_set_mem_table,
    .vhost_set_features = vhost_vdpa_set_features,
    .vhost_reset_device = vhost_vdpa_reset_device,
    .vhost_get_vq_index = vhost_vdpa_get_vq_index,
    .vhost_get_config = vhost_vdpa_get_config,
    .vhost_set_config = vhost_vdpa_set_config,
    .vhost_requires_shm_log = NULL,
    .vhost_migration_done = NULL,
    .vhost_net_set_mtu = NULL,
    .vhost_set_iotlb_callback = NULL,
    .vhost_send_device_iotlb_msg = NULL,
    .vhost_dev_start = vhost_vdpa_dev_start,
    .vhost_get_device_id = vhost_vdpa_get_device_id,
    .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
    .vhost_force_iommu = vhost_vdpa_force_iommu,
    .vhost_set_config_call = vhost_vdpa_set_config_call,
    .vhost_reset_status = vhost_vdpa_reset_status,
};