/*
 * vhost-vdpa
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "migration/blocker.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qapi/error.h"

/*
 * Return one past the end of the section. Be careful with uint64_t
 * conversions!
 */
static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
{
    Int128 llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    return llend;
}

static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
                                                uint64_t iova_min,
                                                uint64_t iova_max)
{
    Int128 llend;

    if ((!memory_region_is_ram(section->mr) &&
         !memory_region_is_iommu(section->mr)) ||
        memory_region_is_protected(section->mr) ||
        /* vhost-vDPA doesn't allow MMIO to be mapped */
        memory_region_is_ram_device(section->mr)) {
        return true;
    }

    if (section->offset_within_address_space < iova_min) {
        error_report("RAM section out of device range (min=0x%" PRIx64
                     ", addr=0x%" HWADDR_PRIx ")",
                     iova_min, section->offset_within_address_space);
        return true;
    }

    llend = vhost_vdpa_section_end(section);
    if (int128_gt(llend, int128_make64(iova_max))) {
        error_report("RAM section out of device range (max=0x%" PRIx64
                     ", end addr=0x%" PRIx64 ")",
                     iova_max, int128_get64(llend));
        return true;
    }

    return false;
}
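
/*
 * DMA mappings are added and removed by writing vhost_msg_v2 IOTLB
 * messages (VHOST_IOTLB_UPDATE / VHOST_IOTLB_INVALIDATE) to the
 * vhost-vdpa device fd, not by issuing ioctls.
 */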

/*
 * The caller must set asid = 0 if the device does not support asid.
 * This is not an ABI break since it is set to 0 by the initializer anyway.
 */
int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                       hwaddr size, void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.asid = asid;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.asid, msg.iotlb.iova,
                             msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
                             msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

/*
 * The caller must set asid = 0 if the device does not support asid.
 * This is not an ABI break since it is set to 0 by the initializer anyway.
 */
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                         hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.asid = asid;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.asid, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
{
    int fd = v->device_fd;
    struct vhost_msg_v2 msg = {
        .type = v->msg_type,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
    };

    trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type);
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
{
    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
        !v->iotlb_batch_begin_sent) {
        vhost_vdpa_listener_begin_batch(v);
    }

    v->iotlb_batch_begin_sent = true;
}

static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    if (!v->iotlb_batch_begin_sent) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.iotlb.type);
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }

    v->iotlb_batch_begin_sent = false;
}
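
/*
 * Memory listener callbacks: RAM sections added to or removed from the
 * address space are mapped into or unmapped from the device IOTLB.  When
 * shadow data is in use (v->shadow_data), the IOVA is allocated from the
 * IOVA tree instead of reusing the guest physical address.
 */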

static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    DMAMap mem_region = {};
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);
    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));
    if (v->shadow_data) {
        int r;

        mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr;
        mem_region.size = int128_get64(llsize) - 1;
        mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly);

        r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region);
        if (unlikely(r != IOVA_OK)) {
            error_report("Can't allocate a mapping (%d)", r);
            goto fail;
        }

        iova = mem_region.iova;
    }

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                             int128_get64(llsize), vaddr, section->readonly);
    if (ret) {
        error_report("vhost-vdpa: DMA map failed");
        goto fail_map;
    }

    return;

fail_map:
    if (v->shadow_data) {
        vhost_iova_tree_remove(v->iova_tree, mem_region);
    }

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail. At runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;
}

static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);

    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    if (v->shadow_data) {
        const DMAMap *result;
        const void *vaddr = memory_region_get_ram_ptr(section->mr) +
                            section->offset_within_region +
                            (iova - section->offset_within_address_space);
        DMAMap mem_region = {
            .translated_addr = (hwaddr)(uintptr_t)vaddr,
            .size = int128_get64(llsize) - 1,
        };

        result = vhost_iova_tree_find_iova(v->iova_tree, &mem_region);
        if (!result) {
            /* The memory listener map wasn't mapped */
            return;
        }
        iova = result->iova;
        vhost_iova_tree_remove(v->iova_tree, *result);
    }
    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                               int128_get64(llsize));
    if (ret) {
        error_report("vhost-vdpa: DMA unmap failed");
    }

    memory_region_unref(section->mr);
}
/*
 * The IOTLB API used by vhost-vdpa requires incremental updating of the
 * mappings, so we cannot use the generic vhost memory listener, which
 * depends on the addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .name = "vhost-vdpa",
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};
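
/* Forward a vhost request as an ioctl on the vdpa device fd, returning -errno on failure. */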

static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                           void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;
    int ret;

    trace_vhost_vdpa_add_status(dev, status);
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    s |= status;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    if (!(s & status)) {
        return -EIO;
    }

    return 0;
}

/*
 * This function is used for requests that only need to be applied once.
 * Typically such requests occur at the beginning of operation, before
 * setting up queues. It should not be used for requests that perform
 * operations after all queues are set, which would need to check
 * dev->vq_index_end instead.
 */
static bool vhost_vdpa_first_dev(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    return v->index == 0;
}

static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
                                       uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}

static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v)
{
    g_autoptr(GPtrArray) shadow_vqs = NULL;

    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
    for (unsigned n = 0; n < hdev->nvqs; ++n) {
        VhostShadowVirtqueue *svq;

        svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque);
        g_ptr_array_add(shadow_vqs, svq);
    }

    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
}
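
/*
 * Backend initialization: disable discarding of RAM (all guest memory ends
 * up pinned), set up the memory listener and the IOTLB message type,
 * allocate the shadow virtqueues, and let the first device set the
 * ACKNOWLEDGE and DRIVER status bits.
 */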

static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);
    int ret;

    /*
     * Similar to VFIO, we end up pinning all guest memory and have to
     * disable discarding of RAM.
     */
    ret = ram_block_discard_disable(true);
    if (ret) {
        error_report("Cannot disable discarding of RAM");
        return ret;
    }

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;
    vhost_vdpa_init_svq(dev, v);

    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size();
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size();
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        object_unparent(OBJECT(&n->mr));
        munmap(addr, page_size);
        goto err;
    }
    n->addr = addr;

    return 0;

err:
    return -1;
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    for (i = dev->vq_index; i < dev->vq_index + n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int i;

    if (v->shadow_vqs_enabled) {
        /* FIXME SVQ is not compatible with host notifiers mr */
        return;
    }

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            goto err;
        }
    }

    return;

err:
    vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index);
    return;
}

static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    size_t idx;

    for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
        vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
    }
    g_ptr_array_free(v->shadow_vqs, true);
}

static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);
    vhost_vdpa_svq_cleanup(dev);

    dev->opaque = NULL;
    ram_block_discard_disable(false);

    return 0;
}
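
/*
 * vhost-vdpa does not consume the memory table: mappings are built through
 * the IOTLB API by the memory listener, so the memslot limit is effectively
 * unbounded.
 */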

static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -EINVAL;
    }

    return 0;
}

static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;

    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) {
            /*
             * QEMU is just trying to enable or disable logging. SVQ handles
             * this separately, so no need to forward this.
             */
            v->acked_features = features;
            return 0;
        }

        v->acked_features = features;

        /* We must not ack _F_LOG if SVQ is enabled */
        features &= ~BIT_ULL(VHOST_F_LOG_ALL);
    }

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }

    return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
}

static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
        0x1ULL << VHOST_BACKEND_F_IOTLB_ASID;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return -EFAULT;
    }

    features &= f;

    if (vhost_vdpa_first_dev(dev)) {
        r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
        if (r) {
            return -EFAULT;
        }
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

static void vhost_vdpa_reset_svq(struct vhost_vdpa *v)
{
    if (!v->shadow_vqs_enabled) {
        return;
    }

    for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
        vhost_svq_stop(svq);
    }
}

static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;
    uint8_t status = 0;

    vhost_vdpa_reset_svq(v);

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx);
    return idx;
}
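
/* Enable every virtqueue owned by this vhost_dev with VHOST_VDPA_SET_VRING_ENABLE. */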

static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;
    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}

static int vhost_vdpa_set_config_call(struct vhost_dev *dev,
                                      int fd)
{
    trace_vhost_vdpa_set_config_call(dev, fd);
    return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, &fd);
}

static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}

static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
                                         struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
                                         struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
                                         struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
                                         struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);

    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}

/**
 * Set the shadow virtqueue descriptors to the device
 *
 * @dev: The vhost device model
 * @svq: The shadow virtqueue
 * @idx: The index of the virtqueue in the vhost device
 * @errp: Error pointer
 *
 * Note that this function does not rewind the kick file descriptor if it
 * cannot set the call one.
 */
static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
                                  VhostShadowVirtqueue *svq, unsigned idx,
                                  Error **errp)
{
    struct vhost_vring_file file = {
        .index = dev->vq_index + idx,
    };
    const EventNotifier *event_notifier = &svq->hdev_kick;
    int r;

    r = event_notifier_init(&svq->hdev_kick, 0);
    if (r != 0) {
        error_setg_errno(errp, -r, "Couldn't create kick event notifier");
        goto err_init_hdev_kick;
    }

    r = event_notifier_init(&svq->hdev_call, 0);
    if (r != 0) {
        error_setg_errno(errp, -r, "Couldn't create call event notifier");
        goto err_init_hdev_call;
    }

    file.fd = event_notifier_get_fd(event_notifier);
    r = vhost_vdpa_set_vring_dev_kick(dev, &file);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Can't set device kick fd");
        goto err_init_set_dev_fd;
    }

    event_notifier = &svq->hdev_call;
    file.fd = event_notifier_get_fd(event_notifier);
    r = vhost_vdpa_set_vring_dev_call(dev, &file);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Can't set device call fd");
        goto err_init_set_dev_fd;
    }

    return 0;

err_init_set_dev_fd:
    event_notifier_set_handler(&svq->hdev_call, NULL);

err_init_hdev_call:
    event_notifier_cleanup(&svq->hdev_kick);

err_init_hdev_kick:
    return r;
}

/**
 * Unmap a SVQ area in the device
 */
static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
{
    const DMAMap needle = {
        .translated_addr = addr,
    };
    const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, &needle);
    hwaddr size;
    int r;

    if (unlikely(!result)) {
        error_report("Unable to find SVQ address to unmap");
        return;
    }

    size = ROUND_UP(result->size, qemu_real_host_page_size());
    r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size);
    if (unlikely(r < 0)) {
        error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
        return;
    }

    vhost_iova_tree_remove(v->iova_tree, *result);
}

static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
                                       const VhostShadowVirtqueue *svq)
{
    struct vhost_vdpa *v = dev->opaque;
    struct vhost_vring_addr svq_addr;

    vhost_svq_get_vring_addr(svq, &svq_addr);

    vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);

    vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
}

/**
 * Map the SVQ area in the device
 *
 * @v: Vhost-vdpa device
 * @needle: The area to map; on success the allocated iova is stored in it
 * @errp: Error pointer
 */
static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
                                    Error **errp)
{
    int r;

    r = vhost_iova_tree_map_alloc(v->iova_tree, needle);
    if (unlikely(r != IOVA_OK)) {
        error_setg(errp, "Cannot allocate iova (%d)", r);
        return false;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova,
                           needle->size + 1,
                           (void *)(uintptr_t)needle->translated_addr,
                           needle->perm == IOMMU_RO);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Cannot map region to device");
        vhost_iova_tree_remove(v->iova_tree, *needle);
    }

    return r == 0;
}

/**
 * Map the shadow virtqueue rings in the device
 *
 * @dev: The vhost device
 * @svq: The shadow virtqueue
 * @addr: Assigned IOVA addresses
 * @errp: Error pointer
 */
static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
                                     const VhostShadowVirtqueue *svq,
                                     struct vhost_vring_addr *addr,
                                     Error **errp)
{
    ERRP_GUARD();
    DMAMap device_region, driver_region;
    struct vhost_vring_addr svq_addr;
    struct vhost_vdpa *v = dev->opaque;
    size_t device_size = vhost_svq_device_area_size(svq);
    size_t driver_size = vhost_svq_driver_area_size(svq);
    size_t avail_offset;
    bool ok;

    vhost_svq_get_vring_addr(svq, &svq_addr);

    driver_region = (DMAMap) {
        .translated_addr = svq_addr.desc_user_addr,
        .size = driver_size - 1,
        .perm = IOMMU_RO,
    };
    ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp);
    if (unlikely(!ok)) {
        error_prepend(errp, "Cannot create vq driver region: ");
        return false;
    }
    addr->desc_user_addr = driver_region.iova;
    avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
    addr->avail_user_addr = driver_region.iova + avail_offset;

    device_region = (DMAMap) {
        .translated_addr = svq_addr.used_user_addr,
        .size = device_size - 1,
        .perm = IOMMU_RW,
    };
    ok = vhost_vdpa_svq_map_ring(v, &device_region, errp);
    if (unlikely(!ok)) {
        error_prepend(errp, "Cannot create vq device region: ");
        vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
    }
    addr->used_user_addr = device_region.iova;

    return ok;
}

static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
                                 VhostShadowVirtqueue *svq, unsigned idx,
                                 Error **errp)
{
    uint16_t vq_index = dev->vq_index + idx;
    struct vhost_vring_state s = {
        .index = vq_index,
    };
    int r;

    r = vhost_vdpa_set_dev_vring_base(dev, &s);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set vring base");
        return false;
    }

    r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp);
    return r == 0;
}

static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    Error *err = NULL;
    unsigned i;

    if (!v->shadow_vqs_enabled) {
        return true;
    }

    for (i = 0; i < v->shadow_vqs->len; ++i) {
        VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
        struct vhost_vring_addr addr = {
            .index = dev->vq_index + i,
        };
        int r;
        bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
        if (unlikely(!ok)) {
            goto err;
        }

        vhost_svq_start(svq, dev->vdev, vq, v->iova_tree);
        ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
        if (unlikely(!ok)) {
            goto err_map;
        }

        /* Override vring GPA set by vhost subsystem */
        r = vhost_vdpa_set_vring_dev_addr(dev, &addr);
        if (unlikely(r != 0)) {
            error_setg_errno(&err, -r, "Cannot set device address");
            goto err_set_addr;
        }
    }

    return true;

err_set_addr:
    vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i));

err_map:
    vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i));

err:
    error_reportf_err(err, "Cannot setup SVQ %u: ", i);
    for (unsigned j = 0; j < i; ++j) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);
        vhost_vdpa_svq_unmap_rings(dev, svq);
        vhost_svq_stop(svq);
    }

    return false;
}
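
/*
 * Undo vhost_vdpa_svqs_start(): unmap the shadow virtqueue rings and clean
 * up the kick/call event notifiers.
 */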

static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    if (!v->shadow_vqs_enabled) {
        return;
    }

    for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
        vhost_vdpa_svq_unmap_rings(dev, svq);

        event_notifier_cleanup(&svq->hdev_kick);
        event_notifier_cleanup(&svq->hdev_call);
    }
}

static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;
    bool ok;
    trace_vhost_vdpa_dev_start(dev, started);

    if (started) {
        vhost_vdpa_host_notifiers_init(dev);
        ok = vhost_vdpa_svqs_start(dev);
        if (unlikely(!ok)) {
            return -1;
        }
        vhost_vdpa_set_vring_ready(dev);
    } else {
        vhost_vdpa_svqs_stop(dev);
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    }

    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
        return 0;
    }

    if (started) {
        memory_listener_register(&v->listener, &address_space_memory);
        return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
    } else {
        vhost_vdpa_reset_device(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                   VIRTIO_CONFIG_S_DRIVER);
        memory_listener_unregister(&v->listener);

        return 0;
    }
}

static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    struct vhost_vdpa *v = dev->opaque;
    if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    struct vhost_vdpa *v = dev->opaque;

    if (v->shadow_vqs_enabled) {
        /*
         * Device vring addr was set at device start. SVQ base is handled by
         * VirtQueue code.
         */
        return 0;
    }

    return vhost_vdpa_set_vring_dev_addr(dev, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    struct vhost_vdpa *v = dev->opaque;
    VirtQueue *vq = virtio_get_queue(dev->vdev, ring->index);

    /*
     * vhost-vdpa devices do not support in-flight requests. Set all of them
     * as available.
     *
     * TODO: This is ok for networking, but other kinds of devices might
     * have problems with these retransmissions.
     */
    while (virtqueue_rewind(vq, 1)) {
        continue;
    }
    if (v->shadow_vqs_enabled) {
        /*
         * Device vring base was set at device start. SVQ base is handled by
         * VirtQueue code.
         */
        return 0;
    }

    return vhost_vdpa_set_dev_vring_base(dev, ring);
}

static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;

    if (v->shadow_vqs_enabled) {
        ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index);
        return 0;
    }

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    struct vhost_vdpa *v = dev->opaque;
    int vdpa_idx = file->index - dev->vq_index;

    if (v->shadow_vqs_enabled) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
        vhost_svq_set_svq_kick_fd(svq, file->fd);
        return 0;
    } else {
        return vhost_vdpa_set_vring_dev_kick(dev, file);
    }
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    struct vhost_vdpa *v = dev->opaque;

    if (v->shadow_vqs_enabled) {
        int vdpa_idx = file->index - dev->vq_index;
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);

        vhost_svq_set_svq_call_fd(svq, file->fd);
        return 0;
    } else {
        return vhost_vdpa_set_vring_dev_call(dev, file);
    }
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret = vhost_vdpa_get_dev_features(dev, features);

    if (ret == 0 && v->shadow_vqs_enabled) {
        /* Add SVQ logging capabilities */
        *features |= BIT_ULL(VHOST_F_LOG_ALL);
    }

    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                                  struct vhost_vring_addr *addr,
                                  struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}
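
/* Backend operations registered with the generic vhost layer. */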

const VhostOps vdpa_ops = {
    .backend_type = VHOST_BACKEND_TYPE_VDPA,
    .vhost_backend_init = vhost_vdpa_init,
    .vhost_backend_cleanup = vhost_vdpa_cleanup,
    .vhost_set_log_base = vhost_vdpa_set_log_base,
    .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
    .vhost_set_vring_num = vhost_vdpa_set_vring_num,
    .vhost_set_vring_base = vhost_vdpa_set_vring_base,
    .vhost_get_vring_base = vhost_vdpa_get_vring_base,
    .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
    .vhost_set_vring_call = vhost_vdpa_set_vring_call,
    .vhost_get_features = vhost_vdpa_get_features,
    .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
    .vhost_set_owner = vhost_vdpa_set_owner,
    .vhost_set_vring_endian = NULL,
    .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
    .vhost_set_mem_table = vhost_vdpa_set_mem_table,
    .vhost_set_features = vhost_vdpa_set_features,
    .vhost_reset_device = vhost_vdpa_reset_device,
    .vhost_get_vq_index = vhost_vdpa_get_vq_index,
    .vhost_get_config = vhost_vdpa_get_config,
    .vhost_set_config = vhost_vdpa_set_config,
    .vhost_requires_shm_log = NULL,
    .vhost_migration_done = NULL,
    .vhost_backend_can_merge = NULL,
    .vhost_net_set_mtu = NULL,
    .vhost_set_iotlb_callback = NULL,
    .vhost_send_device_iotlb_msg = NULL,
    .vhost_dev_start = vhost_vdpa_dev_start,
    .vhost_get_device_id = vhost_vdpa_get_device_id,
    .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
    .vhost_force_iommu = vhost_vdpa_force_iommu,
    .vhost_set_config_call = vhost_vdpa_set_config_call,
};