/*
 * vhost-vdpa
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qemu-common.h"

/*
 * Return one past the end of the section. Be careful with uint64_t
 * conversions!
 */
static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
{
    Int128 llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    return llend;
}

static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
                                                uint64_t iova_min,
                                                uint64_t iova_max)
{
    Int128 llend;

    if ((!memory_region_is_ram(section->mr) &&
         !memory_region_is_iommu(section->mr)) ||
        memory_region_is_protected(section->mr) ||
        /* vhost-vDPA doesn't allow MMIO to be mapped */
        memory_region_is_ram_device(section->mr)) {
        return true;
    }

    if (section->offset_within_address_space < iova_min) {
        error_report("RAM section out of device range (min=0x%" PRIx64
                     ", addr=0x%" HWADDR_PRIx ")",
                     iova_min, section->offset_within_address_space);
        return true;
    }

    llend = vhost_vdpa_section_end(section);
    if (int128_gt(llend, int128_make64(iova_max))) {
        error_report("RAM section out of device range (max=0x%" PRIx64
                     ", end addr=0x%" PRIx64 ")",
                     iova_max, int128_get64(llend));
        return true;
    }

    return false;
}

static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
                              void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}
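/*
 * Illustrative sketch (editor's note, not part of the original source):
 * mapping a 4 KiB host buffer writable for the device at IOVA 0x100000
 * boils down to a single VHOST_IOTLB_UPDATE message written to the
 * vhost-vdpa device fd:
 *
 *     ret = vhost_vdpa_dma_map(v, 0x100000, 0x1000, buf, false);
 *
 * The addresses here are hypothetical; real callers derive iova and
 * vaddr from a MemoryRegionSection, as in the listener callbacks below.
 */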
static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
                                hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
{
    int fd = v->device_fd;
    struct vhost_msg_v2 msg = {
        .type = v->msg_type,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
    };

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
{
    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
        !v->iotlb_batch_begin_sent) {
        vhost_vdpa_listener_begin_batch(v);
    }

    v->iotlb_batch_begin_sent = true;
}

static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    if (!v->iotlb_batch_begin_sent) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }

    v->iotlb_batch_begin_sent = false;
}
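/*
 * Illustrative sketch (editor's note, not part of the original source):
 * with VHOST_BACKEND_F_IOTLB_BATCH negotiated, one memory-map
 * transaction becomes a single batched message sequence on the fd:
 *
 *     VHOST_IOTLB_BATCH_BEGIN
 *     VHOST_IOTLB_UPDATE      (one per region added)
 *     VHOST_IOTLB_INVALIDATE  (one per region removed)
 *     VHOST_IOTLB_BATCH_END   (sent from the commit callback)
 *
 * so the kernel can process the whole set of changes together instead
 * of reacting to each update individually.
 */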
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);
    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
                             vaddr, section->readonly);
    if (ret) {
        error_report("vhost vdpa map fail!");
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail. At runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;
}
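/*
 * Worked example (editor's note; illustrative, hypothetical numbers):
 * for a section whose RAM block starts at host address 0x7f0000000000,
 * with offset_within_region = 0x2000 and offset_within_address_space =
 * 0x100000, the map request uses iova = 0x100000 and
 *
 *     vaddr = ram_ptr + 0x2000 + (0x100000 - 0x100000)
 *           = 0x7f0000002000
 *
 * i.e. guest-physical addresses are used as IOVAs one-to-one, and the
 * device translates them to the matching host virtual addresses.
 */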
static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);

    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
    if (ret) {
        error_report("vhost_vdpa dma unmap error!");
    }

    memory_region_unref(section->mr);
}

/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental
 * updating of the mapping, so we cannot use the generic vhost memory
 * listener, which depends on addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .name = "vhost-vdpa",
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};

static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                           void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;

    trace_vhost_vdpa_add_status(dev, status);
    if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
        return;
    }

    s |= status;

    vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
}
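/*
 * Illustrative note (editor's note, not part of the original source):
 * status bits accumulate across the virtio handshake. This backend
 * drives the device through, in order:
 *
 *     ACKNOWLEDGE | DRIVER      (vhost_vdpa_init)
 *     FEATURES_OK               (vhost_vdpa_set_features)
 *     DRIVER_OK                 (vhost_vdpa_dev_start)
 *
 * vhost_vdpa_add_status() ORs new bits into the current status rather
 * than overwriting it, which is why it does a GET before the SET.
 */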
static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
{
    int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
                              &v->iova_range);
    if (ret != 0) {
        v->iova_range.first = 0;
        v->iova_range.last = UINT64_MAX;
    }

    trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
                                    v->iova_range.last);
}

static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;

    vhost_vdpa_get_iova_range(v);
    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }
}

static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        munmap(addr, page_size);
        goto err;
    }
    n->addr = addr;

    return 0;

err:
    return -1;
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    int i;

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            goto err;
        }
    }

    return;

err:
    vhost_vdpa_host_notifiers_uninit(dev, i);
    return;
}
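/*
 * Illustrative note (editor's note, not part of the original source):
 * the host notifier for queue N is the device's doorbell page, which
 * the vhost-vdpa fd exposes at file offset N * page_size, following
 * the mmap() convention above. Mapping it write-only and wiring it
 * into the guest as a ram-device region lets the guest kick the
 * device with a plain MMIO write; if the mapping or the memory-region
 * hand-off fails, the generic virtio code keeps using eventfd-based
 * kicks instead.
 */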
static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);

    dev->opaque = NULL;
    return 0;
}

static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -1;
    }

    return 0;
}

static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    uint8_t status = 0;
    int ret;

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }
    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
    vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

    return !(status & VIRTIO_CONFIG_S_FEATURES_OK);
}

static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return -EFAULT;
    }

    features &= f;
    r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
    if (r) {
        return -EFAULT;
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    int ret;
    uint8_t status = 0;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx - dev->vq_index);
    return idx - dev->vq_index;
}

static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;
    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}

static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}

static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;
    trace_vhost_vdpa_dev_start(dev, started);
    if (started) {
        uint8_t status = 0;
        memory_listener_register(&v->listener, &address_space_memory);
        vhost_vdpa_host_notifiers_init(dev);
        vhost_vdpa_set_vring_ready(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
        vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

        return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
    } else {
        vhost_vdpa_reset_device(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                   VIRTIO_CONFIG_S_DRIVER);
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
        memory_listener_unregister(&v->listener);

        return 0;
    }
}
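/*
 * Illustrative note (editor's note, not part of the original source):
 * start and stop are deliberately asymmetric. Starting registers the
 * memory listener (which replays the current memory map as DMA
 * mappings), maps the doorbell pages, enables the rings, and only then
 * sets DRIVER_OK, returning non-zero if the device does not accept the
 * bit. Stopping resets the device (clearing the whole status byte) and
 * re-asserts ACKNOWLEDGE | DRIVER so a later restart can renegotiate
 * features from a clean state.
 */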
static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                                  struct vhost_vring_addr *addr,
                                  struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}

const VhostOps vdpa_ops = {
    .backend_type = VHOST_BACKEND_TYPE_VDPA,
    .vhost_backend_init = vhost_vdpa_init,
    .vhost_backend_cleanup = vhost_vdpa_cleanup,
    .vhost_set_log_base = vhost_vdpa_set_log_base,
    .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
    .vhost_set_vring_num = vhost_vdpa_set_vring_num,
    .vhost_set_vring_base = vhost_vdpa_set_vring_base,
    .vhost_get_vring_base = vhost_vdpa_get_vring_base,
    .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
    .vhost_set_vring_call = vhost_vdpa_set_vring_call,
    .vhost_get_features = vhost_vdpa_get_features,
    .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
    .vhost_set_owner = vhost_vdpa_set_owner,
    .vhost_set_vring_endian = NULL,
    .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
    .vhost_set_mem_table = vhost_vdpa_set_mem_table,
    .vhost_set_features = vhost_vdpa_set_features,
    .vhost_reset_device = vhost_vdpa_reset_device,
    .vhost_get_vq_index = vhost_vdpa_get_vq_index,
    .vhost_get_config = vhost_vdpa_get_config,
    .vhost_set_config = vhost_vdpa_set_config,
    .vhost_requires_shm_log = NULL,
    .vhost_migration_done = NULL,
    .vhost_backend_can_merge = NULL,
    .vhost_net_set_mtu = NULL,
    .vhost_set_iotlb_callback = NULL,
    .vhost_send_device_iotlb_msg = NULL,
    .vhost_dev_start = vhost_vdpa_dev_start,
    .vhost_get_device_id = vhost_vdpa_get_device_id,
    .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
    .vhost_force_iommu = vhost_vdpa_force_iommu,
};
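/*
 * Illustrative note (editor's note, not part of the original source):
 * the NULL entries in vdpa_ops mark operations this backend does not
 * implement (shared-memory dirty logging, per-vring endianness, IOTLB
 * callbacks, and so on); the generic vhost core checks each optional
 * callback for NULL before invoking it.
 */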