1 /* 2 * vhost-user 3 * 4 * Copyright (c) 2013 Virtual Open Systems Sarl. 5 * 6 * This work is licensed under the terms of the GNU GPL, version 2 or later. 7 * See the COPYING file in the top-level directory. 8 * 9 */ 10 11 #include "qemu/osdep.h" 12 #include "qapi/error.h" 13 #include "hw/virtio/vhost.h" 14 #include "hw/virtio/vhost-user.h" 15 #include "hw/virtio/vhost-backend.h" 16 #include "hw/virtio/virtio.h" 17 #include "hw/virtio/virtio-net.h" 18 #include "chardev/char-fe.h" 19 #include "io/channel-socket.h" 20 #include "sysemu/kvm.h" 21 #include "qemu/error-report.h" 22 #include "qemu/main-loop.h" 23 #include "qemu/sockets.h" 24 #include "sysemu/cryptodev.h" 25 #include "migration/migration.h" 26 #include "migration/postcopy-ram.h" 27 #include "trace.h" 28 #include "exec/ramblock.h" 29 30 #include <sys/ioctl.h> 31 #include <sys/socket.h> 32 #include <sys/un.h> 33 34 #include "standard-headers/linux/vhost_types.h" 35 36 #ifdef CONFIG_LINUX 37 #include <linux/userfaultfd.h> 38 #endif 39 40 #define VHOST_MEMORY_BASELINE_NREGIONS 8 41 #define VHOST_USER_F_PROTOCOL_FEATURES 30 42 #define VHOST_USER_SLAVE_MAX_FDS 8 43 44 /* 45 * Set maximum number of RAM slots supported to 46 * the maximum number supported by the target 47 * hardware plaform. 48 */ 49 #if defined(TARGET_X86) || defined(TARGET_X86_64) || \ 50 defined(TARGET_ARM) || defined(TARGET_ARM_64) 51 #include "hw/acpi/acpi.h" 52 #define VHOST_USER_MAX_RAM_SLOTS ACPI_MAX_RAM_SLOTS 53 54 #elif defined(TARGET_PPC) || defined(TARGET_PPC64) 55 #include "hw/ppc/spapr.h" 56 #define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS 57 58 #else 59 #define VHOST_USER_MAX_RAM_SLOTS 512 60 #endif 61 62 /* 63 * Maximum size of virtio device config space 64 */ 65 #define VHOST_USER_MAX_CONFIG_SIZE 256 66 67 enum VhostUserProtocolFeature { 68 VHOST_USER_PROTOCOL_F_MQ = 0, 69 VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1, 70 VHOST_USER_PROTOCOL_F_RARP = 2, 71 VHOST_USER_PROTOCOL_F_REPLY_ACK = 3, 72 VHOST_USER_PROTOCOL_F_NET_MTU = 4, 73 VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5, 74 VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6, 75 VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7, 76 VHOST_USER_PROTOCOL_F_PAGEFAULT = 8, 77 VHOST_USER_PROTOCOL_F_CONFIG = 9, 78 VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10, 79 VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11, 80 VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12, 81 VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13, 82 /* Feature 14 reserved for VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. */ 83 VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15, 84 VHOST_USER_PROTOCOL_F_MAX 85 }; 86 87 #define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1) 88 89 typedef enum VhostUserRequest { 90 VHOST_USER_NONE = 0, 91 VHOST_USER_GET_FEATURES = 1, 92 VHOST_USER_SET_FEATURES = 2, 93 VHOST_USER_SET_OWNER = 3, 94 VHOST_USER_RESET_OWNER = 4, 95 VHOST_USER_SET_MEM_TABLE = 5, 96 VHOST_USER_SET_LOG_BASE = 6, 97 VHOST_USER_SET_LOG_FD = 7, 98 VHOST_USER_SET_VRING_NUM = 8, 99 VHOST_USER_SET_VRING_ADDR = 9, 100 VHOST_USER_SET_VRING_BASE = 10, 101 VHOST_USER_GET_VRING_BASE = 11, 102 VHOST_USER_SET_VRING_KICK = 12, 103 VHOST_USER_SET_VRING_CALL = 13, 104 VHOST_USER_SET_VRING_ERR = 14, 105 VHOST_USER_GET_PROTOCOL_FEATURES = 15, 106 VHOST_USER_SET_PROTOCOL_FEATURES = 16, 107 VHOST_USER_GET_QUEUE_NUM = 17, 108 VHOST_USER_SET_VRING_ENABLE = 18, 109 VHOST_USER_SEND_RARP = 19, 110 VHOST_USER_NET_SET_MTU = 20, 111 VHOST_USER_SET_SLAVE_REQ_FD = 21, 112 VHOST_USER_IOTLB_MSG = 22, 113 VHOST_USER_SET_VRING_ENDIAN = 23, 114 VHOST_USER_GET_CONFIG = 24, 115 VHOST_USER_SET_CONFIG = 25, 116 VHOST_USER_CREATE_CRYPTO_SESSION = 26, 117 VHOST_USER_CLOSE_CRYPTO_SESSION = 27, 118 VHOST_USER_POSTCOPY_ADVISE = 28, 119 VHOST_USER_POSTCOPY_LISTEN = 29, 120 VHOST_USER_POSTCOPY_END = 30, 121 VHOST_USER_GET_INFLIGHT_FD = 31, 122 VHOST_USER_SET_INFLIGHT_FD = 32, 123 VHOST_USER_GPU_SET_SOCKET = 33, 124 VHOST_USER_RESET_DEVICE = 34, 125 /* Message number 35 reserved for VHOST_USER_VRING_KICK. */ 126 VHOST_USER_GET_MAX_MEM_SLOTS = 36, 127 VHOST_USER_ADD_MEM_REG = 37, 128 VHOST_USER_REM_MEM_REG = 38, 129 VHOST_USER_MAX 130 } VhostUserRequest; 131 132 typedef enum VhostUserSlaveRequest { 133 VHOST_USER_SLAVE_NONE = 0, 134 VHOST_USER_SLAVE_IOTLB_MSG = 1, 135 VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2, 136 VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3, 137 VHOST_USER_SLAVE_MAX 138 } VhostUserSlaveRequest; 139 140 typedef struct VhostUserMemoryRegion { 141 uint64_t guest_phys_addr; 142 uint64_t memory_size; 143 uint64_t userspace_addr; 144 uint64_t mmap_offset; 145 } VhostUserMemoryRegion; 146 147 typedef struct VhostUserMemory { 148 uint32_t nregions; 149 uint32_t padding; 150 VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS]; 151 } VhostUserMemory; 152 153 typedef struct VhostUserMemRegMsg { 154 uint64_t padding; 155 VhostUserMemoryRegion region; 156 } VhostUserMemRegMsg; 157 158 typedef struct VhostUserLog { 159 uint64_t mmap_size; 160 uint64_t mmap_offset; 161 } VhostUserLog; 162 163 typedef struct VhostUserConfig { 164 uint32_t offset; 165 uint32_t size; 166 uint32_t flags; 167 uint8_t region[VHOST_USER_MAX_CONFIG_SIZE]; 168 } VhostUserConfig; 169 170 #define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN 512 171 #define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN 64 172 173 typedef struct VhostUserCryptoSession { 174 /* session id for success, -1 on errors */ 175 int64_t session_id; 176 CryptoDevBackendSymSessionInfo session_setup_data; 177 uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN]; 178 uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN]; 179 } VhostUserCryptoSession; 180 181 static VhostUserConfig c __attribute__ ((unused)); 182 #define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \ 183 + sizeof(c.size) \ 184 + sizeof(c.flags)) 185 186 typedef struct VhostUserVringArea { 187 uint64_t u64; 188 uint64_t size; 189 uint64_t offset; 190 } VhostUserVringArea; 191 192 typedef struct VhostUserInflight { 193 uint64_t mmap_size; 194 uint64_t mmap_offset; 195 uint16_t num_queues; 196 uint16_t queue_size; 197 } VhostUserInflight; 198 199 typedef struct { 200 VhostUserRequest request; 201 202 #define VHOST_USER_VERSION_MASK (0x3) 203 #define VHOST_USER_REPLY_MASK (0x1 << 2) 204 #define VHOST_USER_NEED_REPLY_MASK (0x1 << 3) 205 uint32_t flags; 206 uint32_t size; /* the following payload size */ 207 } QEMU_PACKED VhostUserHeader; 208 209 typedef union { 210 #define VHOST_USER_VRING_IDX_MASK (0xff) 211 #define VHOST_USER_VRING_NOFD_MASK (0x1 << 8) 212 uint64_t u64; 213 struct vhost_vring_state state; 214 struct vhost_vring_addr addr; 215 VhostUserMemory memory; 216 VhostUserMemRegMsg mem_reg; 217 VhostUserLog log; 218 struct vhost_iotlb_msg iotlb; 219 VhostUserConfig config; 220 VhostUserCryptoSession session; 221 VhostUserVringArea area; 222 VhostUserInflight inflight; 223 } VhostUserPayload; 224 225 typedef struct VhostUserMsg { 226 VhostUserHeader hdr; 227 VhostUserPayload payload; 228 } QEMU_PACKED VhostUserMsg; 229 230 static VhostUserMsg m __attribute__ ((unused)); 231 #define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader)) 232 233 #define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload)) 234 235 /* The version of the protocol we support */ 236 #define VHOST_USER_VERSION (0x1) 237 238 struct vhost_user { 239 struct vhost_dev *dev; 240 /* Shared between vhost devs of the same virtio device */ 241 VhostUserState *user; 242 QIOChannel *slave_ioc; 243 GSource *slave_src; 244 NotifierWithReturn postcopy_notifier; 245 struct PostCopyFD postcopy_fd; 246 uint64_t postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS]; 247 /* Length of the region_rb and region_rb_offset arrays */ 248 size_t region_rb_len; 249 /* RAMBlock associated with a given region */ 250 RAMBlock **region_rb; 251 /* 252 * The offset from the start of the RAMBlock to the start of the 253 * vhost region. 254 */ 255 ram_addr_t *region_rb_offset; 256 257 /* True once we've entered postcopy_listen */ 258 bool postcopy_listen; 259 260 /* Our current regions */ 261 int num_shadow_regions; 262 struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS]; 263 }; 264 265 struct scrub_regions { 266 struct vhost_memory_region *region; 267 int reg_idx; 268 int fd_idx; 269 }; 270 271 static bool ioeventfd_enabled(void) 272 { 273 return !kvm_enabled() || kvm_eventfds_enabled(); 274 } 275 276 static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg) 277 { 278 struct vhost_user *u = dev->opaque; 279 CharBackend *chr = u->user->chr; 280 uint8_t *p = (uint8_t *) msg; 281 int r, size = VHOST_USER_HDR_SIZE; 282 283 r = qemu_chr_fe_read_all(chr, p, size); 284 if (r != size) { 285 int saved_errno = errno; 286 error_report("Failed to read msg header. Read %d instead of %d." 287 " Original request %d.", r, size, msg->hdr.request); 288 return r < 0 ? -saved_errno : -EIO; 289 } 290 291 /* validate received flags */ 292 if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) { 293 error_report("Failed to read msg header." 294 " Flags 0x%x instead of 0x%x.", msg->hdr.flags, 295 VHOST_USER_REPLY_MASK | VHOST_USER_VERSION); 296 return -EPROTO; 297 } 298 299 trace_vhost_user_read(msg->hdr.request, msg->hdr.flags); 300 301 return 0; 302 } 303 304 struct vhost_user_read_cb_data { 305 struct vhost_dev *dev; 306 VhostUserMsg *msg; 307 GMainLoop *loop; 308 int ret; 309 }; 310 311 static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition, 312 gpointer opaque) 313 { 314 struct vhost_user_read_cb_data *data = opaque; 315 struct vhost_dev *dev = data->dev; 316 VhostUserMsg *msg = data->msg; 317 struct vhost_user *u = dev->opaque; 318 CharBackend *chr = u->user->chr; 319 uint8_t *p = (uint8_t *) msg; 320 int r, size; 321 322 r = vhost_user_read_header(dev, msg); 323 if (r < 0) { 324 data->ret = r; 325 goto end; 326 } 327 328 /* validate message size is sane */ 329 if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) { 330 error_report("Failed to read msg header." 331 " Size %d exceeds the maximum %zu.", msg->hdr.size, 332 VHOST_USER_PAYLOAD_SIZE); 333 data->ret = -EPROTO; 334 goto end; 335 } 336 337 if (msg->hdr.size) { 338 p += VHOST_USER_HDR_SIZE; 339 size = msg->hdr.size; 340 r = qemu_chr_fe_read_all(chr, p, size); 341 if (r != size) { 342 int saved_errno = errno; 343 error_report("Failed to read msg payload." 344 " Read %d instead of %d.", r, msg->hdr.size); 345 data->ret = r < 0 ? -saved_errno : -EIO; 346 goto end; 347 } 348 } 349 350 end: 351 g_main_loop_quit(data->loop); 352 return G_SOURCE_REMOVE; 353 } 354 355 static gboolean slave_read(QIOChannel *ioc, GIOCondition condition, 356 gpointer opaque); 357 358 /* 359 * This updates the read handler to use a new event loop context. 360 * Event sources are removed from the previous context : this ensures 361 * that events detected in the previous context are purged. They will 362 * be re-detected and processed in the new context. 363 */ 364 static void slave_update_read_handler(struct vhost_dev *dev, 365 GMainContext *ctxt) 366 { 367 struct vhost_user *u = dev->opaque; 368 369 if (!u->slave_ioc) { 370 return; 371 } 372 373 if (u->slave_src) { 374 g_source_destroy(u->slave_src); 375 g_source_unref(u->slave_src); 376 } 377 378 u->slave_src = qio_channel_add_watch_source(u->slave_ioc, 379 G_IO_IN | G_IO_HUP, 380 slave_read, dev, NULL, 381 ctxt); 382 } 383 384 static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) 385 { 386 struct vhost_user *u = dev->opaque; 387 CharBackend *chr = u->user->chr; 388 GMainContext *prev_ctxt = chr->chr->gcontext; 389 GMainContext *ctxt = g_main_context_new(); 390 GMainLoop *loop = g_main_loop_new(ctxt, FALSE); 391 struct vhost_user_read_cb_data data = { 392 .dev = dev, 393 .loop = loop, 394 .msg = msg, 395 .ret = 0 396 }; 397 398 /* 399 * We want to be able to monitor the slave channel fd while waiting 400 * for chr I/O. This requires an event loop, but we can't nest the 401 * one to which chr is currently attached : its fd handlers might not 402 * be prepared for re-entrancy. So we create a new one and switch chr 403 * to use it. 404 */ 405 slave_update_read_handler(dev, ctxt); 406 qemu_chr_be_update_read_handlers(chr->chr, ctxt); 407 qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data); 408 409 g_main_loop_run(loop); 410 411 /* 412 * Restore the previous event loop context. This also destroys/recreates 413 * event sources : this guarantees that all pending events in the original 414 * context that have been processed by the nested loop are purged. 415 */ 416 qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt); 417 slave_update_read_handler(dev, NULL); 418 419 g_main_loop_unref(loop); 420 g_main_context_unref(ctxt); 421 422 return data.ret; 423 } 424 425 static int process_message_reply(struct vhost_dev *dev, 426 const VhostUserMsg *msg) 427 { 428 int ret; 429 VhostUserMsg msg_reply; 430 431 if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) { 432 return 0; 433 } 434 435 ret = vhost_user_read(dev, &msg_reply); 436 if (ret < 0) { 437 return ret; 438 } 439 440 if (msg_reply.hdr.request != msg->hdr.request) { 441 error_report("Received unexpected msg type. " 442 "Expected %d received %d", 443 msg->hdr.request, msg_reply.hdr.request); 444 return -EPROTO; 445 } 446 447 return msg_reply.payload.u64 ? -EIO : 0; 448 } 449 450 static bool vhost_user_one_time_request(VhostUserRequest request) 451 { 452 switch (request) { 453 case VHOST_USER_SET_OWNER: 454 case VHOST_USER_RESET_OWNER: 455 case VHOST_USER_SET_MEM_TABLE: 456 case VHOST_USER_GET_QUEUE_NUM: 457 case VHOST_USER_NET_SET_MTU: 458 return true; 459 default: 460 return false; 461 } 462 } 463 464 /* most non-init callers ignore the error */ 465 static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg, 466 int *fds, int fd_num) 467 { 468 struct vhost_user *u = dev->opaque; 469 CharBackend *chr = u->user->chr; 470 int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size; 471 472 /* 473 * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE, 474 * we just need send it once in the first time. For later such 475 * request, we just ignore it. 476 */ 477 if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) { 478 msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK; 479 return 0; 480 } 481 482 if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) { 483 error_report("Failed to set msg fds."); 484 return -EINVAL; 485 } 486 487 ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size); 488 if (ret != size) { 489 int saved_errno = errno; 490 error_report("Failed to write msg." 491 " Wrote %d instead of %d.", ret, size); 492 return ret < 0 ? -saved_errno : -EIO; 493 } 494 495 trace_vhost_user_write(msg->hdr.request, msg->hdr.flags); 496 497 return 0; 498 } 499 500 int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd) 501 { 502 VhostUserMsg msg = { 503 .hdr.request = VHOST_USER_GPU_SET_SOCKET, 504 .hdr.flags = VHOST_USER_VERSION, 505 }; 506 507 return vhost_user_write(dev, &msg, &fd, 1); 508 } 509 510 static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base, 511 struct vhost_log *log) 512 { 513 int fds[VHOST_USER_MAX_RAM_SLOTS]; 514 size_t fd_num = 0; 515 bool shmfd = virtio_has_feature(dev->protocol_features, 516 VHOST_USER_PROTOCOL_F_LOG_SHMFD); 517 int ret; 518 VhostUserMsg msg = { 519 .hdr.request = VHOST_USER_SET_LOG_BASE, 520 .hdr.flags = VHOST_USER_VERSION, 521 .payload.log.mmap_size = log->size * sizeof(*(log->log)), 522 .payload.log.mmap_offset = 0, 523 .hdr.size = sizeof(msg.payload.log), 524 }; 525 526 if (shmfd && log->fd != -1) { 527 fds[fd_num++] = log->fd; 528 } 529 530 ret = vhost_user_write(dev, &msg, fds, fd_num); 531 if (ret < 0) { 532 return ret; 533 } 534 535 if (shmfd) { 536 msg.hdr.size = 0; 537 ret = vhost_user_read(dev, &msg); 538 if (ret < 0) { 539 return ret; 540 } 541 542 if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) { 543 error_report("Received unexpected msg type. " 544 "Expected %d received %d", 545 VHOST_USER_SET_LOG_BASE, msg.hdr.request); 546 return -EPROTO; 547 } 548 } 549 550 return 0; 551 } 552 553 static MemoryRegion *vhost_user_get_mr_data(uint64_t addr, ram_addr_t *offset, 554 int *fd) 555 { 556 MemoryRegion *mr; 557 558 assert((uintptr_t)addr == addr); 559 mr = memory_region_from_host((void *)(uintptr_t)addr, offset); 560 *fd = memory_region_get_fd(mr); 561 562 return mr; 563 } 564 565 static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst, 566 struct vhost_memory_region *src, 567 uint64_t mmap_offset) 568 { 569 assert(src != NULL && dst != NULL); 570 dst->userspace_addr = src->userspace_addr; 571 dst->memory_size = src->memory_size; 572 dst->guest_phys_addr = src->guest_phys_addr; 573 dst->mmap_offset = mmap_offset; 574 } 575 576 static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u, 577 struct vhost_dev *dev, 578 VhostUserMsg *msg, 579 int *fds, size_t *fd_num, 580 bool track_ramblocks) 581 { 582 int i, fd; 583 ram_addr_t offset; 584 MemoryRegion *mr; 585 struct vhost_memory_region *reg; 586 VhostUserMemoryRegion region_buffer; 587 588 msg->hdr.request = VHOST_USER_SET_MEM_TABLE; 589 590 for (i = 0; i < dev->mem->nregions; ++i) { 591 reg = dev->mem->regions + i; 592 593 mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); 594 if (fd > 0) { 595 if (track_ramblocks) { 596 assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS); 597 trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name, 598 reg->memory_size, 599 reg->guest_phys_addr, 600 reg->userspace_addr, 601 offset); 602 u->region_rb_offset[i] = offset; 603 u->region_rb[i] = mr->ram_block; 604 } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) { 605 error_report("Failed preparing vhost-user memory table msg"); 606 return -ENOBUFS; 607 } 608 vhost_user_fill_msg_region(®ion_buffer, reg, offset); 609 msg->payload.memory.regions[*fd_num] = region_buffer; 610 fds[(*fd_num)++] = fd; 611 } else if (track_ramblocks) { 612 u->region_rb_offset[i] = 0; 613 u->region_rb[i] = NULL; 614 } 615 } 616 617 msg->payload.memory.nregions = *fd_num; 618 619 if (!*fd_num) { 620 error_report("Failed initializing vhost-user memory map, " 621 "consider using -object memory-backend-file share=on"); 622 return -EINVAL; 623 } 624 625 msg->hdr.size = sizeof(msg->payload.memory.nregions); 626 msg->hdr.size += sizeof(msg->payload.memory.padding); 627 msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion); 628 629 return 0; 630 } 631 632 static inline bool reg_equal(struct vhost_memory_region *shadow_reg, 633 struct vhost_memory_region *vdev_reg) 634 { 635 return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr && 636 shadow_reg->userspace_addr == vdev_reg->userspace_addr && 637 shadow_reg->memory_size == vdev_reg->memory_size; 638 } 639 640 static void scrub_shadow_regions(struct vhost_dev *dev, 641 struct scrub_regions *add_reg, 642 int *nr_add_reg, 643 struct scrub_regions *rem_reg, 644 int *nr_rem_reg, uint64_t *shadow_pcb, 645 bool track_ramblocks) 646 { 647 struct vhost_user *u = dev->opaque; 648 bool found[VHOST_USER_MAX_RAM_SLOTS] = {}; 649 struct vhost_memory_region *reg, *shadow_reg; 650 int i, j, fd, add_idx = 0, rm_idx = 0, fd_num = 0; 651 ram_addr_t offset; 652 MemoryRegion *mr; 653 bool matching; 654 655 /* 656 * Find memory regions present in our shadow state which are not in 657 * the device's current memory state. 658 * 659 * Mark regions in both the shadow and device state as "found". 660 */ 661 for (i = 0; i < u->num_shadow_regions; i++) { 662 shadow_reg = &u->shadow_regions[i]; 663 matching = false; 664 665 for (j = 0; j < dev->mem->nregions; j++) { 666 reg = &dev->mem->regions[j]; 667 668 mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); 669 670 if (reg_equal(shadow_reg, reg)) { 671 matching = true; 672 found[j] = true; 673 if (track_ramblocks) { 674 /* 675 * Reset postcopy client bases, region_rb, and 676 * region_rb_offset in case regions are removed. 677 */ 678 if (fd > 0) { 679 u->region_rb_offset[j] = offset; 680 u->region_rb[j] = mr->ram_block; 681 shadow_pcb[j] = u->postcopy_client_bases[i]; 682 } else { 683 u->region_rb_offset[j] = 0; 684 u->region_rb[j] = NULL; 685 } 686 } 687 break; 688 } 689 } 690 691 /* 692 * If the region was not found in the current device memory state 693 * create an entry for it in the removed list. 694 */ 695 if (!matching) { 696 rem_reg[rm_idx].region = shadow_reg; 697 rem_reg[rm_idx++].reg_idx = i; 698 } 699 } 700 701 /* 702 * For regions not marked "found", create entries in the added list. 703 * 704 * Note their indexes in the device memory state and the indexes of their 705 * file descriptors. 706 */ 707 for (i = 0; i < dev->mem->nregions; i++) { 708 reg = &dev->mem->regions[i]; 709 vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); 710 if (fd > 0) { 711 ++fd_num; 712 } 713 714 /* 715 * If the region was in both the shadow and device state we don't 716 * need to send a VHOST_USER_ADD_MEM_REG message for it. 717 */ 718 if (found[i]) { 719 continue; 720 } 721 722 add_reg[add_idx].region = reg; 723 add_reg[add_idx].reg_idx = i; 724 add_reg[add_idx++].fd_idx = fd_num; 725 } 726 *nr_rem_reg = rm_idx; 727 *nr_add_reg = add_idx; 728 729 return; 730 } 731 732 static int send_remove_regions(struct vhost_dev *dev, 733 struct scrub_regions *remove_reg, 734 int nr_rem_reg, VhostUserMsg *msg, 735 bool reply_supported) 736 { 737 struct vhost_user *u = dev->opaque; 738 struct vhost_memory_region *shadow_reg; 739 int i, fd, shadow_reg_idx, ret; 740 ram_addr_t offset; 741 VhostUserMemoryRegion region_buffer; 742 743 /* 744 * The regions in remove_reg appear in the same order they do in the 745 * shadow table. Therefore we can minimize memory copies by iterating 746 * through remove_reg backwards. 747 */ 748 for (i = nr_rem_reg - 1; i >= 0; i--) { 749 shadow_reg = remove_reg[i].region; 750 shadow_reg_idx = remove_reg[i].reg_idx; 751 752 vhost_user_get_mr_data(shadow_reg->userspace_addr, &offset, &fd); 753 754 if (fd > 0) { 755 msg->hdr.request = VHOST_USER_REM_MEM_REG; 756 vhost_user_fill_msg_region(®ion_buffer, shadow_reg, 0); 757 msg->payload.mem_reg.region = region_buffer; 758 759 ret = vhost_user_write(dev, msg, NULL, 0); 760 if (ret < 0) { 761 return ret; 762 } 763 764 if (reply_supported) { 765 ret = process_message_reply(dev, msg); 766 if (ret) { 767 return ret; 768 } 769 } 770 } 771 772 /* 773 * At this point we know the backend has unmapped the region. It is now 774 * safe to remove it from the shadow table. 775 */ 776 memmove(&u->shadow_regions[shadow_reg_idx], 777 &u->shadow_regions[shadow_reg_idx + 1], 778 sizeof(struct vhost_memory_region) * 779 (u->num_shadow_regions - shadow_reg_idx - 1)); 780 u->num_shadow_regions--; 781 } 782 783 return 0; 784 } 785 786 static int send_add_regions(struct vhost_dev *dev, 787 struct scrub_regions *add_reg, int nr_add_reg, 788 VhostUserMsg *msg, uint64_t *shadow_pcb, 789 bool reply_supported, bool track_ramblocks) 790 { 791 struct vhost_user *u = dev->opaque; 792 int i, fd, ret, reg_idx, reg_fd_idx; 793 struct vhost_memory_region *reg; 794 MemoryRegion *mr; 795 ram_addr_t offset; 796 VhostUserMsg msg_reply; 797 VhostUserMemoryRegion region_buffer; 798 799 for (i = 0; i < nr_add_reg; i++) { 800 reg = add_reg[i].region; 801 reg_idx = add_reg[i].reg_idx; 802 reg_fd_idx = add_reg[i].fd_idx; 803 804 mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd); 805 806 if (fd > 0) { 807 if (track_ramblocks) { 808 trace_vhost_user_set_mem_table_withfd(reg_fd_idx, mr->name, 809 reg->memory_size, 810 reg->guest_phys_addr, 811 reg->userspace_addr, 812 offset); 813 u->region_rb_offset[reg_idx] = offset; 814 u->region_rb[reg_idx] = mr->ram_block; 815 } 816 msg->hdr.request = VHOST_USER_ADD_MEM_REG; 817 vhost_user_fill_msg_region(®ion_buffer, reg, offset); 818 msg->payload.mem_reg.region = region_buffer; 819 820 ret = vhost_user_write(dev, msg, &fd, 1); 821 if (ret < 0) { 822 return ret; 823 } 824 825 if (track_ramblocks) { 826 uint64_t reply_gpa; 827 828 ret = vhost_user_read(dev, &msg_reply); 829 if (ret < 0) { 830 return ret; 831 } 832 833 reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr; 834 835 if (msg_reply.hdr.request != VHOST_USER_ADD_MEM_REG) { 836 error_report("%s: Received unexpected msg type." 837 "Expected %d received %d", __func__, 838 VHOST_USER_ADD_MEM_REG, 839 msg_reply.hdr.request); 840 return -EPROTO; 841 } 842 843 /* 844 * We're using the same structure, just reusing one of the 845 * fields, so it should be the same size. 846 */ 847 if (msg_reply.hdr.size != msg->hdr.size) { 848 error_report("%s: Unexpected size for postcopy reply " 849 "%d vs %d", __func__, msg_reply.hdr.size, 850 msg->hdr.size); 851 return -EPROTO; 852 } 853 854 /* Get the postcopy client base from the backend's reply. */ 855 if (reply_gpa == dev->mem->regions[reg_idx].guest_phys_addr) { 856 shadow_pcb[reg_idx] = 857 msg_reply.payload.mem_reg.region.userspace_addr; 858 trace_vhost_user_set_mem_table_postcopy( 859 msg_reply.payload.mem_reg.region.userspace_addr, 860 msg->payload.mem_reg.region.userspace_addr, 861 reg_fd_idx, reg_idx); 862 } else { 863 error_report("%s: invalid postcopy reply for region. " 864 "Got guest physical address %" PRIX64 ", expected " 865 "%" PRIX64, __func__, reply_gpa, 866 dev->mem->regions[reg_idx].guest_phys_addr); 867 return -EPROTO; 868 } 869 } else if (reply_supported) { 870 ret = process_message_reply(dev, msg); 871 if (ret) { 872 return ret; 873 } 874 } 875 } else if (track_ramblocks) { 876 u->region_rb_offset[reg_idx] = 0; 877 u->region_rb[reg_idx] = NULL; 878 } 879 880 /* 881 * At this point, we know the backend has mapped in the new 882 * region, if the region has a valid file descriptor. 883 * 884 * The region should now be added to the shadow table. 885 */ 886 u->shadow_regions[u->num_shadow_regions].guest_phys_addr = 887 reg->guest_phys_addr; 888 u->shadow_regions[u->num_shadow_regions].userspace_addr = 889 reg->userspace_addr; 890 u->shadow_regions[u->num_shadow_regions].memory_size = 891 reg->memory_size; 892 u->num_shadow_regions++; 893 } 894 895 return 0; 896 } 897 898 static int vhost_user_add_remove_regions(struct vhost_dev *dev, 899 VhostUserMsg *msg, 900 bool reply_supported, 901 bool track_ramblocks) 902 { 903 struct vhost_user *u = dev->opaque; 904 struct scrub_regions add_reg[VHOST_USER_MAX_RAM_SLOTS]; 905 struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS]; 906 uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {}; 907 int nr_add_reg, nr_rem_reg; 908 int ret; 909 910 msg->hdr.size = sizeof(msg->payload.mem_reg); 911 912 /* Find the regions which need to be removed or added. */ 913 scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg, 914 shadow_pcb, track_ramblocks); 915 916 if (nr_rem_reg) { 917 ret = send_remove_regions(dev, rem_reg, nr_rem_reg, msg, 918 reply_supported); 919 if (ret < 0) { 920 goto err; 921 } 922 } 923 924 if (nr_add_reg) { 925 ret = send_add_regions(dev, add_reg, nr_add_reg, msg, shadow_pcb, 926 reply_supported, track_ramblocks); 927 if (ret < 0) { 928 goto err; 929 } 930 } 931 932 if (track_ramblocks) { 933 memcpy(u->postcopy_client_bases, shadow_pcb, 934 sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS); 935 /* 936 * Now we've registered this with the postcopy code, we ack to the 937 * client, because now we're in the position to be able to deal with 938 * any faults it generates. 939 */ 940 /* TODO: Use this for failure cases as well with a bad value. */ 941 msg->hdr.size = sizeof(msg->payload.u64); 942 msg->payload.u64 = 0; /* OK */ 943 944 ret = vhost_user_write(dev, msg, NULL, 0); 945 if (ret < 0) { 946 return ret; 947 } 948 } 949 950 return 0; 951 952 err: 953 if (track_ramblocks) { 954 memcpy(u->postcopy_client_bases, shadow_pcb, 955 sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS); 956 } 957 958 return ret; 959 } 960 961 static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev, 962 struct vhost_memory *mem, 963 bool reply_supported, 964 bool config_mem_slots) 965 { 966 struct vhost_user *u = dev->opaque; 967 int fds[VHOST_MEMORY_BASELINE_NREGIONS]; 968 size_t fd_num = 0; 969 VhostUserMsg msg_reply; 970 int region_i, msg_i; 971 int ret; 972 973 VhostUserMsg msg = { 974 .hdr.flags = VHOST_USER_VERSION, 975 }; 976 977 if (u->region_rb_len < dev->mem->nregions) { 978 u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions); 979 u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset, 980 dev->mem->nregions); 981 memset(&(u->region_rb[u->region_rb_len]), '\0', 982 sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len)); 983 memset(&(u->region_rb_offset[u->region_rb_len]), '\0', 984 sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len)); 985 u->region_rb_len = dev->mem->nregions; 986 } 987 988 if (config_mem_slots) { 989 ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, true); 990 if (ret < 0) { 991 return ret; 992 } 993 } else { 994 ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num, 995 true); 996 if (ret < 0) { 997 return ret; 998 } 999 1000 ret = vhost_user_write(dev, &msg, fds, fd_num); 1001 if (ret < 0) { 1002 return ret; 1003 } 1004 1005 ret = vhost_user_read(dev, &msg_reply); 1006 if (ret < 0) { 1007 return ret; 1008 } 1009 1010 if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) { 1011 error_report("%s: Received unexpected msg type." 1012 "Expected %d received %d", __func__, 1013 VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request); 1014 return -EPROTO; 1015 } 1016 1017 /* 1018 * We're using the same structure, just reusing one of the 1019 * fields, so it should be the same size. 1020 */ 1021 if (msg_reply.hdr.size != msg.hdr.size) { 1022 error_report("%s: Unexpected size for postcopy reply " 1023 "%d vs %d", __func__, msg_reply.hdr.size, 1024 msg.hdr.size); 1025 return -EPROTO; 1026 } 1027 1028 memset(u->postcopy_client_bases, 0, 1029 sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS); 1030 1031 /* 1032 * They're in the same order as the regions that were sent 1033 * but some of the regions were skipped (above) if they 1034 * didn't have fd's 1035 */ 1036 for (msg_i = 0, region_i = 0; 1037 region_i < dev->mem->nregions; 1038 region_i++) { 1039 if (msg_i < fd_num && 1040 msg_reply.payload.memory.regions[msg_i].guest_phys_addr == 1041 dev->mem->regions[region_i].guest_phys_addr) { 1042 u->postcopy_client_bases[region_i] = 1043 msg_reply.payload.memory.regions[msg_i].userspace_addr; 1044 trace_vhost_user_set_mem_table_postcopy( 1045 msg_reply.payload.memory.regions[msg_i].userspace_addr, 1046 msg.payload.memory.regions[msg_i].userspace_addr, 1047 msg_i, region_i); 1048 msg_i++; 1049 } 1050 } 1051 if (msg_i != fd_num) { 1052 error_report("%s: postcopy reply not fully consumed " 1053 "%d vs %zd", 1054 __func__, msg_i, fd_num); 1055 return -EIO; 1056 } 1057 1058 /* 1059 * Now we've registered this with the postcopy code, we ack to the 1060 * client, because now we're in the position to be able to deal 1061 * with any faults it generates. 1062 */ 1063 /* TODO: Use this for failure cases as well with a bad value. */ 1064 msg.hdr.size = sizeof(msg.payload.u64); 1065 msg.payload.u64 = 0; /* OK */ 1066 ret = vhost_user_write(dev, &msg, NULL, 0); 1067 if (ret < 0) { 1068 return ret; 1069 } 1070 } 1071 1072 return 0; 1073 } 1074 1075 static int vhost_user_set_mem_table(struct vhost_dev *dev, 1076 struct vhost_memory *mem) 1077 { 1078 struct vhost_user *u = dev->opaque; 1079 int fds[VHOST_MEMORY_BASELINE_NREGIONS]; 1080 size_t fd_num = 0; 1081 bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler; 1082 bool reply_supported = virtio_has_feature(dev->protocol_features, 1083 VHOST_USER_PROTOCOL_F_REPLY_ACK); 1084 bool config_mem_slots = 1085 virtio_has_feature(dev->protocol_features, 1086 VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS); 1087 int ret; 1088 1089 if (do_postcopy) { 1090 /* 1091 * Postcopy has enough differences that it's best done in it's own 1092 * version 1093 */ 1094 return vhost_user_set_mem_table_postcopy(dev, mem, reply_supported, 1095 config_mem_slots); 1096 } 1097 1098 VhostUserMsg msg = { 1099 .hdr.flags = VHOST_USER_VERSION, 1100 }; 1101 1102 if (reply_supported) { 1103 msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; 1104 } 1105 1106 if (config_mem_slots) { 1107 ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, false); 1108 if (ret < 0) { 1109 return ret; 1110 } 1111 } else { 1112 ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num, 1113 false); 1114 if (ret < 0) { 1115 return ret; 1116 } 1117 1118 ret = vhost_user_write(dev, &msg, fds, fd_num); 1119 if (ret < 0) { 1120 return ret; 1121 } 1122 1123 if (reply_supported) { 1124 return process_message_reply(dev, &msg); 1125 } 1126 } 1127 1128 return 0; 1129 } 1130 1131 static int vhost_user_set_vring_endian(struct vhost_dev *dev, 1132 struct vhost_vring_state *ring) 1133 { 1134 bool cross_endian = virtio_has_feature(dev->protocol_features, 1135 VHOST_USER_PROTOCOL_F_CROSS_ENDIAN); 1136 VhostUserMsg msg = { 1137 .hdr.request = VHOST_USER_SET_VRING_ENDIAN, 1138 .hdr.flags = VHOST_USER_VERSION, 1139 .payload.state = *ring, 1140 .hdr.size = sizeof(msg.payload.state), 1141 }; 1142 1143 if (!cross_endian) { 1144 error_report("vhost-user trying to send unhandled ioctl"); 1145 return -ENOTSUP; 1146 } 1147 1148 return vhost_user_write(dev, &msg, NULL, 0); 1149 } 1150 1151 static int vhost_set_vring(struct vhost_dev *dev, 1152 unsigned long int request, 1153 struct vhost_vring_state *ring) 1154 { 1155 VhostUserMsg msg = { 1156 .hdr.request = request, 1157 .hdr.flags = VHOST_USER_VERSION, 1158 .payload.state = *ring, 1159 .hdr.size = sizeof(msg.payload.state), 1160 }; 1161 1162 return vhost_user_write(dev, &msg, NULL, 0); 1163 } 1164 1165 static int vhost_user_set_vring_num(struct vhost_dev *dev, 1166 struct vhost_vring_state *ring) 1167 { 1168 return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring); 1169 } 1170 1171 static void vhost_user_host_notifier_free(VhostUserHostNotifier *n) 1172 { 1173 assert(n && n->unmap_addr); 1174 munmap(n->unmap_addr, qemu_real_host_page_size()); 1175 n->unmap_addr = NULL; 1176 } 1177 1178 /* 1179 * clean-up function for notifier, will finally free the structure 1180 * under rcu. 1181 */ 1182 static void vhost_user_host_notifier_remove(VhostUserHostNotifier *n, 1183 VirtIODevice *vdev) 1184 { 1185 if (n->addr) { 1186 if (vdev) { 1187 virtio_queue_set_host_notifier_mr(vdev, n->idx, &n->mr, false); 1188 } 1189 assert(!n->unmap_addr); 1190 n->unmap_addr = n->addr; 1191 n->addr = NULL; 1192 call_rcu(n, vhost_user_host_notifier_free, rcu); 1193 } 1194 } 1195 1196 static int vhost_user_set_vring_base(struct vhost_dev *dev, 1197 struct vhost_vring_state *ring) 1198 { 1199 return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring); 1200 } 1201 1202 static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable) 1203 { 1204 int i; 1205 1206 if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) { 1207 return -EINVAL; 1208 } 1209 1210 for (i = 0; i < dev->nvqs; ++i) { 1211 int ret; 1212 struct vhost_vring_state state = { 1213 .index = dev->vq_index + i, 1214 .num = enable, 1215 }; 1216 1217 ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state); 1218 if (ret < 0) { 1219 /* 1220 * Restoring the previous state is likely infeasible, as well as 1221 * proceeding regardless the error, so just bail out and hope for 1222 * the device-level recovery. 1223 */ 1224 return ret; 1225 } 1226 } 1227 1228 return 0; 1229 } 1230 1231 static VhostUserHostNotifier *fetch_notifier(VhostUserState *u, 1232 int idx) 1233 { 1234 if (idx >= u->notifiers->len) { 1235 return NULL; 1236 } 1237 return g_ptr_array_index(u->notifiers, idx); 1238 } 1239 1240 static int vhost_user_get_vring_base(struct vhost_dev *dev, 1241 struct vhost_vring_state *ring) 1242 { 1243 int ret; 1244 VhostUserMsg msg = { 1245 .hdr.request = VHOST_USER_GET_VRING_BASE, 1246 .hdr.flags = VHOST_USER_VERSION, 1247 .payload.state = *ring, 1248 .hdr.size = sizeof(msg.payload.state), 1249 }; 1250 struct vhost_user *u = dev->opaque; 1251 1252 VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index); 1253 if (n) { 1254 vhost_user_host_notifier_remove(n, dev->vdev); 1255 } 1256 1257 ret = vhost_user_write(dev, &msg, NULL, 0); 1258 if (ret < 0) { 1259 return ret; 1260 } 1261 1262 ret = vhost_user_read(dev, &msg); 1263 if (ret < 0) { 1264 return ret; 1265 } 1266 1267 if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) { 1268 error_report("Received unexpected msg type. Expected %d received %d", 1269 VHOST_USER_GET_VRING_BASE, msg.hdr.request); 1270 return -EPROTO; 1271 } 1272 1273 if (msg.hdr.size != sizeof(msg.payload.state)) { 1274 error_report("Received bad msg size."); 1275 return -EPROTO; 1276 } 1277 1278 *ring = msg.payload.state; 1279 1280 return 0; 1281 } 1282 1283 static int vhost_set_vring_file(struct vhost_dev *dev, 1284 VhostUserRequest request, 1285 struct vhost_vring_file *file) 1286 { 1287 int fds[VHOST_USER_MAX_RAM_SLOTS]; 1288 size_t fd_num = 0; 1289 VhostUserMsg msg = { 1290 .hdr.request = request, 1291 .hdr.flags = VHOST_USER_VERSION, 1292 .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK, 1293 .hdr.size = sizeof(msg.payload.u64), 1294 }; 1295 1296 if (ioeventfd_enabled() && file->fd > 0) { 1297 fds[fd_num++] = file->fd; 1298 } else { 1299 msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK; 1300 } 1301 1302 return vhost_user_write(dev, &msg, fds, fd_num); 1303 } 1304 1305 static int vhost_user_set_vring_kick(struct vhost_dev *dev, 1306 struct vhost_vring_file *file) 1307 { 1308 return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file); 1309 } 1310 1311 static int vhost_user_set_vring_call(struct vhost_dev *dev, 1312 struct vhost_vring_file *file) 1313 { 1314 return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file); 1315 } 1316 1317 static int vhost_user_set_vring_err(struct vhost_dev *dev, 1318 struct vhost_vring_file *file) 1319 { 1320 return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_ERR, file); 1321 } 1322 1323 static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64) 1324 { 1325 int ret; 1326 VhostUserMsg msg = { 1327 .hdr.request = request, 1328 .hdr.flags = VHOST_USER_VERSION, 1329 }; 1330 1331 if (vhost_user_one_time_request(request) && dev->vq_index != 0) { 1332 return 0; 1333 } 1334 1335 ret = vhost_user_write(dev, &msg, NULL, 0); 1336 if (ret < 0) { 1337 return ret; 1338 } 1339 1340 ret = vhost_user_read(dev, &msg); 1341 if (ret < 0) { 1342 return ret; 1343 } 1344 1345 if (msg.hdr.request != request) { 1346 error_report("Received unexpected msg type. Expected %d received %d", 1347 request, msg.hdr.request); 1348 return -EPROTO; 1349 } 1350 1351 if (msg.hdr.size != sizeof(msg.payload.u64)) { 1352 error_report("Received bad msg size."); 1353 return -EPROTO; 1354 } 1355 1356 *u64 = msg.payload.u64; 1357 1358 return 0; 1359 } 1360 1361 static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features) 1362 { 1363 if (vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features) < 0) { 1364 return -EPROTO; 1365 } 1366 1367 return 0; 1368 } 1369 1370 static int enforce_reply(struct vhost_dev *dev, 1371 const VhostUserMsg *msg) 1372 { 1373 uint64_t dummy; 1374 1375 if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) { 1376 return process_message_reply(dev, msg); 1377 } 1378 1379 /* 1380 * We need to wait for a reply but the backend does not 1381 * support replies for the command we just sent. 1382 * Send VHOST_USER_GET_FEATURES which makes all backends 1383 * send a reply. 1384 */ 1385 return vhost_user_get_features(dev, &dummy); 1386 } 1387 1388 static int vhost_user_set_vring_addr(struct vhost_dev *dev, 1389 struct vhost_vring_addr *addr) 1390 { 1391 int ret; 1392 VhostUserMsg msg = { 1393 .hdr.request = VHOST_USER_SET_VRING_ADDR, 1394 .hdr.flags = VHOST_USER_VERSION, 1395 .payload.addr = *addr, 1396 .hdr.size = sizeof(msg.payload.addr), 1397 }; 1398 1399 bool reply_supported = virtio_has_feature(dev->protocol_features, 1400 VHOST_USER_PROTOCOL_F_REPLY_ACK); 1401 1402 /* 1403 * wait for a reply if logging is enabled to make sure 1404 * backend is actually logging changes 1405 */ 1406 bool wait_for_reply = addr->flags & (1 << VHOST_VRING_F_LOG); 1407 1408 if (reply_supported && wait_for_reply) { 1409 msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; 1410 } 1411 1412 ret = vhost_user_write(dev, &msg, NULL, 0); 1413 if (ret < 0) { 1414 return ret; 1415 } 1416 1417 if (wait_for_reply) { 1418 return enforce_reply(dev, &msg); 1419 } 1420 1421 return 0; 1422 } 1423 1424 static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64, 1425 bool wait_for_reply) 1426 { 1427 VhostUserMsg msg = { 1428 .hdr.request = request, 1429 .hdr.flags = VHOST_USER_VERSION, 1430 .payload.u64 = u64, 1431 .hdr.size = sizeof(msg.payload.u64), 1432 }; 1433 int ret; 1434 1435 if (wait_for_reply) { 1436 bool reply_supported = virtio_has_feature(dev->protocol_features, 1437 VHOST_USER_PROTOCOL_F_REPLY_ACK); 1438 if (reply_supported) { 1439 msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; 1440 } 1441 } 1442 1443 ret = vhost_user_write(dev, &msg, NULL, 0); 1444 if (ret < 0) { 1445 return ret; 1446 } 1447 1448 if (wait_for_reply) { 1449 return enforce_reply(dev, &msg); 1450 } 1451 1452 return 0; 1453 } 1454 1455 static int vhost_user_set_features(struct vhost_dev *dev, 1456 uint64_t features) 1457 { 1458 /* 1459 * wait for a reply if logging is enabled to make sure 1460 * backend is actually logging changes 1461 */ 1462 bool log_enabled = features & (0x1ULL << VHOST_F_LOG_ALL); 1463 1464 /* 1465 * We need to include any extra backend only feature bits that 1466 * might be needed by our device. Currently this includes the 1467 * VHOST_USER_F_PROTOCOL_FEATURES bit for enabling protocol 1468 * features. 1469 */ 1470 return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, 1471 features | dev->backend_features, 1472 log_enabled); 1473 } 1474 1475 static int vhost_user_set_protocol_features(struct vhost_dev *dev, 1476 uint64_t features) 1477 { 1478 return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features, 1479 false); 1480 } 1481 1482 static int vhost_user_set_owner(struct vhost_dev *dev) 1483 { 1484 VhostUserMsg msg = { 1485 .hdr.request = VHOST_USER_SET_OWNER, 1486 .hdr.flags = VHOST_USER_VERSION, 1487 }; 1488 1489 return vhost_user_write(dev, &msg, NULL, 0); 1490 } 1491 1492 static int vhost_user_get_max_memslots(struct vhost_dev *dev, 1493 uint64_t *max_memslots) 1494 { 1495 uint64_t backend_max_memslots; 1496 int err; 1497 1498 err = vhost_user_get_u64(dev, VHOST_USER_GET_MAX_MEM_SLOTS, 1499 &backend_max_memslots); 1500 if (err < 0) { 1501 return err; 1502 } 1503 1504 *max_memslots = backend_max_memslots; 1505 1506 return 0; 1507 } 1508 1509 static int vhost_user_reset_device(struct vhost_dev *dev) 1510 { 1511 VhostUserMsg msg = { 1512 .hdr.flags = VHOST_USER_VERSION, 1513 }; 1514 1515 msg.hdr.request = virtio_has_feature(dev->protocol_features, 1516 VHOST_USER_PROTOCOL_F_RESET_DEVICE) 1517 ? VHOST_USER_RESET_DEVICE 1518 : VHOST_USER_RESET_OWNER; 1519 1520 return vhost_user_write(dev, &msg, NULL, 0); 1521 } 1522 1523 static int vhost_user_slave_handle_config_change(struct vhost_dev *dev) 1524 { 1525 if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) { 1526 return -ENOSYS; 1527 } 1528 1529 return dev->config_ops->vhost_dev_config_notifier(dev); 1530 } 1531 1532 /* 1533 * Fetch or create the notifier for a given idx. Newly created 1534 * notifiers are added to the pointer array that tracks them. 1535 */ 1536 static VhostUserHostNotifier *fetch_or_create_notifier(VhostUserState *u, 1537 int idx) 1538 { 1539 VhostUserHostNotifier *n = NULL; 1540 if (idx >= u->notifiers->len) { 1541 g_ptr_array_set_size(u->notifiers, idx + 1); 1542 } 1543 1544 n = g_ptr_array_index(u->notifiers, idx); 1545 if (!n) { 1546 /* 1547 * In case notification arrive out-of-order, 1548 * make room for current index. 1549 */ 1550 g_ptr_array_remove_index(u->notifiers, idx); 1551 n = g_new0(VhostUserHostNotifier, 1); 1552 n->idx = idx; 1553 g_ptr_array_insert(u->notifiers, idx, n); 1554 trace_vhost_user_create_notifier(idx, n); 1555 } 1556 1557 return n; 1558 } 1559 1560 static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev, 1561 VhostUserVringArea *area, 1562 int fd) 1563 { 1564 int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK; 1565 size_t page_size = qemu_real_host_page_size(); 1566 struct vhost_user *u = dev->opaque; 1567 VhostUserState *user = u->user; 1568 VirtIODevice *vdev = dev->vdev; 1569 VhostUserHostNotifier *n; 1570 void *addr; 1571 char *name; 1572 1573 if (!virtio_has_feature(dev->protocol_features, 1574 VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) || 1575 vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) { 1576 return -EINVAL; 1577 } 1578 1579 /* 1580 * Fetch notifier and invalidate any old data before setting up 1581 * new mapped address. 1582 */ 1583 n = fetch_or_create_notifier(user, queue_idx); 1584 vhost_user_host_notifier_remove(n, vdev); 1585 1586 if (area->u64 & VHOST_USER_VRING_NOFD_MASK) { 1587 return 0; 1588 } 1589 1590 /* Sanity check. */ 1591 if (area->size != page_size) { 1592 return -EINVAL; 1593 } 1594 1595 addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, 1596 fd, area->offset); 1597 if (addr == MAP_FAILED) { 1598 return -EFAULT; 1599 } 1600 1601 name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]", 1602 user, queue_idx); 1603 if (!n->mr.ram) { /* Don't init again after suspend. */ 1604 memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name, 1605 page_size, addr); 1606 } else { 1607 n->mr.ram_block->host = addr; 1608 } 1609 g_free(name); 1610 1611 if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) { 1612 object_unparent(OBJECT(&n->mr)); 1613 munmap(addr, page_size); 1614 return -ENXIO; 1615 } 1616 1617 n->addr = addr; 1618 1619 return 0; 1620 } 1621 1622 static void close_slave_channel(struct vhost_user *u) 1623 { 1624 g_source_destroy(u->slave_src); 1625 g_source_unref(u->slave_src); 1626 u->slave_src = NULL; 1627 object_unref(OBJECT(u->slave_ioc)); 1628 u->slave_ioc = NULL; 1629 } 1630 1631 static gboolean slave_read(QIOChannel *ioc, GIOCondition condition, 1632 gpointer opaque) 1633 { 1634 struct vhost_dev *dev = opaque; 1635 struct vhost_user *u = dev->opaque; 1636 VhostUserHeader hdr = { 0, }; 1637 VhostUserPayload payload = { 0, }; 1638 Error *local_err = NULL; 1639 gboolean rc = G_SOURCE_CONTINUE; 1640 int ret = 0; 1641 struct iovec iov; 1642 g_autofree int *fd = NULL; 1643 size_t fdsize = 0; 1644 int i; 1645 1646 /* Read header */ 1647 iov.iov_base = &hdr; 1648 iov.iov_len = VHOST_USER_HDR_SIZE; 1649 1650 if (qio_channel_readv_full_all(ioc, &iov, 1, &fd, &fdsize, &local_err)) { 1651 error_report_err(local_err); 1652 goto err; 1653 } 1654 1655 if (hdr.size > VHOST_USER_PAYLOAD_SIZE) { 1656 error_report("Failed to read msg header." 1657 " Size %d exceeds the maximum %zu.", hdr.size, 1658 VHOST_USER_PAYLOAD_SIZE); 1659 goto err; 1660 } 1661 1662 /* Read payload */ 1663 if (qio_channel_read_all(ioc, (char *) &payload, hdr.size, &local_err)) { 1664 error_report_err(local_err); 1665 goto err; 1666 } 1667 1668 switch (hdr.request) { 1669 case VHOST_USER_SLAVE_IOTLB_MSG: 1670 ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb); 1671 break; 1672 case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG : 1673 ret = vhost_user_slave_handle_config_change(dev); 1674 break; 1675 case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG: 1676 ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area, 1677 fd ? fd[0] : -1); 1678 break; 1679 default: 1680 error_report("Received unexpected msg type: %d.", hdr.request); 1681 ret = -EINVAL; 1682 } 1683 1684 /* 1685 * REPLY_ACK feature handling. Other reply types has to be managed 1686 * directly in their request handlers. 1687 */ 1688 if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) { 1689 struct iovec iovec[2]; 1690 1691 1692 hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK; 1693 hdr.flags |= VHOST_USER_REPLY_MASK; 1694 1695 payload.u64 = !!ret; 1696 hdr.size = sizeof(payload.u64); 1697 1698 iovec[0].iov_base = &hdr; 1699 iovec[0].iov_len = VHOST_USER_HDR_SIZE; 1700 iovec[1].iov_base = &payload; 1701 iovec[1].iov_len = hdr.size; 1702 1703 if (qio_channel_writev_all(ioc, iovec, ARRAY_SIZE(iovec), &local_err)) { 1704 error_report_err(local_err); 1705 goto err; 1706 } 1707 } 1708 1709 goto fdcleanup; 1710 1711 err: 1712 close_slave_channel(u); 1713 rc = G_SOURCE_REMOVE; 1714 1715 fdcleanup: 1716 if (fd) { 1717 for (i = 0; i < fdsize; i++) { 1718 close(fd[i]); 1719 } 1720 } 1721 return rc; 1722 } 1723 1724 static int vhost_setup_slave_channel(struct vhost_dev *dev) 1725 { 1726 VhostUserMsg msg = { 1727 .hdr.request = VHOST_USER_SET_SLAVE_REQ_FD, 1728 .hdr.flags = VHOST_USER_VERSION, 1729 }; 1730 struct vhost_user *u = dev->opaque; 1731 int sv[2], ret = 0; 1732 bool reply_supported = virtio_has_feature(dev->protocol_features, 1733 VHOST_USER_PROTOCOL_F_REPLY_ACK); 1734 Error *local_err = NULL; 1735 QIOChannel *ioc; 1736 1737 if (!virtio_has_feature(dev->protocol_features, 1738 VHOST_USER_PROTOCOL_F_SLAVE_REQ)) { 1739 return 0; 1740 } 1741 1742 if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) { 1743 int saved_errno = errno; 1744 error_report("socketpair() failed"); 1745 return -saved_errno; 1746 } 1747 1748 ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err)); 1749 if (!ioc) { 1750 error_report_err(local_err); 1751 return -ECONNREFUSED; 1752 } 1753 u->slave_ioc = ioc; 1754 slave_update_read_handler(dev, NULL); 1755 1756 if (reply_supported) { 1757 msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; 1758 } 1759 1760 ret = vhost_user_write(dev, &msg, &sv[1], 1); 1761 if (ret) { 1762 goto out; 1763 } 1764 1765 if (reply_supported) { 1766 ret = process_message_reply(dev, &msg); 1767 } 1768 1769 out: 1770 close(sv[1]); 1771 if (ret) { 1772 close_slave_channel(u); 1773 } 1774 1775 return ret; 1776 } 1777 1778 #ifdef CONFIG_LINUX 1779 /* 1780 * Called back from the postcopy fault thread when a fault is received on our 1781 * ufd. 1782 * TODO: This is Linux specific 1783 */ 1784 static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd, 1785 void *ufd) 1786 { 1787 struct vhost_dev *dev = pcfd->data; 1788 struct vhost_user *u = dev->opaque; 1789 struct uffd_msg *msg = ufd; 1790 uint64_t faultaddr = msg->arg.pagefault.address; 1791 RAMBlock *rb = NULL; 1792 uint64_t rb_offset; 1793 int i; 1794 1795 trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr, 1796 dev->mem->nregions); 1797 for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) { 1798 trace_vhost_user_postcopy_fault_handler_loop(i, 1799 u->postcopy_client_bases[i], dev->mem->regions[i].memory_size); 1800 if (faultaddr >= u->postcopy_client_bases[i]) { 1801 /* Ofset of the fault address in the vhost region */ 1802 uint64_t region_offset = faultaddr - u->postcopy_client_bases[i]; 1803 if (region_offset < dev->mem->regions[i].memory_size) { 1804 rb_offset = region_offset + u->region_rb_offset[i]; 1805 trace_vhost_user_postcopy_fault_handler_found(i, 1806 region_offset, rb_offset); 1807 rb = u->region_rb[i]; 1808 return postcopy_request_shared_page(pcfd, rb, faultaddr, 1809 rb_offset); 1810 } 1811 } 1812 } 1813 error_report("%s: Failed to find region for fault %" PRIx64, 1814 __func__, faultaddr); 1815 return -1; 1816 } 1817 1818 static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb, 1819 uint64_t offset) 1820 { 1821 struct vhost_dev *dev = pcfd->data; 1822 struct vhost_user *u = dev->opaque; 1823 int i; 1824 1825 trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset); 1826 1827 if (!u) { 1828 return 0; 1829 } 1830 /* Translate the offset into an address in the clients address space */ 1831 for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) { 1832 if (u->region_rb[i] == rb && 1833 offset >= u->region_rb_offset[i] && 1834 offset < (u->region_rb_offset[i] + 1835 dev->mem->regions[i].memory_size)) { 1836 uint64_t client_addr = (offset - u->region_rb_offset[i]) + 1837 u->postcopy_client_bases[i]; 1838 trace_vhost_user_postcopy_waker_found(client_addr); 1839 return postcopy_wake_shared(pcfd, client_addr, rb); 1840 } 1841 } 1842 1843 trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset); 1844 return 0; 1845 } 1846 #endif 1847 1848 /* 1849 * Called at the start of an inbound postcopy on reception of the 1850 * 'advise' command. 1851 */ 1852 static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp) 1853 { 1854 #ifdef CONFIG_LINUX 1855 struct vhost_user *u = dev->opaque; 1856 CharBackend *chr = u->user->chr; 1857 int ufd; 1858 int ret; 1859 VhostUserMsg msg = { 1860 .hdr.request = VHOST_USER_POSTCOPY_ADVISE, 1861 .hdr.flags = VHOST_USER_VERSION, 1862 }; 1863 1864 ret = vhost_user_write(dev, &msg, NULL, 0); 1865 if (ret < 0) { 1866 error_setg(errp, "Failed to send postcopy_advise to vhost"); 1867 return ret; 1868 } 1869 1870 ret = vhost_user_read(dev, &msg); 1871 if (ret < 0) { 1872 error_setg(errp, "Failed to get postcopy_advise reply from vhost"); 1873 return ret; 1874 } 1875 1876 if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) { 1877 error_setg(errp, "Unexpected msg type. Expected %d received %d", 1878 VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request); 1879 return -EPROTO; 1880 } 1881 1882 if (msg.hdr.size) { 1883 error_setg(errp, "Received bad msg size."); 1884 return -EPROTO; 1885 } 1886 ufd = qemu_chr_fe_get_msgfd(chr); 1887 if (ufd < 0) { 1888 error_setg(errp, "%s: Failed to get ufd", __func__); 1889 return -EIO; 1890 } 1891 qemu_socket_set_nonblock(ufd); 1892 1893 /* register ufd with userfault thread */ 1894 u->postcopy_fd.fd = ufd; 1895 u->postcopy_fd.data = dev; 1896 u->postcopy_fd.handler = vhost_user_postcopy_fault_handler; 1897 u->postcopy_fd.waker = vhost_user_postcopy_waker; 1898 u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */ 1899 postcopy_register_shared_ufd(&u->postcopy_fd); 1900 return 0; 1901 #else 1902 error_setg(errp, "Postcopy not supported on non-Linux systems"); 1903 return -ENOSYS; 1904 #endif 1905 } 1906 1907 /* 1908 * Called at the switch to postcopy on reception of the 'listen' command. 1909 */ 1910 static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp) 1911 { 1912 struct vhost_user *u = dev->opaque; 1913 int ret; 1914 VhostUserMsg msg = { 1915 .hdr.request = VHOST_USER_POSTCOPY_LISTEN, 1916 .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK, 1917 }; 1918 u->postcopy_listen = true; 1919 1920 trace_vhost_user_postcopy_listen(); 1921 1922 ret = vhost_user_write(dev, &msg, NULL, 0); 1923 if (ret < 0) { 1924 error_setg(errp, "Failed to send postcopy_listen to vhost"); 1925 return ret; 1926 } 1927 1928 ret = process_message_reply(dev, &msg); 1929 if (ret) { 1930 error_setg(errp, "Failed to receive reply to postcopy_listen"); 1931 return ret; 1932 } 1933 1934 return 0; 1935 } 1936 1937 /* 1938 * Called at the end of postcopy 1939 */ 1940 static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp) 1941 { 1942 VhostUserMsg msg = { 1943 .hdr.request = VHOST_USER_POSTCOPY_END, 1944 .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK, 1945 }; 1946 int ret; 1947 struct vhost_user *u = dev->opaque; 1948 1949 trace_vhost_user_postcopy_end_entry(); 1950 1951 ret = vhost_user_write(dev, &msg, NULL, 0); 1952 if (ret < 0) { 1953 error_setg(errp, "Failed to send postcopy_end to vhost"); 1954 return ret; 1955 } 1956 1957 ret = process_message_reply(dev, &msg); 1958 if (ret) { 1959 error_setg(errp, "Failed to receive reply to postcopy_end"); 1960 return ret; 1961 } 1962 postcopy_unregister_shared_ufd(&u->postcopy_fd); 1963 close(u->postcopy_fd.fd); 1964 u->postcopy_fd.handler = NULL; 1965 1966 trace_vhost_user_postcopy_end_exit(); 1967 1968 return 0; 1969 } 1970 1971 static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier, 1972 void *opaque) 1973 { 1974 struct PostcopyNotifyData *pnd = opaque; 1975 struct vhost_user *u = container_of(notifier, struct vhost_user, 1976 postcopy_notifier); 1977 struct vhost_dev *dev = u->dev; 1978 1979 switch (pnd->reason) { 1980 case POSTCOPY_NOTIFY_PROBE: 1981 if (!virtio_has_feature(dev->protocol_features, 1982 VHOST_USER_PROTOCOL_F_PAGEFAULT)) { 1983 /* TODO: Get the device name into this error somehow */ 1984 error_setg(pnd->errp, 1985 "vhost-user backend not capable of postcopy"); 1986 return -ENOENT; 1987 } 1988 break; 1989 1990 case POSTCOPY_NOTIFY_INBOUND_ADVISE: 1991 return vhost_user_postcopy_advise(dev, pnd->errp); 1992 1993 case POSTCOPY_NOTIFY_INBOUND_LISTEN: 1994 return vhost_user_postcopy_listen(dev, pnd->errp); 1995 1996 case POSTCOPY_NOTIFY_INBOUND_END: 1997 return vhost_user_postcopy_end(dev, pnd->errp); 1998 1999 default: 2000 /* We ignore notifications we don't know */ 2001 break; 2002 } 2003 2004 return 0; 2005 } 2006 2007 static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque, 2008 Error **errp) 2009 { 2010 uint64_t features, ram_slots; 2011 struct vhost_user *u; 2012 VhostUserState *vus = (VhostUserState *) opaque; 2013 int err; 2014 2015 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); 2016 2017 u = g_new0(struct vhost_user, 1); 2018 u->user = vus; 2019 u->dev = dev; 2020 dev->opaque = u; 2021 2022 err = vhost_user_get_features(dev, &features); 2023 if (err < 0) { 2024 error_setg_errno(errp, -err, "vhost_backend_init failed"); 2025 return err; 2026 } 2027 2028 if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) { 2029 bool supports_f_config = vus->supports_config || 2030 (dev->config_ops && dev->config_ops->vhost_dev_config_notifier); 2031 uint64_t protocol_features; 2032 2033 dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES; 2034 2035 err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES, 2036 &protocol_features); 2037 if (err < 0) { 2038 error_setg_errno(errp, EPROTO, "vhost_backend_init failed"); 2039 return -EPROTO; 2040 } 2041 2042 /* 2043 * We will use all the protocol features we support - although 2044 * we suppress F_CONFIG if we know QEMUs internal code can not support 2045 * it. 2046 */ 2047 protocol_features &= VHOST_USER_PROTOCOL_FEATURE_MASK; 2048 2049 if (supports_f_config) { 2050 if (!virtio_has_feature(protocol_features, 2051 VHOST_USER_PROTOCOL_F_CONFIG)) { 2052 error_setg(errp, "vhost-user device expecting " 2053 "VHOST_USER_PROTOCOL_F_CONFIG but the vhost-user backend does " 2054 "not support it."); 2055 return -EPROTO; 2056 } 2057 } else { 2058 if (virtio_has_feature(protocol_features, 2059 VHOST_USER_PROTOCOL_F_CONFIG)) { 2060 warn_reportf_err(*errp, "vhost-user backend supports " 2061 "VHOST_USER_PROTOCOL_F_CONFIG but QEMU does not."); 2062 protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG); 2063 } 2064 } 2065 2066 /* final set of protocol features */ 2067 dev->protocol_features = protocol_features; 2068 err = vhost_user_set_protocol_features(dev, dev->protocol_features); 2069 if (err < 0) { 2070 error_setg_errno(errp, EPROTO, "vhost_backend_init failed"); 2071 return -EPROTO; 2072 } 2073 2074 /* query the max queues we support if backend supports Multiple Queue */ 2075 if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) { 2076 err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM, 2077 &dev->max_queues); 2078 if (err < 0) { 2079 error_setg_errno(errp, EPROTO, "vhost_backend_init failed"); 2080 return -EPROTO; 2081 } 2082 } else { 2083 dev->max_queues = 1; 2084 } 2085 2086 if (dev->num_queues && dev->max_queues < dev->num_queues) { 2087 error_setg(errp, "The maximum number of queues supported by the " 2088 "backend is %" PRIu64, dev->max_queues); 2089 return -EINVAL; 2090 } 2091 2092 if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) && 2093 !(virtio_has_feature(dev->protocol_features, 2094 VHOST_USER_PROTOCOL_F_SLAVE_REQ) && 2095 virtio_has_feature(dev->protocol_features, 2096 VHOST_USER_PROTOCOL_F_REPLY_ACK))) { 2097 error_setg(errp, "IOMMU support requires reply-ack and " 2098 "slave-req protocol features."); 2099 return -EINVAL; 2100 } 2101 2102 /* get max memory regions if backend supports configurable RAM slots */ 2103 if (!virtio_has_feature(dev->protocol_features, 2104 VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) { 2105 u->user->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS; 2106 } else { 2107 err = vhost_user_get_max_memslots(dev, &ram_slots); 2108 if (err < 0) { 2109 error_setg_errno(errp, EPROTO, "vhost_backend_init failed"); 2110 return -EPROTO; 2111 } 2112 2113 if (ram_slots < u->user->memory_slots) { 2114 error_setg(errp, "The backend specified a max ram slots limit " 2115 "of %" PRIu64", when the prior validated limit was " 2116 "%d. This limit should never decrease.", ram_slots, 2117 u->user->memory_slots); 2118 return -EINVAL; 2119 } 2120 2121 u->user->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS); 2122 } 2123 } 2124 2125 if (dev->migration_blocker == NULL && 2126 !virtio_has_feature(dev->protocol_features, 2127 VHOST_USER_PROTOCOL_F_LOG_SHMFD)) { 2128 error_setg(&dev->migration_blocker, 2129 "Migration disabled: vhost-user backend lacks " 2130 "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature."); 2131 } 2132 2133 if (dev->vq_index == 0) { 2134 err = vhost_setup_slave_channel(dev); 2135 if (err < 0) { 2136 error_setg_errno(errp, EPROTO, "vhost_backend_init failed"); 2137 return -EPROTO; 2138 } 2139 } 2140 2141 u->postcopy_notifier.notify = vhost_user_postcopy_notifier; 2142 postcopy_add_notifier(&u->postcopy_notifier); 2143 2144 return 0; 2145 } 2146 2147 static int vhost_user_backend_cleanup(struct vhost_dev *dev) 2148 { 2149 struct vhost_user *u; 2150 2151 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); 2152 2153 u = dev->opaque; 2154 if (u->postcopy_notifier.notify) { 2155 postcopy_remove_notifier(&u->postcopy_notifier); 2156 u->postcopy_notifier.notify = NULL; 2157 } 2158 u->postcopy_listen = false; 2159 if (u->postcopy_fd.handler) { 2160 postcopy_unregister_shared_ufd(&u->postcopy_fd); 2161 close(u->postcopy_fd.fd); 2162 u->postcopy_fd.handler = NULL; 2163 } 2164 if (u->slave_ioc) { 2165 close_slave_channel(u); 2166 } 2167 g_free(u->region_rb); 2168 u->region_rb = NULL; 2169 g_free(u->region_rb_offset); 2170 u->region_rb_offset = NULL; 2171 u->region_rb_len = 0; 2172 g_free(u); 2173 dev->opaque = 0; 2174 2175 return 0; 2176 } 2177 2178 static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx) 2179 { 2180 assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs); 2181 2182 return idx; 2183 } 2184 2185 static int vhost_user_memslots_limit(struct vhost_dev *dev) 2186 { 2187 struct vhost_user *u = dev->opaque; 2188 2189 return u->user->memory_slots; 2190 } 2191 2192 static bool vhost_user_requires_shm_log(struct vhost_dev *dev) 2193 { 2194 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); 2195 2196 return virtio_has_feature(dev->protocol_features, 2197 VHOST_USER_PROTOCOL_F_LOG_SHMFD); 2198 } 2199 2200 static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr) 2201 { 2202 VhostUserMsg msg = { }; 2203 2204 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); 2205 2206 /* If guest supports GUEST_ANNOUNCE do nothing */ 2207 if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) { 2208 return 0; 2209 } 2210 2211 /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */ 2212 if (virtio_has_feature(dev->protocol_features, 2213 VHOST_USER_PROTOCOL_F_RARP)) { 2214 msg.hdr.request = VHOST_USER_SEND_RARP; 2215 msg.hdr.flags = VHOST_USER_VERSION; 2216 memcpy((char *)&msg.payload.u64, mac_addr, 6); 2217 msg.hdr.size = sizeof(msg.payload.u64); 2218 2219 return vhost_user_write(dev, &msg, NULL, 0); 2220 } 2221 return -ENOTSUP; 2222 } 2223 2224 static bool vhost_user_can_merge(struct vhost_dev *dev, 2225 uint64_t start1, uint64_t size1, 2226 uint64_t start2, uint64_t size2) 2227 { 2228 ram_addr_t offset; 2229 int mfd, rfd; 2230 2231 (void)vhost_user_get_mr_data(start1, &offset, &mfd); 2232 (void)vhost_user_get_mr_data(start2, &offset, &rfd); 2233 2234 return mfd == rfd; 2235 } 2236 2237 static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu) 2238 { 2239 VhostUserMsg msg; 2240 bool reply_supported = virtio_has_feature(dev->protocol_features, 2241 VHOST_USER_PROTOCOL_F_REPLY_ACK); 2242 int ret; 2243 2244 if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) { 2245 return 0; 2246 } 2247 2248 msg.hdr.request = VHOST_USER_NET_SET_MTU; 2249 msg.payload.u64 = mtu; 2250 msg.hdr.size = sizeof(msg.payload.u64); 2251 msg.hdr.flags = VHOST_USER_VERSION; 2252 if (reply_supported) { 2253 msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; 2254 } 2255 2256 ret = vhost_user_write(dev, &msg, NULL, 0); 2257 if (ret < 0) { 2258 return ret; 2259 } 2260 2261 /* If reply_ack supported, slave has to ack specified MTU is valid */ 2262 if (reply_supported) { 2263 return process_message_reply(dev, &msg); 2264 } 2265 2266 return 0; 2267 } 2268 2269 static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev, 2270 struct vhost_iotlb_msg *imsg) 2271 { 2272 int ret; 2273 VhostUserMsg msg = { 2274 .hdr.request = VHOST_USER_IOTLB_MSG, 2275 .hdr.size = sizeof(msg.payload.iotlb), 2276 .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK, 2277 .payload.iotlb = *imsg, 2278 }; 2279 2280 ret = vhost_user_write(dev, &msg, NULL, 0); 2281 if (ret < 0) { 2282 return ret; 2283 } 2284 2285 return process_message_reply(dev, &msg); 2286 } 2287 2288 2289 static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled) 2290 { 2291 /* No-op as the receive channel is not dedicated to IOTLB messages. */ 2292 } 2293 2294 static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config, 2295 uint32_t config_len, Error **errp) 2296 { 2297 int ret; 2298 VhostUserMsg msg = { 2299 .hdr.request = VHOST_USER_GET_CONFIG, 2300 .hdr.flags = VHOST_USER_VERSION, 2301 .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len, 2302 }; 2303 2304 if (!virtio_has_feature(dev->protocol_features, 2305 VHOST_USER_PROTOCOL_F_CONFIG)) { 2306 error_setg(errp, "VHOST_USER_PROTOCOL_F_CONFIG not supported"); 2307 return -EINVAL; 2308 } 2309 2310 assert(config_len <= VHOST_USER_MAX_CONFIG_SIZE); 2311 2312 msg.payload.config.offset = 0; 2313 msg.payload.config.size = config_len; 2314 ret = vhost_user_write(dev, &msg, NULL, 0); 2315 if (ret < 0) { 2316 error_setg_errno(errp, -ret, "vhost_get_config failed"); 2317 return ret; 2318 } 2319 2320 ret = vhost_user_read(dev, &msg); 2321 if (ret < 0) { 2322 error_setg_errno(errp, -ret, "vhost_get_config failed"); 2323 return ret; 2324 } 2325 2326 if (msg.hdr.request != VHOST_USER_GET_CONFIG) { 2327 error_setg(errp, 2328 "Received unexpected msg type. Expected %d received %d", 2329 VHOST_USER_GET_CONFIG, msg.hdr.request); 2330 return -EPROTO; 2331 } 2332 2333 if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) { 2334 error_setg(errp, "Received bad msg size."); 2335 return -EPROTO; 2336 } 2337 2338 memcpy(config, msg.payload.config.region, config_len); 2339 2340 return 0; 2341 } 2342 2343 static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data, 2344 uint32_t offset, uint32_t size, uint32_t flags) 2345 { 2346 int ret; 2347 uint8_t *p; 2348 bool reply_supported = virtio_has_feature(dev->protocol_features, 2349 VHOST_USER_PROTOCOL_F_REPLY_ACK); 2350 2351 VhostUserMsg msg = { 2352 .hdr.request = VHOST_USER_SET_CONFIG, 2353 .hdr.flags = VHOST_USER_VERSION, 2354 .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size, 2355 }; 2356 2357 if (!virtio_has_feature(dev->protocol_features, 2358 VHOST_USER_PROTOCOL_F_CONFIG)) { 2359 return -ENOTSUP; 2360 } 2361 2362 if (reply_supported) { 2363 msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; 2364 } 2365 2366 if (size > VHOST_USER_MAX_CONFIG_SIZE) { 2367 return -EINVAL; 2368 } 2369 2370 msg.payload.config.offset = offset, 2371 msg.payload.config.size = size, 2372 msg.payload.config.flags = flags, 2373 p = msg.payload.config.region; 2374 memcpy(p, data, size); 2375 2376 ret = vhost_user_write(dev, &msg, NULL, 0); 2377 if (ret < 0) { 2378 return ret; 2379 } 2380 2381 if (reply_supported) { 2382 return process_message_reply(dev, &msg); 2383 } 2384 2385 return 0; 2386 } 2387 2388 static int vhost_user_crypto_create_session(struct vhost_dev *dev, 2389 void *session_info, 2390 uint64_t *session_id) 2391 { 2392 int ret; 2393 bool crypto_session = virtio_has_feature(dev->protocol_features, 2394 VHOST_USER_PROTOCOL_F_CRYPTO_SESSION); 2395 CryptoDevBackendSymSessionInfo *sess_info = session_info; 2396 VhostUserMsg msg = { 2397 .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION, 2398 .hdr.flags = VHOST_USER_VERSION, 2399 .hdr.size = sizeof(msg.payload.session), 2400 }; 2401 2402 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); 2403 2404 if (!crypto_session) { 2405 error_report("vhost-user trying to send unhandled ioctl"); 2406 return -ENOTSUP; 2407 } 2408 2409 memcpy(&msg.payload.session.session_setup_data, sess_info, 2410 sizeof(CryptoDevBackendSymSessionInfo)); 2411 if (sess_info->key_len) { 2412 memcpy(&msg.payload.session.key, sess_info->cipher_key, 2413 sess_info->key_len); 2414 } 2415 if (sess_info->auth_key_len > 0) { 2416 memcpy(&msg.payload.session.auth_key, sess_info->auth_key, 2417 sess_info->auth_key_len); 2418 } 2419 ret = vhost_user_write(dev, &msg, NULL, 0); 2420 if (ret < 0) { 2421 error_report("vhost_user_write() return %d, create session failed", 2422 ret); 2423 return ret; 2424 } 2425 2426 ret = vhost_user_read(dev, &msg); 2427 if (ret < 0) { 2428 error_report("vhost_user_read() return %d, create session failed", 2429 ret); 2430 return ret; 2431 } 2432 2433 if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) { 2434 error_report("Received unexpected msg type. Expected %d received %d", 2435 VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request); 2436 return -EPROTO; 2437 } 2438 2439 if (msg.hdr.size != sizeof(msg.payload.session)) { 2440 error_report("Received bad msg size."); 2441 return -EPROTO; 2442 } 2443 2444 if (msg.payload.session.session_id < 0) { 2445 error_report("Bad session id: %" PRId64 "", 2446 msg.payload.session.session_id); 2447 return -EINVAL; 2448 } 2449 *session_id = msg.payload.session.session_id; 2450 2451 return 0; 2452 } 2453 2454 static int 2455 vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id) 2456 { 2457 int ret; 2458 bool crypto_session = virtio_has_feature(dev->protocol_features, 2459 VHOST_USER_PROTOCOL_F_CRYPTO_SESSION); 2460 VhostUserMsg msg = { 2461 .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION, 2462 .hdr.flags = VHOST_USER_VERSION, 2463 .hdr.size = sizeof(msg.payload.u64), 2464 }; 2465 msg.payload.u64 = session_id; 2466 2467 if (!crypto_session) { 2468 error_report("vhost-user trying to send unhandled ioctl"); 2469 return -ENOTSUP; 2470 } 2471 2472 ret = vhost_user_write(dev, &msg, NULL, 0); 2473 if (ret < 0) { 2474 error_report("vhost_user_write() return %d, close session failed", 2475 ret); 2476 return ret; 2477 } 2478 2479 return 0; 2480 } 2481 2482 static bool vhost_user_mem_section_filter(struct vhost_dev *dev, 2483 MemoryRegionSection *section) 2484 { 2485 bool result; 2486 2487 result = memory_region_get_fd(section->mr) >= 0; 2488 2489 return result; 2490 } 2491 2492 static int vhost_user_get_inflight_fd(struct vhost_dev *dev, 2493 uint16_t queue_size, 2494 struct vhost_inflight *inflight) 2495 { 2496 void *addr; 2497 int fd; 2498 int ret; 2499 struct vhost_user *u = dev->opaque; 2500 CharBackend *chr = u->user->chr; 2501 VhostUserMsg msg = { 2502 .hdr.request = VHOST_USER_GET_INFLIGHT_FD, 2503 .hdr.flags = VHOST_USER_VERSION, 2504 .payload.inflight.num_queues = dev->nvqs, 2505 .payload.inflight.queue_size = queue_size, 2506 .hdr.size = sizeof(msg.payload.inflight), 2507 }; 2508 2509 if (!virtio_has_feature(dev->protocol_features, 2510 VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { 2511 return 0; 2512 } 2513 2514 ret = vhost_user_write(dev, &msg, NULL, 0); 2515 if (ret < 0) { 2516 return ret; 2517 } 2518 2519 ret = vhost_user_read(dev, &msg); 2520 if (ret < 0) { 2521 return ret; 2522 } 2523 2524 if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) { 2525 error_report("Received unexpected msg type. " 2526 "Expected %d received %d", 2527 VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request); 2528 return -EPROTO; 2529 } 2530 2531 if (msg.hdr.size != sizeof(msg.payload.inflight)) { 2532 error_report("Received bad msg size."); 2533 return -EPROTO; 2534 } 2535 2536 if (!msg.payload.inflight.mmap_size) { 2537 return 0; 2538 } 2539 2540 fd = qemu_chr_fe_get_msgfd(chr); 2541 if (fd < 0) { 2542 error_report("Failed to get mem fd"); 2543 return -EIO; 2544 } 2545 2546 addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE, 2547 MAP_SHARED, fd, msg.payload.inflight.mmap_offset); 2548 2549 if (addr == MAP_FAILED) { 2550 error_report("Failed to mmap mem fd"); 2551 close(fd); 2552 return -EFAULT; 2553 } 2554 2555 inflight->addr = addr; 2556 inflight->fd = fd; 2557 inflight->size = msg.payload.inflight.mmap_size; 2558 inflight->offset = msg.payload.inflight.mmap_offset; 2559 inflight->queue_size = queue_size; 2560 2561 return 0; 2562 } 2563 2564 static int vhost_user_set_inflight_fd(struct vhost_dev *dev, 2565 struct vhost_inflight *inflight) 2566 { 2567 VhostUserMsg msg = { 2568 .hdr.request = VHOST_USER_SET_INFLIGHT_FD, 2569 .hdr.flags = VHOST_USER_VERSION, 2570 .payload.inflight.mmap_size = inflight->size, 2571 .payload.inflight.mmap_offset = inflight->offset, 2572 .payload.inflight.num_queues = dev->nvqs, 2573 .payload.inflight.queue_size = inflight->queue_size, 2574 .hdr.size = sizeof(msg.payload.inflight), 2575 }; 2576 2577 if (!virtio_has_feature(dev->protocol_features, 2578 VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) { 2579 return 0; 2580 } 2581 2582 return vhost_user_write(dev, &msg, &inflight->fd, 1); 2583 } 2584 2585 static void vhost_user_state_destroy(gpointer data) 2586 { 2587 VhostUserHostNotifier *n = (VhostUserHostNotifier *) data; 2588 if (n) { 2589 vhost_user_host_notifier_remove(n, NULL); 2590 object_unparent(OBJECT(&n->mr)); 2591 /* 2592 * We can't free until vhost_user_host_notifier_remove has 2593 * done it's thing so schedule the free with RCU. 2594 */ 2595 g_free_rcu(n, rcu); 2596 } 2597 } 2598 2599 bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp) 2600 { 2601 if (user->chr) { 2602 error_setg(errp, "Cannot initialize vhost-user state"); 2603 return false; 2604 } 2605 user->chr = chr; 2606 user->memory_slots = 0; 2607 user->notifiers = g_ptr_array_new_full(VIRTIO_QUEUE_MAX / 4, 2608 &vhost_user_state_destroy); 2609 return true; 2610 } 2611 2612 void vhost_user_cleanup(VhostUserState *user) 2613 { 2614 if (!user->chr) { 2615 return; 2616 } 2617 memory_region_transaction_begin(); 2618 user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true); 2619 memory_region_transaction_commit(); 2620 user->chr = NULL; 2621 } 2622 2623 const VhostOps user_ops = { 2624 .backend_type = VHOST_BACKEND_TYPE_USER, 2625 .vhost_backend_init = vhost_user_backend_init, 2626 .vhost_backend_cleanup = vhost_user_backend_cleanup, 2627 .vhost_backend_memslots_limit = vhost_user_memslots_limit, 2628 .vhost_set_log_base = vhost_user_set_log_base, 2629 .vhost_set_mem_table = vhost_user_set_mem_table, 2630 .vhost_set_vring_addr = vhost_user_set_vring_addr, 2631 .vhost_set_vring_endian = vhost_user_set_vring_endian, 2632 .vhost_set_vring_num = vhost_user_set_vring_num, 2633 .vhost_set_vring_base = vhost_user_set_vring_base, 2634 .vhost_get_vring_base = vhost_user_get_vring_base, 2635 .vhost_set_vring_kick = vhost_user_set_vring_kick, 2636 .vhost_set_vring_call = vhost_user_set_vring_call, 2637 .vhost_set_vring_err = vhost_user_set_vring_err, 2638 .vhost_set_features = vhost_user_set_features, 2639 .vhost_get_features = vhost_user_get_features, 2640 .vhost_set_owner = vhost_user_set_owner, 2641 .vhost_reset_device = vhost_user_reset_device, 2642 .vhost_get_vq_index = vhost_user_get_vq_index, 2643 .vhost_set_vring_enable = vhost_user_set_vring_enable, 2644 .vhost_requires_shm_log = vhost_user_requires_shm_log, 2645 .vhost_migration_done = vhost_user_migration_done, 2646 .vhost_backend_can_merge = vhost_user_can_merge, 2647 .vhost_net_set_mtu = vhost_user_net_set_mtu, 2648 .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback, 2649 .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg, 2650 .vhost_get_config = vhost_user_get_config, 2651 .vhost_set_config = vhost_user_set_config, 2652 .vhost_crypto_create_session = vhost_user_crypto_create_session, 2653 .vhost_crypto_close_session = vhost_user_crypto_close_session, 2654 .vhost_backend_mem_section_filter = vhost_user_mem_section_filter, 2655 .vhost_get_inflight_fd = vhost_user_get_inflight_fd, 2656 .vhost_set_inflight_fd = vhost_user_set_inflight_fd, 2657 }; 2658