/*
 * QTest testcase for the vhost-user
 *
 * Copyright (c) 2014 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "libqtest-single.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "qemu/range.h"
#include "qemu/sockets.h"
#include "chardev/char-fe.h"
#include "qemu/memfd.h"
#include "qemu/module.h"
#include "sysemu/sysemu.h"
#include "libqos/libqos.h"
#include "libqos/pci-pc.h"
#include "libqos/virtio-pci.h"

#include "libqos/malloc-pc.h"
#include "libqos/qgraph_internal.h"
#include "hw/virtio/virtio-net.h"

#include "standard-headers/linux/vhost_types.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_net.h"
#include "standard-headers/linux/virtio_gpio.h"
#include "standard-headers/linux/virtio_scmi.h"

#ifdef CONFIG_LINUX
#include <sys/vfs.h>
#endif


#define QEMU_CMD_MEM    " -m %d -object memory-backend-file,id=mem,size=%dM," \
                        "mem-path=%s,share=on -numa node,memdev=mem"
#define QEMU_CMD_MEMFD  " -m %d -object memory-backend-memfd,id=mem,size=%dM," \
                        " -numa node,memdev=mem"
#define QEMU_CMD_SHM    " -m %d -object memory-backend-shm,id=mem,size=%dM," \
                        " -numa node,memdev=mem"
#define QEMU_CMD_CHR    " -chardev socket,id=%s,path=%s%s"
#define QEMU_CMD_NETDEV " -netdev vhost-user,id=hs0,chardev=%s,vhostforce=on"

#define HUGETLBFS_MAGIC       0x958458f6

/*********** FROM hw/virtio/vhost-user.c *************************************/

#define VHOST_MEMORY_MAX_NREGIONS    8
#define VHOST_MAX_VIRTQUEUES    0x100

#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VIRTIO_F_VERSION_1 32

#define VHOST_USER_PROTOCOL_F_MQ 0
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
#define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN 6
#define VHOST_USER_PROTOCOL_F_CONFIG 9

#define VHOST_LOG_PAGE 0x1000

typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_MAX
} VhostUserRequest;

typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;

typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;
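
/*
 * On the wire, every vhost-user message is a fixed 12-byte header
 * (request, flags, size) followed by `size` bytes of payload. A
 * GET_FEATURES reply, for example, carries
 * flags = VHOST_USER_VERSION | VHOST_USER_REPLY_MASK, size = 8 and a
 * u64 of feature bits; file descriptors travel out-of-band as
 * SCM_RIGHTS ancillary data on the same socket.
 */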
typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1<<2)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserLog log;
    } payload;
} QEMU_PACKED VhostUserMsg;

static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(m.request) \
                            + sizeof(m.flags) \
                            + sizeof(m.size))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)
/*****************************************************************************/

enum {
    TEST_FLAGS_OK,
    TEST_FLAGS_DISCONNECT,
    TEST_FLAGS_BAD,
    TEST_FLAGS_END,
};

enum {
    VHOST_USER_NET,
    VHOST_USER_GPIO,
    VHOST_USER_SCMI,
};

typedef struct TestServer {
    gchar *socket_path;
    gchar *mig_path;
    gchar *chr_name;
    gchar *tmpfs;
    CharBackend chr;
    int fds_num;
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    VhostUserMemory memory;
    GMainContext *context;
    GMainLoop *loop;
    GThread *thread;
    GMutex data_mutex;
    GCond data_cond;
    int log_fd;
    uint64_t rings;
    bool test_fail;
    int test_flags;
    int queues;
    struct vhost_user_ops *vu_ops;
} TestServer;

struct vhost_user_ops {
    /* Device types. */
    int type;
    void (*append_opts)(TestServer *s, GString *cmd_line,
                        const char *chr_opts);

    /* VHOST-USER commands. */
    uint64_t (*get_features)(TestServer *s);
    void (*set_features)(TestServer *s, CharBackend *chr,
                         VhostUserMsg *msg);
    void (*get_protocol_features)(TestServer *s,
                                  CharBackend *chr, VhostUserMsg *msg);
};

static const char *init_hugepagefs(void);
static TestServer *test_server_new(const gchar *name,
        struct vhost_user_ops *ops);
static void test_server_free(TestServer *server);
static void test_server_listen(TestServer *server);

enum test_memfd {
    TEST_MEMFD_AUTO,
    TEST_MEMFD_YES,
    TEST_MEMFD_NO,
    TEST_MEMFD_SHM,
};

static void append_vhost_net_opts(TestServer *s, GString *cmd_line,
                                  const char *chr_opts)
{
    g_string_append_printf(cmd_line, QEMU_CMD_CHR QEMU_CMD_NETDEV,
                           s->chr_name, s->socket_path,
                           chr_opts, s->chr_name);
}

/*
 * For GPIO there are no other magic devices we need to add (like
 * block or netdev) so all we need to worry about is the vhost-user
 * chardev socket.
 */
static void append_vhost_gpio_opts(TestServer *s, GString *cmd_line,
                                   const char *chr_opts)
{
    g_string_append_printf(cmd_line, QEMU_CMD_CHR,
                           s->chr_name, s->socket_path,
                           chr_opts);
}
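
/*
 * Guest RAM must be backed by something QEMU can hand to the backend
 * as a file descriptor: a file on hugetlbfs (or the test's tmpfs
 * fallback), a sealed memfd, or POSIX shared memory. The same fds
 * come back to us later in VHOST_USER_SET_MEM_TABLE.
 */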
static void append_mem_opts(TestServer *server, GString *cmd_line,
                            int size, enum test_memfd memfd)
{
    if (memfd == TEST_MEMFD_AUTO) {
        memfd = qemu_memfd_check(MFD_ALLOW_SEALING) ? TEST_MEMFD_YES
                                                    : TEST_MEMFD_NO;
    }

    if (memfd == TEST_MEMFD_YES) {
        g_string_append_printf(cmd_line, QEMU_CMD_MEMFD, size, size);
    } else if (memfd == TEST_MEMFD_SHM) {
        g_string_append_printf(cmd_line, QEMU_CMD_SHM, size, size);
    } else {
        const char *root = init_hugepagefs() ? : server->tmpfs;

        g_string_append_printf(cmd_line, QEMU_CMD_MEM, size, size, root);
    }
}

static bool wait_for_fds(TestServer *s)
{
    gint64 end_time;
    bool got_region;
    int i;

    g_mutex_lock(&s->data_mutex);

    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (!s->fds_num) {
        if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
            /* timeout has passed */
            g_assert(s->fds_num);
            break;
        }
    }

    /* check for sanity */
    g_assert_cmpint(s->fds_num, >, 0);
    g_assert_cmpint(s->fds_num, ==, s->memory.nregions);

    g_mutex_unlock(&s->data_mutex);

    got_region = false;
    for (i = 0; i < s->memory.nregions; ++i) {
        VhostUserMemoryRegion *reg = &s->memory.regions[i];
        if (reg->guest_phys_addr == 0) {
            got_region = true;
            break;
        }
    }
    if (!got_region) {
        g_test_skip("No memory at address 0x0");
    }
    return got_region;
}

static void read_guest_mem_server(QTestState *qts, TestServer *s)
{
    uint8_t *guest_mem;
    int i, j;
    size_t size;

    g_mutex_lock(&s->data_mutex);

    /* iterate all regions */
    for (i = 0; i < s->fds_num; i++) {

        /* We'll check only the region starting at 0x0 */
        if (s->memory.regions[i].guest_phys_addr != 0x0) {
            continue;
        }

        g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);

        size = s->memory.regions[i].memory_size +
            s->memory.regions[i].mmap_offset;

        guest_mem = mmap(0, size, PROT_READ | PROT_WRITE,
                         MAP_SHARED, s->fds[i], 0);

        g_assert(guest_mem != MAP_FAILED);
        guest_mem += (s->memory.regions[i].mmap_offset / sizeof(*guest_mem));

        for (j = 0; j < 1024; j++) {
            uint32_t a = qtest_readb(qts, s->memory.regions[i].guest_phys_addr + j);
            uint32_t b = guest_mem[j];

            g_assert_cmpint(a, ==, b);
        }

        munmap(guest_mem, s->memory.regions[i].memory_size);
    }

    g_mutex_unlock(&s->data_mutex);
}

static void *thread_function(void *data)
{
    GMainLoop *loop = data;
    g_main_loop_run(loop);
    return NULL;
}

static int chr_can_read(void *opaque)
{
    return VHOST_USER_HDR_SIZE;
}
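
/*
 * chr_read() is the core of the stub backend: chr_can_read() limits
 * each read to the 12-byte header, the payload (if any) is pulled in
 * separately, and every message is answered with just enough state to
 * keep QEMU's vhost-user handshake going while the tests observe it.
 */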
static void chr_read(void *opaque, const uint8_t *buf, int size)
{
    g_autoptr(GError) err = NULL;
    TestServer *s = opaque;
    CharBackend *chr = &s->chr;
    VhostUserMsg msg;
    uint8_t *p = (uint8_t *) &msg;
    int fd = -1;

    if (s->test_fail) {
        qemu_chr_fe_disconnect(chr);
        /* now switch to non-failure */
        s->test_fail = false;
    }

    if (size != VHOST_USER_HDR_SIZE) {
        qos_printf("%s: Wrong message size received %d\n", __func__, size);
        return;
    }

    g_mutex_lock(&s->data_mutex);
    memcpy(p, buf, VHOST_USER_HDR_SIZE);

    if (msg.size) {
        p += VHOST_USER_HDR_SIZE;
        size = qemu_chr_fe_read_all(chr, p, msg.size);
        if (size != msg.size) {
            qos_printf("%s: Wrong message size received %d != %d\n",
                       __func__, size, msg.size);
            goto out;
        }
    }

    switch (msg.request) {
    case VHOST_USER_GET_FEATURES:
        /* Mandatory for tests to define get_features */
        g_assert(s->vu_ops->get_features);

        /* send back features to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.u64);

        if (s->test_flags >= TEST_FLAGS_BAD) {
            msg.payload.u64 = 0;
            s->test_flags = TEST_FLAGS_END;
        } else {
            msg.payload.u64 = s->vu_ops->get_features(s);
        }

        qemu_chr_fe_write_all(chr, (uint8_t *) &msg,
                              VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_FEATURES:
        if (s->vu_ops->set_features) {
            s->vu_ops->set_features(s, chr, &msg);
        }
        break;

    case VHOST_USER_SET_OWNER:
        /*
         * We don't need to do anything here, the remote is just
         * letting us know it is in charge. Just log it.
         */
        qos_printf("set_owner: start of session\n");
        break;

    case VHOST_USER_GET_PROTOCOL_FEATURES:
        if (s->vu_ops->get_protocol_features) {
            s->vu_ops->get_protocol_features(s, chr, &msg);
        }
        break;

    case VHOST_USER_GET_CONFIG:
        /*
         * Treat GET_CONFIG as a NOP and just reply and let the guest
         * consider we have updated its memory. Tests currently don't
         * require working configs.
         */
        msg.flags |= VHOST_USER_REPLY_MASK;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_PROTOCOL_FEATURES:
        /*
         * We did set VHOST_USER_F_PROTOCOL_FEATURES so it's valid for
         * the remote end to send this. There is no handshake reply so
         * just log the details for debugging.
         */
        qos_printf("set_protocol_features: 0x%"PRIx64 "\n", msg.payload.u64);
        break;

    /*
     * A real vhost-user backend would actually set the size and
     * address of the vrings but we can simply report them.
     */
    case VHOST_USER_SET_VRING_NUM:
        qos_printf("set_vring_num: %d/%d\n",
                   msg.payload.state.index, msg.payload.state.num);
        break;
    case VHOST_USER_SET_VRING_ADDR:
        qos_printf("set_vring_addr: 0x%"PRIx64"/0x%"PRIx64"/0x%"PRIx64"\n",
                   msg.payload.addr.avail_user_addr,
                   msg.payload.addr.desc_user_addr,
                   msg.payload.addr.used_user_addr);
        break;

    case VHOST_USER_GET_VRING_BASE:
        /* send back vring base to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.state);
        msg.payload.state.num = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);

        assert(msg.payload.state.index < s->queues * 2);
        s->rings &= ~(0x1ULL << msg.payload.state.index);
        g_cond_broadcast(&s->data_cond);
        break;
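
    /*
     * SET_MEM_TABLE describes QEMU's RAM regions; the region fds
     * arrive as SCM_RIGHTS ancillary data on the same socket and are
     * stashed for wait_for_fds()/read_guest_mem_server() to mmap().
     */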
    case VHOST_USER_SET_MEM_TABLE:
        /* received the mem table */
        memcpy(&s->memory, &msg.payload.memory, sizeof(msg.payload.memory));
        s->fds_num = qemu_chr_fe_get_msgfds(chr, s->fds,
                                            G_N_ELEMENTS(s->fds));

        /* signal the test that it can continue */
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_VRING_KICK:
    case VHOST_USER_SET_VRING_CALL:
        /* consume the fd */
        if (!qemu_chr_fe_get_msgfds(chr, &fd, 1) && fd < 0) {
            qos_printf("call fd: %d, do not set non-blocking\n", fd);
            break;
        }
        /*
         * This is a non-blocking eventfd.
         * The receive function forces it to be blocking,
         * so revert it back to non-blocking.
         */
        g_unix_set_fd_nonblocking(fd, true, &err);
        g_assert_no_error(err);
        break;

    case VHOST_USER_SET_LOG_BASE:
        if (s->log_fd != -1) {
            close(s->log_fd);
            s->log_fd = -1;
        }
        qemu_chr_fe_get_msgfds(chr, &s->log_fd, 1);
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE);

        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_VRING_BASE:
        assert(msg.payload.state.index < s->queues * 2);
        s->rings |= 0x1ULL << msg.payload.state.index;
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_GET_QUEUE_NUM:
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.u64);
        msg.payload.u64 = s->queues;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_VRING_ENABLE:
        /*
         * Another case we ignore as we don't need to respond. With a
         * fully functioning vhost-user we would enable/disable the
         * vring monitoring.
         */
        qos_printf("set_vring(%d)=%s\n", msg.payload.state.index,
                   msg.payload.state.num ? "enabled" : "disabled");
        break;

    default:
        qos_printf("vhost-user: un-handled message: %d\n", msg.request);
        break;
    }

out:
    g_mutex_unlock(&s->data_mutex);
}

static const char *init_hugepagefs(void)
{
#ifdef CONFIG_LINUX
    static const char *hugepagefs;
    const char *path = getenv("QTEST_HUGETLBFS_PATH");
    struct statfs fs;
    int ret;

    if (hugepagefs) {
        return hugepagefs;
    }
    if (!path) {
        return NULL;
    }

    if (access(path, R_OK | W_OK | X_OK)) {
        qos_printf("access on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        qos_printf("statfs on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        qos_printf("Warning: path not on HugeTLBFS: %s", path);
        g_test_fail();
        return NULL;
    }

    hugepagefs = path;
    return hugepagefs;
#else
    return NULL;
#endif
}

static TestServer *test_server_new(const gchar *name,
        struct vhost_user_ops *ops)
{
    TestServer *server = g_new0(TestServer, 1);
    g_autofree const char *tmpfs = NULL;
    GError *err = NULL;

    server->context = g_main_context_new();
    server->loop = g_main_loop_new(server->context, FALSE);

    /* run the main loop thread so the chardev may operate */
    server->thread = g_thread_new(NULL, thread_function, server->loop);

    tmpfs = g_dir_make_tmp("vhost-test-XXXXXX", &err);
    if (!tmpfs) {
        g_test_message("Can't create temporary directory in %s: %s",
                       g_get_tmp_dir(), err->message);
        g_error_free(err);
    }
    g_assert(tmpfs);

    server->tmpfs = g_strdup(tmpfs);
    server->socket_path = g_strdup_printf("%s/%s.sock", tmpfs, name);
    server->mig_path = g_strdup_printf("%s/%s.mig", tmpfs, name);
    server->chr_name = g_strdup_printf("chr-%s", name);

    g_mutex_init(&server->data_mutex);
    g_cond_init(&server->data_cond);

    server->log_fd = -1;
    server->queues = 1;
    server->vu_ops = ops;

    return server;
}
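
/*
 * A CHR_EVENT_CLOSED after we replied with deliberately bad features
 * (TEST_FLAGS_END) means QEMU gave up cleanly, which is exactly what
 * the flags-mismatch test expects, so flip back to TEST_FLAGS_OK.
 */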
static void chr_event(void *opaque, QEMUChrEvent event)
{
    TestServer *s = opaque;

    if (s->test_flags == TEST_FLAGS_END &&
        event == CHR_EVENT_CLOSED) {
        s->test_flags = TEST_FLAGS_OK;
    }
}

static void test_server_create_chr(TestServer *server, const gchar *opt)
{
    g_autofree gchar *chr_path = g_strdup_printf("unix:%s%s",
                                                 server->socket_path, opt);
    Chardev *chr;

    chr = qemu_chr_new(server->chr_name, chr_path, server->context);
    g_assert(chr);

    qemu_chr_fe_init(&server->chr, chr, &error_abort);
    qemu_chr_fe_set_handlers(&server->chr, chr_can_read, chr_read,
                             chr_event, NULL, server, server->context, true);
}

static void test_server_listen(TestServer *server)
{
    test_server_create_chr(server, ",server=on,wait=off");
}

static void test_server_free(TestServer *server)
{
    int i, ret;

    /* finish the helper thread and dispatch pending sources */
    g_main_loop_quit(server->loop);
    g_thread_join(server->thread);
    while (g_main_context_pending(NULL)) {
        g_main_context_iteration(NULL, TRUE);
    }

    unlink(server->socket_path);
    g_free(server->socket_path);

    unlink(server->mig_path);
    g_free(server->mig_path);

    ret = rmdir(server->tmpfs);
    if (ret != 0) {
        g_test_message("unable to rmdir: path (%s): %s",
                       server->tmpfs, strerror(errno));
    }
    g_free(server->tmpfs);

    qemu_chr_fe_deinit(&server->chr, true);

    for (i = 0; i < server->fds_num; i++) {
        close(server->fds[i]);
    }

    if (server->log_fd != -1) {
        close(server->log_fd);
    }

    g_free(server->chr_name);

    g_main_loop_unref(server->loop);
    g_main_context_unref(server->context);
    g_cond_clear(&server->data_cond);
    g_mutex_clear(&server->data_mutex);
    g_free(server);
}

static void wait_for_log_fd(TestServer *s)
{
    gint64 end_time;

    g_mutex_lock(&s->data_mutex);
    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (s->log_fd == -1) {
        if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
            /* timeout has passed */
            g_assert(s->log_fd != -1);
            break;
        }
    }

    g_mutex_unlock(&s->data_mutex);
}

static void write_guest_mem(TestServer *s, uint32_t seed)
{
    uint32_t *guest_mem;
    int i, j;
    size_t size;

    /* iterate all regions */
    for (i = 0; i < s->fds_num; i++) {

        /* We'll write only the region starting at 0x0 */
        if (s->memory.regions[i].guest_phys_addr != 0x0) {
            continue;
        }

        g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);

        size = s->memory.regions[i].memory_size +
            s->memory.regions[i].mmap_offset;

        guest_mem = mmap(0, size, PROT_READ | PROT_WRITE,
                         MAP_SHARED, s->fds[i], 0);

        g_assert(guest_mem != MAP_FAILED);
        guest_mem += (s->memory.regions[i].mmap_offset / sizeof(*guest_mem));

        for (j = 0; j < 256; j++) {
            guest_mem[j] = seed + j;
        }

        munmap(guest_mem, s->memory.regions[i].memory_size);
        break;
    }
}
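
/*
 * The dirty log is a bitmap with one bit per VHOST_LOG_PAGE-sized
 * page, so it must cover the highest guest physical address:
 * last_addr / (8 * VHOST_LOG_PAGE) + 1 bytes.
 */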
static guint64 get_log_size(TestServer *s)
{
    guint64 log_size = 0;
    int i;

    for (i = 0; i < s->memory.nregions; ++i) {
        VhostUserMemoryRegion *reg = &s->memory.regions[i];
        guint64 last = range_get_last(reg->guest_phys_addr,
                                      reg->memory_size);
        log_size = MAX(log_size, last / (8 * VHOST_LOG_PAGE) + 1);
    }

    return log_size;
}
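
/*
 * While the migrate test runs, the source and destination stubs must
 * never both have active rings at the same time; the GSource below
 * asserts this on every main-loop check.
 */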
typedef struct TestMigrateSource {
    GSource source;
    TestServer *src;
    TestServer *dest;
} TestMigrateSource;

static gboolean
test_migrate_source_check(GSource *source)
{
    TestMigrateSource *t = (TestMigrateSource *)source;
    gboolean overlap = t->src->rings && t->dest->rings;

    g_assert(!overlap);

    return FALSE;
}

GSourceFuncs test_migrate_source_funcs = {
    .check = test_migrate_source_check,
};

static void vhost_user_test_cleanup(void *s)
{
    TestServer *server = s;

    qos_invalidate_command_line();
    test_server_free(server);
}

static void *vhost_user_test_setup(GString *cmd_line, void *arg)
{
    TestServer *server = test_server_new("vhost-user-test", arg);
    test_server_listen(server);

    append_mem_opts(server, cmd_line, 256, TEST_MEMFD_AUTO);
    server->vu_ops->append_opts(server, cmd_line, "");

    g_test_queue_destroy(vhost_user_test_cleanup, server);

    return server;
}

static void *vhost_user_test_setup_memfd(GString *cmd_line, void *arg)
{
    TestServer *server = test_server_new("vhost-user-test", arg);
    test_server_listen(server);

    append_mem_opts(server, cmd_line, 256, TEST_MEMFD_YES);
    server->vu_ops->append_opts(server, cmd_line, "");

    g_test_queue_destroy(vhost_user_test_cleanup, server);

    return server;
}

static void *vhost_user_test_setup_shm(GString *cmd_line, void *arg)
{
    TestServer *server = test_server_new("vhost-user-test", arg);
    test_server_listen(server);

    append_mem_opts(server, cmd_line, 256, TEST_MEMFD_SHM);
    server->vu_ops->append_opts(server, cmd_line, "");

    g_test_queue_destroy(vhost_user_test_cleanup, server);

    return server;
}

static void test_read_guest_mem(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *server = arg;

    if (!wait_for_fds(server)) {
        return;
    }

    read_guest_mem_server(global_qtest, server);
}

static void test_migrate(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;
    TestServer *dest;
    GString *dest_cmdline;
    char *uri;
    QTestState *to;
    GSource *source;
    QDict *rsp;
    guint8 *log;
    guint64 size;

    if (!wait_for_fds(s)) {
        return;
    }

    dest = test_server_new("dest", s->vu_ops);
    dest_cmdline = g_string_new(qos_get_current_command_line());
    uri = g_strdup_printf("%s%s", "unix:", dest->mig_path);

    size = get_log_size(s);
    g_assert_cmpint(size, ==, (256 * 1024 * 1024) / (VHOST_LOG_PAGE * 8));

    test_server_listen(dest);
    g_string_append_printf(dest_cmdline, " -incoming %s", uri);
    append_mem_opts(dest, dest_cmdline, 256, TEST_MEMFD_AUTO);
    dest->vu_ops->append_opts(dest, dest_cmdline, "");
    to = qtest_init(dest_cmdline->str);

    /* This would be where you call qos_allocate_objects(to, NULL), if you want
     * to talk to the QVirtioNet object on the destination.
     */

    source = g_source_new(&test_migrate_source_funcs,
                          sizeof(TestMigrateSource));
    ((TestMigrateSource *)source)->src = s;
    ((TestMigrateSource *)source)->dest = dest;
    g_source_attach(source, s->context);

    /* slow down migration to have time to fiddle with log */
    /* TODO: qtest could learn to break on some places */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 10 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    rsp = qmp("{ 'execute': 'migrate', 'arguments': { 'uri': %s } }", uri);
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    wait_for_log_fd(s);

    log = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, s->log_fd, 0);
    g_assert(log != MAP_FAILED);

    /* modify first page */
    write_guest_mem(s, 0x42);
    log[0] = 1;
    munmap(log, size);

    /* speed things up */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 0 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    qmp_eventwait("STOP");
    qtest_qmp_eventwait(to, "RESUME");

    g_assert(wait_for_fds(dest));
    read_guest_mem_server(to, dest);

    g_source_destroy(source);
    g_source_unref(source);

    qtest_quit(to);
    test_server_free(dest);
    g_free(uri);
    g_string_free(dest_cmdline, true);
}

static void wait_for_rings_started(TestServer *s, size_t count)
{
    gint64 end_time;

    g_mutex_lock(&s->data_mutex);
    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (ctpop64(s->rings) != count) {
        if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
            /* timeout has passed */
            g_assert_cmpint(ctpop64(s->rings), ==, count);
            break;
        }
    }

    g_mutex_unlock(&s->data_mutex);
}

static inline void test_server_connect(TestServer *server)
{
    test_server_create_chr(server, ",reconnect=1");
}

static gboolean
reconnect_cb(gpointer user_data)
{
    TestServer *s = user_data;

    qemu_chr_fe_disconnect(&s->chr);

    return FALSE;
}

static gpointer
connect_thread(gpointer data)
{
    TestServer *s = data;

    /* wait for qemu to start before first try, to avoid extra warnings */
    g_usleep(G_USEC_PER_SEC);
    test_server_connect(s);

    return NULL;
}

static void *vhost_user_test_setup_reconnect(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("reconnect", arg);

    g_thread_unref(g_thread_new("connect", connect_thread, s));
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");

    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}

static void test_reconnect(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;
    GSource *src;

    if (!wait_for_fds(s)) {
        return;
    }

    wait_for_rings_started(s, 2);

    /* reconnect */
    s->fds_num = 0;
    s->rings = 0;
    src = g_idle_source_new();
    g_source_set_callback(src, reconnect_cb, s, NULL);
    g_source_attach(src, s->context);
    g_source_unref(src);
    g_assert(wait_for_fds(s));
    wait_for_rings_started(s, 2);
}
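
/*
 * For the failure tests the stub is the connecting side (QEMU's
 * chardev gets ",server=on" and ours ",reconnect=1"): the first
 * session is dropped or fed bad features, the chardev retries the
 * connection, and the test succeeds once the rings finally start.
 */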
static void *vhost_user_test_setup_connect_fail(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("connect-fail", arg);

    s->test_fail = true;

    g_thread_unref(g_thread_new("connect", connect_thread, s));
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");

    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}

static void *vhost_user_test_setup_flags_mismatch(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("flags-mismatch", arg);

    s->test_flags = TEST_FLAGS_DISCONNECT;

    g_thread_unref(g_thread_new("connect", connect_thread, s));
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");

    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}

static void test_vhost_user_started(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;

    if (!wait_for_fds(s)) {
        return;
    }
    wait_for_rings_started(s, 2);
}

static void *vhost_user_test_setup_multiqueue(GString *cmd_line, void *arg)
{
    TestServer *s = vhost_user_test_setup(cmd_line, arg);

    s->queues = 2;
    g_string_append_printf(cmd_line,
                           " -set netdev.hs0.queues=%d"
                           " -global virtio-net-pci.vectors=%d",
                           s->queues, s->queues * 2 + 2);

    return s;
}

static void test_multiqueue(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;

    wait_for_rings_started(s, s->queues * 2);
}
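
/*
 * Net-specific negotiation: VHOST_F_LOG_ALL enables dirty logging for
 * migration, VHOST_USER_F_PROTOCOL_FEATURES opens the protocol
 * feature handshake, and VIRTIO_NET_F_MQ is only offered when the
 * multiqueue test has asked for more than one queue pair.
 */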
static uint64_t vu_net_get_features(TestServer *s)
{
    uint64_t features = 0x1ULL << VHOST_F_LOG_ALL |
        0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    if (s->queues > 1) {
        features |= 0x1ULL << VIRTIO_NET_F_MQ;
    }

    return features;
}

static void vu_net_set_features(TestServer *s, CharBackend *chr,
                                VhostUserMsg *msg)
{
    g_assert(msg->payload.u64 & (0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES));
    if (s->test_flags == TEST_FLAGS_DISCONNECT) {
        qemu_chr_fe_disconnect(chr);
        s->test_flags = TEST_FLAGS_BAD;
    }
}

static void vu_net_get_protocol_features(TestServer *s, CharBackend *chr,
                                         VhostUserMsg *msg)
{
    /* send back features to qemu */
    msg->flags |= VHOST_USER_REPLY_MASK;
    msg->size = sizeof(m.payload.u64);
    msg->payload.u64 = 1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD;
    msg->payload.u64 |= 1 << VHOST_USER_PROTOCOL_F_CROSS_ENDIAN;
    if (s->queues > 1) {
        msg->payload.u64 |= 1 << VHOST_USER_PROTOCOL_F_MQ;
    }
    qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
}

/* Each VHOST-USER device should have its ops structure defined. */
static struct vhost_user_ops g_vu_net_ops = {
    .type = VHOST_USER_NET,

    .append_opts = append_vhost_net_opts,

    .get_features = vu_net_get_features,
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_net_get_protocol_features,
};

static void register_vhost_user_test(void)
{
    QOSGraphTestOptions opts = {
        .before = vhost_user_test_setup,
        .subprocess = true,
        .arg = &g_vu_net_ops,
    };

    qemu_add_opts(&qemu_chardev_opts);

    qos_add_test("vhost-user/read-guest-mem/memfile",
                 "virtio-net",
                 test_read_guest_mem, &opts);

    opts.before = vhost_user_test_setup_shm;
    qos_add_test("vhost-user/read-guest-mem/shm",
                 "virtio-net",
                 test_read_guest_mem, &opts);

    if (qemu_memfd_check(MFD_ALLOW_SEALING)) {
        opts.before = vhost_user_test_setup_memfd;
        qos_add_test("vhost-user/read-guest-mem/memfd",
                     "virtio-net",
                     test_read_guest_mem, &opts);
    }

    qos_add_test("vhost-user/migrate",
                 "virtio-net",
                 test_migrate, &opts);

    opts.before = vhost_user_test_setup_reconnect;
    qos_add_test("vhost-user/reconnect", "virtio-net",
                 test_reconnect, &opts);

    opts.before = vhost_user_test_setup_connect_fail;
    qos_add_test("vhost-user/connect-fail", "virtio-net",
                 test_vhost_user_started, &opts);

    opts.before = vhost_user_test_setup_flags_mismatch;
    qos_add_test("vhost-user/flags-mismatch", "virtio-net",
                 test_vhost_user_started, &opts);

    opts.before = vhost_user_test_setup_multiqueue;
    opts.edge.extra_device_opts = "mq=on";
    qos_add_test("vhost-user/multiqueue",
                 "virtio-net",
                 test_multiqueue, &opts);
}
libqos_init(register_vhost_user_test);
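
/*
 * The gpio and scmi stubs reuse the net message handlers and differ
 * only in the features they advertise and in not needing a -netdev;
 * each registers its own read-guest-mem test against its device.
 */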
static uint64_t vu_gpio_get_features(TestServer *s)
{
    return 0x1ULL << VIRTIO_F_VERSION_1 |
        0x1ULL << VIRTIO_GPIO_F_IRQ |
        0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
}

/*
 * This stub can't handle all the message types but we should reply
 * that we support VHOST_USER_PROTOCOL_F_CONFIG as gpio would use it
 * talking to a real vhost-user daemon.
 */
static void vu_gpio_get_protocol_features(TestServer *s, CharBackend *chr,
                                          VhostUserMsg *msg)
{
    /* send back features to qemu */
    msg->flags |= VHOST_USER_REPLY_MASK;
    msg->size = sizeof(m.payload.u64);
    msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;

    qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
}

static struct vhost_user_ops g_vu_gpio_ops = {
    .type = VHOST_USER_GPIO,

    .append_opts = append_vhost_gpio_opts,

    .get_features = vu_gpio_get_features,
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_gpio_get_protocol_features,
};

static void register_vhost_gpio_test(void)
{
    QOSGraphTestOptions opts = {
        .before = vhost_user_test_setup,
        .subprocess = true,
        .arg = &g_vu_gpio_ops,
    };

    qemu_add_opts(&qemu_chardev_opts);

    qos_add_test("read-guest-mem/memfile",
                 "vhost-user-gpio", test_read_guest_mem, &opts);
}
libqos_init(register_vhost_gpio_test);

static uint64_t vu_scmi_get_features(TestServer *s)
{
    return 0x1ULL << VIRTIO_F_VERSION_1 |
        0x1ULL << VIRTIO_SCMI_F_P2A_CHANNELS |
        0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
}

static void vu_scmi_get_protocol_features(TestServer *s, CharBackend *chr,
                                          VhostUserMsg *msg)
{
    msg->flags |= VHOST_USER_REPLY_MASK;
    msg->size = sizeof(m.payload.u64);
    msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_MQ;

    qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
}

static struct vhost_user_ops g_vu_scmi_ops = {
    .type = VHOST_USER_SCMI,

    .append_opts = append_vhost_gpio_opts,

    .get_features = vu_scmi_get_features,
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_scmi_get_protocol_features,
};

static void register_vhost_scmi_test(void)
{
    QOSGraphTestOptions opts = {
        .before = vhost_user_test_setup,
        .subprocess = true,
        .arg = &g_vu_scmi_ops,
    };

    qemu_add_opts(&qemu_chardev_opts);

    qos_add_test("scmi/read-guest-mem/memfile",
                 "vhost-user-scmi", test_read_guest_mem, &opts);
}
libqos_init(register_vhost_scmi_test);