/*
 * QTest testcase for vhost-user
 *
 * Copyright (c) 2014 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "libqtest-single.h"
#include "qapi/error.h"
#include "qobject/qdict.h"
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "qemu/range.h"
#include "qemu/sockets.h"
#include "chardev/char-fe.h"
#include "qemu/memfd.h"
#include "qemu/module.h"
#include "system/system.h"
#include "libqos/libqos.h"
#include "libqos/pci-pc.h"
#include "libqos/virtio-pci.h"

#include "libqos/malloc-pc.h"
#include "hw/virtio/virtio-net.h"

#include "standard-headers/linux/vhost_types.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_net.h"
#include "standard-headers/linux/virtio_gpio.h"
#include "standard-headers/linux/virtio_scmi.h"

#ifdef CONFIG_LINUX
#include <sys/vfs.h>
#endif


#define QEMU_CMD_MEM    " -m %d -object memory-backend-file,id=mem,size=%dM," \
                        "mem-path=%s,share=on -numa node,memdev=mem"
#define QEMU_CMD_MEMFD  " -m %d -object memory-backend-memfd,id=mem,size=%dM," \
                        " -numa node,memdev=mem"
#define QEMU_CMD_SHM    " -m %d -object memory-backend-shm,id=mem,size=%dM," \
                        " -numa node,memdev=mem"
#define QEMU_CMD_CHR    " -chardev socket,id=%s,path=%s%s"
#define QEMU_CMD_NETDEV " -netdev vhost-user,id=hs0,chardev=%s,vhostforce=on"

#define HUGETLBFS_MAGIC 0x958458f6

/*********** FROM hw/virtio/vhost-user.c *************************************/

#define VHOST_MEMORY_MAX_NREGIONS    8
#define VHOST_MAX_VIRTQUEUES    0x100

#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VIRTIO_F_VERSION_1 32

#define VHOST_USER_PROTOCOL_F_MQ 0
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
#define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN 6
#define VHOST_USER_PROTOCOL_F_CONFIG 9

#define VHOST_LOG_PAGE 0x1000

typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_MAX
} VhostUserRequest;

typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;

typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

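/*
 * Every message on the vhost-user socket starts with a 12-byte header
 * (request, flags, size) followed by an optional payload.  The low two
 * bits of "flags" carry the protocol version and bit 2 marks a reply;
 * "size" gives the length of the payload that follows the header.
 * VHOST_USER_HDR_SIZE below is derived from the first three fields.
 */
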
typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1<<2)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserLog log;
    } payload;
} QEMU_PACKED VhostUserMsg;

static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(m.request) \
                            + sizeof(m.flags) \
                            + sizeof(m.size))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)
/*****************************************************************************/

enum {
    TEST_FLAGS_OK,
    TEST_FLAGS_DISCONNECT,
    TEST_FLAGS_BAD,
    TEST_FLAGS_END,
};

enum {
    VHOST_USER_NET,
    VHOST_USER_GPIO,
    VHOST_USER_SCMI,
};

typedef struct TestServer {
    gchar *socket_path;
    gchar *mig_path;
    gchar *chr_name;
    gchar *tmpfs;
    CharFrontend chr;
    int fds_num;
    int fds[VHOST_MEMORY_MAX_NREGIONS];
    VhostUserMemory memory;
    GMainContext *context;
    GMainLoop *loop;
    GThread *thread;
    GMutex data_mutex;
    GCond data_cond;
    int log_fd;
    uint64_t rings;
    bool test_fail;
    int test_flags;
    int queues;
    struct vhost_user_ops *vu_ops;
} TestServer;

struct vhost_user_ops {
    /* Device types. */
    int type;
    void (*append_opts)(TestServer *s, GString *cmd_line,
                        const char *chr_opts);

    /* VHOST-USER commands. */
    uint64_t (*get_features)(TestServer *s);
    void (*set_features)(TestServer *s, CharFrontend *chr,
                         VhostUserMsg *msg);
    void (*get_protocol_features)(TestServer *s,
                                  CharFrontend *chr, VhostUserMsg *msg);
};

static const char *init_hugepagefs(void);
static TestServer *test_server_new(const gchar *name,
                                   struct vhost_user_ops *ops);
static void test_server_free(TestServer *server);
static void test_server_listen(TestServer *server);

enum test_memfd {
    TEST_MEMFD_AUTO,
    TEST_MEMFD_YES,
    TEST_MEMFD_NO,
    TEST_MEMFD_SHM,
};

static void append_vhost_net_opts(TestServer *s, GString *cmd_line,
                                  const char *chr_opts)
{
    g_string_append_printf(cmd_line, QEMU_CMD_CHR QEMU_CMD_NETDEV,
                           s->chr_name, s->socket_path,
                           chr_opts, s->chr_name);
}

/*
 * For GPIO there are no other magic devices we need to add (like
 * block or netdev) so all we need to worry about is the vhost-user
 * chardev socket.
 */
static void append_vhost_gpio_opts(TestServer *s, GString *cmd_line,
                                   const char *chr_opts)
{
    g_string_append_printf(cmd_line, QEMU_CMD_CHR,
                           s->chr_name, s->socket_path,
                           chr_opts);
}

static void append_mem_opts(TestServer *server, GString *cmd_line,
                            int size, enum test_memfd memfd)
{
    if (memfd == TEST_MEMFD_AUTO) {
        memfd = qemu_memfd_check(MFD_ALLOW_SEALING) ? TEST_MEMFD_YES
                                                    : TEST_MEMFD_NO;
    }

    if (memfd == TEST_MEMFD_YES) {
        g_string_append_printf(cmd_line, QEMU_CMD_MEMFD, size, size);
    } else if (memfd == TEST_MEMFD_SHM) {
        g_string_append_printf(cmd_line, QEMU_CMD_SHM, size, size);
    } else {
        const char *root = init_hugepagefs() ?: server->tmpfs;

        g_string_append_printf(cmd_line, QEMU_CMD_MEM, size, size, root);
    }
}

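/*
 * By way of example, the net device setup produces a command line
 * along the lines of (tmpfs path abbreviated):
 *
 *   -m 256 -object memory-backend-memfd,id=mem,size=256M -numa node,memdev=mem
 *   -chardev socket,id=chr-vhost-user-test,path=<tmpfs>/vhost-user-test.sock
 *   -netdev vhost-user,id=hs0,chardev=chr-vhost-user-test,vhostforce=on
 */
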
static bool wait_for_fds(TestServer *s)
{
    gint64 end_time;
    bool got_region;
    int i;

    g_mutex_lock(&s->data_mutex);

    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (!s->fds_num) {
        if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
            /* timeout has passed */
            g_assert(s->fds_num);
            break;
        }
    }

    /* check for sanity */
    g_assert_cmpint(s->fds_num, >, 0);
    g_assert_cmpint(s->fds_num, ==, s->memory.nregions);

    g_mutex_unlock(&s->data_mutex);

    got_region = false;
    for (i = 0; i < s->memory.nregions; ++i) {
        VhostUserMemoryRegion *reg = &s->memory.regions[i];
        if (reg->guest_phys_addr == 0) {
            got_region = true;
            break;
        }
    }
    if (!got_region) {
        g_test_skip("No memory at address 0x0");
    }
    return got_region;
}

static void read_guest_mem_server(QTestState *qts, TestServer *s)
{
    uint8_t *guest_mem;
    int i, j;
    size_t size;

    g_mutex_lock(&s->data_mutex);

    /* iterate all regions */
    for (i = 0; i < s->fds_num; i++) {

        /* We'll check only the region starting at 0x0 */
        if (s->memory.regions[i].guest_phys_addr != 0x0) {
            continue;
        }

        g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);

        size = s->memory.regions[i].memory_size +
            s->memory.regions[i].mmap_offset;

        guest_mem = mmap(0, size, PROT_READ | PROT_WRITE,
                         MAP_SHARED, s->fds[i], 0);

        g_assert(guest_mem != MAP_FAILED);
        guest_mem += (s->memory.regions[i].mmap_offset / sizeof(*guest_mem));

        for (j = 0; j < 1024; j++) {
            uint32_t a = qtest_readb(qts, s->memory.regions[i].guest_phys_addr + j);
            uint32_t b = guest_mem[j];

            g_assert_cmpint(a, ==, b);
        }

        munmap(guest_mem, s->memory.regions[i].memory_size);
    }

    g_mutex_unlock(&s->data_mutex);
}

static void *thread_function(void *data)
{
    GMainLoop *loop = data;
    g_main_loop_run(loop);
    return NULL;
}

static int chr_can_read(void *opaque)
{
    return VHOST_USER_HDR_SIZE;
}

static void chr_read(void *opaque, const uint8_t *buf, int size)
{
    TestServer *s = opaque;
    CharFrontend *chr = &s->chr;
    VhostUserMsg msg;
    uint8_t *p = (uint8_t *) &msg;
    int fd = -1;

    if (s->test_fail) {
        qemu_chr_fe_disconnect(chr);
        /* now switch to non-failure */
        s->test_fail = false;
    }

    if (size != VHOST_USER_HDR_SIZE) {
        g_test_message("Wrong message size received %d", size);
        return;
    }

    g_mutex_lock(&s->data_mutex);
    memcpy(p, buf, VHOST_USER_HDR_SIZE);

    if (msg.size) {
        p += VHOST_USER_HDR_SIZE;
        size = qemu_chr_fe_read_all(chr, p, msg.size);
        if (size != msg.size) {
            g_test_message("Wrong message size received %d != %d",
                           size, msg.size);
            goto out;
        }
    }

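    /*
     * Dispatch on the request.  Only the handful of messages the tests
     * actually exercise are handled here; anything else falls through
     * to the default case and is merely logged.
     */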
    switch (msg.request) {
    case VHOST_USER_GET_FEATURES:
        /* Mandatory for tests to define get_features */
        g_assert(s->vu_ops->get_features);

        /* send back features to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.u64);

        if (s->test_flags >= TEST_FLAGS_BAD) {
            msg.payload.u64 = 0;
            s->test_flags = TEST_FLAGS_END;
        } else {
            msg.payload.u64 = s->vu_ops->get_features(s);
        }

        qemu_chr_fe_write_all(chr, (uint8_t *) &msg,
                              VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_FEATURES:
        if (s->vu_ops->set_features) {
            s->vu_ops->set_features(s, chr, &msg);
        }
        break;

    case VHOST_USER_SET_OWNER:
        /*
         * We don't need to do anything here, the remote is just
         * letting us know it is in charge. Just log it.
         */
        g_test_message("set_owner: start of session");
        break;

    case VHOST_USER_GET_PROTOCOL_FEATURES:
        if (s->vu_ops->get_protocol_features) {
            s->vu_ops->get_protocol_features(s, chr, &msg);
        }
        break;

    case VHOST_USER_GET_CONFIG:
        /*
         * Treat GET_CONFIG as a NOP: simply reply and let the guest
         * assume we have updated its config space. Tests currently
         * don't require working configs.
         */
        msg.flags |= VHOST_USER_REPLY_MASK;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_PROTOCOL_FEATURES:
        /*
         * We did set VHOST_USER_F_PROTOCOL_FEATURES, so it's valid for
         * the remote end to send this. There is no handshake reply, so
         * just log the details for debugging.
         */
        g_test_message("set_protocol_features: 0x%"PRIx64, msg.payload.u64);
        break;

    /*
     * A real vhost-user backend would actually set the size and
     * address of the vrings, but we can simply log them.
     */
    case VHOST_USER_SET_VRING_NUM:
        g_test_message("set_vring_num: %d/%d",
                       msg.payload.state.index, msg.payload.state.num);
        break;
    case VHOST_USER_SET_VRING_ADDR:
        g_test_message("set_vring_addr: 0x%"PRIx64"/0x%"PRIx64"/0x%"PRIx64,
                       msg.payload.addr.avail_user_addr,
                       msg.payload.addr.desc_user_addr,
                       msg.payload.addr.used_user_addr);
        break;

    case VHOST_USER_GET_VRING_BASE:
        /* send back vring base to qemu */
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.state);
        msg.payload.state.num = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);

        assert(msg.payload.state.index < s->queues * 2);
        s->rings &= ~(0x1ULL << msg.payload.state.index);
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_MEM_TABLE:
        /* received the mem table */
        memcpy(&s->memory, &msg.payload.memory, sizeof(msg.payload.memory));
        s->fds_num = qemu_chr_fe_get_msgfds(chr, s->fds,
                                            G_N_ELEMENTS(s->fds));

        /* signal the test that it can continue */
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_VRING_KICK:
    case VHOST_USER_SET_VRING_CALL:
        /* consume the fd */
        if (!qemu_chr_fe_get_msgfds(chr, &fd, 1) && fd < 0) {
            g_test_message("call fd: %d, do not set non-blocking", fd);
            break;
        }
        /*
         * This is a non-blocking eventfd.
         * The receive function forces it to be blocking,
         * so revert it back to non-blocking.
         */
        qemu_set_blocking(fd, false, &error_abort);
        break;

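    /*
     * SET_LOG_BASE hands us a shared fd for the dirty-log bitmap used
     * during migration, one bit per VHOST_LOG_PAGE of guest memory.
     * test_migrate() below mmap()s that fd and flips a bit by hand to
     * mark the first page dirty.
     */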
    case VHOST_USER_SET_LOG_BASE:
        if (s->log_fd != -1) {
            close(s->log_fd);
            s->log_fd = -1;
        }
        qemu_chr_fe_get_msgfds(chr, &s->log_fd, 1);
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = 0;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE);

        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_SET_VRING_BASE:
        assert(msg.payload.state.index < s->queues * 2);
        s->rings |= 0x1ULL << msg.payload.state.index;
        g_cond_broadcast(&s->data_cond);
        break;

    case VHOST_USER_GET_QUEUE_NUM:
        msg.flags |= VHOST_USER_REPLY_MASK;
        msg.size = sizeof(m.payload.u64);
        msg.payload.u64 = s->queues;
        p = (uint8_t *) &msg;
        qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
        break;

    case VHOST_USER_SET_VRING_ENABLE:
        /*
         * Another case we ignore as we don't need to respond. With a
         * fully functioning vhost-user we would enable/disable the
         * vring monitoring.
         */
        g_test_message("set_vring(%d)=%s", msg.payload.state.index,
                       msg.payload.state.num ? "enabled" : "disabled");
        break;

    default:
        g_test_message("vhost-user: un-handled message: %d", msg.request);
        break;
    }

out:
    g_mutex_unlock(&s->data_mutex);
}

static const char *init_hugepagefs(void)
{
#ifdef CONFIG_LINUX
    static const char *hugepagefs;
    const char *path = getenv("QTEST_HUGETLBFS_PATH");
    struct statfs fs;
    int ret;

    if (hugepagefs) {
        return hugepagefs;
    }
    if (!path) {
        return NULL;
    }

    if (access(path, R_OK | W_OK | X_OK)) {
        g_test_message("access on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        g_test_message("statfs on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        g_test_message("Warning: path not on HugeTLBFS: %s", path);
        g_test_fail();
        return NULL;
    }

    hugepagefs = path;
    return hugepagefs;
#else
    return NULL;
#endif
}

static TestServer *test_server_new(const gchar *name,
                                   struct vhost_user_ops *ops)
{
    TestServer *server = g_new0(TestServer, 1);
    g_autofree const char *tmpfs = NULL;
    GError *err = NULL;

    server->context = g_main_context_new();
    server->loop = g_main_loop_new(server->context, FALSE);

    /* run the main loop thread so the chardev may operate */
    server->thread = g_thread_new(NULL, thread_function, server->loop);

    tmpfs = g_dir_make_tmp("vhost-test-XXXXXX", &err);
    if (!tmpfs) {
        g_test_message("Can't create temporary directory in %s: %s",
                       g_get_tmp_dir(), err->message);
        g_error_free(err);
    }
    g_assert(tmpfs);

    server->tmpfs = g_strdup(tmpfs);
    server->socket_path = g_strdup_printf("%s/%s.sock", tmpfs, name);
    server->mig_path = g_strdup_printf("%s/%s.mig", tmpfs, name);
    server->chr_name = g_strdup_printf("chr-%s", name);

    g_mutex_init(&server->data_mutex);
    g_cond_init(&server->data_cond);

    server->log_fd = -1;
    server->queues = 1;
    server->vu_ops = ops;

    return server;
}

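/*
 * For the flags-mismatch test chr_read() has already answered
 * GET_FEATURES with a bogus feature set and advanced test_flags to
 * TEST_FLAGS_END; observing the resulting disconnect here is what
 * flips the state to TEST_FLAGS_OK, i.e. "failed as expected".
 */
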
static void chr_event(void *opaque, QEMUChrEvent event)
{
    TestServer *s = opaque;

    if (s->test_flags == TEST_FLAGS_END &&
        event == CHR_EVENT_CLOSED) {
        s->test_flags = TEST_FLAGS_OK;
    }
}

static void test_server_create_chr(TestServer *server, const gchar *opt)
{
    g_autofree gchar *chr_path = g_strdup_printf("unix:%s%s",
                                                 server->socket_path, opt);
    Chardev *chr;

    chr = qemu_chr_new(server->chr_name, chr_path, server->context);
    g_assert(chr);

    qemu_chr_fe_init(&server->chr, chr, &error_abort);
    qemu_chr_fe_set_handlers(&server->chr, chr_can_read, chr_read,
                             chr_event, NULL, server, server->context, true);
}

static void test_server_listen(TestServer *server)
{
    test_server_create_chr(server, ",server=on,wait=off");
}

static void test_server_free(TestServer *server)
{
    int i, ret;

    /* finish the helper thread and dispatch pending sources */
    g_main_loop_quit(server->loop);
    g_thread_join(server->thread);
    while (g_main_context_pending(NULL)) {
        g_main_context_iteration(NULL, TRUE);
    }

    unlink(server->socket_path);
    g_free(server->socket_path);

    unlink(server->mig_path);
    g_free(server->mig_path);

    ret = rmdir(server->tmpfs);
    if (ret != 0) {
        g_test_message("unable to rmdir: path (%s): %s",
                       server->tmpfs, strerror(errno));
    }
    g_free(server->tmpfs);

    qemu_chr_fe_deinit(&server->chr, true);

    for (i = 0; i < server->fds_num; i++) {
        close(server->fds[i]);
    }

    if (server->log_fd != -1) {
        close(server->log_fd);
    }

    g_free(server->chr_name);

    g_main_loop_unref(server->loop);
    g_main_context_unref(server->context);
    g_cond_clear(&server->data_cond);
    g_mutex_clear(&server->data_mutex);
    g_free(server);
}

static void wait_for_log_fd(TestServer *s)
{
    gint64 end_time;

    g_mutex_lock(&s->data_mutex);
    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (s->log_fd == -1) {
        if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
            /* timeout has passed */
            g_assert(s->log_fd != -1);
            break;
        }
    }

    g_mutex_unlock(&s->data_mutex);
}

static void write_guest_mem(TestServer *s, uint32_t seed)
{
    uint32_t *guest_mem;
    int i, j;
    size_t size;

    /* iterate all regions */
    for (i = 0; i < s->fds_num; i++) {

        /* We'll write only the region starting at 0x0 */
        if (s->memory.regions[i].guest_phys_addr != 0x0) {
            continue;
        }

        g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);

        size = s->memory.regions[i].memory_size +
            s->memory.regions[i].mmap_offset;

        guest_mem = mmap(0, size, PROT_READ | PROT_WRITE,
                         MAP_SHARED, s->fds[i], 0);

        g_assert(guest_mem != MAP_FAILED);
        guest_mem += (s->memory.regions[i].mmap_offset / sizeof(*guest_mem));

        for (j = 0; j < 256; j++) {
            guest_mem[j] = seed + j;
        }

        munmap(guest_mem, s->memory.regions[i].memory_size);
        break;
    }
}

static guint64 get_log_size(TestServer *s)
{
    guint64 log_size = 0;
    int i;

    for (i = 0; i < s->memory.nregions; ++i) {
        VhostUserMemoryRegion *reg = &s->memory.regions[i];
        guint64 last = range_get_last(reg->guest_phys_addr,
                                      reg->memory_size);
        log_size = MAX(log_size, last / (8 * VHOST_LOG_PAGE) + 1);
    }

    return log_size;
}

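/*
 * Worked example: 256 MiB of guest memory starting at address 0 with
 * VHOST_LOG_PAGE = 4 KiB covers 65536 pages, i.e. 65536 bits of dirty
 * bitmap, so get_log_size() returns 256 MiB / (4096 * 8) = 8192 bytes,
 * which is exactly what test_migrate() asserts below.
 */
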
typedef struct TestMigrateSource {
    GSource source;
    TestServer *src;
    TestServer *dest;
} TestMigrateSource;

static gboolean
test_migrate_source_check(GSource *source)
{
    TestMigrateSource *t = (TestMigrateSource *)source;
    gboolean overlap = t->src->rings && t->dest->rings;

    g_assert(!overlap);

    return FALSE;
}

GSourceFuncs test_migrate_source_funcs = {
    .check = test_migrate_source_check,
};

static void vhost_user_test_cleanup(void *s)
{
    TestServer *server = s;

    qos_invalidate_command_line();
    test_server_free(server);
}

static void *vhost_user_test_setup(GString *cmd_line, void *arg)
{
    TestServer *server = test_server_new("vhost-user-test", arg);
    test_server_listen(server);

    append_mem_opts(server, cmd_line, 256, TEST_MEMFD_AUTO);
    server->vu_ops->append_opts(server, cmd_line, "");

    g_test_queue_destroy(vhost_user_test_cleanup, server);

    return server;
}

static void *vhost_user_test_setup_memfd(GString *cmd_line, void *arg)
{
    TestServer *server = test_server_new("vhost-user-test", arg);
    test_server_listen(server);

    append_mem_opts(server, cmd_line, 256, TEST_MEMFD_YES);
    server->vu_ops->append_opts(server, cmd_line, "");

    g_test_queue_destroy(vhost_user_test_cleanup, server);

    return server;
}

static void *vhost_user_test_setup_shm(GString *cmd_line, void *arg)
{
    TestServer *server = test_server_new("vhost-user-test", arg);
    test_server_listen(server);

    append_mem_opts(server, cmd_line, 256, TEST_MEMFD_SHM);
    server->vu_ops->append_opts(server, cmd_line, "");

    g_test_queue_destroy(vhost_user_test_cleanup, server);

    return server;
}

static void test_read_guest_mem(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *server = arg;

    if (!wait_for_fds(server)) {
        return;
    }

    read_guest_mem_server(global_qtest, server);
}

static void test_migrate(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;
    TestServer *dest;
    GString *dest_cmdline;
    char *uri;
    QTestState *to;
    GSource *source;
    QDict *rsp;
    guint8 *log;
    guint64 size;

    if (!wait_for_fds(s)) {
        return;
    }

    dest = test_server_new("dest", s->vu_ops);
    dest_cmdline = g_string_new(qos_get_current_command_line());
    uri = g_strdup_printf("%s%s", "unix:", dest->mig_path);

    size = get_log_size(s);
    g_assert_cmpint(size, ==, (256 * 1024 * 1024) / (VHOST_LOG_PAGE * 8));

    test_server_listen(dest);
    g_string_append_printf(dest_cmdline, " -incoming %s", uri);
    append_mem_opts(dest, dest_cmdline, 256, TEST_MEMFD_AUTO);
    dest->vu_ops->append_opts(dest, dest_cmdline, "");
    to = qtest_init(dest_cmdline->str);

    /*
     * This would be where you call qos_allocate_objects(to, NULL), if you
     * want to talk to the QVirtioNet object on the destination.
     */

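    /*
     * Attach a check to the source's main loop that fails the test if
     * both servers ever report active rings at the same time: the
     * rings must stop on the source before they start on the target.
     */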
    source = g_source_new(&test_migrate_source_funcs,
                          sizeof(TestMigrateSource));
    ((TestMigrateSource *)source)->src = s;
    ((TestMigrateSource *)source)->dest = dest;
    g_source_attach(source, s->context);

    /* slow down migration to have time to fiddle with the log */
    /* TODO: qtest could learn to break at specific places */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 10 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    rsp = qmp("{ 'execute': 'migrate', 'arguments': { 'uri': %s } }", uri);
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    wait_for_log_fd(s);

    log = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, s->log_fd, 0);
    g_assert(log != MAP_FAILED);

    /* modify first page */
    write_guest_mem(s, 0x42);
    log[0] = 1;
    munmap(log, size);

    /* speed things up */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 0 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    qmp_eventwait("STOP");
    qtest_qmp_eventwait(to, "RESUME");

    g_assert(wait_for_fds(dest));
    read_guest_mem_server(to, dest);

    g_source_destroy(source);
    g_source_unref(source);

    qtest_quit(to);
    test_server_free(dest);
    g_free(uri);
    g_string_free(dest_cmdline, true);
}

static void wait_for_rings_started(TestServer *s, size_t count)
{
    gint64 end_time;

    g_mutex_lock(&s->data_mutex);
    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (ctpop64(s->rings) != count) {
        if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
            /* timeout has passed */
            g_assert_cmpint(ctpop64(s->rings), ==, count);
            break;
        }
    }

    g_mutex_unlock(&s->data_mutex);
}

static inline void test_server_connect(TestServer *server)
{
    test_server_create_chr(server, ",reconnect-ms=1000");
}

static gboolean
reconnect_cb(gpointer user_data)
{
    TestServer *s = user_data;

    qemu_chr_fe_disconnect(&s->chr);

    return FALSE;
}

static gpointer
connect_thread(gpointer data)
{
    TestServer *s = data;

    /* wait for QEMU to start before the first try, to avoid extra warnings */
    g_usleep(G_USEC_PER_SEC);
    test_server_connect(s);

    return NULL;
}

static void *vhost_user_test_setup_reconnect(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("reconnect", arg);

    g_thread_unref(g_thread_new("connect", connect_thread, s));
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");

    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}

static void test_reconnect(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;
    GSource *src;

    if (!wait_for_fds(s)) {
        return;
    }

    wait_for_rings_started(s, 2);

    /* reconnect */
    s->fds_num = 0;
    s->rings = 0;
    src = g_idle_source_new();
    g_source_set_callback(src, reconnect_cb, s, NULL);
    g_source_attach(src, s->context);
    g_source_unref(src);
    g_assert(wait_for_fds(s));
    wait_for_rings_started(s, 2);
}

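/*
 * The connect-fail and flags-mismatch setups force a failed handshake
 * (an immediate disconnect, or a feature set QEMU must reject) and
 * then rely on the chardev's reconnect-ms option to eventually bring
 * up a working session.
 */
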
static void *vhost_user_test_setup_connect_fail(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("connect-fail", arg);

    s->test_fail = true;

    g_thread_unref(g_thread_new("connect", connect_thread, s));
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");

    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}

static void *vhost_user_test_setup_flags_mismatch(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("flags-mismatch", arg);

    s->test_flags = TEST_FLAGS_DISCONNECT;

    g_thread_unref(g_thread_new("connect", connect_thread, s));
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");

    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}

static void test_vhost_user_started(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;

    if (!wait_for_fds(s)) {
        return;
    }
    wait_for_rings_started(s, 2);
}

static void *vhost_user_test_setup_multiqueue(GString *cmd_line, void *arg)
{
    TestServer *s = vhost_user_test_setup(cmd_line, arg);

    s->queues = 2;
    g_string_append_printf(cmd_line,
                           " -set netdev.hs0.queues=%d"
                           " -global virtio-net-pci.vectors=%d",
                           s->queues, s->queues * 2 + 2);

    return s;
}

static void test_multiqueue(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;

    wait_for_rings_started(s, s->queues * 2);
}


static uint64_t vu_net_get_features(TestServer *s)
{
    uint64_t features = 0x1ULL << VIRTIO_F_VERSION_1 |
        0x1ULL << VHOST_F_LOG_ALL |
        0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    if (s->queues > 1) {
        features |= 0x1ULL << VIRTIO_NET_F_MQ;
    }

    return features;
}

static void vu_net_set_features(TestServer *s, CharFrontend *chr,
                                VhostUserMsg *msg)
{
    g_assert(msg->payload.u64 & (0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES));
    if (s->test_flags == TEST_FLAGS_DISCONNECT) {
        qemu_chr_fe_disconnect(chr);
        s->test_flags = TEST_FLAGS_BAD;
    }
}

static void vu_net_get_protocol_features(TestServer *s, CharFrontend *chr,
                                         VhostUserMsg *msg)
{
    /* send back features to qemu */
    msg->flags |= VHOST_USER_REPLY_MASK;
    msg->size = sizeof(m.payload.u64);
    msg->payload.u64 = 1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD;
    msg->payload.u64 |= 1 << VHOST_USER_PROTOCOL_F_CROSS_ENDIAN;
    if (s->queues > 1) {
        msg->payload.u64 |= 1 << VHOST_USER_PROTOCOL_F_MQ;
    }
    qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
}

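/*
 * Note that advertising VHOST_USER_PROTOCOL_F_LOG_SHMFD above is what
 * allows QEMU to pass the dirty-log fd over SET_LOG_BASE, which the
 * migration test depends on.
 */
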
/* Each VHOST-USER device should have its ops structure defined. */
static struct vhost_user_ops g_vu_net_ops = {
    .type = VHOST_USER_NET,

    .append_opts = append_vhost_net_opts,

    .get_features = vu_net_get_features,
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_net_get_protocol_features,
};

static void register_vhost_user_test(void)
{
    QOSGraphTestOptions opts = {
        .before = vhost_user_test_setup,
        .subprocess = true,
        .arg = &g_vu_net_ops,
    };

    qemu_add_opts(&qemu_chardev_opts);

    qos_add_test("vhost-user/read-guest-mem/memfile",
                 "virtio-net",
                 test_read_guest_mem, &opts);

    opts.before = vhost_user_test_setup_shm;
    qos_add_test("vhost-user/read-guest-mem/shm",
                 "virtio-net",
                 test_read_guest_mem, &opts);

    if (qemu_memfd_check(MFD_ALLOW_SEALING)) {
        opts.before = vhost_user_test_setup_memfd;
        qos_add_test("vhost-user/read-guest-mem/memfd",
                     "virtio-net",
                     test_read_guest_mem, &opts);
    }

    qos_add_test("vhost-user/migrate",
                 "virtio-net",
                 test_migrate, &opts);

    opts.before = vhost_user_test_setup_reconnect;
    qos_add_test("vhost-user/reconnect", "virtio-net",
                 test_reconnect, &opts);

    opts.before = vhost_user_test_setup_connect_fail;
    qos_add_test("vhost-user/connect-fail", "virtio-net",
                 test_vhost_user_started, &opts);

    opts.before = vhost_user_test_setup_flags_mismatch;
    qos_add_test("vhost-user/flags-mismatch", "virtio-net",
                 test_vhost_user_started, &opts);

    opts.before = vhost_user_test_setup_multiqueue;
    opts.edge.extra_device_opts = "mq=on";
    qos_add_test("vhost-user/multiqueue",
                 "virtio-net",
                 test_multiqueue, &opts);
}
libqos_init(register_vhost_user_test);

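/*
 * The GPIO and SCMI devices below reuse the same stub server; only the
 * advertised feature bits and the command-line options differ from the
 * net device.
 */
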
1150 */ 1151 static void vu_gpio_get_protocol_features(TestServer *s, CharFrontend *chr, 1152 VhostUserMsg *msg) 1153 { 1154 /* send back features to qemu */ 1155 msg->flags |= VHOST_USER_REPLY_MASK; 1156 msg->size = sizeof(m.payload.u64); 1157 msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_CONFIG; 1158 1159 qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size); 1160 } 1161 1162 static struct vhost_user_ops g_vu_gpio_ops = { 1163 .type = VHOST_USER_GPIO, 1164 1165 .append_opts = append_vhost_gpio_opts, 1166 1167 .get_features = vu_gpio_get_features, 1168 .set_features = vu_net_set_features, 1169 .get_protocol_features = vu_gpio_get_protocol_features, 1170 }; 1171 1172 static void register_vhost_gpio_test(void) 1173 { 1174 QOSGraphTestOptions opts = { 1175 .before = vhost_user_test_setup, 1176 .subprocess = true, 1177 .arg = &g_vu_gpio_ops, 1178 }; 1179 1180 qemu_add_opts(&qemu_chardev_opts); 1181 1182 qos_add_test("read-guest-mem/memfile", 1183 "vhost-user-gpio", test_read_guest_mem, &opts); 1184 } 1185 libqos_init(register_vhost_gpio_test); 1186 1187 static uint64_t vu_scmi_get_features(TestServer *s) 1188 { 1189 return 0x1ULL << VIRTIO_F_VERSION_1 | 1190 0x1ULL << VIRTIO_SCMI_F_P2A_CHANNELS | 1191 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES; 1192 } 1193 1194 static void vu_scmi_get_protocol_features(TestServer *s, CharFrontend *chr, 1195 VhostUserMsg *msg) 1196 { 1197 msg->flags |= VHOST_USER_REPLY_MASK; 1198 msg->size = sizeof(m.payload.u64); 1199 msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_MQ; 1200 1201 qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size); 1202 } 1203 1204 static struct vhost_user_ops g_vu_scmi_ops = { 1205 .type = VHOST_USER_SCMI, 1206 1207 .append_opts = append_vhost_gpio_opts, 1208 1209 .get_features = vu_scmi_get_features, 1210 .set_features = vu_net_set_features, 1211 .get_protocol_features = vu_scmi_get_protocol_features, 1212 }; 1213 1214 static void register_vhost_scmi_test(void) 1215 { 1216 QOSGraphTestOptions opts = { 1217 .before = vhost_user_test_setup, 1218 .subprocess = true, 1219 .arg = &g_vu_scmi_ops, 1220 }; 1221 1222 qemu_add_opts(&qemu_chardev_opts); 1223 1224 qos_add_test("scmi/read-guest-mem/memfile", 1225 "vhost-user-scmi", test_read_guest_mem, &opts); 1226 } 1227 libqos_init(register_vhost_scmi_test); 1228