1 /*
2 * QTest testcase for the vhost-user
3 *
4 * Copyright (c) 2014 Virtual Open Systems Sarl.
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
8 *
9 */
10
11 #include "qemu/osdep.h"
12
13 #include "libqtest-single.h"
14 #include "qapi/error.h"
15 #include "qobject/qdict.h"
16 #include "qemu/config-file.h"
17 #include "qemu/option.h"
18 #include "qemu/range.h"
19 #include "qemu/sockets.h"
20 #include "chardev/char-fe.h"
21 #include "qemu/memfd.h"
22 #include "qemu/module.h"
23 #include "system/system.h"
24 #include "libqos/libqos.h"
25 #include "libqos/pci-pc.h"
26 #include "libqos/virtio-pci.h"
27
28 #include "libqos/malloc-pc.h"
29 #include "hw/virtio/virtio-net.h"
30
31 #include "standard-headers/linux/vhost_types.h"
32 #include "standard-headers/linux/virtio_ids.h"
33 #include "standard-headers/linux/virtio_net.h"
34 #include "standard-headers/linux/virtio_gpio.h"
35 #include "standard-headers/linux/virtio_scmi.h"
36
37 #ifdef CONFIG_LINUX
38 #include <sys/vfs.h>
39 #endif
40
41
/*
 * Command-line fragments appended to the QEMU invocation.  Every memory
 * backend is shared (share=on / memfd / shm) so the vhost-user backend
 * can mmap() guest RAM from the fds it receives over the socket.
 */
#define QEMU_CMD_MEM    " -m %d -object memory-backend-file,id=mem,size=%dM," \
                        "mem-path=%s,share=on -numa node,memdev=mem"
#define QEMU_CMD_MEMFD  " -m %d -object memory-backend-memfd,id=mem,size=%dM," \
                        " -numa node,memdev=mem"
#define QEMU_CMD_SHM    " -m %d -object memory-backend-shm,id=mem,size=%dM," \
                        " -numa node,memdev=mem"
#define QEMU_CMD_CHR    " -chardev socket,id=%s,path=%s%s"
#define QEMU_CMD_NETDEV " -netdev vhost-user,id=hs0,chardev=%s,vhostforce=on"

/* statfs() f_type value identifying a hugetlbfs mount (see init_hugepagefs) */
#define HUGETLBFS_MAGIC       0x958458f6

/*********** FROM hw/virtio/vhost-user.c *************************************/

#define VHOST_MEMORY_MAX_NREGIONS    8
#define VHOST_MAX_VIRTQUEUES         0x100

#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VIRTIO_F_VERSION_1 32

#define VHOST_USER_PROTOCOL_F_MQ 0
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
#define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN   6
#define VHOST_USER_PROTOCOL_F_CONFIG 9

/* granularity of the dirty-log bitmap shared for migration */
#define VHOST_LOG_PAGE 0x1000
67
/* Request codes carried in VhostUserMsg.request (wire values are fixed). */
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_MAX
} VhostUserRequest;

/* One guest RAM region as announced by VHOST_USER_SET_MEM_TABLE. */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;   /* offset into the accompanying fd */
} VhostUserMemoryRegion;

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;

/* Payload of VHOST_USER_SET_LOG_BASE: geometry of the dirty-log mapping. */
typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

/*
 * On-the-wire message layout: fixed header (request/flags/size) followed
 * by a request-specific payload of `size` bytes.  Packed so it matches
 * the wire format exactly.
 */
typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1<<2)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserLog log;
    } payload;
} QEMU_PACKED VhostUserMsg;

/* dummy instance, used only so sizeof(m.field) works in the macros below */
static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(m.request) \
                            + sizeof(m.flags) \
                            + sizeof(m.size))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)
138 /*****************************************************************************/
139
/*
 * Progress of the deliberate-failure tests: chr_read() advances
 * DISCONNECT -> BAD -> END, and chr_event() acknowledges the final
 * close by setting OK.
 */
enum {
    TEST_FLAGS_OK,
    TEST_FLAGS_DISCONNECT,
    TEST_FLAGS_BAD,
    TEST_FLAGS_END,
};

/* Device flavours a vhost_user_ops instance can emulate. */
enum {
    VHOST_USER_NET,
    VHOST_USER_GPIO,
    VHOST_USER_SCMI,
};

/*
 * State of one fake vhost-user backend.  The chardev runs in its own
 * GMainContext/thread; data_mutex + data_cond synchronize the test
 * thread with chr_read() (fds_num, memory, rings, log_fd).
 */
typedef struct TestServer {
    gchar *socket_path;     /* vhost-user unix socket */
    gchar *mig_path;        /* migration stream unix socket */
    gchar *chr_name;
    gchar *tmpfs;           /* per-server scratch directory */
    CharBackend chr;
    int fds_num;            /* number of valid entries in fds[] */
    int fds[VHOST_MEMORY_MAX_NREGIONS]; /* region fds from SET_MEM_TABLE */
    VhostUserMemory memory; /* last mem table received */
    GMainContext *context;
    GMainLoop *loop;
    GThread *thread;
    GMutex data_mutex;
    GCond data_cond;
    int log_fd;             /* dirty-log fd from SET_LOG_BASE, or -1 */
    uint64_t rings;         /* bitmask of started vrings (by index) */
    bool test_fail;         /* drop the first connection on purpose */
    int test_flags;         /* TEST_FLAGS_* progression */
    int queues;             /* queue pairs advertised via GET_QUEUE_NUM */
    struct vhost_user_ops *vu_ops;
} TestServer;

/* Per-device-type hooks so net/gpio/scmi tests share the same server. */
struct vhost_user_ops {
    /* Device types. */
    int type;
    void (*append_opts)(TestServer *s, GString *cmd_line,
            const char *chr_opts);

    /* VHOST-USER commands. */
    uint64_t (*get_features)(TestServer *s);
    void (*set_features)(TestServer *s, CharBackend *chr,
                         VhostUserMsg *msg);
    void (*get_protocol_features)(TestServer *s,
                                  CharBackend *chr, VhostUserMsg *msg);
};

static const char *init_hugepagefs(void);
static TestServer *test_server_new(const gchar *name,
        struct vhost_user_ops *ops);
static void test_server_free(TestServer *server);
static void test_server_listen(TestServer *server);

/* Which memory backend append_mem_opts() should request. */
enum test_memfd {
    TEST_MEMFD_AUTO,    /* memfd if the host supports sealing, else file */
    TEST_MEMFD_YES,
    TEST_MEMFD_NO,
    TEST_MEMFD_SHM,
};
201
/* Add the chardev plus a vhost-user netdev bound to it to the command line. */
static void append_vhost_net_opts(TestServer *s, GString *cmd_line,
                                  const char *chr_opts)
{
    const char *id = s->chr_name;

    g_string_append_printf(cmd_line, QEMU_CMD_CHR QEMU_CMD_NETDEV,
                           id, s->socket_path, chr_opts, id);
}
209
210 /*
211 * For GPIO there are no other magic devices we need to add (like
212 * block or netdev) so all we need to worry about is the vhost-user
213 * chardev socket.
214 */
append_vhost_gpio_opts(TestServer * s,GString * cmd_line,const char * chr_opts)215 static void append_vhost_gpio_opts(TestServer *s, GString *cmd_line,
216 const char *chr_opts)
217 {
218 g_string_append_printf(cmd_line, QEMU_CMD_CHR,
219 s->chr_name, s->socket_path,
220 chr_opts);
221 }
222
/*
 * Append "-m <size>" plus a shared memory backend of the requested kind.
 * TEST_MEMFD_AUTO picks memfd when the host supports sealing, otherwise
 * falls back to a file backend on hugetlbfs (or the server tmpfs).
 */
static void append_mem_opts(TestServer *server, GString *cmd_line,
                            int size, enum test_memfd memfd)
{
    enum test_memfd kind = memfd;

    if (kind == TEST_MEMFD_AUTO) {
        kind = qemu_memfd_check(MFD_ALLOW_SEALING) ? TEST_MEMFD_YES
                                                   : TEST_MEMFD_NO;
    }

    switch (kind) {
    case TEST_MEMFD_YES:
        g_string_append_printf(cmd_line, QEMU_CMD_MEMFD, size, size);
        break;
    case TEST_MEMFD_SHM:
        g_string_append_printf(cmd_line, QEMU_CMD_SHM, size, size);
        break;
    default: {
        /* prefer hugetlbfs when QTEST_HUGETLBFS_PATH is usable */
        const char *root = init_hugepagefs() ? : server->tmpfs;

        g_string_append_printf(cmd_line, QEMU_CMD_MEM, size, size, root);
        break;
    }
    }
}
241
/*
 * Block (up to 5s) until chr_read() has received SET_MEM_TABLE and
 * stashed the region fds.  Returns true if a region mapped at guest
 * physical address 0 exists; otherwise marks the test skipped and
 * returns false (callers bail out).
 */
static bool wait_for_fds(TestServer *s)
{
    gint64 end_time;
    bool got_region;
    int i;

    g_mutex_lock(&s->data_mutex);

    end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
    while (!s->fds_num) {
        if (!g_cond_wait_until(&s->data_cond, &s->data_mutex, end_time)) {
            /* timeout has passed */
            g_assert(s->fds_num);
            break;
        }
    }

    /* check for sanity */
    g_assert_cmpint(s->fds_num, >, 0);
    g_assert_cmpint(s->fds_num, ==, s->memory.nregions);

    g_mutex_unlock(&s->data_mutex);

    /* the read/write helpers only operate on the region at GPA 0 */
    got_region = false;
    for (i = 0; i < s->memory.nregions; ++i) {
        VhostUserMemoryRegion *reg = &s->memory.regions[i];
        if (reg->guest_phys_addr == 0) {
            got_region = true;
            break;
        }
    }
    if (!got_region) {
        g_test_skip("No memory at address 0x0");
    }
    return got_region;
}
278
/*
 * Verify that the memory the backend mapped from the SET_MEM_TABLE fds
 * is the same memory the guest sees: compare the first 1024 bytes of
 * the region at guest physical address 0 against qtest_readb().
 */
static void read_guest_mem_server(QTestState *qts, TestServer *s)
{
    uint8_t *guest_mem;
    int i, j;
    size_t size;

    g_mutex_lock(&s->data_mutex);

    /* iterate all regions */
    for (i = 0; i < s->fds_num; i++) {
        uint8_t *mapped;

        /* We'll check only the region starting at 0x0 */
        if (s->memory.regions[i].guest_phys_addr != 0x0) {
            continue;
        }

        g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);

        /* the region's bytes start mmap_offset into the fd */
        size = s->memory.regions[i].memory_size +
            s->memory.regions[i].mmap_offset;

        mapped = mmap(0, size, PROT_READ | PROT_WRITE,
                      MAP_SHARED, s->fds[i], 0);
        g_assert(mapped != MAP_FAILED);
        guest_mem = mapped + s->memory.regions[i].mmap_offset;

        for (j = 0; j < 1024; j++) {
            uint32_t a = qtest_readb(qts, s->memory.regions[i].guest_phys_addr + j);
            uint32_t b = guest_mem[j];

            g_assert_cmpint(a, ==, b);
        }

        /*
         * Unmap from the mapping's base address with the full mapped
         * size; munmap() on the offset-adjusted pointer with only
         * memory_size would leave the first mmap_offset bytes mapped
         * (and fail outright if the offset isn't page aligned).
         */
        munmap(mapped, size);
    }

    g_mutex_unlock(&s->data_mutex);
}
318
thread_function(void * data)319 static void *thread_function(void *data)
320 {
321 GMainLoop *loop = data;
322 g_main_loop_run(loop);
323 return NULL;
324 }
325
chr_can_read(void * opaque)326 static int chr_can_read(void *opaque)
327 {
328 return VHOST_USER_HDR_SIZE;
329 }
330
chr_read(void * opaque,const uint8_t * buf,int size)331 static void chr_read(void *opaque, const uint8_t *buf, int size)
332 {
333 g_autoptr(GError) err = NULL;
334 TestServer *s = opaque;
335 CharBackend *chr = &s->chr;
336 VhostUserMsg msg;
337 uint8_t *p = (uint8_t *) &msg;
338 int fd = -1;
339
340 if (s->test_fail) {
341 qemu_chr_fe_disconnect(chr);
342 /* now switch to non-failure */
343 s->test_fail = false;
344 }
345
346 if (size != VHOST_USER_HDR_SIZE) {
347 g_test_message("Wrong message size received %d", size);
348 return;
349 }
350
351 g_mutex_lock(&s->data_mutex);
352 memcpy(p, buf, VHOST_USER_HDR_SIZE);
353
354 if (msg.size) {
355 p += VHOST_USER_HDR_SIZE;
356 size = qemu_chr_fe_read_all(chr, p, msg.size);
357 if (size != msg.size) {
358 g_test_message("Wrong message size received %d != %d",
359 size, msg.size);
360 goto out;
361 }
362 }
363
364 switch (msg.request) {
365 case VHOST_USER_GET_FEATURES:
366 /* Mandatory for tests to define get_features */
367 g_assert(s->vu_ops->get_features);
368
369 /* send back features to qemu */
370 msg.flags |= VHOST_USER_REPLY_MASK;
371 msg.size = sizeof(m.payload.u64);
372
373 if (s->test_flags >= TEST_FLAGS_BAD) {
374 msg.payload.u64 = 0;
375 s->test_flags = TEST_FLAGS_END;
376 } else {
377 msg.payload.u64 = s->vu_ops->get_features(s);
378 }
379
380 qemu_chr_fe_write_all(chr, (uint8_t *) &msg,
381 VHOST_USER_HDR_SIZE + msg.size);
382 break;
383
384 case VHOST_USER_SET_FEATURES:
385 if (s->vu_ops->set_features) {
386 s->vu_ops->set_features(s, chr, &msg);
387 }
388 break;
389
390 case VHOST_USER_SET_OWNER:
391 /*
392 * We don't need to do anything here, the remote is just
393 * letting us know it is in charge. Just log it.
394 */
395 g_test_message("set_owner: start of session\n");
396 break;
397
398 case VHOST_USER_GET_PROTOCOL_FEATURES:
399 if (s->vu_ops->get_protocol_features) {
400 s->vu_ops->get_protocol_features(s, chr, &msg);
401 }
402 break;
403
404 case VHOST_USER_GET_CONFIG:
405 /*
406 * Treat GET_CONFIG as a NOP and just reply and let the guest
407 * consider we have updated its memory. Tests currently don't
408 * require working configs.
409 */
410 msg.flags |= VHOST_USER_REPLY_MASK;
411 p = (uint8_t *) &msg;
412 qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
413 break;
414
415 case VHOST_USER_SET_PROTOCOL_FEATURES:
416 /*
417 * We did set VHOST_USER_F_PROTOCOL_FEATURES so its valid for
418 * the remote end to send this. There is no handshake reply so
419 * just log the details for debugging.
420 */
421 g_test_message("set_protocol_features: 0x%"PRIx64 "\n", msg.payload.u64);
422 break;
423
424 /*
425 * A real vhost-user backend would actually set the size and
426 * address of the vrings but we can simply report them.
427 */
428 case VHOST_USER_SET_VRING_NUM:
429 g_test_message("set_vring_num: %d/%d\n",
430 msg.payload.state.index, msg.payload.state.num);
431 break;
432 case VHOST_USER_SET_VRING_ADDR:
433 g_test_message("set_vring_addr: 0x%"PRIx64"/0x%"PRIx64"/0x%"PRIx64"\n",
434 msg.payload.addr.avail_user_addr,
435 msg.payload.addr.desc_user_addr,
436 msg.payload.addr.used_user_addr);
437 break;
438
439 case VHOST_USER_GET_VRING_BASE:
440 /* send back vring base to qemu */
441 msg.flags |= VHOST_USER_REPLY_MASK;
442 msg.size = sizeof(m.payload.state);
443 msg.payload.state.num = 0;
444 p = (uint8_t *) &msg;
445 qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
446
447 assert(msg.payload.state.index < s->queues * 2);
448 s->rings &= ~(0x1ULL << msg.payload.state.index);
449 g_cond_broadcast(&s->data_cond);
450 break;
451
452 case VHOST_USER_SET_MEM_TABLE:
453 /* received the mem table */
454 memcpy(&s->memory, &msg.payload.memory, sizeof(msg.payload.memory));
455 s->fds_num = qemu_chr_fe_get_msgfds(chr, s->fds,
456 G_N_ELEMENTS(s->fds));
457
458 /* signal the test that it can continue */
459 g_cond_broadcast(&s->data_cond);
460 break;
461
462 case VHOST_USER_SET_VRING_KICK:
463 case VHOST_USER_SET_VRING_CALL:
464 /* consume the fd */
465 if (!qemu_chr_fe_get_msgfds(chr, &fd, 1) && fd < 0) {
466 g_test_message("call fd: %d, do not set non-blocking\n", fd);
467 break;
468 }
469 /*
470 * This is a non-blocking eventfd.
471 * The receive function forces it to be blocking,
472 * so revert it back to non-blocking.
473 */
474 g_unix_set_fd_nonblocking(fd, true, &err);
475 g_assert_no_error(err);
476 break;
477
478 case VHOST_USER_SET_LOG_BASE:
479 if (s->log_fd != -1) {
480 close(s->log_fd);
481 s->log_fd = -1;
482 }
483 qemu_chr_fe_get_msgfds(chr, &s->log_fd, 1);
484 msg.flags |= VHOST_USER_REPLY_MASK;
485 msg.size = 0;
486 p = (uint8_t *) &msg;
487 qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE);
488
489 g_cond_broadcast(&s->data_cond);
490 break;
491
492 case VHOST_USER_SET_VRING_BASE:
493 assert(msg.payload.state.index < s->queues * 2);
494 s->rings |= 0x1ULL << msg.payload.state.index;
495 g_cond_broadcast(&s->data_cond);
496 break;
497
498 case VHOST_USER_GET_QUEUE_NUM:
499 msg.flags |= VHOST_USER_REPLY_MASK;
500 msg.size = sizeof(m.payload.u64);
501 msg.payload.u64 = s->queues;
502 p = (uint8_t *) &msg;
503 qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
504 break;
505
506 case VHOST_USER_SET_VRING_ENABLE:
507 /*
508 * Another case we ignore as we don't need to respond. With a
509 * fully functioning vhost-user we would enable/disable the
510 * vring monitoring.
511 */
512 g_test_message("set_vring(%d)=%s\n", msg.payload.state.index,
513 msg.payload.state.num ? "enabled" : "disabled");
514 break;
515
516 default:
517 g_test_message("vhost-user: un-handled message: %d\n", msg.request);
518 break;
519 }
520
521 out:
522 g_mutex_unlock(&s->data_mutex);
523 }
524
/*
 * Return a usable hugetlbfs mount point taken from QTEST_HUGETLBFS_PATH,
 * or NULL if unset/unusable (the test then falls back to plain tmpfs).
 * The result is cached across calls; failures mark the test as failed.
 */
static const char *init_hugepagefs(void)
{
#ifdef CONFIG_LINUX
    static const char *hugepagefs;
    const char *path = getenv("QTEST_HUGETLBFS_PATH");
    struct statfs fs;
    int ret;

    /* validated once, reused on subsequent calls */
    if (hugepagefs) {
        return hugepagefs;
    }
    if (!path) {
        return NULL;
    }

    if (access(path, R_OK | W_OK | X_OK)) {
        g_test_message("access on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    /* retry statfs() if interrupted by a signal */
    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        g_test_message("statfs on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        g_test_message("Warning: path not on HugeTLBFS: %s", path);
        g_test_fail();
        return NULL;
    }

    hugepagefs = path;
    return hugepagefs;
#else
    return NULL;
#endif
}
568
test_server_new(const gchar * name,struct vhost_user_ops * ops)569 static TestServer *test_server_new(const gchar *name,
570 struct vhost_user_ops *ops)
571 {
572 TestServer *server = g_new0(TestServer, 1);
573 g_autofree const char *tmpfs = NULL;
574 GError *err = NULL;
575
576 server->context = g_main_context_new();
577 server->loop = g_main_loop_new(server->context, FALSE);
578
579 /* run the main loop thread so the chardev may operate */
580 server->thread = g_thread_new(NULL, thread_function, server->loop);
581
582 tmpfs = g_dir_make_tmp("vhost-test-XXXXXX", &err);
583 if (!tmpfs) {
584 g_test_message("Can't create temporary directory in %s: %s",
585 g_get_tmp_dir(), err->message);
586 g_error_free(err);
587 }
588 g_assert(tmpfs);
589
590 server->tmpfs = g_strdup(tmpfs);
591 server->socket_path = g_strdup_printf("%s/%s.sock", tmpfs, name);
592 server->mig_path = g_strdup_printf("%s/%s.mig", tmpfs, name);
593 server->chr_name = g_strdup_printf("chr-%s", name);
594
595 g_mutex_init(&server->data_mutex);
596 g_cond_init(&server->data_cond);
597
598 server->log_fd = -1;
599 server->queues = 1;
600 server->vu_ops = ops;
601
602 return server;
603 }
604
/*
 * Chardev event hook: a CLOSED event while in TEST_FLAGS_END means the
 * deliberately-broken handshake ran to completion, so flag success.
 */
static void chr_event(void *opaque, QEMUChrEvent event)
{
    TestServer *server = opaque;

    if (event == CHR_EVENT_CLOSED &&
        server->test_flags == TEST_FLAGS_END) {
        server->test_flags = TEST_FLAGS_OK;
    }
}
614
/*
 * Create the unix-socket chardev for this server (extra socket options
 * come in via @opt) and wire up the read/event handlers on the server's
 * private main context.
 */
static void test_server_create_chr(TestServer *server, const gchar *opt)
{
    Chardev *chr;
    g_autofree gchar *chr_path = g_strdup_printf("unix:%s%s",
                                                 server->socket_path, opt);

    chr = qemu_chr_new(server->chr_name, chr_path, server->context);
    g_assert(chr);

    qemu_chr_fe_init(&server->chr, chr, &error_abort);
    qemu_chr_fe_set_handlers(&server->chr, chr_can_read, chr_read,
                             chr_event, NULL, server, server->context, true);
}
628
/* Open the server-side socket without blocking for a client to connect. */
static void test_server_listen(TestServer *server)
{
    test_server_create_chr(server, ",server=on,wait=off");
}
633
/*
 * Tear down a TestServer created by test_server_new(): stop the helper
 * thread, remove the on-disk artifacts, release the chardev and any
 * received fds, then free all allocations.  Order matters: the loop
 * thread must be joined before the chardev is deinitialized.
 */
static void test_server_free(TestServer *server)
{
    int i, ret;

    /* finish the helper thread and dispatch pending sources */
    g_main_loop_quit(server->loop);
    g_thread_join(server->thread);
    while (g_main_context_pending(NULL)) {
        g_main_context_iteration(NULL, TRUE);
    }

    unlink(server->socket_path);
    g_free(server->socket_path);

    unlink(server->mig_path);
    g_free(server->mig_path);

    /* rmdir only succeeds once the sockets above are unlinked */
    ret = rmdir(server->tmpfs);
    if (ret != 0) {
        g_test_message("unable to rmdir: path (%s): %s",
                       server->tmpfs, strerror(errno));
    }
    g_free(server->tmpfs);

    qemu_chr_fe_deinit(&server->chr, true);

    /* close region fds received via SET_MEM_TABLE */
    for (i = 0; i < server->fds_num; i++) {
        close(server->fds[i]);
    }

    if (server->log_fd != -1) {
        close(server->log_fd);
    }

    g_free(server->chr_name);

    g_main_loop_unref(server->loop);
    g_main_context_unref(server->context);
    g_cond_clear(&server->data_cond);
    g_mutex_clear(&server->data_mutex);
    g_free(server);
}
676
/*
 * Block (up to 5s) until chr_read() has received the dirty-log fd via
 * VHOST_USER_SET_LOG_BASE; assert-fails on timeout.
 */
static void wait_for_log_fd(TestServer *s)
{
    gint64 deadline = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;

    g_mutex_lock(&s->data_mutex);
    while (s->log_fd == -1 &&
           g_cond_wait_until(&s->data_cond, &s->data_mutex, deadline)) {
        /* spurious wakeup or broadcast for another condition: retry */
    }
    /* either the fd arrived or the deadline passed; fail on the latter */
    g_assert(s->log_fd != -1);
    g_mutex_unlock(&s->data_mutex);
}
693
/*
 * Scribble a recognisable pattern (seed + index, 256 words) into the
 * start of the guest RAM region mapped at guest physical address 0,
 * via the fd received over the vhost-user socket.
 */
static void write_guest_mem(TestServer *s, uint32_t seed)
{
    uint32_t *guest_mem;
    int i, j;
    size_t size;

    /* iterate all regions */
    for (i = 0; i < s->fds_num; i++) {
        void *mapped;

        /* We'll write only the region starting at 0x0 */
        if (s->memory.regions[i].guest_phys_addr != 0x0) {
            continue;
        }

        g_assert_cmpint(s->memory.regions[i].memory_size, >, 1024);

        /* the region's bytes start mmap_offset into the fd */
        size = s->memory.regions[i].memory_size +
            s->memory.regions[i].mmap_offset;

        mapped = mmap(0, size, PROT_READ | PROT_WRITE,
                      MAP_SHARED, s->fds[i], 0);
        g_assert(mapped != MAP_FAILED);
        guest_mem = (uint32_t *)((uint8_t *)mapped +
                                 s->memory.regions[i].mmap_offset);

        for (j = 0; j < 256; j++) {
            guest_mem[j] = seed + j;
        }

        /*
         * Unmap from the mapping's base with the full mapped size;
         * munmap() on the offset-adjusted pointer with memory_size
         * would leave the first mmap_offset bytes mapped (and fail if
         * the offset isn't page aligned).
         */
        munmap(mapped, size);
        break;
    }
}
727
get_log_size(TestServer * s)728 static guint64 get_log_size(TestServer *s)
729 {
730 guint64 log_size = 0;
731 int i;
732
733 for (i = 0; i < s->memory.nregions; ++i) {
734 VhostUserMemoryRegion *reg = &s->memory.regions[i];
735 guint64 last = range_get_last(reg->guest_phys_addr,
736 reg->memory_size);
737 log_size = MAX(log_size, last / (8 * VHOST_LOG_PAGE) + 1);
738 }
739
740 return log_size;
741 }
742
743 typedef struct TestMigrateSource {
744 GSource source;
745 TestServer *src;
746 TestServer *dest;
747 } TestMigrateSource;
748
749 static gboolean
test_migrate_source_check(GSource * source)750 test_migrate_source_check(GSource *source)
751 {
752 TestMigrateSource *t = (TestMigrateSource *)source;
753 gboolean overlap = t->src->rings && t->dest->rings;
754
755 g_assert(!overlap);
756
757 return FALSE;
758 }
759
760 GSourceFuncs test_migrate_source_funcs = {
761 .check = test_migrate_source_check,
762 };
763
vhost_user_test_cleanup(void * s)764 static void vhost_user_test_cleanup(void *s)
765 {
766 TestServer *server = s;
767
768 qos_invalidate_command_line();
769 test_server_free(server);
770 }
771
/* qos setup hook: listening server + auto-selected shared memory backend. */
static void *vhost_user_test_setup(GString *cmd_line, void *arg)
{
    TestServer *server = test_server_new("vhost-user-test", arg);

    test_server_listen(server);
    append_mem_opts(server, cmd_line, 256, TEST_MEMFD_AUTO);
    server->vu_ops->append_opts(server, cmd_line, "");
    g_test_queue_destroy(vhost_user_test_cleanup, server);

    return server;
}
784
/* qos setup hook: like vhost_user_test_setup() but forces a memfd backend. */
static void *vhost_user_test_setup_memfd(GString *cmd_line, void *arg)
{
    TestServer *server = test_server_new("vhost-user-test", arg);

    test_server_listen(server);
    append_mem_opts(server, cmd_line, 256, TEST_MEMFD_YES);
    server->vu_ops->append_opts(server, cmd_line, "");
    g_test_queue_destroy(vhost_user_test_cleanup, server);

    return server;
}
797
/* qos setup hook: like vhost_user_test_setup() but forces a shm backend. */
static void *vhost_user_test_setup_shm(GString *cmd_line, void *arg)
{
    TestServer *server = test_server_new("vhost-user-test", arg);

    test_server_listen(server);
    append_mem_opts(server, cmd_line, 256, TEST_MEMFD_SHM);
    server->vu_ops->append_opts(server, cmd_line, "");
    g_test_queue_destroy(vhost_user_test_cleanup, server);

    return server;
}
810
/* Test body: once the mem table has arrived, compare backend and guest RAM. */
static void test_read_guest_mem(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *server = arg;

    if (wait_for_fds(server)) {
        read_guest_mem_server(global_qtest, server);
    }
}
821
/*
 * Migrate the guest to a second QEMU wired to a fresh TestServer,
 * checking along the way that: the dirty-log bitmap is sized for the
 * configured 256M of RAM, the log fd arrives on the source, rings are
 * never live on both sides at once, and the destination sees guest
 * memory after the switch-over.
 */
static void test_migrate(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;
    TestServer *dest;
    GString *dest_cmdline;
    char *uri;
    QTestState *to;
    GSource *source;
    QDict *rsp;
    guint8 *log;
    guint64 size;

    if (!wait_for_fds(s)) {
        return;
    }

    /* destination reuses the source's command line plus "-incoming" */
    dest = test_server_new("dest", s->vu_ops);
    dest_cmdline = g_string_new(qos_get_current_command_line());
    uri = g_strdup_printf("%s%s", "unix:", dest->mig_path);

    size = get_log_size(s);
    g_assert_cmpint(size, ==, (256 * 1024 * 1024) / (VHOST_LOG_PAGE * 8));

    test_server_listen(dest);
    g_string_append_printf(dest_cmdline, " -incoming %s", uri);
    append_mem_opts(dest, dest_cmdline, 256, TEST_MEMFD_AUTO);
    dest->vu_ops->append_opts(dest, dest_cmdline, "");
    to = qtest_init(dest_cmdline->str);

    /* This would be where you call qos_allocate_objects(to, NULL), if you want
     * to talk to the QVirtioNet object on the destination.
     */

    /* polled assertion: src and dest rings must never overlap */
    source = g_source_new(&test_migrate_source_funcs,
                          sizeof(TestMigrateSource));
    ((TestMigrateSource *)source)->src = s;
    ((TestMigrateSource *)source)->dest = dest;
    g_source_attach(source, s->context);

    /* slow down migration to have time to fiddle with log */
    /* TODO: qtest could learn to break on some places */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 10 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    rsp = qmp("{ 'execute': 'migrate', 'arguments': { 'uri': %s } }", uri);
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    wait_for_log_fd(s);

    log = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, s->log_fd, 0);
    g_assert(log != MAP_FAILED);

    /* modify first page */
    write_guest_mem(s, 0x42);
    log[0] = 1;   /* mark the first log page dirty so it is re-sent */
    munmap(log, size);

    /* speed things up */
    rsp = qmp("{ 'execute': 'migrate-set-parameters',"
              "'arguments': { 'max-bandwidth': 0 } }");
    g_assert(qdict_haskey(rsp, "return"));
    qobject_unref(rsp);

    qmp_eventwait("STOP");
    qtest_qmp_eventwait(to, "RESUME");

    /* destination must have received its own mem table by now */
    g_assert(wait_for_fds(dest));
    read_guest_mem_server(to, dest);

    g_source_destroy(source);
    g_source_unref(source);

    qtest_quit(to);
    test_server_free(dest);
    g_free(uri);
    g_string_free(dest_cmdline, true);
}
902
/*
 * Block (up to 5s) until exactly @count vrings have been started (one
 * bit per ring in s->rings); assert-fails on timeout.
 */
static void wait_for_rings_started(TestServer *s, size_t count)
{
    gint64 deadline = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;

    g_mutex_lock(&s->data_mutex);
    while (ctpop64(s->rings) != count &&
           g_cond_wait_until(&s->data_cond, &s->data_mutex, deadline)) {
        /* woken for another condition: re-check the ring count */
    }
    /* either the rings came up or the deadline passed; fail on the latter */
    g_assert_cmpint(ctpop64(s->rings), ==, count);
    g_mutex_unlock(&s->data_mutex);
}
919
/* Connect as a client, retrying every second if QEMU is not up yet. */
static inline void test_server_connect(TestServer *server)
{
    test_server_create_chr(server, ",reconnect-ms=1000");
}
924
925 static gboolean
reconnect_cb(gpointer user_data)926 reconnect_cb(gpointer user_data)
927 {
928 TestServer *s = user_data;
929
930 qemu_chr_fe_disconnect(&s->chr);
931
932 return FALSE;
933 }
934
935 static gpointer
connect_thread(gpointer data)936 connect_thread(gpointer data)
937 {
938 TestServer *s = data;
939
940 /* wait for qemu to start before first try, to avoid extra warnings */
941 g_usleep(G_USEC_PER_SEC);
942 test_server_connect(s);
943
944 return NULL;
945 }
946
/*
 * qos setup hook for the reconnect test: here QEMU is the socket
 * server and our backend connects from a helper thread.
 */
static void *vhost_user_test_setup_reconnect(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("reconnect", arg);

    g_thread_unref(g_thread_new("connect", connect_thread, s));
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");
    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}
959
/*
 * Test body: after a full bring-up, force a disconnect and verify the
 * device renegotiates (new fds, both rings restarted).
 */
static void test_reconnect(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *s = arg;
    GSource *idle;

    if (!wait_for_fds(s)) {
        return;
    }
    wait_for_rings_started(s, 2);

    /* reset bookkeeping, then disconnect from the chardev's context */
    s->fds_num = 0;
    s->rings = 0;
    idle = g_idle_source_new();
    g_source_set_callback(idle, reconnect_cb, s, NULL);
    g_source_attach(idle, s->context);
    g_source_unref(idle);

    /* a successful reconnect repeats the whole handshake */
    g_assert(wait_for_fds(s));
    wait_for_rings_started(s, 2);
}
981
/*
 * qos setup hook: the backend drops the very first connection
 * (test_fail), so QEMU must survive and retry.
 */
static void *vhost_user_test_setup_connect_fail(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("connect-fail", arg);

    s->test_fail = true;

    g_thread_unref(g_thread_new("connect", connect_thread, s));
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");
    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}
996
/*
 * qos setup hook: the backend disconnects mid-handshake and then
 * offers bogus features (TEST_FLAGS_* progression); QEMU must cope.
 */
static void *vhost_user_test_setup_flags_mismatch(GString *cmd_line, void *arg)
{
    TestServer *s = test_server_new("flags-mismatch", arg);

    s->test_flags = TEST_FLAGS_DISCONNECT;

    g_thread_unref(g_thread_new("connect", connect_thread, s));
    append_mem_opts(s, cmd_line, 256, TEST_MEMFD_AUTO);
    s->vu_ops->append_opts(s, cmd_line, ",server=on");
    g_test_queue_destroy(vhost_user_test_cleanup, s);

    return s;
}
1011
/* Test body: the device must come up fully despite the induced failures. */
static void test_vhost_user_started(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *server = arg;

    if (!wait_for_fds(server)) {
        return;
    }
    wait_for_rings_started(server, 2);
}
1021
/*
 * qos setup hook: standard setup plus two queue pairs and enough
 * MSI-X vectors (two per vring, plus config and control).
 */
static void *vhost_user_test_setup_multiqueue(GString *cmd_line, void *arg)
{
    TestServer *server = vhost_user_test_setup(cmd_line, arg);

    server->queues = 2;
    g_string_append_printf(cmd_line,
                           " -set netdev.hs0.queues=%d"
                           " -global virtio-net-pci.vectors=%d",
                           server->queues, server->queues * 2 + 2);

    return server;
}
1034
/* Test body: each queue pair contributes a tx and an rx vring. */
static void test_multiqueue(void *obj, void *arg, QGuestAllocator *alloc)
{
    TestServer *server = arg;

    wait_for_rings_started(server, server->queues * 2);
}
1041
1042
vu_net_get_features(TestServer * s)1043 static uint64_t vu_net_get_features(TestServer *s)
1044 {
1045 uint64_t features = 0x1ULL << VIRTIO_F_VERSION_1 |
1046 0x1ULL << VHOST_F_LOG_ALL |
1047 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
1048
1049 if (s->queues > 1) {
1050 features |= 0x1ULL << VIRTIO_NET_F_MQ;
1051 }
1052
1053 return features;
1054 }
1055
/* SET_FEATURES hook: QEMU must ack protocol features; optionally misbehave. */
static void vu_net_set_features(TestServer *s, CharBackend *chr,
                                VhostUserMsg *msg)
{
    g_assert(msg->payload.u64 & (0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES));

    if (s->test_flags == TEST_FLAGS_DISCONNECT) {
        /* simulate a backend dropping the link mid-handshake */
        qemu_chr_fe_disconnect(chr);
        s->test_flags = TEST_FLAGS_BAD;
    }
}
1065
/* Reply to GET_PROTOCOL_FEATURES with the bits the net tests rely on. */
static void vu_net_get_protocol_features(TestServer *s, CharBackend *chr,
                                         VhostUserMsg *msg)
{
    uint64_t proto = (1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |
                     (1 << VHOST_USER_PROTOCOL_F_CROSS_ENDIAN);

    if (s->queues > 1) {
        proto |= 1 << VHOST_USER_PROTOCOL_F_MQ;
    }

    /* send back features to qemu */
    msg->flags |= VHOST_USER_REPLY_MASK;
    msg->size = sizeof(m.payload.u64);
    msg->payload.u64 = proto;
    qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
}
1079
/* Each VHOST-USER device should have its ops structure defined. */
static struct vhost_user_ops g_vu_net_ops = {
    .type = VHOST_USER_NET,

    .append_opts = append_vhost_net_opts,

    /* protocol hooks dispatched from chr_read() */
    .get_features = vu_net_get_features,
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_net_get_protocol_features,
};
1090
/*
 * Register all virtio-net vhost-user tests with the qos graph.  Each
 * test reuses `opts` with a different setup hook; the memfd variant is
 * only registered when the host supports sealed memfds.
 */
static void register_vhost_user_test(void)
{
    QOSGraphTestOptions opts = {
        .before = vhost_user_test_setup,
        .subprocess = true,
        .arg = &g_vu_net_ops,
    };

    qemu_add_opts(&qemu_chardev_opts);

    qos_add_test("vhost-user/read-guest-mem/memfile",
                 "virtio-net",
                 test_read_guest_mem, &opts);

    opts.before = vhost_user_test_setup_shm;
    qos_add_test("vhost-user/read-guest-mem/shm",
                 "virtio-net",
                 test_read_guest_mem, &opts);

    if (qemu_memfd_check(MFD_ALLOW_SEALING)) {
        opts.before = vhost_user_test_setup_memfd;
        qos_add_test("vhost-user/read-guest-mem/memfd",
                     "virtio-net",
                     test_read_guest_mem, &opts);
    }

    /* note: still uses the memfd setup hook from the branch above */
    qos_add_test("vhost-user/migrate",
                 "virtio-net",
                 test_migrate, &opts);

    opts.before = vhost_user_test_setup_reconnect;
    qos_add_test("vhost-user/reconnect", "virtio-net",
                 test_reconnect, &opts);

    opts.before = vhost_user_test_setup_connect_fail;
    qos_add_test("vhost-user/connect-fail", "virtio-net",
                 test_vhost_user_started, &opts);

    opts.before = vhost_user_test_setup_flags_mismatch;
    qos_add_test("vhost-user/flags-mismatch", "virtio-net",
                 test_vhost_user_started, &opts);

    opts.before = vhost_user_test_setup_multiqueue;
    opts.edge.extra_device_opts = "mq=on";
    qos_add_test("vhost-user/multiqueue",
                 "virtio-net",
                 test_multiqueue, &opts);
}
1139 libqos_init(register_vhost_user_test);
1140
static uint64_t vu_gpio_get_features(TestServer *s)
{
    /* virtio 1.0 + gpio IRQ support + the protocol-features extension */
    uint64_t feats = 0;

    feats |= 0x1ULL << VIRTIO_F_VERSION_1;
    feats |= 0x1ULL << VIRTIO_GPIO_F_IRQ;
    feats |= 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    return feats;
}
1147
1148 /*
1149 * This stub can't handle all the message types but we should reply
1150 * that we support VHOST_USER_PROTOCOL_F_CONFIG as gpio would use it
1151 * talking to a read vhost-user daemon.
1152 */
vu_gpio_get_protocol_features(TestServer * s,CharBackend * chr,VhostUserMsg * msg)1153 static void vu_gpio_get_protocol_features(TestServer *s, CharBackend *chr,
1154 VhostUserMsg *msg)
1155 {
1156 /* send back features to qemu */
1157 msg->flags |= VHOST_USER_REPLY_MASK;
1158 msg->size = sizeof(m.payload.u64);
1159 msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
1160
1161 qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
1162 }
1163
/* Ops table for the vhost-user-gpio stub backend. */
static struct vhost_user_ops g_vu_gpio_ops = {
    .type = VHOST_USER_GPIO,

    .append_opts = append_vhost_gpio_opts,

    .get_features = vu_gpio_get_features,
    /* set_features handling is identical to the net stub's, so reuse it */
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_gpio_get_protocol_features,
};
1173
register_vhost_gpio_test(void)1174 static void register_vhost_gpio_test(void)
1175 {
1176 QOSGraphTestOptions opts = {
1177 .before = vhost_user_test_setup,
1178 .subprocess = true,
1179 .arg = &g_vu_gpio_ops,
1180 };
1181
1182 qemu_add_opts(&qemu_chardev_opts);
1183
1184 qos_add_test("read-guest-mem/memfile",
1185 "vhost-user-gpio", test_read_guest_mem, &opts);
1186 }
1187 libqos_init(register_vhost_gpio_test);
1188
static uint64_t vu_scmi_get_features(TestServer *s)
{
    /* virtio 1.0 + SCMI P2A channels + the protocol-features extension */
    uint64_t feats = 0;

    feats |= 0x1ULL << VIRTIO_F_VERSION_1;
    feats |= 0x1ULL << VIRTIO_SCMI_F_P2A_CHANNELS;
    feats |= 0x1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    return feats;
}
1195
static void vu_scmi_get_protocol_features(TestServer *s, CharBackend *chr,
                                          VhostUserMsg *msg)
{
    /* Rewrite the request in place as a reply advertising MQ support. */
    msg->flags |= VHOST_USER_REPLY_MASK;
    msg->size = sizeof(m.payload.u64);
    msg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_MQ;

    qemu_chr_fe_write_all(chr, (uint8_t *)msg, VHOST_USER_HDR_SIZE + msg->size);
}
1205
/* Ops table for the vhost-user-scmi stub backend. */
static struct vhost_user_ops g_vu_scmi_ops = {
    .type = VHOST_USER_SCMI,

    /* reuses the gpio opts appender — presumably the command-line
     * fragment is identical for both devices; TODO confirm */
    .append_opts = append_vhost_gpio_opts,

    .get_features = vu_scmi_get_features,
    /* set_features handling is identical to the net stub's, so reuse it */
    .set_features = vu_net_set_features,
    .get_protocol_features = vu_scmi_get_protocol_features,
};
1215
register_vhost_scmi_test(void)1216 static void register_vhost_scmi_test(void)
1217 {
1218 QOSGraphTestOptions opts = {
1219 .before = vhost_user_test_setup,
1220 .subprocess = true,
1221 .arg = &g_vu_scmi_ops,
1222 };
1223
1224 qemu_add_opts(&qemu_chardev_opts);
1225
1226 qos_add_test("scmi/read-guest-mem/memfile",
1227 "vhost-user-scmi", test_read_guest_mem, &opts);
1228 }
1229 libqos_init(register_vhost_scmi_test);
1230