/*
 * Coroutine tests
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi    <stefanha@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "qemu/coroutine_int.h"
#include "qemu/lockable.h"

/*
 * Check that qemu_in_coroutine() works
 */

static void coroutine_fn verify_in_coroutine(void *opaque)
{
    g_assert(qemu_in_coroutine());
}

static void test_in_coroutine(void)
{
    Coroutine *coroutine;

    g_assert(!qemu_in_coroutine());

    coroutine = qemu_coroutine_create(verify_in_coroutine, NULL);
    qemu_coroutine_enter(coroutine);
}

/*
 * Check that qemu_coroutine_self() works
 */

static void coroutine_fn verify_self(void *opaque)
{
    Coroutine **p_co = opaque;
    g_assert(qemu_coroutine_self() == *p_co);
}

static void test_self(void)
{
    Coroutine *coroutine;

    coroutine = qemu_coroutine_create(verify_self, &coroutine);
    qemu_coroutine_enter(coroutine);
}

/*
 * Check that qemu_coroutine_entered() works
 */

static void coroutine_fn verify_entered_step_2(void *opaque)
{
    Coroutine *caller = (Coroutine *)opaque;

    g_assert(qemu_coroutine_entered(caller));
    g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
    qemu_coroutine_yield();

    /* Once more to check it still works after yielding */
    g_assert(qemu_coroutine_entered(caller));
    g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
}

static void coroutine_fn verify_entered_step_1(void *opaque)
{
    Coroutine *self = qemu_coroutine_self();
    Coroutine *coroutine;

    g_assert(qemu_coroutine_entered(self));

    coroutine = qemu_coroutine_create(verify_entered_step_2, self);
    g_assert(!qemu_coroutine_entered(coroutine));
    qemu_coroutine_enter(coroutine);
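    /* step 2 has yielded, so it is no longer entered */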
    g_assert(!qemu_coroutine_entered(coroutine));
    qemu_coroutine_enter(coroutine);
}

static void test_entered(void)
{
    Coroutine *coroutine;

    coroutine = qemu_coroutine_create(verify_entered_step_1, NULL);
    g_assert(!qemu_coroutine_entered(coroutine));
    qemu_coroutine_enter(coroutine);
}

/*
 * Check that coroutines may nest multiple levels
 */

typedef struct {
    unsigned int n_enter;   /* num coroutines entered */
    unsigned int n_return;  /* num coroutines returned */
    unsigned int max;       /* maximum level of nesting */
} NestData;

static void coroutine_fn nest(void *opaque)
{
    NestData *nd = opaque;

    nd->n_enter++;

    if (nd->n_enter < nd->max) {
        Coroutine *child;

        child = qemu_coroutine_create(nest, nd);
        qemu_coroutine_enter(child);
    }

    nd->n_return++;
}

static void test_nesting(void)
{
    Coroutine *root;
    NestData nd = {
        .n_enter  = 0,
        .n_return = 0,
        .max      = 128,
    };

    root = qemu_coroutine_create(nest, &nd);
    qemu_coroutine_enter(root);

    /* Must enter and return from max nesting level */
    g_assert_cmpint(nd.n_enter, ==, nd.max);
    g_assert_cmpint(nd.n_return, ==, nd.max);
}

/*
 * Check that yield/enter transfer control correctly
 */

static void coroutine_fn yield_5_times(void *opaque)
{
    bool *done = opaque;
    int i;

    for (i = 0; i < 5; i++) {
        qemu_coroutine_yield();
    }
    *done = true;
}

static void test_yield(void)
{
    Coroutine *coroutine;
    bool done = false;
    int i = -1; /* start at -1: the final enter returns instead of yielding */

    coroutine = qemu_coroutine_create(yield_5_times, &done);
    while (!done) {
        qemu_coroutine_enter(coroutine);
        i++;
    }
    g_assert_cmpint(i, ==, 5); /* coroutine must yield 5 times */
}

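/*
 * Check that freed coroutine memory is not accessed after termination
 * (relies on the coroutine freelist; see the comment in main())
 */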
static void coroutine_fn c2_fn(void *opaque)
{
    qemu_coroutine_yield();
}

static void coroutine_fn c1_fn(void *opaque)
{
    Coroutine *c2 = opaque;
    qemu_coroutine_enter(c2);
}

static void test_no_dangling_access(void)
{
    Coroutine *c1;
    Coroutine *c2;
    Coroutine tmp;

    c2 = qemu_coroutine_create(c2_fn, NULL);
    c1 = qemu_coroutine_create(c1_fn, c2);

    qemu_coroutine_enter(c1);

    /* c1 shouldn't be used any more now; make sure we segfault if it is */
    tmp = *c1;
    memset(c1, 0xff, sizeof(Coroutine));
    qemu_coroutine_enter(c2);

    /* Must restore the coroutine now to avoid corrupting the pool */
    *c1 = tmp;
}

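/*
 * Check that CoMutex and its QemuLockable wrapper serialize coroutines.
 *
 * Typical CoMutex usage is (a sketch; the critical section may yield):
 *
 *     qemu_co_mutex_lock(&m);
 *     ... critical section ...
 *     qemu_co_mutex_unlock(&m);
 */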
static bool locked;
static int done;

static void coroutine_fn mutex_fn(void *opaque)
{
    CoMutex *m = opaque;
    qemu_co_mutex_lock(m);
    assert(!locked);
    locked = true;
    qemu_coroutine_yield();
    locked = false;
    qemu_co_mutex_unlock(m);
    done++;
}

static void coroutine_fn lockable_fn(void *opaque)
{
    QemuLockable *x = opaque;
    qemu_lockable_lock(x);
    assert(!locked);
    locked = true;
    qemu_coroutine_yield();
    locked = false;
    qemu_lockable_unlock(x);
    done++;
}

static void do_test_co_mutex(CoroutineEntry *entry, void *opaque)
{
    Coroutine *c1 = qemu_coroutine_create(entry, opaque);
    Coroutine *c2 = qemu_coroutine_create(entry, opaque);

    done = 0;
    qemu_coroutine_enter(c1);
    g_assert(locked);
    qemu_coroutine_enter(c2);
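    /* c2 is now queued on the mutex, which c1 still holds */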

    /* Unlocking in c1 hands the mutex to c2, which is then entered
     * automatically as soon as c1 yields or terminates.
     */
    qemu_coroutine_enter(c1);
    g_assert_cmpint(done, ==, 1);
    g_assert(locked);

    qemu_coroutine_enter(c2);
    g_assert_cmpint(done, ==, 2);
    g_assert(!locked);
}

static void test_co_mutex(void)
{
    CoMutex m;

    qemu_co_mutex_init(&m);
    do_test_co_mutex(mutex_fn, &m);
}

static void test_co_mutex_lockable(void)
{
    CoMutex m;
    CoMutex *null_pointer = NULL;

    qemu_co_mutex_init(&m);
    do_test_co_mutex(lockable_fn, QEMU_MAKE_LOCKABLE(&m));

    g_assert(QEMU_MAKE_LOCKABLE(null_pointer) == NULL);
}

static CoRwlock rwlock;

/* Test that readers are properly sent back to the queue when upgrading,
 * even if they are the sole readers.  The test scenario is as follows:
 *
 * | c1           | c2         |
 * |--------------+------------|
 * | rdlock       |            |
 * | yield        |            |
 * |              | wrlock     |
 * |              | <queued>   |
 * | upgrade      |            |
 * | <queued>     | <dequeued> |
 * |              | unlock     |
 * | <dequeued>   |            |
 * | unlock       |            |
 */

static void coroutine_fn rwlock_yield_upgrade(void *opaque)
{
    qemu_co_rwlock_rdlock(&rwlock);
    qemu_coroutine_yield();

    qemu_co_rwlock_upgrade(&rwlock);
    qemu_co_rwlock_unlock(&rwlock);

    *(bool *)opaque = true;
}

static void coroutine_fn rwlock_wrlock_yield(void *opaque)
{
    qemu_co_rwlock_wrlock(&rwlock);
    qemu_coroutine_yield();

    qemu_co_rwlock_unlock(&rwlock);
    *(bool *)opaque = true;
}

static void test_co_rwlock_upgrade(void)
{
    bool c1_done = false;
    bool c2_done = false;
    Coroutine *c1, *c2;

    qemu_co_rwlock_init(&rwlock);
    c1 = qemu_coroutine_create(rwlock_yield_upgrade, &c1_done);
    c2 = qemu_coroutine_create(rwlock_wrlock_yield, &c2_done);

    qemu_coroutine_enter(c1);
    qemu_coroutine_enter(c2);
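    /* c2 is now queued in wrlock, waiting for c1's read lock */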

    /* c1 now should go to sleep.  */
    qemu_coroutine_enter(c1);
    g_assert(!c1_done);

    qemu_coroutine_enter(c2);
    g_assert(c1_done);
    g_assert(c2_done);
}

static void coroutine_fn rwlock_rdlock_yield(void *opaque)
{
    qemu_co_rwlock_rdlock(&rwlock);
    qemu_coroutine_yield();

    qemu_co_rwlock_unlock(&rwlock);
    qemu_coroutine_yield();

    *(bool *)opaque = true;
}

static void coroutine_fn rwlock_wrlock_downgrade(void *opaque)
{
    qemu_co_rwlock_wrlock(&rwlock);

    qemu_co_rwlock_downgrade(&rwlock);
    qemu_co_rwlock_unlock(&rwlock);
    *(bool *)opaque = true;
}

static void coroutine_fn rwlock_rdlock(void *opaque)
{
    qemu_co_rwlock_rdlock(&rwlock);

    qemu_co_rwlock_unlock(&rwlock);
    *(bool *)opaque = true;
}

static void coroutine_fn rwlock_wrlock(void *opaque)
{
    qemu_co_rwlock_wrlock(&rwlock);

    qemu_co_rwlock_unlock(&rwlock);
    *(bool *)opaque = true;
}

/*
 * Check that downgrading a reader-writer lock does not cause a hang.
 *
 * Four coroutines are used to produce a situation where there are
 * both reader and writer hopefuls waiting to acquire an rwlock that
 * is held by a reader.
 *
 * The correct sequence of operations we aim to provoke can be
 * represented as:
 *
 * | c1     | c2         | c3         | c4         |
 * |--------+------------+------------+------------|
 * | rdlock |            |            |            |
 * | yield  |            |            |            |
 * |        | wrlock     |            |            |
 * |        | <queued>   |            |            |
 * |        |            | rdlock     |            |
 * |        |            | <queued>   |            |
 * |        |            |            | wrlock     |
 * |        |            |            | <queued>   |
 * | unlock |            |            |            |
 * | yield  |            |            |            |
 * |        | <dequeued> |            |            |
 * |        | downgrade  |            |            |
 * |        |            | <dequeued> |            |
 * |        |            | unlock     |            |
 * |        | ...        |            |            |
 * |        | unlock     |            |            |
 * |        |            |            | <dequeued> |
 * |        |            |            | unlock     |
 */
static void test_co_rwlock_downgrade(void)
{
    bool c1_done = false;
    bool c2_done = false;
    bool c3_done = false;
    bool c4_done = false;
    Coroutine *c1, *c2, *c3, *c4;

    qemu_co_rwlock_init(&rwlock);

    c1 = qemu_coroutine_create(rwlock_rdlock_yield, &c1_done);
    c2 = qemu_coroutine_create(rwlock_wrlock_downgrade, &c2_done);
    c3 = qemu_coroutine_create(rwlock_rdlock, &c3_done);
    c4 = qemu_coroutine_create(rwlock_wrlock, &c4_done);

    qemu_coroutine_enter(c1);
    qemu_coroutine_enter(c2);
    qemu_coroutine_enter(c3);
    qemu_coroutine_enter(c4);
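    /* c2, c3 and c4 are all queued behind c1's read lock */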

    qemu_coroutine_enter(c1);
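    /* c1 unlocks and yields; c2 downgrades, then c3 and c4 run to completion */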

    g_assert(c2_done);
    g_assert(c3_done);
    g_assert(c4_done);

    qemu_coroutine_enter(c1);

    g_assert(c1_done);
}

/*
 * Check that creation, enter, and return work
 */

static void coroutine_fn set_and_exit(void *opaque)
{
    bool *done = opaque;

    *done = true;
}

static void test_lifecycle(void)
{
    Coroutine *coroutine;
    bool done = false;

    /* Create, enter, and return from coroutine */
    coroutine = qemu_coroutine_create(set_and_exit, &done);
    qemu_coroutine_enter(coroutine);
    g_assert(done); /* expect done to be true (first time) */

    /* Repeat to check that no state affects this test */
    done = false;
    coroutine = qemu_coroutine_create(set_and_exit, &done);
    qemu_coroutine_enter(coroutine);
    g_assert(done); /* expect done to be true (second time) */
}


#define RECORD_SIZE 10 /* Leave some room for expansion */
struct coroutine_position {
    int func;
    int state;
};
static struct coroutine_position records[RECORD_SIZE];
static unsigned record_pos;

static void record_push(int func, int state)
{
    struct coroutine_position *cp = &records[record_pos++];
    g_assert_cmpint(record_pos, <, RECORD_SIZE);
    cp->func = func;
    cp->state = state;
}

static void coroutine_fn co_order_test(void *opaque)
{
    record_push(2, 1);
    g_assert(qemu_in_coroutine());
    qemu_coroutine_yield();
    record_push(2, 2);
    g_assert(qemu_in_coroutine());
}

static void do_order_test(void)
{
    Coroutine *co;

    co = qemu_coroutine_create(co_order_test, NULL);
    record_push(1, 1);
    qemu_coroutine_enter(co);
    record_push(1, 2);
    g_assert(!qemu_in_coroutine());
    qemu_coroutine_enter(co);
    record_push(1, 3);
    g_assert(!qemu_in_coroutine());
}

static void test_order(void)
{
    int i;
    const struct coroutine_position expected_pos[] = {
        {1, 1}, {2, 1}, {1, 2}, {2, 2}, {1, 3}
    };
    do_order_test();
    g_assert_cmpint(record_pos, ==, 5);
    for (i = 0; i < record_pos; i++) {
        g_assert_cmpint(records[i].func, ==, expected_pos[i].func);
        g_assert_cmpint(records[i].state, ==, expected_pos[i].state);
    }
}

/*
 * Lifecycle benchmark
 */

static void coroutine_fn empty_coroutine(void *opaque)
{
    /* Do nothing */
}

static void perf_lifecycle(void)
{
    Coroutine *coroutine;
    unsigned int i, max;
    double duration;

    max = 1000000;

    g_test_timer_start();
    for (i = 0; i < max; i++) {
        coroutine = qemu_coroutine_create(empty_coroutine, NULL);
        qemu_coroutine_enter(coroutine);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Lifecycle %u iterations: %f s", max, duration);
}

static void perf_nesting(void)
{
    unsigned int i, maxcycles, maxnesting;
    double duration;

    maxcycles = 10000;
    maxnesting = 1000;
    Coroutine *root;

    g_test_timer_start();
    for (i = 0; i < maxcycles; i++) {
        NestData nd = {
            .n_enter  = 0,
            .n_return = 0,
            .max      = maxnesting,
        };
        root = qemu_coroutine_create(nest, &nd);
        qemu_coroutine_enter(root);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Nesting %u iterations of %u depth each: %f s",
        maxcycles, maxnesting, duration);
}

/*
 * Yield benchmark
 */

static void coroutine_fn yield_loop(void *opaque)
{
    unsigned int *counter = opaque;

    while ((*counter) > 0) {
        (*counter)--;
        qemu_coroutine_yield();
    }
}

static void perf_yield(void)
{
    unsigned int i, maxcycles;
    double duration;

    maxcycles = 100000000;
    i = maxcycles;
    Coroutine *coroutine = qemu_coroutine_create(yield_loop, &i);

    g_test_timer_start();
    while (i > 0) {
        qemu_coroutine_enter(coroutine);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Yield %u iterations: %f s", maxcycles, duration);
}

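/* noinline so the loop body stays a real function call, giving a baseline
 * to compare against coroutine enter/yield */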
static __attribute__((noinline)) void dummy(unsigned *i)
{
    (*i)--;
}

static void perf_baseline(void)
{
    unsigned int i, maxcycles;
    double duration;

    maxcycles = 100000000;
    i = maxcycles;

    g_test_timer_start();
    while (i > 0) {
        dummy(&i);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Function call %u iterations: %f s", maxcycles, duration);
}

static __attribute__((noinline)) void coroutine_fn perf_cost_func(void *opaque)
{
    qemu_coroutine_yield();
}

static void perf_cost(void)
{
    const unsigned long maxcycles = 40000000;
    unsigned long i = 0;
    double duration;
    unsigned long ops;
    Coroutine *co;

    g_test_timer_start();
    while (i++ < maxcycles) {
        co = qemu_coroutine_create(perf_cost_func, &i);
        qemu_coroutine_enter(co);
        qemu_coroutine_enter(co);
    }
    duration = g_test_timer_elapsed();
    ops = (long)(maxcycles / (duration * 1000));
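    /* ops is in thousands of create/enter/enter cycles per second */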

    g_test_message("Run operation %lu iterations %f s, %luK operations/s, "
                   "%luns per coroutine",
                   maxcycles,
                   duration, ops,
                   (unsigned long)(1000000000.0 * duration / maxcycles));
}

int main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);

    /* This test assumes there is a freelist and marks freed coroutine memory
     * with a sentinel value.  If there is no freelist this would legitimately
     * crash, so skip it.
     */
    if (CONFIG_COROUTINE_POOL) {
        g_test_add_func("/basic/no-dangling-access", test_no_dangling_access);
    }

    g_test_add_func("/basic/lifecycle", test_lifecycle);
    g_test_add_func("/basic/yield", test_yield);
    g_test_add_func("/basic/nesting", test_nesting);
    g_test_add_func("/basic/self", test_self);
    g_test_add_func("/basic/entered", test_entered);
    g_test_add_func("/basic/in_coroutine", test_in_coroutine);
    g_test_add_func("/basic/order", test_order);
    g_test_add_func("/locking/co-mutex", test_co_mutex);
    g_test_add_func("/locking/co-mutex/lockable", test_co_mutex_lockable);
    g_test_add_func("/locking/co-rwlock/upgrade", test_co_rwlock_upgrade);
    g_test_add_func("/locking/co-rwlock/downgrade", test_co_rwlock_downgrade);
    if (g_test_perf()) {
        g_test_add_func("/perf/lifecycle", perf_lifecycle);
        g_test_add_func("/perf/nesting", perf_nesting);
        g_test_add_func("/perf/yield", perf_yield);
        g_test_add_func("/perf/function-call", perf_baseline);
        g_test_add_func("/perf/cost", perf_cost);
    }
    return g_test_run();
}