/*
 * Block node draining tests
 *
 * Copyright (c) 2017 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "iothread.h"

static QemuEvent done_event;

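/*
 * Per-node state for the "test" block driver below: drain_count tracks how
 * often the drain callbacks have fired, bh_indirection_ctx optionally routes
 * request completion through a BH in another AioContext, and
 * sleep_in_drain_begin keeps an artificial operation in flight while a drain
 * begins.
 */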
typedef struct BDRVTestState {
    int drain_count;
    AioContext *bh_indirection_ctx;
    bool sleep_in_drain_begin;
} BDRVTestState;

static void coroutine_fn sleep_in_drain_begin(void *opaque)
{
    BlockDriverState *bs = opaque;

    qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
    bdrv_dec_in_flight(bs);
}

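/*
 * Drain callbacks of the test driver: they count the drain nesting level in
 * drain_count and, if sleep_in_drain_begin is set, start a sleeping coroutine
 * that holds an in-flight reference, so drain still has something to wait for
 * after .bdrv_drain_begin has returned.
 */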
static void bdrv_test_drain_begin(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    s->drain_count++;
    if (s->sleep_in_drain_begin) {
        Coroutine *co = qemu_coroutine_create(sleep_in_drain_begin, bs);
        bdrv_inc_in_flight(bs);
        aio_co_enter(bdrv_get_aio_context(bs), co);
    }
}

static void bdrv_test_drain_end(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    s->drain_count--;
}

static void bdrv_test_close(BlockDriverState *bs)
{
    BDRVTestState *s = bs->opaque;
    g_assert_cmpint(s->drain_count, >, 0);
}

static void co_reenter_bh(void *opaque)
{
    aio_co_wake(opaque);
}

static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            BdrvRequestFlags flags)
{
    BDRVTestState *s = bs->opaque;

    /* We want this request to stay until the polling loop in drain waits for
     * it to complete. We need to sleep a while as bdrv_drain_invoke() comes
     * first and polls its result, too, but it shouldn't accidentally complete
     * this request yet. */
    qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);

    if (s->bh_indirection_ctx) {
        aio_bh_schedule_oneshot(s->bh_indirection_ctx, co_reenter_bh,
                                qemu_coroutine_self());
        qemu_coroutine_yield();
    }

    return 0;
}

static int bdrv_test_change_backing_file(BlockDriverState *bs,
                                         const char *backing_file,
                                         const char *backing_fmt)
{
    return 0;
}

static BlockDriver bdrv_test = {
    .format_name            = "test",
    .instance_size          = sizeof(BDRVTestState),
    .supports_backing       = true,

    .bdrv_close             = bdrv_test_close,
    .bdrv_co_preadv         = bdrv_test_co_preadv,

    .bdrv_drain_begin       = bdrv_test_drain_begin,
    .bdrv_drain_end         = bdrv_test_drain_end,

    .bdrv_child_perm        = bdrv_default_perms,

    .bdrv_change_backing_file = bdrv_test_change_backing_file,
};

static void aio_ret_cb(void *opaque, int ret)
{
    int *aio_ret = opaque;
    *aio_ret = ret;
}

typedef struct CallInCoroutineData {
    void (*entry)(void);
    bool done;
} CallInCoroutineData;

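/*
 * Run @entry in coroutine context and poll the main AioContext until it has
 * finished. This lets the same test body be exercised both from plain
 * function context and from a coroutine.
 */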
static coroutine_fn void call_in_coroutine_entry(void *opaque)
{
    CallInCoroutineData *data = opaque;

    data->entry();
    data->done = true;
}

static void call_in_coroutine(void (*entry)(void))
{
    Coroutine *co;
    CallInCoroutineData data = {
        .entry  = entry,
        .done   = false,
    };

    co = qemu_coroutine_create(call_in_coroutine_entry, &data);
    qemu_coroutine_enter(co);
    while (!data.done) {
        aio_poll(qemu_get_aio_context(), true);
    }
}

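/*
 * Most tests are run once per drain flavour. do_drain_begin()/do_drain_end()
 * dispatch to the matching pair, i.e. either the usual single-node pattern
 *
 *     bdrv_drained_begin(bs);
 *     ... section with no new requests on bs ...
 *     bdrv_drained_end(bs);
 *
 * or bdrv_drain_all_begin()/bdrv_drain_all_end() for the global variant.
 */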
enum drain_type {
    BDRV_DRAIN_ALL,
    BDRV_DRAIN,
    DRAIN_TYPE_MAX,
};

static void do_drain_begin(enum drain_type drain_type, BlockDriverState *bs)
{
    switch (drain_type) {
    case BDRV_DRAIN_ALL:        bdrv_drain_all_begin(); break;
    case BDRV_DRAIN:            bdrv_drained_begin(bs); break;
    default:                    g_assert_not_reached();
    }
}

static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs)
{
    switch (drain_type) {
    case BDRV_DRAIN_ALL:        bdrv_drain_all_end(); break;
    case BDRV_DRAIN:            bdrv_drained_end(bs); break;
    default:                    g_assert_not_reached();
    }
}

static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs)
{
    if (drain_type != BDRV_DRAIN_ALL) {
        aio_context_acquire(bdrv_get_aio_context(bs));
    }
    do_drain_begin(drain_type, bs);
    if (drain_type != BDRV_DRAIN_ALL) {
        aio_context_release(bdrv_get_aio_context(bs));
    }
}

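/*
 * Build the small graph used by the simple tests: a BlockBackend on top of a
 * "test-node" that has a "backing" node attached. The local node references
 * are dropped again, so the graph stays alive through the returned
 * BlockBackend.
 */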
static BlockBackend * no_coroutine_fn test_setup(void)
{
    BlockBackend *blk;
    BlockDriverState *bs, *backing;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    bdrv_set_backing_hd(bs, backing, &error_abort);

    bdrv_unref(backing);
    bdrv_unref(bs);

    return blk;
}

static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs)
{
    if (drain_type != BDRV_DRAIN_ALL) {
        aio_context_acquire(bdrv_get_aio_context(bs));
    }
    do_drain_end(drain_type, bs);
    if (drain_type != BDRV_DRAIN_ALL) {
        aio_context_release(bdrv_get_aio_context(bs));
    }
}

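/*
 * Check that the driver's drain callbacks fire exactly once per drained
 * section, both while the node is idle and while a read request is pending
 * (the request must have completed by the time do_drain_begin() returns).
 */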
static void test_drv_cb_common(BlockBackend *blk, enum drain_type drain_type,
                               bool recursive)
{
    BlockDriverState *bs = blk_bs(blk);
    BlockDriverState *backing = bs->backing->bs;
    BDRVTestState *s, *backing_s;
    BlockAIOCB *acb;
    int aio_ret;

    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);

    s = bs->opaque;
    backing_s = backing->opaque;

    /* Simple bdrv_drain_all_begin/end pair, check that CBs are called */
    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    /* Now do the same while a request is pending */
    aio_ret = -EINPROGRESS;
    acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
    g_assert(acb != NULL);
    g_assert_cmpint(aio_ret, ==, -EINPROGRESS);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);

    do_drain_begin(drain_type, bs);

    g_assert_cmpint(aio_ret, ==, 0);
    g_assert_cmpint(s->drain_count, ==, 1);
    g_assert_cmpint(backing_s->drain_count, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(s->drain_count, ==, 0);
    g_assert_cmpint(backing_s->drain_count, ==, 0);
}

static void test_drv_cb_drain_all(void)
{
    BlockBackend *blk = test_setup();
    test_drv_cb_common(blk, BDRV_DRAIN_ALL, true);
    blk_unref(blk);
}

static void test_drv_cb_drain(void)
{
    BlockBackend *blk = test_setup();
    test_drv_cb_common(blk, BDRV_DRAIN, false);
    blk_unref(blk);
}

static void coroutine_fn test_drv_cb_co_drain_all_entry(void)
{
    BlockBackend *blk = blk_all_next(NULL);
    test_drv_cb_common(blk, BDRV_DRAIN_ALL, true);
}

static void test_drv_cb_co_drain_all(void)
{
    BlockBackend *blk = test_setup();
    call_in_coroutine(test_drv_cb_co_drain_all_entry);
    blk_unref(blk);
}

static void coroutine_fn test_drv_cb_co_drain_entry(void)
{
    BlockBackend *blk = blk_all_next(NULL);
    test_drv_cb_common(blk, BDRV_DRAIN, false);
}

static void test_drv_cb_co_drain(void)
{
    BlockBackend *blk = test_setup();
    call_in_coroutine(test_drv_cb_co_drain_entry);
    blk_unref(blk);
}

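/*
 * Check the quiesce_counter bookkeeping around a drained section: the drained
 * node's counter goes up (bdrv_drain_all ends up quiescing it twice), the
 * backing node is only quiesced for the recursive drain types, and everything
 * returns to zero after do_drain_end().
 */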
static void test_quiesce_common(BlockBackend *blk, enum drain_type drain_type,
                                bool recursive)
{
    BlockDriverState *bs = blk_bs(blk);
    BlockDriverState *backing = bs->backing->bs;

    g_assert_cmpint(bs->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);

    do_drain_begin(drain_type, bs);

    if (drain_type == BDRV_DRAIN_ALL) {
        g_assert_cmpint(bs->quiesce_counter, ==, 2);
    } else {
        g_assert_cmpint(bs->quiesce_counter, ==, 1);
    }
    g_assert_cmpint(backing->quiesce_counter, ==, !!recursive);

    do_drain_end(drain_type, bs);

    g_assert_cmpint(bs->quiesce_counter, ==, 0);
    g_assert_cmpint(backing->quiesce_counter, ==, 0);
}

static void test_quiesce_drain_all(void)
{
    BlockBackend *blk = test_setup();
    test_quiesce_common(blk, BDRV_DRAIN_ALL, true);
    blk_unref(blk);
}

static void test_quiesce_drain(void)
{
    BlockBackend *blk = test_setup();
    test_quiesce_common(blk, BDRV_DRAIN, false);
    blk_unref(blk);
}

static void coroutine_fn test_quiesce_co_drain_all_entry(void)
{
    BlockBackend *blk = blk_all_next(NULL);
    test_quiesce_common(blk, BDRV_DRAIN_ALL, true);
}

static void test_quiesce_co_drain_all(void)
{
    BlockBackend *blk = test_setup();
    call_in_coroutine(test_quiesce_co_drain_all_entry);
    blk_unref(blk);
}

static void coroutine_fn test_quiesce_co_drain_entry(void)
{
    BlockBackend *blk = blk_all_next(NULL);
    test_quiesce_common(blk, BDRV_DRAIN, false);
}

static void test_quiesce_co_drain(void)
{
    BlockBackend *blk = test_setup();
    call_in_coroutine(test_quiesce_co_drain_entry);
    blk_unref(blk);
}

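/*
 * Nest every combination of the two drain types and check that the quiesce
 * and drain counters reflect both levels while nested and drop back to zero
 * once both sections have ended.
 */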
static void test_nested(void)
{
    BlockBackend *blk;
    BlockDriverState *bs, *backing;
    BDRVTestState *s, *backing_s;
    enum drain_type outer, inner;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    s = bs->opaque;
    blk_insert_bs(blk, bs, &error_abort);

    backing = bdrv_new_open_driver(&bdrv_test, "backing", 0, &error_abort);
    backing_s = backing->opaque;
    bdrv_set_backing_hd(bs, backing, &error_abort);

    for (outer = 0; outer < DRAIN_TYPE_MAX; outer++) {
        for (inner = 0; inner < DRAIN_TYPE_MAX; inner++) {
            int backing_quiesce = (outer == BDRV_DRAIN_ALL) +
                                  (inner == BDRV_DRAIN_ALL);

            g_assert_cmpint(bs->quiesce_counter, ==, 0);
            g_assert_cmpint(backing->quiesce_counter, ==, 0);
            g_assert_cmpint(s->drain_count, ==, 0);
            g_assert_cmpint(backing_s->drain_count, ==, 0);

            do_drain_begin(outer, bs);
            do_drain_begin(inner, bs);

            g_assert_cmpint(bs->quiesce_counter, ==, 2 + !!backing_quiesce);
            g_assert_cmpint(backing->quiesce_counter, ==, backing_quiesce);
            g_assert_cmpint(s->drain_count, ==, 1);
            g_assert_cmpint(backing_s->drain_count, ==, !!backing_quiesce);

            do_drain_end(inner, bs);
            do_drain_end(outer, bs);

            g_assert_cmpint(bs->quiesce_counter, ==, 0);
            g_assert_cmpint(backing->quiesce_counter, ==, 0);
            g_assert_cmpint(s->drain_count, ==, 0);
            g_assert_cmpint(backing_s->drain_count, ==, 0);
        }
    }

    bdrv_unref(backing);
    bdrv_unref(bs);
    blk_unref(blk);
}

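/*
 * Modify the graph while bdrv_drain_all() is active: a node created inside
 * the drained section must come up already quiesced, and deleting a node must
 * not end the section for the remaining nodes.
 */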
static void test_graph_change_drain_all(void)
{
    BlockBackend *blk_a, *blk_b;
    BlockDriverState *bs_a, *bs_b;
    BDRVTestState *a_s, *b_s;

    /* Create node A with a BlockBackend */
    blk_a = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
                                &error_abort);
    a_s = bs_a->opaque;
    blk_insert_bs(blk_a, bs_a, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
    g_assert_cmpint(a_s->drain_count, ==, 0);

    /* Call bdrv_drain_all_begin() */
    bdrv_drain_all_begin();

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);

    /* Create node B with a BlockBackend */
    blk_b = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
                                &error_abort);
    b_s = bs_b->opaque;
    blk_insert_bs(blk_b, bs_b, &error_abort);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    /* Unref and finally delete node A */
    blk_unref(blk_a);

    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(a_s->drain_count, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    bdrv_unref(bs_a);

    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
    g_assert_cmpint(b_s->drain_count, ==, 1);

    /* End the drained section */
    bdrv_drain_all_end();

    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
    g_assert_cmpint(b_s->drain_count, ==, 0);

    bdrv_unref(bs_b);
    blk_unref(blk_b);
}

struct test_iothread_data {
    BlockDriverState *bs;
    enum drain_type drain_type;
    int *aio_ret;
    bool co_done;
};

static void coroutine_fn test_iothread_drain_co_entry(void *opaque)
{
    struct test_iothread_data *data = opaque;

    do_drain_begin(data->drain_type, data->bs);
    g_assert_cmpint(*data->aio_ret, ==, 0);
    do_drain_end(data->drain_type, data->bs);

    data->co_done = true;
    aio_wait_kick();
}

static void test_iothread_aio_cb(void *opaque, int ret)
{
    int *aio_ret = opaque;
    *aio_ret = ret;
    qemu_event_set(&done_event);
}

static void test_iothread_main_thread_bh(void *opaque)
{
    struct test_iothread_data *data = opaque;

    /* Test that the AioContext is not yet locked in a random BH that is
     * executed during drain, otherwise this would deadlock. */
    aio_context_acquire(bdrv_get_aio_context(data->bs));
    bdrv_flush(data->bs);
    bdrv_dec_in_flight(data->bs); /* incremented by test_iothread_common() */
    aio_context_release(bdrv_get_aio_context(data->bs));
}

/*
 * Starts an AIO request on a BDS that runs in the AioContext of iothread 1.
 * The request involves a BH on iothread 2 before it can complete.
 *
 * @drain_thread = 0 means that do_drain_begin/end are called from the main
 * thread, @drain_thread = 1 means that they are called from iothread 1. Drain
 * for this BDS cannot be called from iothread 2 because only the main thread
 * may do cross-AioContext polling.
 */
static void test_iothread_common(enum drain_type drain_type, int drain_thread)
{
    BlockBackend *blk;
    BlockDriverState *bs;
    BDRVTestState *s;
    BlockAIOCB *acb;
    Coroutine *co;
    int aio_ret;
    struct test_iothread_data data;

    IOThread *a = iothread_new();
    IOThread *b = iothread_new();
    AioContext *ctx_a = iothread_get_aio_context(a);
    AioContext *ctx_b = iothread_get_aio_context(b);

    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);

    /* bdrv_drain_all() may only be called from the main loop thread */
    if (drain_type == BDRV_DRAIN_ALL && drain_thread != 0) {
        goto out;
    }

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
                              &error_abort);
    s = bs->opaque;
    blk_insert_bs(blk, bs, &error_abort);
    blk_set_disable_request_queuing(blk, true);

    blk_set_aio_context(blk, ctx_a, &error_abort);
    aio_context_acquire(ctx_a);

    s->bh_indirection_ctx = ctx_b;

    aio_ret = -EINPROGRESS;
    qemu_event_reset(&done_event);

    if (drain_thread == 0) {
        acb = blk_aio_preadv(blk, 0, &qiov, 0, test_iothread_aio_cb, &aio_ret);
    } else {
        acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
    }
    g_assert(acb != NULL);
    g_assert_cmpint(aio_ret, ==, -EINPROGRESS);

    aio_context_release(ctx_a);

    data = (struct test_iothread_data) {
        .bs         = bs,
        .drain_type = drain_type,
        .aio_ret    = &aio_ret,
    };

    switch (drain_thread) {
    case 0:
        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_acquire(ctx_a);
        }

        /*
         * Increment in_flight so that do_drain_begin() waits for
         * test_iothread_main_thread_bh(). This prevents the race between
         * test_iothread_main_thread_bh() in IOThread a and do_drain_begin() in
         * this thread. test_iothread_main_thread_bh() decrements in_flight.
         */
        bdrv_inc_in_flight(bs);
        aio_bh_schedule_oneshot(ctx_a, test_iothread_main_thread_bh, &data);

        /* The request is running on IOThread a. Draining its block device
         * will make sure that it has completed as far as the BDS is concerned,
         * but the drain in this thread can continue immediately after
         * bdrv_dec_in_flight() and aio_ret might be assigned only slightly
         * later. */
        do_drain_begin(drain_type, bs);
        g_assert_cmpint(bs->in_flight, ==, 0);

        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_release(ctx_a);
        }
        qemu_event_wait(&done_event);
        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_acquire(ctx_a);
        }

        g_assert_cmpint(aio_ret, ==, 0);
        do_drain_end(drain_type, bs);

        if (drain_type != BDRV_DRAIN_ALL) {
            aio_context_release(ctx_a);
        }
        break;
    case 1:
        co = qemu_coroutine_create(test_iothread_drain_co_entry, &data);
        aio_co_enter(ctx_a, co);
        AIO_WAIT_WHILE_UNLOCKED(NULL, !data.co_done);
        break;
    default:
        g_assert_not_reached();
    }

    aio_context_acquire(ctx_a);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx_a);

    bdrv_unref(bs);
    blk_unref(blk);

out:
    iothread_join(a);
    iothread_join(b);
}

static void test_iothread_drain_all(void)
{
    test_iothread_common(BDRV_DRAIN_ALL, 0);
    test_iothread_common(BDRV_DRAIN_ALL, 1);
}

static void test_iothread_drain(void)
{
    test_iothread_common(BDRV_DRAIN, 0);
    test_iothread_common(BDRV_DRAIN, 1);
}


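/*
 * A minimal block job for the drain tests: its main loop just sleeps in small
 * steps until it is asked to complete, and the completion callbacks call
 * bdrv_flush() to provoke an AIO_WAIT_WHILE() at a point where a deadlock
 * with drain would be noticed.
 */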
typedef struct TestBlockJob {
    BlockJob common;
    BlockDriverState *bs;
    int run_ret;
    int prepare_ret;
    bool running;
    bool should_complete;
} TestBlockJob;

static int test_job_prepare(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    bdrv_flush(s->bs);
    return s->prepare_ret;
}

static void test_job_commit(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    bdrv_flush(s->bs);
}

static void test_job_abort(Job *job)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* Provoke an AIO_WAIT_WHILE() call to verify there is no deadlock */
    bdrv_flush(s->bs);
}

static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    /* We are running the actual job code past the pause point in
     * job_co_entry(). */
    s->running = true;

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
         * emulate some actual activity (probably some I/O) here so that drain
         * has to wait for this activity to stop. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        job_pause_point(&s->common.job);
    }

    return s->run_ret;
}

static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}

BlockJobDriver test_job_driver = {
    .job_driver = {
        .instance_size  = sizeof(TestBlockJob),
        .free           = block_job_free,
        .user_resume    = block_job_user_resume,
        .run            = test_job_run,
        .complete       = test_job_complete,
        .prepare        = test_job_prepare,
        .commit         = test_job_commit,
        .abort          = test_job_abort,
    },
};

enum test_job_result {
    TEST_JOB_SUCCESS,
    TEST_JOB_FAIL_RUN,
    TEST_JOB_FAIL_PREPARE,
};

enum test_job_drain_node {
    TEST_JOB_DRAIN_SRC,
    TEST_JOB_DRAIN_SRC_CHILD,
};

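/*
 * Attach the test job to a small graph (a source node with backing and
 * overlay, plus a separate target node), optionally moving the source to an
 * iothread. Then check that draining @drain_node or the target pauses the
 * job, that ending the drain resumes it, and that the job result is
 * propagated as expected.
 */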
static void test_blockjob_common_drain_node(enum drain_type drain_type,
                                            bool use_iothread,
                                            enum test_job_result result,
                                            enum test_job_drain_node drain_node)
{
    BlockBackend *blk_src, *blk_target;
    BlockDriverState *src, *src_backing, *src_overlay, *target, *drain_bs;
    BlockJob *job;
    TestBlockJob *tjob;
    IOThread *iothread = NULL;
    AioContext *ctx;
    int ret;

    src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
                               &error_abort);
    src_backing = bdrv_new_open_driver(&bdrv_test, "source-backing",
                                       BDRV_O_RDWR, &error_abort);
    src_overlay = bdrv_new_open_driver(&bdrv_test, "source-overlay",
                                       BDRV_O_RDWR, &error_abort);

    bdrv_set_backing_hd(src_overlay, src, &error_abort);
    bdrv_unref(src);
    bdrv_set_backing_hd(src, src_backing, &error_abort);
    bdrv_unref(src_backing);

    blk_src = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk_src, src_overlay, &error_abort);

    switch (drain_node) {
    case TEST_JOB_DRAIN_SRC:
        drain_bs = src;
        break;
    case TEST_JOB_DRAIN_SRC_CHILD:
        drain_bs = src_backing;
        break;
    default:
        g_assert_not_reached();
    }

    if (use_iothread) {
        iothread = iothread_new();
        ctx = iothread_get_aio_context(iothread);
        blk_set_aio_context(blk_src, ctx, &error_abort);
    } else {
        ctx = qemu_get_aio_context();
    }

    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);
    blk_target = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    blk_insert_bs(blk_target, target, &error_abort);
    blk_set_allow_aio_context_change(blk_target, true);

    aio_context_acquire(ctx);
    tjob = block_job_create("job0", &test_job_driver, NULL, src,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    tjob->bs = src;
    job = &tjob->common;
    block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);

    switch (result) {
    case TEST_JOB_SUCCESS:
        break;
    case TEST_JOB_FAIL_RUN:
        tjob->run_ret = -EIO;
        break;
    case TEST_JOB_FAIL_PREPARE:
        tjob->prepare_ret = -EIO;
        break;
    }
    aio_context_release(ctx);

    job_start(&job->job);

    if (use_iothread) {
        /* job_co_entry() is run in the I/O thread, wait for the actual job
         * code to start (we don't want to catch the job in the pause point in
         * job_co_entry()). */
        while (!tjob->running) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    WITH_JOB_LOCK_GUARD() {
        g_assert_cmpint(job->job.pause_count, ==, 0);
        g_assert_false(job->job.paused);
        g_assert_true(tjob->running);
        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
    }

    do_drain_begin_unlocked(drain_type, drain_bs);

    WITH_JOB_LOCK_GUARD() {
        if (drain_type == BDRV_DRAIN_ALL) {
            /* bdrv_drain_all() drains both src and target */
            g_assert_cmpint(job->job.pause_count, ==, 2);
        } else {
            g_assert_cmpint(job->job.pause_count, ==, 1);
        }
        g_assert_true(job->job.paused);
        g_assert_false(job->job.busy); /* The job is paused */
    }

    do_drain_end_unlocked(drain_type, drain_bs);

    if (use_iothread) {
        /*
         * Here we are waiting for the paused status to change,
         * so don't bother protecting the read every time.
         *
         * paused is reset in the I/O thread, wait for it
         */
        while (job->job.paused) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    WITH_JOB_LOCK_GUARD() {
        g_assert_cmpint(job->job.pause_count, ==, 0);
        g_assert_false(job->job.paused);
        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
    }

    do_drain_begin_unlocked(drain_type, target);

    WITH_JOB_LOCK_GUARD() {
        if (drain_type == BDRV_DRAIN_ALL) {
            /* bdrv_drain_all() drains both src and target */
            g_assert_cmpint(job->job.pause_count, ==, 2);
        } else {
            g_assert_cmpint(job->job.pause_count, ==, 1);
        }
        g_assert_true(job->job.paused);
        g_assert_false(job->job.busy); /* The job is paused */
    }

    do_drain_end_unlocked(drain_type, target);

    if (use_iothread) {
        /*
         * Here we are waiting for the paused status to change,
         * so don't bother protecting the read every time.
         *
         * paused is reset in the I/O thread, wait for it
         */
        while (job->job.paused) {
            aio_poll(qemu_get_aio_context(), false);
        }
    }

    WITH_JOB_LOCK_GUARD() {
        g_assert_cmpint(job->job.pause_count, ==, 0);
        g_assert_false(job->job.paused);
        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
    }

    WITH_JOB_LOCK_GUARD() {
        ret = job_complete_sync_locked(&job->job, &error_abort);
    }
    g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));

    aio_context_acquire(ctx);
    if (use_iothread) {
        blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
        assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
    }
    aio_context_release(ctx);

    blk_unref(blk_src);
    blk_unref(blk_target);
    bdrv_unref(src_overlay);
    bdrv_unref(target);

    if (iothread) {
        iothread_join(iothread);
    }
}

static void test_blockjob_common(enum drain_type drain_type, bool use_iothread,
                                 enum test_job_result result)
{
    test_blockjob_common_drain_node(drain_type, use_iothread, result,
                                    TEST_JOB_DRAIN_SRC);
    test_blockjob_common_drain_node(drain_type, use_iothread, result,
                                    TEST_JOB_DRAIN_SRC_CHILD);
}

static void test_blockjob_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_SUCCESS);
}

static void test_blockjob_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_SUCCESS);
}

static void test_blockjob_error_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN_ALL, false, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_error_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN, false, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_iothread_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_SUCCESS);
}

static void test_blockjob_iothread_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_SUCCESS);
}

static void test_blockjob_iothread_error_drain_all(void)
{
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN_ALL, true, TEST_JOB_FAIL_PREPARE);
}

static void test_blockjob_iothread_error_drain(void)
{
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_RUN);
    test_blockjob_common(BDRV_DRAIN, true, TEST_JOB_FAIL_PREPARE);
}


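/*
 * "test_top_driver" is a simple filter-like node whose reads are forwarded to
 * @wait_child. The delete-by-drain tests below use it to keep a request
 * pending on a child while the node itself is deleted (or its children
 * detached) in the middle of a drain.
 */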
typedef struct BDRVTestTopState {
    BdrvChild *wait_child;
} BDRVTestTopState;

static void bdrv_test_top_close(BlockDriverState *bs)
{
    BdrvChild *c, *next_c;

    bdrv_graph_wrlock(NULL);
    QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
        bdrv_unref_child(bs, c);
    }
    bdrv_graph_wrunlock();
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_test_top_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                        QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    BDRVTestTopState *tts = bs->opaque;
    return bdrv_co_preadv(tts->wait_child, offset, bytes, qiov, flags);
}

static BlockDriver bdrv_test_top_driver = {
    .format_name            = "test_top_driver",
    .instance_size          = sizeof(BDRVTestTopState),

    .bdrv_close             = bdrv_test_top_close,
    .bdrv_co_preadv         = bdrv_test_top_co_preadv,

    .bdrv_child_perm        = bdrv_default_perms,
};

typedef struct TestCoDeleteByDrainData {
    BlockBackend *blk;
    bool detach_instead_of_delete;
    bool done;
} TestCoDeleteByDrainData;

static void coroutine_fn test_co_delete_by_drain(void *opaque)
{
    TestCoDeleteByDrainData *dbdd = opaque;
    BlockBackend *blk = dbdd->blk;
    BlockDriverState *bs = blk_bs(blk);
    BDRVTestTopState *tts = bs->opaque;
    void *buffer = g_malloc(65536);
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buffer, 65536);

    /* Pretend some internal write operation from parent to child.
     * Important: We have to read from the child, not from the parent!
     * Draining works by first propagating it all up the tree to the
     * root and then waiting for drainage from root to the leaves
     * (protocol nodes).  If we have a request waiting on the root,
     * everything will be drained before we go back down the tree, but
     * we do not want that.  We want to be in the middle of draining
     * when the following request returns. */
    bdrv_graph_co_rdlock();
    bdrv_co_preadv(tts->wait_child, 0, 65536, &qiov, 0);
    bdrv_graph_co_rdunlock();

    g_assert_cmpint(bs->refcnt, ==, 1);

    if (!dbdd->detach_instead_of_delete) {
        blk_co_unref(blk);
    } else {
        BdrvChild *c, *next_c;
        QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
            bdrv_co_unref_child(bs, c);
        }
    }

    dbdd->done = true;
    g_free(buffer);
}

/**
1047da668aa1SThomas Huth  * Test what happens when a BDS has several children, one of them is
1048da668aa1SThomas Huth  * drained, and this results in the BDS being deleted.
1049da668aa1SThomas Huth  *
1050da668aa1SThomas Huth  * If @detach_instead_of_delete is set, the BDS is not going to be
1051da668aa1SThomas Huth  * deleted but will only detach all of its children.
1052da668aa1SThomas Huth  */
1053da668aa1SThomas Huth static void do_test_delete_by_drain(bool detach_instead_of_delete,
1054da668aa1SThomas Huth                                     enum drain_type drain_type)
1055da668aa1SThomas Huth {
1056da668aa1SThomas Huth     BlockBackend *blk;
1057da668aa1SThomas Huth     BlockDriverState *bs, *child_bs, *null_bs;
1058da668aa1SThomas Huth     BDRVTestTopState *tts;
1059da668aa1SThomas Huth     TestCoDeleteByDrainData dbdd;
1060da668aa1SThomas Huth     Coroutine *co;
1061da668aa1SThomas Huth 
1062da668aa1SThomas Huth     bs = bdrv_new_open_driver(&bdrv_test_top_driver, "top", BDRV_O_RDWR,
1063da668aa1SThomas Huth                               &error_abort);
1064da668aa1SThomas Huth     bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
1065da668aa1SThomas Huth     tts = bs->opaque;
1066da668aa1SThomas Huth 
1067da668aa1SThomas Huth     null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
1068da668aa1SThomas Huth                         &error_abort);
1069afdaeb9eSKevin Wolf     bdrv_graph_wrlock(NULL);
1070da668aa1SThomas Huth     bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds,
1071da668aa1SThomas Huth                       BDRV_CHILD_DATA, &error_abort);
1072afdaeb9eSKevin Wolf     bdrv_graph_wrunlock();
1073da668aa1SThomas Huth 
1074da668aa1SThomas Huth     /* This child will be the one to pass requests through to, and
1075da668aa1SThomas Huth      * it will stall until a drain occurs */
1076da668aa1SThomas Huth     child_bs = bdrv_new_open_driver(&bdrv_test, "child", BDRV_O_RDWR,
1077da668aa1SThomas Huth                                     &error_abort);
1078da668aa1SThomas Huth     child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
1079da668aa1SThomas Huth     /* Takes our reference to child_bs */
1080afdaeb9eSKevin Wolf     bdrv_graph_wrlock(NULL);
1081da668aa1SThomas Huth     tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child",
1082da668aa1SThomas Huth                                         &child_of_bds,
1083da668aa1SThomas Huth                                         BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
1084da668aa1SThomas Huth                                         &error_abort);
1085afdaeb9eSKevin Wolf     bdrv_graph_wrunlock();
1086da668aa1SThomas Huth 
1087da668aa1SThomas Huth     /* This child is just there to be deleted
1088da668aa1SThomas Huth      * (for detach_instead_of_delete == true) */
1089da668aa1SThomas Huth     null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
1090da668aa1SThomas Huth                         &error_abort);
1091afdaeb9eSKevin Wolf     bdrv_graph_wrlock(NULL);
1092da668aa1SThomas Huth     bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA,
1093da668aa1SThomas Huth                       &error_abort);
1094afdaeb9eSKevin Wolf     bdrv_graph_wrunlock();
1095da668aa1SThomas Huth 
1096da668aa1SThomas Huth     blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
1097da668aa1SThomas Huth     blk_insert_bs(blk, bs, &error_abort);
1098da668aa1SThomas Huth 
1099da668aa1SThomas Huth     /* Referenced by blk now */
1100da668aa1SThomas Huth     bdrv_unref(bs);
1101da668aa1SThomas Huth 
1102da668aa1SThomas Huth     g_assert_cmpint(bs->refcnt, ==, 1);
1103da668aa1SThomas Huth     g_assert_cmpint(child_bs->refcnt, ==, 1);
1104da668aa1SThomas Huth     g_assert_cmpint(null_bs->refcnt, ==, 1);
1105da668aa1SThomas Huth 
1106da668aa1SThomas Huth 
1107da668aa1SThomas Huth     dbdd = (TestCoDeleteByDrainData){
1108da668aa1SThomas Huth         .blk = blk,
1109da668aa1SThomas Huth         .detach_instead_of_delete = detach_instead_of_delete,
1110da668aa1SThomas Huth         .done = false,
1111da668aa1SThomas Huth     };
1112da668aa1SThomas Huth     co = qemu_coroutine_create(test_co_delete_by_drain, &dbdd);
1113da668aa1SThomas Huth     qemu_coroutine_enter(co);
1114da668aa1SThomas Huth 
1115da668aa1SThomas Huth     /* Drain the child while the read operation is still pending.
1116da668aa1SThomas Huth      * This should result in the operation finishing and
1117da668aa1SThomas Huth      * test_co_delete_by_drain() resuming.  Thus, @bs will be deleted
1118da668aa1SThomas Huth      * and the coroutine will exit while this drain operation is still
1119da668aa1SThomas Huth      * in progress. */
1120da668aa1SThomas Huth     switch (drain_type) {
1121da668aa1SThomas Huth     case BDRV_DRAIN:
1122da668aa1SThomas Huth         bdrv_ref(child_bs);
1123da668aa1SThomas Huth         bdrv_drain(child_bs);
1124da668aa1SThomas Huth         bdrv_unref(child_bs);
1125da668aa1SThomas Huth         break;
1126da668aa1SThomas Huth     case BDRV_DRAIN_ALL:
1127da668aa1SThomas Huth         bdrv_drain_all_begin();
1128da668aa1SThomas Huth         bdrv_drain_all_end();
1129da668aa1SThomas Huth         break;
1130da668aa1SThomas Huth     default:
1131da668aa1SThomas Huth         g_assert_not_reached();
1132da668aa1SThomas Huth     }
1133da668aa1SThomas Huth 
1134da668aa1SThomas Huth     while (!dbdd.done) {
1135da668aa1SThomas Huth         aio_poll(qemu_get_aio_context(), true);
1136da668aa1SThomas Huth     }
1137da668aa1SThomas Huth 
1138da668aa1SThomas Huth     if (detach_instead_of_delete) {
1139da668aa1SThomas Huth         /* Here, the reference has not passed over to the coroutine,
1140da668aa1SThomas Huth          * so we have to delete the BB ourselves */
1141da668aa1SThomas Huth         blk_unref(blk);
1142da668aa1SThomas Huth     }
1143da668aa1SThomas Huth }
1144da668aa1SThomas Huth 
1145da668aa1SThomas Huth static void test_delete_by_drain(void)
1146da668aa1SThomas Huth {
1147da668aa1SThomas Huth     do_test_delete_by_drain(false, BDRV_DRAIN);
1148da668aa1SThomas Huth }
1149da668aa1SThomas Huth 
1150da668aa1SThomas Huth static void test_detach_by_drain_all(void)
1151da668aa1SThomas Huth {
1152da668aa1SThomas Huth     do_test_delete_by_drain(true, BDRV_DRAIN_ALL);
1153da668aa1SThomas Huth }
1154da668aa1SThomas Huth 
1155da668aa1SThomas Huth static void test_detach_by_drain(void)
1156da668aa1SThomas Huth {
1157da668aa1SThomas Huth     do_test_delete_by_drain(true, BDRV_DRAIN);
1158da668aa1SThomas Huth }
1159da668aa1SThomas Huth 
1160da668aa1SThomas Huth 
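/*
 * Shared state for the detach-by-parent-cb / detach-by-driver-cb tests:
 * detach_indirect_bh() removes child_b from parent_b and attaches c instead
 * (storing the new edge in child_c).  detach_on_drain gates whether the
 * driver-cb variant's drained_begin performs the detach; it is kept false
 * while the graph is still being set up.
 */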
1161da668aa1SThomas Huth struct detach_by_parent_data {
1162da668aa1SThomas Huth     BlockDriverState *parent_b;
1163da668aa1SThomas Huth     BdrvChild *child_b;
1164da668aa1SThomas Huth     BlockDriverState *c;
1165da668aa1SThomas Huth     BdrvChild *child_c;
1166da668aa1SThomas Huth     bool by_parent_cb;
1167617f3a96SKevin Wolf     bool detach_on_drain;
1168da668aa1SThomas Huth };
1169da668aa1SThomas Huth static struct detach_by_parent_data detach_by_parent_data;
1170da668aa1SThomas Huth 
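/*
 * BH that performs the actual graph change: drop the in-flight reference
 * taken by whoever scheduled it, then detach child_b from parent_b and
 * attach c in its place, under the graph write lock.
 */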
1171903df115SKevin Wolf static void no_coroutine_fn detach_indirect_bh(void *opaque)
1172da668aa1SThomas Huth {
1173da668aa1SThomas Huth     struct detach_by_parent_data *data = opaque;
1174da668aa1SThomas Huth 
1175617f3a96SKevin Wolf     bdrv_dec_in_flight(data->child_b->bs);
117632a8aba3SKevin Wolf 
117732a8aba3SKevin Wolf     bdrv_graph_wrlock(NULL);
1178da668aa1SThomas Huth     bdrv_unref_child(data->parent_b, data->child_b);
1179da668aa1SThomas Huth 
1180da668aa1SThomas Huth     bdrv_ref(data->c);
1181da668aa1SThomas Huth     data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C",
1182da668aa1SThomas Huth                                       &child_of_bds, BDRV_CHILD_DATA,
1183da668aa1SThomas Huth                                       &error_abort);
1184afdaeb9eSKevin Wolf     bdrv_graph_wrunlock();
1185da668aa1SThomas Huth }
1186da668aa1SThomas Huth 
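/*
 * Completion callback of the request started on parent_a.  In the
 * by_parent_cb variant it schedules detach_indirect_bh() as a BH, keeping
 * child_b's node in-flight until that BH has run.
 */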
1187903df115SKevin Wolf static void coroutine_mixed_fn detach_by_parent_aio_cb(void *opaque, int ret)
1188da668aa1SThomas Huth {
1189da668aa1SThomas Huth     struct detach_by_parent_data *data = &detach_by_parent_data;
1190da668aa1SThomas Huth 
1191da668aa1SThomas Huth     g_assert_cmpint(ret, ==, 0);
1192da668aa1SThomas Huth     if (data->by_parent_cb) {
1193617f3a96SKevin Wolf         bdrv_inc_in_flight(data->child_b->bs);
1194903df115SKevin Wolf         aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
1195903df115SKevin Wolf                                 detach_indirect_bh, &detach_by_parent_data);
1196da668aa1SThomas Huth     }
1197da668aa1SThomas Huth }
1198da668aa1SThomas Huth 
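/*
 * drained_begin callback used for the by_parent_cb == false variant: on the
 * first drain (while detach_on_drain is set) it schedules detach_indirect_bh()
 * and then falls through to the default child_of_bds behaviour.
 */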
1199*d05ab380SEmanuele Giuseppe Esposito static void GRAPH_RDLOCK detach_by_driver_cb_drained_begin(BdrvChild *child)
1200da668aa1SThomas Huth {
1201617f3a96SKevin Wolf     struct detach_by_parent_data *data = &detach_by_parent_data;
1202617f3a96SKevin Wolf 
1203617f3a96SKevin Wolf     if (!data->detach_on_drain) {
1204617f3a96SKevin Wolf         return;
1205617f3a96SKevin Wolf     }
1206617f3a96SKevin Wolf     data->detach_on_drain = false;
1207617f3a96SKevin Wolf 
1208617f3a96SKevin Wolf     bdrv_inc_in_flight(data->child_b->bs);
1209da668aa1SThomas Huth     aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
1210da668aa1SThomas Huth                             detach_indirect_bh, &detach_by_parent_data);
1211da668aa1SThomas Huth     child_of_bds.drained_begin(child);
1212da668aa1SThomas Huth }
1213da668aa1SThomas Huth 
1214da668aa1SThomas Huth static BdrvChildClass detach_by_driver_cb_class;
1215da668aa1SThomas Huth 
1216da668aa1SThomas Huth /*
1217da668aa1SThomas Huth  * Initial graph:
1218da668aa1SThomas Huth  *
1219da668aa1SThomas Huth  * PA     PB
1220da668aa1SThomas Huth  *    \ /   \
1221da668aa1SThomas Huth  *     A     B     C
1222da668aa1SThomas Huth  *
1223da668aa1SThomas Huth  * by_parent_cb == true:  Test that parent callbacks don't poll
1224da668aa1SThomas Huth  *
1225da668aa1SThomas Huth  *     PA has a pending read request whose callback changes the child nodes of
1226da668aa1SThomas Huth  *     PB: It removes B and adds C instead. The subtree of PB is drained, which
1227da668aa1SThomas Huth  *     will indirectly drain the read request, too.
1228da668aa1SThomas Huth  *
1229da668aa1SThomas Huth  * by_parent_cb == false: Test that bdrv_drain_invoke() doesn't poll
1230da668aa1SThomas Huth  *
1231da668aa1SThomas Huth  *     PA's BdrvChildClass has a .drained_begin callback that schedules a BH
1232da668aa1SThomas Huth  *     that does the same graph change. If bdrv_drain_invoke() calls it, the
1233da668aa1SThomas Huth  *     state is messed up, but if it is only polled in the single
1234da668aa1SThomas Huth  *     BDRV_POLL_WHILE() at the end of the drain, this should work fine.
1235da668aa1SThomas Huth  */
1236*d05ab380SEmanuele Giuseppe Esposito static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb)
1237da668aa1SThomas Huth {
1238da668aa1SThomas Huth     BlockBackend *blk;
1239da668aa1SThomas Huth     BlockDriverState *parent_a, *parent_b, *a, *b, *c;
1240da668aa1SThomas Huth     BdrvChild *child_a, *child_b;
1241da668aa1SThomas Huth     BlockAIOCB *acb;
1242da668aa1SThomas Huth 
1243da668aa1SThomas Huth     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, 0);
1244da668aa1SThomas Huth 
1245da668aa1SThomas Huth     if (!by_parent_cb) {
1246da668aa1SThomas Huth         detach_by_driver_cb_class = child_of_bds;
1247da668aa1SThomas Huth         detach_by_driver_cb_class.drained_begin =
1248da668aa1SThomas Huth             detach_by_driver_cb_drained_begin;
1249617f3a96SKevin Wolf         detach_by_driver_cb_class.drained_end = NULL;
1250617f3a96SKevin Wolf         detach_by_driver_cb_class.drained_poll = NULL;
1251da668aa1SThomas Huth     }
1252da668aa1SThomas Huth 
1253617f3a96SKevin Wolf     detach_by_parent_data = (struct detach_by_parent_data) {
1254617f3a96SKevin Wolf         .detach_on_drain = false,
1255617f3a96SKevin Wolf     };
1256617f3a96SKevin Wolf 
1257da668aa1SThomas Huth     /* Create all involved nodes */
1258da668aa1SThomas Huth     parent_a = bdrv_new_open_driver(&bdrv_test, "parent-a", BDRV_O_RDWR,
1259da668aa1SThomas Huth                                     &error_abort);
1260da668aa1SThomas Huth     parent_b = bdrv_new_open_driver(&bdrv_test, "parent-b", 0,
1261da668aa1SThomas Huth                                     &error_abort);
1262da668aa1SThomas Huth 
1263da668aa1SThomas Huth     a = bdrv_new_open_driver(&bdrv_test, "a", BDRV_O_RDWR, &error_abort);
1264da668aa1SThomas Huth     b = bdrv_new_open_driver(&bdrv_test, "b", BDRV_O_RDWR, &error_abort);
1265da668aa1SThomas Huth     c = bdrv_new_open_driver(&bdrv_test, "c", BDRV_O_RDWR, &error_abort);
1266da668aa1SThomas Huth 
1267da668aa1SThomas Huth     /* blk is a BB for parent-a */
1268da668aa1SThomas Huth     blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
1269da668aa1SThomas Huth     blk_insert_bs(blk, parent_a, &error_abort);
1270da668aa1SThomas Huth     bdrv_unref(parent_a);
1271da668aa1SThomas Huth 
1272da668aa1SThomas Huth     /* If we want to get bdrv_drain_invoke() to call aio_poll(), the driver
1273da668aa1SThomas Huth      * callback must not return immediately. */
1274da668aa1SThomas Huth     if (!by_parent_cb) {
1275da668aa1SThomas Huth         BDRVTestState *s = parent_a->opaque;
1276da668aa1SThomas Huth         s->sleep_in_drain_begin = true;
1277da668aa1SThomas Huth     }
1278da668aa1SThomas Huth 
1279da668aa1SThomas Huth     /* Set child relationships */
1280da668aa1SThomas Huth     bdrv_ref(b);
1281da668aa1SThomas Huth     bdrv_ref(a);
1282afdaeb9eSKevin Wolf     bdrv_graph_wrlock(NULL);
1283da668aa1SThomas Huth     child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds,
1284da668aa1SThomas Huth                                 BDRV_CHILD_DATA, &error_abort);
1285da668aa1SThomas Huth     child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_of_bds,
1286da668aa1SThomas Huth                                 BDRV_CHILD_COW, &error_abort);
1287da668aa1SThomas Huth 
1288da668aa1SThomas Huth     bdrv_ref(a);
1289da668aa1SThomas Huth     bdrv_attach_child(parent_a, a, "PA-A",
1290da668aa1SThomas Huth                       by_parent_cb ? &child_of_bds : &detach_by_driver_cb_class,
1291da668aa1SThomas Huth                       BDRV_CHILD_DATA, &error_abort);
1292afdaeb9eSKevin Wolf     bdrv_graph_wrunlock();
1293da668aa1SThomas Huth 
1294da668aa1SThomas Huth     g_assert_cmpint(parent_a->refcnt, ==, 1);
1295da668aa1SThomas Huth     g_assert_cmpint(parent_b->refcnt, ==, 1);
1296da668aa1SThomas Huth     g_assert_cmpint(a->refcnt, ==, 3);
1297da668aa1SThomas Huth     g_assert_cmpint(b->refcnt, ==, 2);
1298da668aa1SThomas Huth     g_assert_cmpint(c->refcnt, ==, 1);
1299da668aa1SThomas Huth 
1300da668aa1SThomas Huth     g_assert(QLIST_FIRST(&parent_b->children) == child_a);
1301da668aa1SThomas Huth     g_assert(QLIST_NEXT(child_a, next) == child_b);
1302da668aa1SThomas Huth     g_assert(QLIST_NEXT(child_b, next) == NULL);
1303da668aa1SThomas Huth 
1304da668aa1SThomas Huth     /* Start the evil read request */
1305da668aa1SThomas Huth     detach_by_parent_data = (struct detach_by_parent_data) {
1306da668aa1SThomas Huth         .parent_b = parent_b,
1307da668aa1SThomas Huth         .child_b = child_b,
1308da668aa1SThomas Huth         .c = c,
1309da668aa1SThomas Huth         .by_parent_cb = by_parent_cb,
1310617f3a96SKevin Wolf         .detach_on_drain = true,
1311da668aa1SThomas Huth     };
1312da668aa1SThomas Huth     acb = blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, NULL);
1313da668aa1SThomas Huth     g_assert(acb != NULL);
1314da668aa1SThomas Huth 
1315da668aa1SThomas Huth     /* Drain and check the expected result */
1316299403aeSKevin Wolf     bdrv_drained_begin(parent_b);
1317299403aeSKevin Wolf     bdrv_drained_begin(a);
1318299403aeSKevin Wolf     bdrv_drained_begin(b);
1319299403aeSKevin Wolf     bdrv_drained_begin(c);
1320da668aa1SThomas Huth 
1321da668aa1SThomas Huth     g_assert(detach_by_parent_data.child_c != NULL);
1322da668aa1SThomas Huth 
1323da668aa1SThomas Huth     g_assert_cmpint(parent_a->refcnt, ==, 1);
1324da668aa1SThomas Huth     g_assert_cmpint(parent_b->refcnt, ==, 1);
1325da668aa1SThomas Huth     g_assert_cmpint(a->refcnt, ==, 3);
1326da668aa1SThomas Huth     g_assert_cmpint(b->refcnt, ==, 1);
1327da668aa1SThomas Huth     g_assert_cmpint(c->refcnt, ==, 2);
1328da668aa1SThomas Huth 
1329da668aa1SThomas Huth     g_assert(QLIST_FIRST(&parent_b->children) == detach_by_parent_data.child_c);
1330da668aa1SThomas Huth     g_assert(QLIST_NEXT(detach_by_parent_data.child_c, next) == child_a);
1331da668aa1SThomas Huth     g_assert(QLIST_NEXT(child_a, next) == NULL);
1332da668aa1SThomas Huth 
1333da668aa1SThomas Huth     g_assert_cmpint(parent_a->quiesce_counter, ==, 1);
1334299403aeSKevin Wolf     g_assert_cmpint(parent_b->quiesce_counter, ==, 3);
1335da668aa1SThomas Huth     g_assert_cmpint(a->quiesce_counter, ==, 1);
1336299403aeSKevin Wolf     g_assert_cmpint(b->quiesce_counter, ==, 1);
1337da668aa1SThomas Huth     g_assert_cmpint(c->quiesce_counter, ==, 1);
1338da668aa1SThomas Huth 
1339299403aeSKevin Wolf     bdrv_drained_end(parent_b);
1340299403aeSKevin Wolf     bdrv_drained_end(a);
1341299403aeSKevin Wolf     bdrv_drained_end(b);
1342299403aeSKevin Wolf     bdrv_drained_end(c);
1343da668aa1SThomas Huth 
1344da668aa1SThomas Huth     bdrv_unref(parent_b);
1345da668aa1SThomas Huth     blk_unref(blk);
1346da668aa1SThomas Huth 
1347da668aa1SThomas Huth     g_assert_cmpint(a->refcnt, ==, 1);
1348da668aa1SThomas Huth     g_assert_cmpint(b->refcnt, ==, 1);
1349da668aa1SThomas Huth     g_assert_cmpint(c->refcnt, ==, 1);
1350da668aa1SThomas Huth     bdrv_unref(a);
1351da668aa1SThomas Huth     bdrv_unref(b);
1352da668aa1SThomas Huth     bdrv_unref(c);
1353da668aa1SThomas Huth }
1354da668aa1SThomas Huth 
1355da668aa1SThomas Huth static void test_detach_by_parent_cb(void)
1356da668aa1SThomas Huth {
1357da668aa1SThomas Huth     test_detach_indirect(true);
1358da668aa1SThomas Huth }
1359da668aa1SThomas Huth 
1360da668aa1SThomas Huth static void test_detach_by_driver_cb(void)
1361da668aa1SThomas Huth {
1362da668aa1SThomas Huth     test_detach_indirect(false);
1363da668aa1SThomas Huth }
1364da668aa1SThomas Huth 
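/*
 * Check that bdrv_append() on a drained base node keeps the quiesce state
 * consistent: the overlay must inherit the base's drain (same quiesce and
 * drain counts, nothing in flight), and ending the drain must undrain both
 * nodes.
 */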
1365da668aa1SThomas Huth static void test_append_to_drained(void)
1366da668aa1SThomas Huth {
1367da668aa1SThomas Huth     BlockBackend *blk;
1368da668aa1SThomas Huth     BlockDriverState *base, *overlay;
1369da668aa1SThomas Huth     BDRVTestState *base_s, *overlay_s;
1370da668aa1SThomas Huth 
1371da668aa1SThomas Huth     blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
1372da668aa1SThomas Huth     base = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
1373da668aa1SThomas Huth     base_s = base->opaque;
1374da668aa1SThomas Huth     blk_insert_bs(blk, base, &error_abort);
1375da668aa1SThomas Huth 
1376da668aa1SThomas Huth     overlay = bdrv_new_open_driver(&bdrv_test, "overlay", BDRV_O_RDWR,
1377da668aa1SThomas Huth                                    &error_abort);
1378da668aa1SThomas Huth     overlay_s = overlay->opaque;
1379da668aa1SThomas Huth 
1380da668aa1SThomas Huth     do_drain_begin(BDRV_DRAIN, base);
1381da668aa1SThomas Huth     g_assert_cmpint(base->quiesce_counter, ==, 1);
1382da668aa1SThomas Huth     g_assert_cmpint(base_s->drain_count, ==, 1);
1383da668aa1SThomas Huth     g_assert_cmpint(base->in_flight, ==, 0);
1384da668aa1SThomas Huth 
1385487b9187SKevin Wolf     aio_context_acquire(qemu_get_aio_context());
1386da668aa1SThomas Huth     bdrv_append(overlay, base, &error_abort);
1387487b9187SKevin Wolf     aio_context_release(qemu_get_aio_context());
1388487b9187SKevin Wolf 
1389da668aa1SThomas Huth     g_assert_cmpint(base->in_flight, ==, 0);
1390da668aa1SThomas Huth     g_assert_cmpint(overlay->in_flight, ==, 0);
1391da668aa1SThomas Huth 
1392da668aa1SThomas Huth     g_assert_cmpint(base->quiesce_counter, ==, 1);
1393da668aa1SThomas Huth     g_assert_cmpint(base_s->drain_count, ==, 1);
1394da668aa1SThomas Huth     g_assert_cmpint(overlay->quiesce_counter, ==, 1);
1395da668aa1SThomas Huth     g_assert_cmpint(overlay_s->drain_count, ==, 1);
1396da668aa1SThomas Huth 
1397da668aa1SThomas Huth     do_drain_end(BDRV_DRAIN, base);
1398da668aa1SThomas Huth 
1399da668aa1SThomas Huth     g_assert_cmpint(base->quiesce_counter, ==, 0);
1400da668aa1SThomas Huth     g_assert_cmpint(base_s->drain_count, ==, 0);
1401da668aa1SThomas Huth     g_assert_cmpint(overlay->quiesce_counter, ==, 0);
1402da668aa1SThomas Huth     g_assert_cmpint(overlay_s->drain_count, ==, 0);
1403da668aa1SThomas Huth 
1404ae9d4417SVladimir Sementsov-Ogievskiy     bdrv_unref(overlay);
1405da668aa1SThomas Huth     bdrv_unref(base);
1406da668aa1SThomas Huth     blk_unref(blk);
1407da668aa1SThomas Huth }
1408da668aa1SThomas Huth 
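/*
 * Check that a drained node can be moved between AioContexts with
 * bdrv_try_change_aio_context(): to iothread a, then to iothread b, then
 * back to the main context.
 */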
1409da668aa1SThomas Huth static void test_set_aio_context(void)
1410da668aa1SThomas Huth {
1411da668aa1SThomas Huth     BlockDriverState *bs;
1412da668aa1SThomas Huth     IOThread *a = iothread_new();
1413da668aa1SThomas Huth     IOThread *b = iothread_new();
1414da668aa1SThomas Huth     AioContext *ctx_a = iothread_get_aio_context(a);
1415da668aa1SThomas Huth     AioContext *ctx_b = iothread_get_aio_context(b);
1416da668aa1SThomas Huth 
1417da668aa1SThomas Huth     bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
1418da668aa1SThomas Huth                               &error_abort);
1419da668aa1SThomas Huth 
1420da668aa1SThomas Huth     bdrv_drained_begin(bs);
1421142e6907SEmanuele Giuseppe Esposito     bdrv_try_change_aio_context(bs, ctx_a, NULL, &error_abort);
1422da668aa1SThomas Huth 
1423da668aa1SThomas Huth     aio_context_acquire(ctx_a);
1424da668aa1SThomas Huth     bdrv_drained_end(bs);
1425da668aa1SThomas Huth 
1426da668aa1SThomas Huth     bdrv_drained_begin(bs);
1427142e6907SEmanuele Giuseppe Esposito     bdrv_try_change_aio_context(bs, ctx_b, NULL, &error_abort);
1428da668aa1SThomas Huth     aio_context_release(ctx_a);
1429da668aa1SThomas Huth     aio_context_acquire(ctx_b);
1430142e6907SEmanuele Giuseppe Esposito     bdrv_try_change_aio_context(bs, qemu_get_aio_context(), NULL, &error_abort);
1431da668aa1SThomas Huth     aio_context_release(ctx_b);
1432da668aa1SThomas Huth     bdrv_drained_end(bs);
1433da668aa1SThomas Huth 
1434da668aa1SThomas Huth     bdrv_unref(bs);
1435da668aa1SThomas Huth     iothread_join(a);
1436da668aa1SThomas Huth     iothread_join(b);
1437da668aa1SThomas Huth }
1438da668aa1SThomas Huth 
1439da668aa1SThomas Huth 
1440da668aa1SThomas Huth typedef struct TestDropBackingBlockJob {
1441da668aa1SThomas Huth     BlockJob common;
1442da668aa1SThomas Huth     bool should_complete;
1443da668aa1SThomas Huth     bool *did_complete;
1444da668aa1SThomas Huth     BlockDriverState *detach_also;
14451b177bbeSVladimir Sementsov-Ogievskiy     BlockDriverState *bs;
1446da668aa1SThomas Huth } TestDropBackingBlockJob;
1447da668aa1SThomas Huth 
1448da668aa1SThomas Huth static int coroutine_fn test_drop_backing_job_run(Job *job, Error **errp)
1449da668aa1SThomas Huth {
1450da668aa1SThomas Huth     TestDropBackingBlockJob *s =
1451da668aa1SThomas Huth         container_of(job, TestDropBackingBlockJob, common.job);
1452da668aa1SThomas Huth 
1453da668aa1SThomas Huth     while (!s->should_complete) {
1454da668aa1SThomas Huth         job_sleep_ns(job, 0);
1455da668aa1SThomas Huth     }
1456da668aa1SThomas Huth 
1457da668aa1SThomas Huth     return 0;
1458da668aa1SThomas Huth }
1459da668aa1SThomas Huth 
1460da668aa1SThomas Huth static void test_drop_backing_job_commit(Job *job)
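/*
 * .commit callback: drop the backing file of the job's node and of
 * detach_also, then record that the job has completed.
 */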
1461da668aa1SThomas Huth {
1462da668aa1SThomas Huth     TestDropBackingBlockJob *s =
1463da668aa1SThomas Huth         container_of(job, TestDropBackingBlockJob, common.job);
1464da668aa1SThomas Huth 
14651b177bbeSVladimir Sementsov-Ogievskiy     bdrv_set_backing_hd(s->bs, NULL, &error_abort);
1466da668aa1SThomas Huth     bdrv_set_backing_hd(s->detach_also, NULL, &error_abort);
1467da668aa1SThomas Huth 
1468da668aa1SThomas Huth     *s->did_complete = true;
1469da668aa1SThomas Huth }
1470da668aa1SThomas Huth 
1471da668aa1SThomas Huth static const BlockJobDriver test_drop_backing_job_driver = {
1472da668aa1SThomas Huth     .job_driver = {
1473da668aa1SThomas Huth         .instance_size  = sizeof(TestDropBackingBlockJob),
1474da668aa1SThomas Huth         .free           = block_job_free,
1475da668aa1SThomas Huth         .user_resume    = block_job_user_resume,
1476da668aa1SThomas Huth         .run            = test_drop_backing_job_run,
1477da668aa1SThomas Huth         .commit         = test_drop_backing_job_commit,
1478da668aa1SThomas Huth     }
1479da668aa1SThomas Huth };
1480da668aa1SThomas Huth 
1481da668aa1SThomas Huth /**
1482da668aa1SThomas Huth  * Creates a child node with three parent nodes on it, and then runs a
1483da668aa1SThomas Huth  * block job on the final one, parent-node-2.
1484da668aa1SThomas Huth  *
1485da668aa1SThomas Huth  * The job is then asked to complete before a section where the child
1486da668aa1SThomas Huth  * is drained.
1487da668aa1SThomas Huth  *
1488da668aa1SThomas Huth  * Ending this section will undrain the child's parents, first
1489da668aa1SThomas Huth  * parent-node-2, then parent-node-1, then parent-node-0 -- the parent
1490da668aa1SThomas Huth  * list is in reverse order of how they were added.  Ending the drain
1491da668aa1SThomas Huth  * on parent-node-2 will resume the job, thus completing it and
1492da668aa1SThomas Huth  * scheduling job_exit().
1493da668aa1SThomas Huth  *
1494da668aa1SThomas Huth  * Ending the drain on parent-node-1 will poll the AioContext, which
1495da668aa1SThomas Huth  * lets job_exit() and thus test_drop_backing_job_commit() run.  That
1496da668aa1SThomas Huth  * function first removes the child as parent-node-2's backing file.
1497da668aa1SThomas Huth  *
1498da668aa1SThomas Huth  * In old (and buggy) implementations, there are two problems with
1499da668aa1SThomas Huth  * that:
1500da668aa1SThomas Huth  * (A) bdrv_drain_invoke() polls for every node that leaves the
1501da668aa1SThomas Huth  *     drained section.  This means that job_exit() is scheduled
1502da668aa1SThomas Huth  *     before the child has left the drained section.  Its
1503da668aa1SThomas Huth  *     quiesce_counter is therefore still 1 when it is removed from
1504da668aa1SThomas Huth  *     parent-node-2.
1505da668aa1SThomas Huth  *
1506da668aa1SThomas Huth  * (B) bdrv_replace_child_noperm() calls drained_end() on the old
1507da668aa1SThomas Huth  *     child's parents as many times as the child is quiesced.  This
1508da668aa1SThomas Huth  *     means it will call drained_end() on parent-node-2 once.
1509da668aa1SThomas Huth  *     Because parent-node-2 is no longer quiesced at this point, this
1510da668aa1SThomas Huth  *     will fail.
1511da668aa1SThomas Huth  *
1512da668aa1SThomas Huth  * bdrv_replace_child_noperm() therefore must call drained_end() on
1513da668aa1SThomas Huth  * the parent only if it really is still drained because the child is
1514da668aa1SThomas Huth  * drained.
1515da668aa1SThomas Huth  *
1516da668aa1SThomas Huth  * If removing child from parent-node-2 was successful (as it should
1517da668aa1SThomas Huth  * be), test_drop_backing_job_commit() will then also remove the child
1518da668aa1SThomas Huth  * from parent-node-0.
1519da668aa1SThomas Huth  *
1520da668aa1SThomas Huth  * With an old version of our drain infrastructure ((A) above), that
1521da668aa1SThomas Huth  * resulted in the following flow:
1522da668aa1SThomas Huth  *
1523da668aa1SThomas Huth  * 1. child attempts to leave its drained section.  The call recurses
1524da668aa1SThomas Huth  *    to its parents.
1525da668aa1SThomas Huth  *
1526da668aa1SThomas Huth  * 2. parent-node-2 leaves the drained section.  Polling in
1527da668aa1SThomas Huth  *    bdrv_drain_invoke() will schedule job_exit().
1528da668aa1SThomas Huth  *
1529da668aa1SThomas Huth  * 3. parent-node-1 leaves the drained section.  Polling in
1530da668aa1SThomas Huth  *    bdrv_drain_invoke() will run job_exit(), thus disconnecting
1531da668aa1SThomas Huth  *    parent-node-0 from the child node.
1532da668aa1SThomas Huth  *
1533da668aa1SThomas Huth  * 4. bdrv_parent_drained_end() uses a QLIST_FOREACH_SAFE() loop to
1534da668aa1SThomas Huth  *    iterate over the parents.  Thus, it now accesses the BdrvChild
1535da668aa1SThomas Huth  *    object that used to connect parent-node-0 and the child node.
1536da668aa1SThomas Huth  *    However, that object no longer exists, so it accesses a dangling
1537da668aa1SThomas Huth  *    pointer.
1538da668aa1SThomas Huth  *
1539da668aa1SThomas Huth  * The solution is to only poll once when running a bdrv_drained_end()
1540da668aa1SThomas Huth  * operation, specifically at the end when all drained_end()
1541da668aa1SThomas Huth  * operations for all involved nodes have been scheduled.
1542da668aa1SThomas Huth  * Note that this also solves (A) above, thus hiding (B).
1543da668aa1SThomas Huth  */
1544da668aa1SThomas Huth static void test_blockjob_commit_by_drained_end(void)
1545da668aa1SThomas Huth {
1546da668aa1SThomas Huth     BlockDriverState *bs_child, *bs_parents[3];
1547da668aa1SThomas Huth     TestDropBackingBlockJob *job;
1548da668aa1SThomas Huth     bool job_has_completed = false;
1549da668aa1SThomas Huth     int i;
1550da668aa1SThomas Huth 
1551da668aa1SThomas Huth     bs_child = bdrv_new_open_driver(&bdrv_test, "child-node", BDRV_O_RDWR,
1552da668aa1SThomas Huth                                     &error_abort);
1553da668aa1SThomas Huth 
1554da668aa1SThomas Huth     for (i = 0; i < 3; i++) {
1555da668aa1SThomas Huth         char name[32];
1556da668aa1SThomas Huth         snprintf(name, sizeof(name), "parent-node-%i", i);
1557da668aa1SThomas Huth         bs_parents[i] = bdrv_new_open_driver(&bdrv_test, name, BDRV_O_RDWR,
1558da668aa1SThomas Huth                                              &error_abort);
1559da668aa1SThomas Huth         bdrv_set_backing_hd(bs_parents[i], bs_child, &error_abort);
1560da668aa1SThomas Huth     }
1561da668aa1SThomas Huth 
1562da668aa1SThomas Huth     job = block_job_create("job", &test_drop_backing_job_driver, NULL,
1563da668aa1SThomas Huth                            bs_parents[2], 0, BLK_PERM_ALL, 0, 0, NULL, NULL,
1564da668aa1SThomas Huth                            &error_abort);
15651b177bbeSVladimir Sementsov-Ogievskiy     job->bs = bs_parents[2];
1566da668aa1SThomas Huth 
1567da668aa1SThomas Huth     job->detach_also = bs_parents[0];
1568da668aa1SThomas Huth     job->did_complete = &job_has_completed;
1569da668aa1SThomas Huth 
1570da668aa1SThomas Huth     job_start(&job->common.job);
1571da668aa1SThomas Huth 
1572da668aa1SThomas Huth     job->should_complete = true;
1573da668aa1SThomas Huth     bdrv_drained_begin(bs_child);
1574da668aa1SThomas Huth     g_assert(!job_has_completed);
1575da668aa1SThomas Huth     bdrv_drained_end(bs_child);
15765e8ac217SKevin Wolf     aio_poll(qemu_get_aio_context(), false);
1577da668aa1SThomas Huth     g_assert(job_has_completed);
1578da668aa1SThomas Huth 
1579da668aa1SThomas Huth     bdrv_unref(bs_parents[0]);
1580da668aa1SThomas Huth     bdrv_unref(bs_parents[1]);
1581da668aa1SThomas Huth     bdrv_unref(bs_parents[2]);
1582da668aa1SThomas Huth     bdrv_unref(bs_child);
1583da668aa1SThomas Huth }
1584da668aa1SThomas Huth 
1585da668aa1SThomas Huth 
1586da668aa1SThomas Huth typedef struct TestSimpleBlockJob {
1587da668aa1SThomas Huth     BlockJob common;
1588da668aa1SThomas Huth     bool should_complete;
1589da668aa1SThomas Huth     bool *did_complete;
1590da668aa1SThomas Huth } TestSimpleBlockJob;
1591da668aa1SThomas Huth 
1592da668aa1SThomas Huth static int coroutine_fn test_simple_job_run(Job *job, Error **errp)
1593da668aa1SThomas Huth {
1594da668aa1SThomas Huth     TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job);
1595da668aa1SThomas Huth 
1596da668aa1SThomas Huth     while (!s->should_complete) {
1597da668aa1SThomas Huth         job_sleep_ns(job, 0);
1598da668aa1SThomas Huth     }
1599da668aa1SThomas Huth 
1600da668aa1SThomas Huth     return 0;
1601da668aa1SThomas Huth }
1602da668aa1SThomas Huth 
1603da668aa1SThomas Huth static void test_simple_job_clean(Job *job)
1604da668aa1SThomas Huth {
1605da668aa1SThomas Huth     TestSimpleBlockJob *s = container_of(job, TestSimpleBlockJob, common.job);
1606da668aa1SThomas Huth     *s->did_complete = true;
1607da668aa1SThomas Huth }
1608da668aa1SThomas Huth 
1609da668aa1SThomas Huth static const BlockJobDriver test_simple_job_driver = {
1610da668aa1SThomas Huth     .job_driver = {
1611da668aa1SThomas Huth         .instance_size  = sizeof(TestSimpleBlockJob),
1612da668aa1SThomas Huth         .free           = block_job_free,
1613da668aa1SThomas Huth         .user_resume    = block_job_user_resume,
1614da668aa1SThomas Huth         .run            = test_simple_job_run,
1615da668aa1SThomas Huth         .clean          = test_simple_job_clean,
1616da668aa1SThomas Huth     },
1617da668aa1SThomas Huth };
1618da668aa1SThomas Huth 
1619da668aa1SThomas Huth static int drop_intermediate_poll_update_filename(BdrvChild *child,
1620da668aa1SThomas Huth                                                   BlockDriverState *new_base,
1621da668aa1SThomas Huth                                                   const char *filename,
1622da668aa1SThomas Huth                                                   Error **errp)
1623da668aa1SThomas Huth {
1624da668aa1SThomas Huth     /*
1625da668aa1SThomas Huth      * We are free to poll here, which may change the block graph, if
1626da668aa1SThomas Huth      * it is not drained.
1627da668aa1SThomas Huth      */
1628da668aa1SThomas Huth 
1629da668aa1SThomas Huth     /* If the job is not drained: Complete it, schedule job_exit() */
1630da668aa1SThomas Huth     aio_poll(qemu_get_current_aio_context(), false);
1631da668aa1SThomas Huth     /* If the job is not drained: Run job_exit(), finish the job */
1632da668aa1SThomas Huth     aio_poll(qemu_get_current_aio_context(), false);
1633da668aa1SThomas Huth 
1634da668aa1SThomas Huth     return 0;
1635da668aa1SThomas Huth }
1636da668aa1SThomas Huth 
1637da668aa1SThomas Huth /**
1638da668aa1SThomas Huth  * Test a poll in the midst of bdrv_drop_intermediate().
1639da668aa1SThomas Huth  *
1640da668aa1SThomas Huth  * bdrv_drop_intermediate() calls BdrvChildClass.update_filename(),
1641da668aa1SThomas Huth  * which can yield or poll.  This may lead to graph changes, unless
1642da668aa1SThomas Huth  * the whole subtree in question is drained.
1643da668aa1SThomas Huth  *
1644da668aa1SThomas Huth  * We test this on the following graph:
1645da668aa1SThomas Huth  *
1646da668aa1SThomas Huth  *                    Job
1647da668aa1SThomas Huth  *
1648da668aa1SThomas Huth  *                     |
1649da668aa1SThomas Huth  *                  job-node
1650da668aa1SThomas Huth  *                     |
1651da668aa1SThomas Huth  *                     v
1652da668aa1SThomas Huth  *
1653da668aa1SThomas Huth  *                  job-node
1654da668aa1SThomas Huth  *
1655da668aa1SThomas Huth  *                     |
1656da668aa1SThomas Huth  *                  backing
1657da668aa1SThomas Huth  *                     |
1658da668aa1SThomas Huth  *                     v
1659da668aa1SThomas Huth  *
1660da668aa1SThomas Huth  * node-2 --chain--> node-1 --chain--> node-0
1661da668aa1SThomas Huth  *
1662da668aa1SThomas Huth  * We drop node-1 with bdrv_drop_intermediate(top=node-1, base=node-0).
1663da668aa1SThomas Huth  *
1664da668aa1SThomas Huth  * This first updates node-2's backing filename by invoking
1665da668aa1SThomas Huth  * drop_intermediate_poll_update_filename(), which polls twice.  This
1666da668aa1SThomas Huth  * causes the job to finish, which in turn causes the job-node to be
1667da668aa1SThomas Huth  * deleted.
1668da668aa1SThomas Huth  *
1669da668aa1SThomas Huth  * bdrv_drop_intermediate() uses a QLIST_FOREACH_SAFE() loop, so it
1670da668aa1SThomas Huth  * already has a pointer to the BdrvChild edge between job-node and
1671da668aa1SThomas Huth  * node-1.  When it tries to handle that edge, we probably get a
1672da668aa1SThomas Huth  * segmentation fault because the object no longer exists.
1673da668aa1SThomas Huth  *
1674da668aa1SThomas Huth  *
1675da668aa1SThomas Huth  * The solution is for bdrv_drop_intermediate() to drain top's
1676da668aa1SThomas Huth  * subtree.  This prevents graph changes from happening just because
1677da668aa1SThomas Huth  * BdrvChildClass.update_filename() yields or polls.  Thus, the block
1678da668aa1SThomas Huth  * job is paused during that drained section and must finish before or
1679da668aa1SThomas Huth  * after.
1680da668aa1SThomas Huth  *
1681da668aa1SThomas Huth  * (In addition, bdrv_replace_child() must keep the job paused.)
1682da668aa1SThomas Huth  */
1683da668aa1SThomas Huth static void test_drop_intermediate_poll(void)
1684da668aa1SThomas Huth {
1685da668aa1SThomas Huth     static BdrvChildClass chain_child_class;
1686da668aa1SThomas Huth     BlockDriverState *chain[3];
1687da668aa1SThomas Huth     TestSimpleBlockJob *job;
1688da668aa1SThomas Huth     BlockDriverState *job_node;
1689da668aa1SThomas Huth     bool job_has_completed = false;
1690da668aa1SThomas Huth     int i;
1691da668aa1SThomas Huth     int ret;
1692da668aa1SThomas Huth 
1693da668aa1SThomas Huth     chain_child_class = child_of_bds;
1694da668aa1SThomas Huth     chain_child_class.update_filename = drop_intermediate_poll_update_filename;
1695da668aa1SThomas Huth 
1696da668aa1SThomas Huth     for (i = 0; i < 3; i++) {
1697da668aa1SThomas Huth         char name[32];
1698da668aa1SThomas Huth         snprintf(name, 32, "node-%i", i);
1699da668aa1SThomas Huth 
1700da668aa1SThomas Huth         chain[i] = bdrv_new_open_driver(&bdrv_test, name, 0, &error_abort);
1701da668aa1SThomas Huth     }
1702da668aa1SThomas Huth 
1703da668aa1SThomas Huth     job_node = bdrv_new_open_driver(&bdrv_test, "job-node", BDRV_O_RDWR,
1704da668aa1SThomas Huth                                     &error_abort);
1705da668aa1SThomas Huth     bdrv_set_backing_hd(job_node, chain[1], &error_abort);
1706da668aa1SThomas Huth 
1707da668aa1SThomas Huth     /*
1708da668aa1SThomas Huth      * Establish the chain last, so the chain links are the first
1709da668aa1SThomas Huth      * elements in the BDS.parents lists
1710da668aa1SThomas Huth      */
1711afdaeb9eSKevin Wolf     bdrv_graph_wrlock(NULL);
1712da668aa1SThomas Huth     for (i = 0; i < 3; i++) {
1713da668aa1SThomas Huth         if (i) {
1714da668aa1SThomas Huth             /* Takes the reference to chain[i - 1] */
17155bb04747SVladimir Sementsov-Ogievskiy             bdrv_attach_child(chain[i], chain[i - 1], "chain",
17165bb04747SVladimir Sementsov-Ogievskiy                               &chain_child_class, BDRV_CHILD_COW, &error_abort);
1717da668aa1SThomas Huth         }
1718da668aa1SThomas Huth     }
1719afdaeb9eSKevin Wolf     bdrv_graph_wrunlock();
1720da668aa1SThomas Huth 
1721da668aa1SThomas Huth     job = block_job_create("job", &test_simple_job_driver, NULL, job_node,
1722da668aa1SThomas Huth                            0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort);
1723da668aa1SThomas Huth 
1724da668aa1SThomas Huth     /* The job has a reference now */
1725da668aa1SThomas Huth     bdrv_unref(job_node);
1726da668aa1SThomas Huth 
1727da668aa1SThomas Huth     job->did_complete = &job_has_completed;
1728da668aa1SThomas Huth 
1729da668aa1SThomas Huth     job_start(&job->common.job);
1730da668aa1SThomas Huth     job->should_complete = true;
1731da668aa1SThomas Huth 
1732da668aa1SThomas Huth     g_assert(!job_has_completed);
1733da668aa1SThomas Huth     ret = bdrv_drop_intermediate(chain[1], chain[0], NULL);
17345e8ac217SKevin Wolf     aio_poll(qemu_get_aio_context(), false);
1735da668aa1SThomas Huth     g_assert(ret == 0);
1736da668aa1SThomas Huth     g_assert(job_has_completed);
1737da668aa1SThomas Huth 
1738da668aa1SThomas Huth     bdrv_unref(chain[2]);
1739da668aa1SThomas Huth }
1740da668aa1SThomas Huth 
1741da668aa1SThomas Huth 
1742da668aa1SThomas Huth typedef struct BDRVReplaceTestState {
174323987471SKevin Wolf     bool setup_completed;
1744da668aa1SThomas Huth     bool was_drained;
1745da668aa1SThomas Huth     bool was_undrained;
1746da668aa1SThomas Huth     bool has_read;
1747da668aa1SThomas Huth 
1748da668aa1SThomas Huth     int drain_count;
1749da668aa1SThomas Huth 
1750da668aa1SThomas Huth     bool yield_before_read;
1751da668aa1SThomas Huth     Coroutine *io_co;
1752da668aa1SThomas Huth     Coroutine *drain_co;
1753da668aa1SThomas Huth } BDRVReplaceTestState;
1754da668aa1SThomas Huth 
1755da668aa1SThomas Huth static void bdrv_replace_test_close(BlockDriverState *bs)
1756da668aa1SThomas Huth {
1757da668aa1SThomas Huth }
1758da668aa1SThomas Huth 
1759da668aa1SThomas Huth /**
1760da668aa1SThomas Huth  * If @bs has a backing file:
1761da668aa1SThomas Huth  *   Yield if .yield_before_read is true (and wait for drain_begin to
1762da668aa1SThomas Huth  *   wake us up).
1763da668aa1SThomas Huth  *   Forward the read to bs->backing.  Set .has_read to true.
1764da668aa1SThomas Huth  *   If drain_begin has woken us, wake it in turn.
1765da668aa1SThomas Huth  *
1766da668aa1SThomas Huth  * Otherwise:
1767da668aa1SThomas Huth  *   Set .has_read to true and return success.
1768da668aa1SThomas Huth  */
1769b9b10c35SKevin Wolf static int coroutine_fn GRAPH_RDLOCK
1770b9b10c35SKevin Wolf bdrv_replace_test_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
1771b9b10c35SKevin Wolf                             QEMUIOVector *qiov, BdrvRequestFlags flags)
1772da668aa1SThomas Huth {
1773da668aa1SThomas Huth     BDRVReplaceTestState *s = bs->opaque;
1774da668aa1SThomas Huth 
1775da668aa1SThomas Huth     if (bs->backing) {
1776da668aa1SThomas Huth         int ret;
1777da668aa1SThomas Huth 
1778da668aa1SThomas Huth         g_assert(!s->drain_count);
1779da668aa1SThomas Huth 
1780da668aa1SThomas Huth         s->io_co = qemu_coroutine_self();
1781da668aa1SThomas Huth         if (s->yield_before_read) {
1782da668aa1SThomas Huth             s->yield_before_read = false;
1783da668aa1SThomas Huth             qemu_coroutine_yield();
1784da668aa1SThomas Huth         }
1785da668aa1SThomas Huth         s->io_co = NULL;
1786da668aa1SThomas Huth 
1787da668aa1SThomas Huth         ret = bdrv_co_preadv(bs->backing, offset, bytes, qiov, 0);
1788da668aa1SThomas Huth         s->has_read = true;
1789da668aa1SThomas Huth 
1790da668aa1SThomas Huth         /* Wake up drain_co if it runs */
1791da668aa1SThomas Huth         if (s->drain_co) {
1792da668aa1SThomas Huth             aio_co_wake(s->drain_co);
1793da668aa1SThomas Huth         }
1794da668aa1SThomas Huth 
1795da668aa1SThomas Huth         return ret;
1796da668aa1SThomas Huth     }
1797da668aa1SThomas Huth 
1798da668aa1SThomas Huth     s->has_read = true;
1799da668aa1SThomas Huth     return 0;
1800da668aa1SThomas Huth }
1801da668aa1SThomas Huth 
18027bce1c29SKevin Wolf static void coroutine_fn bdrv_replace_test_drain_co(void *opaque)
18037bce1c29SKevin Wolf {
18047bce1c29SKevin Wolf     BlockDriverState *bs = opaque;
18057bce1c29SKevin Wolf     BDRVReplaceTestState *s = bs->opaque;
18067bce1c29SKevin Wolf 
18077bce1c29SKevin Wolf     /* Keep waking io_co up until it is done */
18087bce1c29SKevin Wolf     while (s->io_co) {
18097bce1c29SKevin Wolf         aio_co_wake(s->io_co);
18107bce1c29SKevin Wolf         s->io_co = NULL;
18117bce1c29SKevin Wolf         qemu_coroutine_yield();
18127bce1c29SKevin Wolf     }
18137bce1c29SKevin Wolf     s->drain_co = NULL;
18147bce1c29SKevin Wolf     bdrv_dec_in_flight(bs);
18157bce1c29SKevin Wolf }
18167bce1c29SKevin Wolf 
1817da668aa1SThomas Huth /**
1818da668aa1SThomas Huth  * If .drain_count is 0, wake up .io_co if there is one; and set
1819da668aa1SThomas Huth  * .was_drained.
1820da668aa1SThomas Huth  * Increment .drain_count.
1821da668aa1SThomas Huth  */
18225e8ac217SKevin Wolf static void bdrv_replace_test_drain_begin(BlockDriverState *bs)
1823da668aa1SThomas Huth {
1824da668aa1SThomas Huth     BDRVReplaceTestState *s = bs->opaque;
1825da668aa1SThomas Huth 
182623987471SKevin Wolf     if (!s->setup_completed) {
182723987471SKevin Wolf         return;
182823987471SKevin Wolf     }
182923987471SKevin Wolf 
1830da668aa1SThomas Huth     if (!s->drain_count) {
18317bce1c29SKevin Wolf         s->drain_co = qemu_coroutine_create(bdrv_replace_test_drain_co, bs);
18327bce1c29SKevin Wolf         bdrv_inc_in_flight(bs);
18337bce1c29SKevin Wolf         aio_co_enter(bdrv_get_aio_context(bs), s->drain_co);
1834da668aa1SThomas Huth         s->was_drained = true;
1835da668aa1SThomas Huth     }
1836da668aa1SThomas Huth     s->drain_count++;
1837da668aa1SThomas Huth }
1838da668aa1SThomas Huth 
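/*
 * Coroutine started by .bdrv_drain_end once the node is fully undrained:
 * issue a single read through bdrv_replace_test_co_preadv() and drop the
 * in-flight reference again.
 */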
18397bce1c29SKevin Wolf static void coroutine_fn bdrv_replace_test_read_entry(void *opaque)
18407bce1c29SKevin Wolf {
18417bce1c29SKevin Wolf     BlockDriverState *bs = opaque;
18427bce1c29SKevin Wolf     char data;
18437bce1c29SKevin Wolf     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, &data, 1);
18447bce1c29SKevin Wolf     int ret;
18457bce1c29SKevin Wolf 
18467bce1c29SKevin Wolf     /* Queue a read request post-drain */
1847b9b10c35SKevin Wolf     bdrv_graph_co_rdlock();
18487bce1c29SKevin Wolf     ret = bdrv_replace_test_co_preadv(bs, 0, 1, &qiov, 0);
1849b9b10c35SKevin Wolf     bdrv_graph_co_rdunlock();
1850b9b10c35SKevin Wolf 
18517bce1c29SKevin Wolf     g_assert(ret >= 0);
18527bce1c29SKevin Wolf     bdrv_dec_in_flight(bs);
18537bce1c29SKevin Wolf }
18547bce1c29SKevin Wolf 
1855da668aa1SThomas Huth /**
1856da668aa1SThomas Huth  * Reduce .drain_count, set .was_undrained once it reaches 0.
1857da668aa1SThomas Huth  * If .drain_count reaches 0 and the node has a backing file, issue a
1858da668aa1SThomas Huth  * read request.
1859da668aa1SThomas Huth  */
18605e8ac217SKevin Wolf static void bdrv_replace_test_drain_end(BlockDriverState *bs)
1861da668aa1SThomas Huth {
1862da668aa1SThomas Huth     BDRVReplaceTestState *s = bs->opaque;
1863da668aa1SThomas Huth 
186423987471SKevin Wolf     if (!s->setup_completed) {
186523987471SKevin Wolf         return;
186623987471SKevin Wolf     }
186723987471SKevin Wolf 
1868da668aa1SThomas Huth     g_assert(s->drain_count > 0);
1869da668aa1SThomas Huth     if (!--s->drain_count) {
1870da668aa1SThomas Huth         s->was_undrained = true;
1871da668aa1SThomas Huth 
1872da668aa1SThomas Huth         if (bs->backing) {
18737bce1c29SKevin Wolf             Coroutine *co = qemu_coroutine_create(bdrv_replace_test_read_entry,
18747bce1c29SKevin Wolf                                                   bs);
18757bce1c29SKevin Wolf             bdrv_inc_in_flight(bs);
18767bce1c29SKevin Wolf             aio_co_enter(bdrv_get_aio_context(bs), co);
1877da668aa1SThomas Huth         }
1878da668aa1SThomas Huth     }
1879da668aa1SThomas Huth }
1880da668aa1SThomas Huth 
1881da668aa1SThomas Huth static BlockDriver bdrv_replace_test = {
1882da668aa1SThomas Huth     .format_name            = "replace_test",
1883da668aa1SThomas Huth     .instance_size          = sizeof(BDRVReplaceTestState),
18849ebfc111SVladimir Sementsov-Ogievskiy     .supports_backing       = true,
1885da668aa1SThomas Huth 
1886da668aa1SThomas Huth     .bdrv_close             = bdrv_replace_test_close,
1887da668aa1SThomas Huth     .bdrv_co_preadv         = bdrv_replace_test_co_preadv,
1888da668aa1SThomas Huth 
18895e8ac217SKevin Wolf     .bdrv_drain_begin       = bdrv_replace_test_drain_begin,
18905e8ac217SKevin Wolf     .bdrv_drain_end         = bdrv_replace_test_drain_end,
1891da668aa1SThomas Huth 
1892da668aa1SThomas Huth     .bdrv_child_perm        = bdrv_default_perms,
1893da668aa1SThomas Huth };
1894da668aa1SThomas Huth 
1895da668aa1SThomas Huth static void coroutine_fn test_replace_child_mid_drain_read_co(void *opaque)
1896da668aa1SThomas Huth {
1897da668aa1SThomas Huth     int ret;
1898da668aa1SThomas Huth     char data;
1899da668aa1SThomas Huth 
1900da668aa1SThomas Huth     ret = blk_co_pread(opaque, 0, 1, &data, 0);
1901da668aa1SThomas Huth     g_assert(ret >= 0);
1902da668aa1SThomas Huth }
1903da668aa1SThomas Huth 
1904da668aa1SThomas Huth /**
1905da668aa1SThomas Huth  * We test two things:
1906da668aa1SThomas Huth  * (1) bdrv_replace_child_noperm() must not undrain the parent if both
1907da668aa1SThomas Huth  *     children are drained.
1908da668aa1SThomas Huth  * (2) bdrv_replace_child_noperm() must never flush I/O requests to a
1909da668aa1SThomas Huth  *     drained child.  If the old child is drained, it must flush I/O
1910da668aa1SThomas Huth  *     requests after the new one has been attached.  If the new child
1911da668aa1SThomas Huth  *     is drained, it must flush I/O requests before the old one is
1912da668aa1SThomas Huth  *     detached.
1913da668aa1SThomas Huth  *
1914da668aa1SThomas Huth  * To do so, we create one parent node and two child nodes; then
1915da668aa1SThomas Huth  * attach one of the children (old_child_bs) to the parent, then
1916da668aa1SThomas Huth  * drain both old_child_bs and new_child_bs according to
1917da668aa1SThomas Huth  * old_drain_count and new_drain_count, respectively, and finally
1918da668aa1SThomas Huth  * we invoke bdrv_replace_node() to replace old_child_bs by
1919da668aa1SThomas Huth  * new_child_bs.
1920da668aa1SThomas Huth  *
1921da668aa1SThomas Huth  * The test block driver we use here (bdrv_replace_test) has a read
1922da668aa1SThomas Huth  * function that:
1923da668aa1SThomas Huth  * - For the parent node, can optionally yield, and then forwards the
1924da668aa1SThomas Huth  *   read to bdrv_co_preadv(),
1925da668aa1SThomas Huth  * - For the child node, just returns immediately.
1926da668aa1SThomas Huth  *
1927da668aa1SThomas Huth  * If the read yields, the drain_begin function will wake it up.
1928da668aa1SThomas Huth  *
1929da668aa1SThomas Huth  * The drain_end function issues a read on the parent once it is fully
1930da668aa1SThomas Huth  * undrained (which simulates requests starting to come in again).
1931da668aa1SThomas Huth  */
1932da668aa1SThomas Huth static void do_test_replace_child_mid_drain(int old_drain_count,
1933da668aa1SThomas Huth                                             int new_drain_count)
1934da668aa1SThomas Huth {
1935da668aa1SThomas Huth     BlockBackend *parent_blk;
1936da668aa1SThomas Huth     BlockDriverState *parent_bs;
1937da668aa1SThomas Huth     BlockDriverState *old_child_bs, *new_child_bs;
1938da668aa1SThomas Huth     BDRVReplaceTestState *parent_s;
1939da668aa1SThomas Huth     BDRVReplaceTestState *old_child_s, *new_child_s;
1940da668aa1SThomas Huth     Coroutine *io_co;
1941da668aa1SThomas Huth     int i;
1942da668aa1SThomas Huth 
1943da668aa1SThomas Huth     parent_bs = bdrv_new_open_driver(&bdrv_replace_test, "parent", 0,
1944da668aa1SThomas Huth                                      &error_abort);
1945da668aa1SThomas Huth     parent_s = parent_bs->opaque;
1946da668aa1SThomas Huth 
1947da668aa1SThomas Huth     parent_blk = blk_new(qemu_get_aio_context(),
1948da668aa1SThomas Huth                          BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
1949da668aa1SThomas Huth     blk_insert_bs(parent_blk, parent_bs, &error_abort);
1950da668aa1SThomas Huth 
1951da668aa1SThomas Huth     old_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "old-child", 0,
1952da668aa1SThomas Huth                                         &error_abort);
1953da668aa1SThomas Huth     new_child_bs = bdrv_new_open_driver(&bdrv_replace_test, "new-child", 0,
1954da668aa1SThomas Huth                                         &error_abort);
1955da668aa1SThomas Huth     old_child_s = old_child_bs->opaque;
1956da668aa1SThomas Huth     new_child_s = new_child_bs->opaque;
1957da668aa1SThomas Huth 
1958da668aa1SThomas Huth     /* So that we can read something */
1959da668aa1SThomas Huth     parent_bs->total_sectors = 1;
1960da668aa1SThomas Huth     old_child_bs->total_sectors = 1;
1961da668aa1SThomas Huth     new_child_bs->total_sectors = 1;
1962da668aa1SThomas Huth 
1963da668aa1SThomas Huth     bdrv_ref(old_child_bs);
1964afdaeb9eSKevin Wolf     bdrv_graph_wrlock(NULL);
19655bb04747SVladimir Sementsov-Ogievskiy     bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds,
19665bb04747SVladimir Sementsov-Ogievskiy                       BDRV_CHILD_COW, &error_abort);
1967afdaeb9eSKevin Wolf     bdrv_graph_wrunlock();
196823987471SKevin Wolf     parent_s->setup_completed = true;
1969da668aa1SThomas Huth 
1970da668aa1SThomas Huth     for (i = 0; i < old_drain_count; i++) {
1971da668aa1SThomas Huth         bdrv_drained_begin(old_child_bs);
1972da668aa1SThomas Huth     }
1973da668aa1SThomas Huth     for (i = 0; i < new_drain_count; i++) {
1974da668aa1SThomas Huth         bdrv_drained_begin(new_child_bs);
1975da668aa1SThomas Huth     }
1976da668aa1SThomas Huth 
1977da668aa1SThomas Huth     if (!old_drain_count) {
1978da668aa1SThomas Huth         /*
1979da668aa1SThomas Huth          * Start a read operation that will yield, so it will not
1980da668aa1SThomas Huth          * complete before the node is drained.
1981da668aa1SThomas Huth          */
1982da668aa1SThomas Huth         parent_s->yield_before_read = true;
1983da668aa1SThomas Huth         io_co = qemu_coroutine_create(test_replace_child_mid_drain_read_co,
1984da668aa1SThomas Huth                                       parent_blk);
1985da668aa1SThomas Huth         qemu_coroutine_enter(io_co);
1986da668aa1SThomas Huth     }
1987da668aa1SThomas Huth 
1988da668aa1SThomas Huth     /* If we have started a read operation, it should have yielded */
1989da668aa1SThomas Huth     g_assert(!parent_s->has_read);
1990da668aa1SThomas Huth 
1991da668aa1SThomas Huth     /* Reset drained status so we can see what bdrv_replace_node() does */
1992da668aa1SThomas Huth     parent_s->was_drained = false;
1993da668aa1SThomas Huth     parent_s->was_undrained = false;
1994da668aa1SThomas Huth 
1995da668aa1SThomas Huth     g_assert(parent_bs->quiesce_counter == old_drain_count);
1996da668aa1SThomas Huth     bdrv_replace_node(old_child_bs, new_child_bs, &error_abort);
1997da668aa1SThomas Huth     g_assert(parent_bs->quiesce_counter == new_drain_count);
1998da668aa1SThomas Huth 
1999da668aa1SThomas Huth     if (!old_drain_count && !new_drain_count) {
2000da668aa1SThomas Huth         /*
2001da668aa1SThomas Huth          * From undrained to undrained drains and undrains the parent,
2002da668aa1SThomas Huth          * because bdrv_replace_node() contains a drained section for
2003da668aa1SThomas Huth          * @old_child_bs.
2004da668aa1SThomas Huth          */
2005da668aa1SThomas Huth         g_assert(parent_s->was_drained && parent_s->was_undrained);
2006da668aa1SThomas Huth     } else if (!old_drain_count && new_drain_count) {
2007da668aa1SThomas Huth         /*
2008da668aa1SThomas Huth          * From undrained to drained should drain the parent and keep
2009da668aa1SThomas Huth          * it that way.
2010da668aa1SThomas Huth          */
2011da668aa1SThomas Huth         g_assert(parent_s->was_drained && !parent_s->was_undrained);
2012da668aa1SThomas Huth     } else if (old_drain_count && !new_drain_count) {
2013da668aa1SThomas Huth         /*
2014da668aa1SThomas Huth          * From drained to undrained should undrain the parent and
2015da668aa1SThomas Huth          * keep it that way.
2016da668aa1SThomas Huth          */
2017da668aa1SThomas Huth         g_assert(!parent_s->was_drained && parent_s->was_undrained);
2018da668aa1SThomas Huth     } else /* if (old_drain_count && new_drain_count) */ {
2019da668aa1SThomas Huth         /*
2020da668aa1SThomas Huth          * From drained to drained must not undrain the parent at any
2021da668aa1SThomas Huth          * point
2022da668aa1SThomas Huth          */
2023da668aa1SThomas Huth         g_assert(!parent_s->was_drained && !parent_s->was_undrained);
2024da668aa1SThomas Huth     }
2025da668aa1SThomas Huth 
2026da668aa1SThomas Huth     if (!old_drain_count || !new_drain_count) {
2027da668aa1SThomas Huth         /*
2028da668aa1SThomas Huth          * If !old_drain_count, we have started a read request before
2029da668aa1SThomas Huth          * bdrv_replace_node().  If !new_drain_count, the parent must
2030da668aa1SThomas Huth          * have been undrained at some point, and
2031da668aa1SThomas Huth          * bdrv_replace_test_drain_end() starts a read request
2032da668aa1SThomas Huth          * then.
2033da668aa1SThomas Huth          */
2034da668aa1SThomas Huth         g_assert(parent_s->has_read);
2035da668aa1SThomas Huth     } else {
2036da668aa1SThomas Huth         /*
2037da668aa1SThomas Huth          * If the parent was never undrained, there is no way to start
2038da668aa1SThomas Huth          * a read request.
2039da668aa1SThomas Huth          */
2040da668aa1SThomas Huth         g_assert(!parent_s->has_read);
2041da668aa1SThomas Huth     }
2042da668aa1SThomas Huth 
2043da668aa1SThomas Huth     /* A drained child must not have received any requests */
2044da668aa1SThomas Huth     g_assert(!(old_drain_count && old_child_s->has_read));
2045da668aa1SThomas Huth     g_assert(!(new_drain_count && new_child_s->has_read));
2046da668aa1SThomas Huth 
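    /*
     * End the drained sections on the two children again; the matching
     * bdrv_drained_begin() calls are presumably made near the start of
     * this test, where old_drain_count and new_drain_count are applied.
     */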
2047da668aa1SThomas Huth     for (i = 0; i < new_drain_count; i++) {
2048da668aa1SThomas Huth         bdrv_drained_end(new_child_bs);
2049da668aa1SThomas Huth     }
2050da668aa1SThomas Huth     for (i = 0; i < old_drain_count; i++) {
2051da668aa1SThomas Huth         bdrv_drained_end(old_child_bs);
2052da668aa1SThomas Huth     }
2053da668aa1SThomas Huth 
2054da668aa1SThomas Huth     /*
2055da668aa1SThomas Huth      * By now, bdrv_replace_test_co_drain_end() must have been called
2056da668aa1SThomas Huth      * at some point while the new child was attached to the parent.
2057da668aa1SThomas Huth      */
2058da668aa1SThomas Huth     g_assert(parent_s->has_read);
2059da668aa1SThomas Huth     g_assert(new_child_s->has_read);
2060da668aa1SThomas Huth 
2061da668aa1SThomas Huth     blk_unref(parent_blk);
2062da668aa1SThomas Huth     bdrv_unref(parent_bs);
2063da668aa1SThomas Huth     bdrv_unref(old_child_bs);
2064da668aa1SThomas Huth     bdrv_unref(new_child_bs);
2065da668aa1SThomas Huth }
2066da668aa1SThomas Huth 
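/*
 * Exercise bdrv_replace_node() for every combination of the old and the
 * new child being drained (not at all, or once) around the replacement.
 */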
2067da668aa1SThomas Huth static void test_replace_child_mid_drain(void)
2068da668aa1SThomas Huth {
2069da668aa1SThomas Huth     int old_drain_count, new_drain_count;
2070da668aa1SThomas Huth 
2071da668aa1SThomas Huth     for (old_drain_count = 0; old_drain_count < 2; old_drain_count++) {
2072da668aa1SThomas Huth         for (new_drain_count = 0; new_drain_count < 2; new_drain_count++) {
2073da668aa1SThomas Huth             do_test_replace_child_mid_drain(old_drain_count, new_drain_count);
2074da668aa1SThomas Huth         }
2075da668aa1SThomas Huth     }
2076da668aa1SThomas Huth }
2077da668aa1SThomas Huth 
2078da668aa1SThomas Huth int main(int argc, char **argv)
2079da668aa1SThomas Huth {
2080da668aa1SThomas Huth     int ret;
2081da668aa1SThomas Huth 
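    /*
     * The block layer and a main loop AioContext have to be initialized
     * before any of the test cases below can create nodes or poll.
     */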
2082da668aa1SThomas Huth     bdrv_init();
2083da668aa1SThomas Huth     qemu_init_main_loop(&error_abort);
2084da668aa1SThomas Huth 
2085da668aa1SThomas Huth     g_test_init(&argc, &argv, NULL);
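    /*
     * done_event is presumably used by the tests that run work in an
     * iothread to signal completion back to the main thread (an inference
     * from the test names below, not visible at this point in the file).
     */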
2086da668aa1SThomas Huth     qemu_event_init(&done_event, false);
2087da668aa1SThomas Huth 
2088da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/driver-cb/drain_all", test_drv_cb_drain_all);
2089da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/driver-cb/drain", test_drv_cb_drain);
2090da668aa1SThomas Huth 
2091da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/driver-cb/co/drain_all",
2092da668aa1SThomas Huth                     test_drv_cb_co_drain_all);
2093da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/driver-cb/co/drain", test_drv_cb_co_drain);
2094da668aa1SThomas Huth 
2095da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/quiesce/drain_all", test_quiesce_drain_all);
2096da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/quiesce/drain", test_quiesce_drain);
2097da668aa1SThomas Huth 
2098da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/quiesce/co/drain_all",
2099da668aa1SThomas Huth                     test_quiesce_co_drain_all);
2100da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/quiesce/co/drain", test_quiesce_co_drain);
2101da668aa1SThomas Huth 
2102da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/nested", test_nested);
2103da668aa1SThomas Huth 
2104da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/graph-change/drain_all",
2105da668aa1SThomas Huth                     test_graph_change_drain_all);
2106da668aa1SThomas Huth 
2107da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/iothread/drain_all", test_iothread_drain_all);
2108da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/iothread/drain", test_iothread_drain);
2109da668aa1SThomas Huth 
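    /* Block jobs combined with drain, including error and iothread cases */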
2110da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all);
2111da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain);
2112da668aa1SThomas Huth 
2113da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/error/drain_all",
2114da668aa1SThomas Huth                     test_blockjob_error_drain_all);
2115da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/error/drain",
2116da668aa1SThomas Huth                     test_blockjob_error_drain);
2117da668aa1SThomas Huth 
2118da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/iothread/drain_all",
2119da668aa1SThomas Huth                     test_blockjob_iothread_drain_all);
2120da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/iothread/drain",
2121da668aa1SThomas Huth                     test_blockjob_iothread_drain);
2122da668aa1SThomas Huth 
2123da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain_all",
2124da668aa1SThomas Huth                     test_blockjob_iothread_error_drain_all);
2125da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/iothread/error/drain",
2126da668aa1SThomas Huth                     test_blockjob_iothread_error_drain);
2127da668aa1SThomas Huth 
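    /* Deleting and detaching nodes in connection with drain */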
2128da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain);
2129da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/detach/drain_all", test_detach_by_drain_all);
2130da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain);
2131da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/detach/parent_cb", test_detach_by_parent_cb);
2132da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/detach/driver_cb", test_detach_by_driver_cb);
2133da668aa1SThomas Huth 
2134da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/attach/drain", test_append_to_drained);
2135da668aa1SThomas Huth 
2136da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/set_aio_context", test_set_aio_context);
2137da668aa1SThomas Huth 
2138da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/blockjob/commit_by_drained_end",
2139da668aa1SThomas Huth                     test_blockjob_commit_by_drained_end);
2140da668aa1SThomas Huth 
2141da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/bdrv_drop_intermediate/poll",
2142da668aa1SThomas Huth                     test_drop_intermediate_poll);
2143da668aa1SThomas Huth 
2144da668aa1SThomas Huth     g_test_add_func("/bdrv-drain/replace_child/mid-drain",
2145da668aa1SThomas Huth                     test_replace_child_mid_drain);
2146da668aa1SThomas Huth 
2147da668aa1SThomas Huth     ret = g_test_run();
2148da668aa1SThomas Huth     qemu_event_destroy(&done_event);
2149da668aa1SThomas Huth     return ret;
2150da668aa1SThomas Huth }